Skip to content

Commit a864da7

Browse files
committed
Use asm-based atomic load/store on thumbv6m
1 parent 8a6daef commit a864da7

File tree

8 files changed

+346
-5
lines changed

8 files changed

+346
-5
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ Portable atomic types including support for 128-bit atomics, atomic float, etc.
1313
- Provide `AtomicI128` and `AtomicU128`.
1414
- Provide `AtomicF32` and `AtomicF64`. (optional)
1515
<!-- - Provide generic `Atomic<T>` type. (optional) -->
16-
- Provide atomic load/store for targets where atomic is not available at all in the standard library. (riscv without A-extension, msp430, avr)
16+
- Provide atomic load/store for targets where atomic is not available at all in the standard library. (thumbv6m, riscv without A-extension, msp430, avr)
1717
- Provide atomic CAS for targets where atomic CAS is not available in the standard library. (thumbv6m, riscv without A-extension, msp430, avr) (optional, [single-core only](#optional-cfg))
1818

1919
## 128-bit atomics support

no_atomic.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -69,4 +69,5 @@ const NO_ATOMIC: &[&str] = &[
6969
"riscv32i-unknown-none-elf",
7070
"riscv32im-unknown-none-elf",
7171
"riscv32imc-unknown-none-elf",
72+
"thumbv6m-none-eabi",
7273
];

specs/thumbv6m-none-eabi.json

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
{
2+
"abi": "eabi",
3+
"arch": "arm",
4+
"atomic-cas": false,
5+
"max-atomic-width": 0,
6+
"c-enum-min-bits": 8,
7+
"data-layout": "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64",
8+
"emit-debug-gdb-scripts": false,
9+
"features": "+strict-align",
10+
"frame-pointer": "always",
11+
"is-builtin": false,
12+
"linker": "rust-lld",
13+
"linker-flavor": "ld.lld",
14+
"llvm-target": "thumbv6m-none-eabi",
15+
"panic-strategy": "abort",
16+
"relocation-model": "static",
17+
"target-pointer-width": "32"
18+
}

src/imp/arm.rs

Lines changed: 288 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,288 @@
1+
// Atomic load/store implementation on ARMv6-M.
2+
//
3+
// Refs:
4+
// - atomic-maybe-uninit https://github.com/taiki-e/atomic-maybe-uninit
5+
//
6+
// Generated asm: https://godbolt.org/z/hx3a6j9vv
7+
8+
#[cfg(not(portable_atomic_no_asm))]
9+
use core::arch::asm;
10+
use core::{cell::UnsafeCell, sync::atomic::Ordering};
11+
12+
use crate::utils::{assert_load_ordering, assert_store_ordering};
13+
14+
// Only a full system barrier exists in the M-class architectures.
// (ARMv6-M has no acquire/release load/store instructions and no
// inner-shareable barrier variants, so every ordering stronger than
// Relaxed is implemented with a full-system "dmb sy".)
macro_rules! dmb {
    () => {
        "dmb sy"
    };
}
20+
21+
#[repr(transparent)]
22+
pub(crate) struct AtomicBool {
23+
v: UnsafeCell<u8>,
24+
}
25+
26+
// Send is implicitly implemented.
27+
// SAFETY: any data races are prevented by atomic operations.
28+
unsafe impl Sync for AtomicBool {}
29+
30+
impl AtomicBool {
31+
#[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
32+
#[inline]
33+
pub(crate) const fn new(v: bool) -> Self {
34+
Self { v: UnsafeCell::new(v as u8) }
35+
}
36+
37+
#[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
38+
#[inline]
39+
pub(crate) fn is_lock_free() -> bool {
40+
Self::is_always_lock_free()
41+
}
42+
#[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
43+
#[inline]
44+
pub(crate) const fn is_always_lock_free() -> bool {
45+
true
46+
}
47+
48+
#[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
49+
#[inline]
50+
pub(crate) fn get_mut(&mut self) -> &mut bool {
51+
// SAFETY: the mutable reference guarantees unique ownership.
52+
unsafe { &mut *(self.v.get() as *mut bool) }
53+
}
54+
55+
#[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
56+
#[inline]
57+
pub(crate) fn into_inner(self) -> bool {
58+
self.v.into_inner() != 0
59+
}
60+
61+
#[inline]
62+
pub(crate) fn load(&self, order: Ordering) -> bool {
63+
assert_load_ordering(order);
64+
// SAFETY: any data races are prevented by atomic intrinsics and the raw
65+
// pointer passed in is valid because we got it from a reference.
66+
unsafe { u8::atomic_load(self.v.get(), order) != 0 }
67+
}
68+
69+
#[inline]
70+
pub(crate) fn store(&self, val: bool, order: Ordering) {
71+
assert_store_ordering(order);
72+
// SAFETY: any data races are prevented by atomic intrinsics and the raw
73+
// pointer passed in is valid because we got it from a reference.
74+
unsafe {
75+
u8::atomic_store(self.v.get(), val as u8, order);
76+
}
77+
}
78+
}
79+
80+
#[repr(transparent)]
81+
pub(crate) struct AtomicPtr<T> {
82+
p: UnsafeCell<*mut T>,
83+
}
84+
85+
// SAFETY: any data races are prevented by atomic operations.
86+
unsafe impl<T> Send for AtomicPtr<T> {}
87+
// SAFETY: any data races are prevented by atomic operations.
88+
unsafe impl<T> Sync for AtomicPtr<T> {}
89+
90+
impl<T> AtomicPtr<T> {
91+
#[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
92+
#[inline]
93+
pub(crate) const fn new(p: *mut T) -> Self {
94+
Self { p: UnsafeCell::new(p) }
95+
}
96+
97+
#[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
98+
#[inline]
99+
pub(crate) fn is_lock_free() -> bool {
100+
Self::is_always_lock_free()
101+
}
102+
#[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
103+
#[inline]
104+
pub(crate) const fn is_always_lock_free() -> bool {
105+
true
106+
}
107+
108+
#[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
109+
#[inline]
110+
pub(crate) fn get_mut(&mut self) -> &mut *mut T {
111+
self.p.get_mut()
112+
}
113+
114+
#[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
115+
#[inline]
116+
pub(crate) fn into_inner(self) -> *mut T {
117+
self.p.into_inner()
118+
}
119+
120+
#[inline]
121+
pub(crate) fn load(&self, order: Ordering) -> *mut T {
122+
assert_load_ordering(order);
123+
// SAFETY: any data races are prevented by atomic intrinsics and the raw
124+
// pointer passed in is valid because we got it from a reference.
125+
// TODO: remove int to ptr cast
126+
unsafe { usize::atomic_load(self.p.get() as *mut usize, order) as *mut T }
127+
}
128+
129+
#[inline]
130+
pub(crate) fn store(&self, ptr: *mut T, order: Ordering) {
131+
assert_store_ordering(order);
132+
// SAFETY: any data races are prevented by atomic intrinsics and the raw
133+
// pointer passed in is valid because we got it from a reference.
134+
// TODO: remove int to ptr cast
135+
unsafe {
136+
usize::atomic_store(self.p.get() as *mut usize, ptr as usize, order);
137+
}
138+
}
139+
}
140+
141+
// Generates an asm-backed atomic integer type.
//
// - `$int_type`: the primitive integer stored in the atomic.
// - `$atomic_type`: the name of the generated wrapper type.
// - `$asm_suffix`: the ldr/str width suffix — "b" (byte), "h" (halfword),
//   or "" (word).
macro_rules! atomic_int {
    ($int_type:ident, $atomic_type:ident, $asm_suffix:expr) => {
        #[repr(transparent)]
        pub(crate) struct $atomic_type {
            v: UnsafeCell<$int_type>,
        }

        // Send is implicitly implemented.
        // SAFETY: any data races are prevented by atomic operations.
        unsafe impl Sync for $atomic_type {}

        impl $atomic_type {
            // Creates a new atomic integer holding `v`.
            #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
            #[inline]
            pub(crate) const fn new(v: $int_type) -> Self {
                Self { v: UnsafeCell::new(v) }
            }

            // Always lock-free: plain load/store plus barriers, no lock needed.
            #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
            #[inline]
            pub(crate) fn is_lock_free() -> bool {
                Self::is_always_lock_free()
            }
            #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
            #[inline]
            pub(crate) const fn is_always_lock_free() -> bool {
                true
            }

            // Returns a mutable reference to the underlying integer
            // (`&mut self` guarantees exclusive access, so no atomics needed).
            #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
            #[inline]
            pub(crate) fn get_mut(&mut self) -> &mut $int_type {
                self.v.get_mut()
            }

            // Consumes the atomic and returns the contained value.
            #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
            #[inline]
            pub(crate) fn into_inner(self) -> $int_type {
                self.v.into_inner()
            }

            // Atomically loads the value. `assert_load_ordering` rejects
            // store-only orderings before the raw load is issued.
            #[inline]
            pub(crate) fn load(&self, order: Ordering) -> $int_type {
                assert_load_ordering(order);
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { $int_type::atomic_load(self.v.get(), order) }
            }

            // Atomically stores `val`. `assert_store_ordering` rejects
            // load-only orderings before the raw store is issued.
            #[inline]
            pub(crate) fn store(&self, val: $int_type, order: Ordering) {
                assert_store_ordering(order);
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe {
                    $int_type::atomic_store(self.v.get(), val, order);
                }
            }
        }

        impl AtomicLoadStore for $int_type {
            #[inline]
            unsafe fn atomic_load(src: *const Self, order: Ordering) -> Self {
                // SAFETY: the caller must uphold the safety contract for `atomic_load`.
                unsafe {
                    let out;
                    match order {
                        // Relaxed: a bare load, no barrier. Assumes aligned
                        // accesses up to word size are single-copy atomic on
                        // this target — see the refs at the top of the file.
                        Ordering::Relaxed => {
                            asm!(
                                concat!("ldr", $asm_suffix, " {out}, [{src}]"),
                                src = in(reg) src,
                                out = lateout(reg) out,
                                options(nostack, readonly),
                            );
                        }
                        // Acquire and SeqCst loads are equivalent: load then a
                        // full barrier, so later accesses cannot be reordered
                        // before the load.
                        Ordering::Acquire | Ordering::SeqCst => {
                            asm!(
                                concat!("ldr", $asm_suffix, " {out}, [{src}]"),
                                dmb!(),
                                src = in(reg) src,
                                out = lateout(reg) out,
                                options(nostack),
                            );
                        }
                        // The public `load` validates `order` first, so the
                        // remaining orderings are unreachable here.
                        _ => unreachable!("{:?}", order),
                    }
                    out
                }
            }

            #[inline]
            unsafe fn atomic_store(dst: *mut Self, val: Self, order: Ordering) {
                // SAFETY: the caller must uphold the safety contract for `atomic_store`.
                unsafe {
                    // Emits: `$release` barrier, then the store, then the
                    // `$acquire` barrier (either may be the empty string).
                    macro_rules! atomic_store {
                        ($acquire:expr, $release:expr) => {
                            asm!(
                                $release,
                                concat!("str", $asm_suffix, " {val}, [{dst}]"),
                                $acquire,
                                dst = in(reg) dst,
                                val = in(reg) val,
                                options(nostack),
                            )
                        };
                    }
                    match order {
                        Ordering::Relaxed => atomic_store!("", ""),
                        // Release: barrier before the store, so earlier
                        // accesses cannot be reordered after it.
                        Ordering::Release => atomic_store!("", dmb!()),
                        // SeqCst: full barriers on both sides of the store.
                        Ordering::SeqCst => atomic_store!(dmb!(), dmb!()),
                        // The public `store` validates `order` first, so the
                        // remaining orderings are unreachable here.
                        _ => unreachable!("{:?}", order),
                    }
                }
            }
        }
    }
}

// One atomic type per access width available on ARMv6-M: "b" = byte,
// "h" = halfword, "" = word. isize/usize use the word form (the target
// spec declares a 32-bit pointer width).
atomic_int!(i8, AtomicI8, "b");
atomic_int!(u8, AtomicU8, "b");
atomic_int!(i16, AtomicI16, "h");
atomic_int!(u16, AtomicU16, "h");
atomic_int!(i32, AtomicI32, "");
atomic_int!(u32, AtomicU32, "");
atomic_int!(isize, AtomicIsize, "");
atomic_int!(usize, AtomicUsize, "");
268+
269+
/// Raw asm-based atomic load/store, implemented for each integer type by
/// `atomic_int!` above.
trait AtomicLoadStore: Sized {
    /// Atomically loads the value at `src`.
    ///
    /// # Safety
    ///
    /// `src` must be valid for reads and properly aligned, and `order`
    /// must be `Relaxed`, `Acquire`, or `SeqCst` (other orderings hit
    /// `unreachable!` in the implementation).
    unsafe fn atomic_load(src: *const Self, order: Ordering) -> Self;
    /// Atomically stores `val` to `dst`.
    ///
    /// # Safety
    ///
    /// `dst` must be valid for writes and properly aligned, and `order`
    /// must be `Relaxed`, `Release`, or `SeqCst` (other orderings hit
    /// `unreachable!` in the implementation).
    unsafe fn atomic_store(dst: *mut Self, val: Self, order: Ordering);
}
273+
274+
#[cfg(test)]
mod tests {
    use super::*;

    // Exercise load/store for every atomic type this module provides.
    // The `test_atomic_*` macros are defined in the crate's shared test
    // utilities — presumably they cover all valid orderings; confirm there.
    test_atomic_bool_load_store!();
    test_atomic_ptr_load_store!();
    test_atomic_int_load_store!(i8);
    test_atomic_int_load_store!(u8);
    test_atomic_int_load_store!(i16);
    test_atomic_int_load_store!(u16);
    test_atomic_int_load_store!(i32);
    test_atomic_int_load_store!(u32);
    test_atomic_int_load_store!(isize);
    test_atomic_int_load_store!(usize);
}

src/imp/interrupt/mod.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,12 +16,12 @@
1616
// CAS together with atomic load/store. The load/store will not be
1717
// called while interrupts are disabled, and since the load/store is
1818
// atomic, it is not affected by interrupts even if interrupts are enabled.
19+
#[cfg(portable_atomic_armv6m)]
20+
use super::arm as atomic;
1921
#[cfg(target_arch = "msp430")]
2022
use super::msp430 as atomic;
2123
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
2224
use super::riscv as atomic;
23-
#[cfg(target_arch = "arm")]
24-
use core::sync::atomic;
2525

2626
#[cfg_attr(portable_atomic_armv6m, path = "armv6m.rs")]
2727
#[cfg_attr(target_arch = "avr", path = "avr.rs")]

src/imp/mod.rs

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,14 @@ mod s390x;
3939
#[cfg(target_arch = "msp430")]
4040
mod msp430;
4141

42+
#[cfg(any(not(portable_atomic_no_asm), portable_atomic_nightly))]
43+
#[cfg(portable_atomic_armv6m)]
44+
mod arm;
45+
#[cfg(not(any(not(portable_atomic_no_asm), portable_atomic_nightly)))]
46+
#[cfg(portable_atomic_armv6m)]
47+
#[path = "core_atomic.rs"]
48+
mod arm;
49+
4250
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(any(test, portable_atomic_no_atomic_cas)))]
4351
#[cfg_attr(
4452
not(portable_atomic_no_cfg_target_has_atomic),
@@ -124,11 +132,19 @@ mod interrupt;
124132
pub(crate) use self::core_atomic::{
125133
AtomicBool, AtomicI16, AtomicI8, AtomicIsize, AtomicPtr, AtomicU16, AtomicU8, AtomicUsize,
126134
};
135+
// armv6m
136+
#[cfg(not(portable_atomic_unsafe_assume_single_core))]
137+
#[cfg(portable_atomic_armv6m)]
138+
pub(crate) use self::arm::{
139+
AtomicBool, AtomicI16, AtomicI8, AtomicIsize, AtomicPtr, AtomicU16, AtomicU8, AtomicUsize,
140+
};
141+
// msp430
127142
#[cfg(not(portable_atomic_unsafe_assume_single_core))]
128143
#[cfg(target_arch = "msp430")]
129144
pub(crate) use self::msp430::{
130145
AtomicBool, AtomicI16, AtomicI8, AtomicIsize, AtomicPtr, AtomicU16, AtomicU8, AtomicUsize,
131146
};
147+
// riscv32 without A-extension
132148
#[cfg(not(portable_atomic_unsafe_assume_single_core))]
133149
#[cfg(target_arch = "riscv32")]
134150
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(portable_atomic_no_atomic_cas))]
@@ -173,6 +189,11 @@ pub(crate) use self::interrupt::{
173189
))
174190
)]
175191
pub(crate) use self::core_atomic::{AtomicI32, AtomicU32};
192+
// armv6m
193+
#[cfg(not(portable_atomic_unsafe_assume_single_core))]
194+
#[cfg(portable_atomic_armv6m)]
195+
pub(crate) use self::arm::{AtomicI32, AtomicU32};
196+
// riscv32 without A-extension
176197
#[cfg(not(portable_atomic_unsafe_assume_single_core))]
177198
#[cfg(target_arch = "riscv32")]
178199
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(portable_atomic_no_atomic_cas))]

src/lib.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ Portable atomic types including support for 128-bit atomics, atomic float, etc.
55
- Provide `AtomicI128` and `AtomicU128`.
66
- Provide `AtomicF32` and `AtomicF64`. (optional)
77
<!-- - Provide generic `Atomic<T>` type. (optional) -->
8-
- Provide atomic load/store for targets where atomic is not available at all in the standard library. (riscv without A-extension, msp430, avr)
8+
- Provide atomic load/store for targets where atomic is not available at all in the standard library. (thumbv6m, riscv without A-extension, msp430, avr)
99
- Provide atomic CAS for targets where atomic CAS is not available in the standard library. (thumbv6m, riscv without A-extension, msp430, avr) (optional, [single-core only](#optional-cfg))
1010
1111
## 128-bit atomics support

0 commit comments

Comments (0)