// LLVM does not support some atomic RMW operations on pointers, so inside codegen we lower those
// to integer atomics, surrounded by casts to and from integer type.
// This test ensures that we do the round-trip correctly for AtomicPtr::fetch_byte_add, and also
// ensures that we do not have such a round-trip for AtomicPtr::swap, because LLVM supports pointer
// arguments to `atomicrmw xchg`.

//@ compile-flags: -O -Cno-prepopulate-passes
#![crate_type = "lib"]

#![feature(strict_provenance)]
#![feature(strict_provenance_atomic_ptr)]

use std::sync::atomic::AtomicPtr;
use std::sync::atomic::Ordering::Relaxed;
use std::ptr::without_provenance_mut;
// Portability hack so that we can say [[USIZE]] instead of i64/i32/i16 for usize.
// FileCheck captures the target's pointer-sized integer type from this signature into the
// [[USIZE]] substitution variable, which the later test functions reuse.
// CHECK: @helper([[USIZE:i[0-9]+]] noundef %_1)
#[no_mangle]
pub fn helper(_: usize) {}
| 22 | +// CHECK-LABEL: @atomicptr_fetch_byte_add |
| 23 | +#[no_mangle] |
| 24 | +pub fn atomicptr_fetch_byte_add(a: &AtomicPtr<u8>, v: usize) -> *mut u8 { |
| 25 | + // CHECK: %[[INTPTR:.*]] = ptrtoint ptr %{{.*}} to [[USIZE]] |
| 26 | + // CHECK-NEXT: %[[RET:.*]] = atomicrmw add ptr %{{.*}}, [[USIZE]] %[[INTPTR]] |
| 27 | + // CHECK-NEXT: inttoptr [[USIZE]] %[[RET]] to ptr |
| 28 | + a.fetch_byte_add(v, Relaxed) |
| 29 | +} |
// LLVM's `atomicrmw xchg` accepts pointer operands directly, so swap must NOT go through the
// ptrtoint/inttoptr round-trip — the CHECK-NOT lines guard against a regression there.
// NOTE(review): ptr2/ptr3 are unused here; presumably reserved for compare_exchange coverage
// added elsewhere — kept to preserve the function's interface. TODO confirm against full file.
// CHECK-LABEL: @atomicptr_swap
#[no_mangle]
pub fn atomicptr_swap(a: &AtomicPtr<u8>, ptr1: *mut u8, ptr2: *mut u8, ptr3: *mut u8) -> *mut u8 {
    // CHECK-NOT: ptrtoint
    // CHECK: atomicrmw xchg ptr %{{.*}}, ptr %{{.*}} monotonic
    // CHECK-NOT: inttoptr
    a.swap(ptr1, Relaxed)
}