Commit 5d2a710

[ADT] Introduce LazyAtomicPointer
LazyAtomicPointer is a lock-free atomic pointer that can coordinate concurrent writes using a lazy generator.

Reviewed By: benlangmuir

Differential Revision: https://reviews.llvm.org/D133714
1 parent d07c3cf commit 5d2a710
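A minimal usage sketch of the new class (hypothetical caller code; Node, createNode(), and fillSlotConcurrently() are illustrative and not part of the commit): several threads race to fill the same slot, and loadOrGenerate() guarantees that only one of them runs the generator while the rest receive its result.

#include "llvm/ADT/LazyAtomicPointer.h"
#include "llvm/Support/ThreadPool.h"

struct Node {
  int Data;
};

// Hypothetical generator: expensive to build, so it should run only once.
static Node *createNode() { return new Node{42}; }

void fillSlotConcurrently() {
  llvm::LazyAtomicPointer<Node> Slot;
  llvm::ThreadPool Pool;
  for (unsigned I = 0; I < 8; ++I)
    Pool.async([&] {
      // Every caller gets a reference to the same Node; createNode() is
      // invoked by exactly one thread, and the others wait for its result.
      Node &N = Slot.loadOrGenerate([] { return createNode(); });
      (void)N.Data;
    });
  Pool.wait();
  // Note: the sketch never frees the Node; ownership stays with the caller.
}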

File tree

3 files changed: +249 -0 lines changed

llvm/include/llvm/ADT/LazyAtomicPointer.h

Lines changed: 166 additions & 0 deletions
@@ -0,0 +1,166 @@
//===- LazyAtomicPointer.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_LAZYATOMICPOINTER_H
#define LLVM_ADT_LAZYATOMICPOINTER_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Support/Compiler.h"
#include <assert.h>
#include <atomic>

namespace llvm {

/// Atomic pointer that's lock-free, but that can coordinate concurrent writes
/// from a lazy generator. Should be reserved for cases where concurrent uses
/// of a generator for the same storage are unlikely.
///
/// The laziness comes in with \a loadOrGenerate(), which lazily calls the
/// provided generator ONLY when the value is currently \c nullptr. With
/// concurrent calls, only one generator is called and the rest see that value.
///
/// Most other APIs treat an in-flight \a loadOrGenerate() as if \c nullptr
/// were stored. APIs that are required to write a value will spin.
///
/// The underlying storage is \a std::atomic<uintptr_t>.
///
/// TODO: In C++20, use std::atomic<T>::wait() instead of spinning and call
/// std::atomic<T>::notify_all() in \a loadOrGenerate().
template <class T> class LazyAtomicPointer {
  static constexpr uintptr_t getNull() { return 0; }
  static constexpr uintptr_t getBusy() { return -1ULL; }

  static T *makePointer(uintptr_t Value) {
    assert(Value != getBusy());
    return Value ? reinterpret_cast<T *>(Value) : nullptr;
  }
  static uintptr_t makeRaw(T *Value) {
    uintptr_t Raw = Value ? reinterpret_cast<uintptr_t>(Value) : getNull();
    assert(Raw != getBusy());
    return Raw;
  }

public:
  /// Store a value. Waits for concurrent \a loadOrGenerate() calls.
  void store(T *Value) { return (void)exchange(Value); }

  /// Set a value. Return the old value. Waits for concurrent \a
  /// loadOrGenerate() calls.
  T *exchange(T *Value) {
    // Note: the call to compare_exchange_weak() fails "spuriously" if the
    // current value is \a getBusy(), causing the loop to spin.
    T *Old = nullptr;
    while (!compare_exchange_weak(Old, Value)) {
    }
    return Old;
  }

  /// Compare-exchange. Returns \c false if there is a concurrent \a
  /// loadOrGenerate() call, setting \p ExistingValue to \c nullptr.
  bool compare_exchange_weak(T *&ExistingValue, T *NewValue) {
    uintptr_t RawExistingValue = makeRaw(ExistingValue);
    if (Storage.compare_exchange_weak(RawExistingValue, makeRaw(NewValue)))
      return true;

    // Report the existing value as nullptr if busy.
    if (RawExistingValue == getBusy())
      ExistingValue = nullptr;
    else
      ExistingValue = makePointer(RawExistingValue);
    return false;
  }

  /// Compare-exchange. Keeps trying if there is a concurrent
  /// \a loadOrGenerate() call.
  bool compare_exchange_strong(T *&ExistingValue, T *NewValue) {
    uintptr_t RawExistingValue = makeRaw(ExistingValue);
    const uintptr_t OriginalRawExistingValue = RawExistingValue;
    if (Storage.compare_exchange_strong(RawExistingValue, makeRaw(NewValue)))
      return true;

    // Keep trying as long as it's busy.
    if (LLVM_UNLIKELY(RawExistingValue == getBusy())) {
      while (RawExistingValue == getBusy()) {
        RawExistingValue = OriginalRawExistingValue;
        if (Storage.compare_exchange_weak(RawExistingValue, makeRaw(NewValue)))
          return true;
      }
    }
    ExistingValue = makePointer(RawExistingValue);
    return false;
  }

  /// Return the current stored value, or \c nullptr if there is a concurrent
  /// \a loadOrGenerate() in flight.
  T *load() const {
    uintptr_t RawValue = Storage.load();
    return RawValue == getBusy() ? nullptr : makePointer(RawValue);
  }

  /// Get the current value, or call \p Generator to generate a value.
  /// Guarantees that only one thread's \p Generator will run.
  ///
  /// \pre \p Generator doesn't return \c nullptr.
  T &loadOrGenerate(function_ref<T *()> Generator) {
    // Return the existing value, if already set.
    uintptr_t Raw = Storage.load();
    if (Raw != getNull() && Raw != getBusy())
      return *makePointer(Raw);

    // Try to mark as busy, then generate and store a new value.
    if (LLVM_LIKELY(Raw == getNull() &&
                    Storage.compare_exchange_strong(Raw, getBusy()))) {
      Raw = makeRaw(Generator());
      assert(Raw != getNull() && "Expected non-null from generator");
      Storage.store(Raw);
      return *makePointer(Raw);
    }

    // Contended with another generator. Wait for it to complete.
    while (Raw == getBusy())
      Raw = Storage.load();
    assert(Raw != getNull() && "Expected non-null from competing generator");
    return *makePointer(Raw);
  }

  explicit operator bool() const { return load(); }
  operator T *() const { return load(); }

  T &operator*() const {
    T *P = load();
    assert(P && "Unexpected null dereference");
    return *P;
  }
  T *operator->() const { return &operator*(); }

  LazyAtomicPointer() : Storage(0) {}
  LazyAtomicPointer(std::nullptr_t) : Storage(0) {}
  LazyAtomicPointer(T *Value) : Storage(makeRaw(Value)) {}
  LazyAtomicPointer(const LazyAtomicPointer &RHS)
      : Storage(makeRaw(RHS.load())) {}

  LazyAtomicPointer &operator=(std::nullptr_t) {
    store(nullptr);
    return *this;
  }
  LazyAtomicPointer &operator=(T *RHS) {
    store(RHS);
    return *this;
  }
  LazyAtomicPointer &operator=(const LazyAtomicPointer &RHS) {
    store(RHS.load());
    return *this;
  }

private:
  std::atomic<uintptr_t> Storage;
};

} // end namespace llvm

#endif // LLVM_ADT_LAZYATOMICPOINTER_H
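To make the busy-state semantics from the class comment concrete, here is a minimal sketch (a hypothetical driver, not part of the commit; the sleep only widens the window in which the generator is in flight):

#include "llvm/ADT/LazyAtomicPointer.h"
#include <cassert>
#include <chrono>
#include <thread>

void busyStateSketch() {
  llvm::LazyAtomicPointer<int> Slot;
  static int Value = 7;
  std::thread Producer([&] {
    Slot.loadOrGenerate([] {
      // Simulate a slow generator so the busy window is observable.
      std::this_thread::sleep_for(std::chrono::milliseconds(50));
      return &Value;
    });
  });
  // While the generator is in flight, load() reports nullptr rather than
  // blocking; only APIs that must write (store, exchange) would spin here.
  (void)Slot.load();
  Producer.join();
  // After the generator finishes, every reader sees the generated value.
  assert(Slot.load() == &Value && *Slot == 7);
}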

llvm/unittests/ADT/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -47,6 +47,7 @@ add_llvm_unittest(ADTTests
   IntervalTreeTest.cpp
   IntrusiveRefCntPtrTest.cpp
   IteratorTest.cpp
+  LazyAtomicPointerTest.cpp
   MappedIteratorTest.cpp
   MapVectorTest.cpp
   MoveOnly.cpp

llvm/unittests/ADT/LazyAtomicPointerTest.cpp

Lines changed: 82 additions & 0 deletions
@@ -0,0 +1,82 @@
//===- LazyAtomicPointerTest.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/LazyAtomicPointer.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/ThreadPool.h"
#include "gtest/gtest.h"

using namespace llvm;

namespace {

TEST(LazyAtomicPointer, loadOrGenerate) {
  int Value = 0;
  LazyAtomicPointer<int> Ptr;
  ThreadPool Threads;
  for (unsigned I = 0; I < 4; ++I)
    Threads.async([&]() {
      Ptr.loadOrGenerate([&]() {
        // Make sure this is only called once.
        static std::atomic<bool> Once(false);
        bool Current = false;
        EXPECT_TRUE(Once.compare_exchange_strong(Current, true));
        return &Value;
      });
    });

  Threads.wait();
  EXPECT_EQ(Ptr.load(), &Value);
}

#if (LLVM_ENABLE_THREADS)
TEST(LazyAtomicPointer, BusyState) {
  int Value = 0;
  LazyAtomicPointer<int> Ptr;
  ThreadPool Threads;

  std::mutex BusyLock, EndLock;
  std::condition_variable Busy, End;
  bool IsBusy = false, IsEnd = false;
  Threads.async([&]() {
    Ptr.loadOrGenerate([&]() {
      // Notify busy state.
      {
        std::lock_guard<std::mutex> Lock(BusyLock);
        IsBusy = true;
      }
      Busy.notify_all();
      std::unique_lock<std::mutex> LEnd(EndLock);
      // Wait for end state.
      End.wait(LEnd, [&]() { return IsEnd; });
      return &Value;
    });
  });

  // Wait for busy state.
  std::unique_lock<std::mutex> LBusy(BusyLock);
  Busy.wait(LBusy, [&]() { return IsBusy; });
  int *ExistingValue = nullptr;
  // The busy state will not exchange the value.
  EXPECT_FALSE(Ptr.compare_exchange_weak(ExistingValue, nullptr));
  // The busy state returns nullptr from load() and compare_exchange_weak().
  EXPECT_EQ(ExistingValue, nullptr);
  EXPECT_EQ(Ptr.load(), nullptr);

  // End the busy state.
  {
    std::lock_guard<std::mutex> Lock(EndLock);
    IsEnd = true;
  }
  End.notify_all();
  Threads.wait();
  EXPECT_EQ(Ptr.load(), &Value);
}
#endif

} // namespace
