
Commit bedba19

SC llvm team authored and committed
Merged main:ea2036e1e56b into amd-gfx:e1b9ddbd68c9
Local branch amd-gfx e1b9ddb: Merged main:4f98fb2e937a into amd-gfx:5c2235853bc1
Remote branch main ea2036e: [scudo] Fix the use of ASSERT_CAPABILITY in TSD (llvm#68273)
2 parents: e1b9ddb + ea2036e

27 files changed: +541 / -142 lines

compiler-rt/lib/scudo/standalone/combined.h

Lines changed: 5 additions & 0 deletions
@@ -245,12 +245,14 @@ class Allocator {
   // - unlinking the local stats from the global ones (destroying the cache does
   //   the last two items).
   void commitBack(TSD<ThisT> *TSD) {
+    TSD->assertLocked(/*BypassCheck=*/true);
     Quarantine.drain(&TSD->getQuarantineCache(),
                      QuarantineCallback(*this, TSD->getCache()));
     TSD->getCache().destroy(&Stats);
   }
 
   void drainCache(TSD<ThisT> *TSD) {
+    TSD->assertLocked(/*BypassCheck=*/true);
     Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
                                QuarantineCallback(*this, TSD->getCache()));
     TSD->getCache().drain();
@@ -363,6 +365,7 @@ class Allocator {
     DCHECK_NE(ClassId, 0U);
     bool UnlockRequired;
     auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+    TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
     Block = TSD->getCache().allocate(ClassId);
     // If the allocation failed, the most likely reason with a 32-bit primary
     // is the region being full. In that event, retry in each successively
@@ -1147,6 +1150,7 @@ class Allocator {
     if (LIKELY(ClassId)) {
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+      TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
       const bool CacheDrained =
           TSD->getCache().deallocate(ClassId, BlockBegin);
       if (UnlockRequired)
@@ -1166,6 +1170,7 @@ class Allocator {
     } else {
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+      TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
       Quarantine.put(&TSD->getQuarantineCache(),
                      QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
       if (UnlockRequired)
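
All of the call sites above follow the same idiom: getTSDAndLock() only locks the TSD on the shared-TSD path and reports that through UnlockRequired, so the runtime lock check is bypassed exactly when no lock was taken. Below is a toy model of that control flow, not Scudo code; ToyTSD and ToyRegistry are stand-ins for TSD<ThisT> and the TSD registry.

#include <cassert>
#include <mutex>

// Stand-ins only; in Scudo these are TSD<ThisT> and the TSD registry.
struct ToyTSD {
  std::mutex Mutex;
  bool Locked = false;
  int Cache = 0;

  void lock() { Mutex.lock(); Locked = true; }
  void unlock() { Locked = false; Mutex.unlock(); }
  void assertLocked(bool BypassCheck) {
    if (!BypassCheck)
      assert(Locked && "shared TSD must be locked before use");
  }
};

struct ToyRegistry {
  ToyTSD SharedTSD;    // shared between threads: handed out locked
  ToyTSD ExclusiveTSD; // per-thread in Scudo: never locked
  bool UseSharedTSD = true;

  ToyTSD *getTSDAndLock(bool *UnlockRequired) {
    if (UseSharedTSD) {
      SharedTSD.lock();
      *UnlockRequired = true; // caller must unlock when done
      return &SharedTSD;
    }
    *UnlockRequired = false;
    return &ExclusiveTSD;
  }
};

int main() {
  ToyRegistry Registry;
  bool UnlockRequired;
  ToyTSD *TSD = Registry.getTSDAndLock(&UnlockRequired);
  // Mirrors the lines added above: check the lock only when one was taken.
  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
  TSD->Cache += 1;
  if (UnlockRequired)
    TSD->unlock();
  return 0;
}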

compiler-rt/lib/scudo/standalone/tests/combined_test.cpp

Lines changed: 3 additions & 0 deletions
@@ -512,6 +512,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
 
   bool UnlockRequired;
   auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
+  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
   EXPECT_TRUE(!TSD->getCache().isEmpty());
   TSD->getCache().drain();
   EXPECT_TRUE(TSD->getCache().isEmpty());
@@ -536,6 +537,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ForceCacheDrain) NO_THREAD_SAFETY_ANALYSIS {
 
   bool UnlockRequired;
   auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
+  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
   EXPECT_TRUE(TSD->getCache().isEmpty());
   EXPECT_EQ(TSD->getQuarantineCache().getSize(), 0U);
   EXPECT_TRUE(Allocator->getQuarantine()->isEmpty());
@@ -842,6 +844,7 @@ TEST(ScudoCombinedTest, BasicTrustyConfig) {
 
   bool UnlockRequired;
   auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
+  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
   TSD->getCache().drain();
 
   Allocator->releaseToOS(scudo::ReleaseToOS::Force);

compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp

Lines changed: 4 additions & 0 deletions
@@ -101,13 +101,15 @@ template <class AllocatorT> static void testRegistry() {
 
   bool UnlockRequired;
   auto TSD = Registry->getTSDAndLock(&UnlockRequired);
+  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
   EXPECT_NE(TSD, nullptr);
   EXPECT_EQ(TSD->getCache().Canary, 0U);
   if (UnlockRequired)
     TSD->unlock();
 
   Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
   TSD = Registry->getTSDAndLock(&UnlockRequired);
+  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
   EXPECT_NE(TSD, nullptr);
   EXPECT_EQ(TSD->getCache().Canary, 0U);
   memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
@@ -137,6 +139,7 @@ template <typename AllocatorT> static void stressCache(AllocatorT *Allocator) {
   Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
   bool UnlockRequired;
   auto TSD = Registry->getTSDAndLock(&UnlockRequired);
+  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
   EXPECT_NE(TSD, nullptr);
   // For an exclusive TSD, the cache should be empty. We cannot guarantee the
   // same for a shared TSD.
@@ -195,6 +198,7 @@ static void stressSharedRegistry(MockAllocator<SharedCaches> *Allocator) {
   bool UnlockRequired;
   for (scudo::uptr I = 0; I < 4096U; I++) {
     auto TSD = Registry->getTSDAndLock(&UnlockRequired);
+    TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
     EXPECT_NE(TSD, nullptr);
     Set.insert(reinterpret_cast<void *>(TSD));
     if (UnlockRequired)

compiler-rt/lib/scudo/standalone/tsd.h

Lines changed: 10 additions & 7 deletions
@@ -53,8 +53,14 @@ template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
   inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
   inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
 
-  void commitBack(Allocator *Instance) ASSERT_CAPABILITY(Mutex) {
-    Instance->commitBack(this);
+  void commitBack(Allocator *Instance) { Instance->commitBack(this); }
+
+  // As the comments attached to `getCache()`, the TSD doesn't always need to be
+  // locked. In that case, we would only skip the check before we have all TSDs
+  // locked in all paths.
+  void assertLocked(bool BypassCheck) ASSERT_CAPABILITY(Mutex) {
+    if (SCUDO_DEBUG && !BypassCheck)
+      Mutex.assertHeld();
   }
 
   // Ideally, we may want to assert that all the operations on
@@ -66,11 +72,8 @@ template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
   // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld` but acquiring
   // TSD doesn't always require holding the lock. Add this assertion while the
   // lock is always acquired.
-  typename Allocator::CacheT &getCache() ASSERT_CAPABILITY(Mutex) {
-    return Cache;
-  }
-  typename Allocator::QuarantineCacheT &getQuarantineCache()
-      ASSERT_CAPABILITY(Mutex) {
+  typename Allocator::CacheT &getCache() REQUIRES(Mutex) { return Cache; }
+  typename Allocator::QuarantineCacheT &getQuarantineCache() REQUIRES(Mutex) {
     return QuarantineCache;
   }
 
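
This is the header the commit title refers to: getCache() and getQuarantineCache() now demand the capability via REQUIRES(Mutex), and assertLocked() is the one place that introduces it, via ASSERT_CAPABILITY, optionally backed by a runtime Mutex.assertHeld(). The following is a minimal, self-contained sketch of that Clang thread-safety pattern, not Scudo code; it should compile with clang++ -std=c++17 -Wthread-safety.

#include <cassert>
#include <mutex>

#define CAPABILITY(x) __attribute__((capability(x)))
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
#define ASSERT_CAPABILITY(x) __attribute__((assert_capability(x)))
#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

// Stand-in for Scudo's HybridMutex: lock()/unlock() deliberately carry no
// acquire/release annotations (they are NO_THREAD_SAFETY_ANALYSIS in Scudo,
// because the TSD is only locked on the shared path).
class CAPABILITY("mutex") SketchMutex {
public:
  void lock() NO_THREAD_SAFETY_ANALYSIS { M.lock(); Held = true; }
  void unlock() NO_THREAD_SAFETY_ANALYSIS { Held = false; M.unlock(); }
  void assertHeld() { assert(Held); } // stand-in for HybridMutex::assertHeld()

private:
  std::mutex M;
  bool Held = false;
};

struct TSDSketch {
  SketchMutex Mutex;
  int Cache = 0;

  // The ASSERT_CAPABILITY annotation alone satisfies the analyzer; the runtime
  // check runs only when the caller did not ask to bypass it (the real code
  // additionally gates it on SCUDO_DEBUG).
  void assertLocked(bool BypassCheck) ASSERT_CAPABILITY(Mutex) {
    if (!BypassCheck)
      Mutex.assertHeld();
  }

  int &getCache() REQUIRES(Mutex) { return Cache; }
};

int main() {
  TSDSketch TSD;
  TSD.Mutex.lock();
  TSD.assertLocked(/*BypassCheck=*/false);
  TSD.getCache() = 42; // no -Wthread-safety warning: capability asserted above
  TSD.Mutex.unlock();
  return 0;
}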

compiler-rt/lib/scudo/standalone/tsd_shared.h

Lines changed: 5 additions & 0 deletions
@@ -120,6 +120,11 @@ struct TSDRegistrySharedT {
                 TSDsArraySize);
     for (uptr I = 0; I < NumberOfTSDs; ++I) {
       TSDs[I].lock();
+      // Theoretically, we want to mark TSD::lock()/TSD::unlock() with proper
+      // thread annotations. However, given the TSD is only locked on shared
+      // path, do the assertion in a separate path to avoid confusing the
+      // analyzer.
+      TSDs[I].assertLocked(/*BypassCheck=*/true);
       Str->append("  Shared TSD[%zu]:\n", I);
       TSDs[I].getCache().getStats(Str);
       TSDs[I].unlock();

llvm/include/llvm/BinaryFormat/DXContainer.h

Lines changed: 1 addition & 1 deletion
@@ -453,7 +453,7 @@ struct ProgramSignatureHeader {
 
   void swapBytes() {
     sys::swapByteOrder(ParamCount);
-    sys::swapByteOrder(ParamCount);
+    sys::swapByteOrder(FirstParamOffset);
   }
 };
 
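
The replaced line was swapping ParamCount a second time instead of FirstParamOffset; since a byte swap is its own inverse, the second call undid the first, so neither field ended up in host byte order. A small standalone check of that property, using the same llvm::sys::swapByteOrder helper the header uses (assumes LLVM's Support headers are on the include path):

#include <cstdint>
#include <cstdio>

#include "llvm/Support/SwapByteOrder.h"

int main() {
  uint32_t ParamCount = 0x01020304;
  llvm::sys::swapByteOrder(ParamCount); // 0x04030201
  llvm::sys::swapByteOrder(ParamCount); // 0x01020304 again: a double swap is a no-op
  std::printf("ParamCount = 0x%08x\n", ParamCount);
  return 0;
}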

llvm/include/llvm/CodeGen/AccelTable.h

Lines changed: 28 additions & 7 deletions
@@ -106,6 +106,7 @@ namespace llvm {
 class AsmPrinter;
 class DwarfCompileUnit;
 class DwarfDebug;
+class DwarfTypeUnit;
 class MCSymbol;
 class raw_ostream;
 
@@ -197,6 +198,9 @@ template <typename DataT> class AccelTable : public AccelTableBase {
 
   template <typename... Types>
   void addName(DwarfStringPoolEntryRef Name, Types &&... Args);
+  void clear() { Entries.clear(); }
+  void addEntries(AccelTable<DataT> &Table);
+  const StringEntries getEntries() const { return Entries; }
 };
 
 template <typename AccelTableDataT>
@@ -215,6 +219,16 @@ void AccelTable<AccelTableDataT>::addName(DwarfStringPoolEntryRef Name,
                  AccelTableDataT(std::forward<Types>(Args)...));
 }
 
+template <typename AccelTableDataT>
+void AccelTable<AccelTableDataT>::addEntries(
+    AccelTable<AccelTableDataT> &Table) {
+  for (auto &Entry : Table.getEntries()) {
+    for (AccelTableData *Value : Entry.second.Values)
+      addName(Entry.second.Name,
+              static_cast<AccelTableDataT *>(Value)->getDie());
+  }
+}
+
 /// A base class for different implementations of Data classes for Apple
 /// Accelerator Tables. The columns in the table are defined by the static Atoms
 /// variable defined on the subclasses.
@@ -250,6 +264,10 @@ class AppleAccelTableData : public AccelTableData {
 /// emitDWARF5AccelTable function.
 class DWARF5AccelTableData : public AccelTableData {
 public:
+  struct AttributeEncoding {
+    dwarf::Index Index;
+    dwarf::Form Form;
+  };
   static uint32_t hash(StringRef Name) { return caseFoldingDjbHash(Name); }
 
   DWARF5AccelTableData(const DIE &Die) : Die(Die) {}
@@ -309,17 +327,20 @@ void emitAppleAccelTable(AsmPrinter *Asm, AccelTable<DataT> &Contents,
 void emitDWARF5AccelTable(AsmPrinter *Asm,
                           AccelTable<DWARF5AccelTableData> &Contents,
                           const DwarfDebug &DD,
-                          ArrayRef<std::unique_ptr<DwarfCompileUnit>> CUs);
-
+                          ArrayRef<std::unique_ptr<DwarfCompileUnit>> CUs,
+                          ArrayRef<std::unique_ptr<DwarfTypeUnit>> TUs);
+using GetIndexForEntryReturnType =
+    std::optional<std::pair<unsigned, DWARF5AccelTableData::AttributeEncoding>>;
 /// Emit a DWARFv5 Accelerator Table consisting of entries in the specified
 /// AccelTable. The \p CUs contains either symbols keeping offsets to the
 /// start of compilation unit, either offsets to the start of compilation
 /// unit themselves.
-void emitDWARF5AccelTable(
-    AsmPrinter *Asm, AccelTable<DWARF5AccelTableStaticData> &Contents,
-    ArrayRef<std::variant<MCSymbol *, uint64_t>> CUs,
-    llvm::function_ref<unsigned(const DWARF5AccelTableStaticData &)>
-        getCUIndexForEntry);
+void emitDWARF5AccelTable(AsmPrinter *Asm,
+                          AccelTable<DWARF5AccelTableStaticData> &Contents,
+                          ArrayRef<std::variant<MCSymbol *, uint64_t>> CUs,
+                          llvm::function_ref<GetIndexForEntryReturnType(
+                              const DWARF5AccelTableStaticData &)>
+                              getIndexForEntry);
 
 /// Accelerator table data implementation for simple Apple accelerator tables
 /// with just a DIE reference.
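
With this change the second emitDWARF5AccelTable() overload no longer takes a plain CU-index callback: getIndexForEntry now returns GetIndexForEntryReturnType, an optional pair of the unit index and the AttributeEncoding (DW_IDX_* index attribute plus DW_FORM_*) to emit it with. The fragment below is illustrative only; it assumes an LLVM translation unit (the types come from llvm/CodeGen/AccelTable.h) and the fixed mapping to unit 0 is purely hypothetical.

#include <utility>

#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/AccelTable.h"

using namespace llvm;

// Hypothetical mapping: attribute every entry to unit 0 and emit the index as
// DW_IDX_compile_unit with a one-byte form. A real callback would derive the
// unit from the entry and could return std::nullopt when no index attribute
// should be emitted.
static GetIndexForEntryReturnType
exampleGetIndexForEntry(const DWARF5AccelTableStaticData &) {
  DWARF5AccelTableData::AttributeEncoding Enc{dwarf::DW_IDX_compile_unit,
                                              dwarf::DW_FORM_data1};
  return std::make_pair(0u, Enc);
}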

llvm/include/llvm/Config/llvm-config.h.cmake

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@
 
 /* Indicate that this is LLVM compiled from the amd-gfx branch. */
 #define LLVM_HAVE_BRANCH_AMD_GFX
-#define LLVM_MAIN_REVISION 476916
+#define LLVM_MAIN_REVISION 476920
 
 /* Define if LLVM_ENABLE_DUMP is enabled */
 #cmakedefine LLVM_ENABLE_DUMP
