Skip to content

Commit 93b4705

Browse files
authored
[compiler-rt][fuchsia] Preallocate a vmar for sanitizer internals (#75256)
In an effort to reduce mmap fragmentation further, allocate a vmar large enough that we can map sanitizer internals into it via DoAnonymousMmap. Objects mapped here include asan's FakeStack, LowLevelAllocator mappings, the primary allocator's TwoLevelMap, InternalMmapVector, StackStore, and asan's thread internals. The vmar is large enough to hold the total size of these objects as seen in a "typical" process lifetime. If the vmar is full, mapping falls back to the root vmar.
1 parent 721dd3b commit 93b4705

File tree

1 file changed

+63
-14
lines changed

1 file changed

+63
-14
lines changed

compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp

Lines changed: 63 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,55 @@ uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
129129

130130
bool ErrorIsOOM(error_t err) { return err == ZX_ERR_NO_MEMORY; }
131131

132+
// For any sanitizer internal that needs to map something which can be unmapped
133+
// later, first attempt to map to a pre-allocated VMAR. This helps reduce
134+
// fragmentation from many small anonymous mmap calls. A good value for this
135+
// VMAR size would be the total size of your typical sanitizer internal objects
136+
// allocated in an "average" process lifetime. Examples of this include:
137+
// FakeStack, LowLevelAllocator mappings, TwoLevelMap, InternalMmapVector,
138+
// StackStore, CreateAsanThread, etc.
139+
//
140+
// This is roughly equal to the total sum of sanitizer internal mappings for a
141+
// large test case.
142+
constexpr size_t kSanitizerHeapVmarSize = 13ULL << 20;
143+
static zx_handle_t gSanitizerHeapVmar = ZX_HANDLE_INVALID;
144+
145+
static zx_status_t GetSanitizerHeapVmar(zx_handle_t *vmar) {
146+
zx_status_t status = ZX_OK;
147+
if (gSanitizerHeapVmar == ZX_HANDLE_INVALID) {
148+
CHECK_EQ(kSanitizerHeapVmarSize % GetPageSizeCached(), 0);
149+
uintptr_t base;
150+
status = _zx_vmar_allocate(
151+
_zx_vmar_root_self(),
152+
ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
153+
kSanitizerHeapVmarSize, &gSanitizerHeapVmar, &base);
154+
}
155+
*vmar = gSanitizerHeapVmar;
156+
if (status == ZX_OK)
157+
CHECK_NE(gSanitizerHeapVmar, ZX_HANDLE_INVALID);
158+
return status;
159+
}
160+
161+
// Maps |vmo| into the sanitizer-heap VMAR with the given |options|, falling
// back to the root VMAR only when the heap VMAR has no space left
// (ZX_ERR_NO_RESOURCES). |vmar_offset| is the target offset within the VMAR
// (0 for "map anywhere" unless a *_SPECIFIC option is set). On success,
// |*addr| receives the mapped address. An error from allocating the heap VMAR
// itself is returned as-is, without attempting the root fallback.
static zx_status_t TryVmoMapSanitizerVmar(zx_vm_option_t options,
                                          size_t vmar_offset, zx_handle_t vmo,
                                          size_t size, uintptr_t *addr) {
  zx_handle_t vmar;
  zx_status_t status = GetSanitizerHeapVmar(&vmar);
  if (status != ZX_OK)
    return status;

  // Use the handle returned by GetSanitizerHeapVmar rather than re-reading the
  // global; the helper exists precisely to hand us the right handle.
  status = _zx_vmar_map(vmar, options, vmar_offset, vmo,
                        /*vmo_offset=*/0, size, addr);
  if (status == ZX_ERR_NO_RESOURCES) {
    // This means there's no space in the heap VMAR, so fall back to the root
    // VMAR.
    status = _zx_vmar_map(_zx_vmar_root_self(), options, vmar_offset, vmo,
                          /*vmo_offset=*/0, size, addr);
  }

  return status;
}
180+
132181
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
133182
bool raw_report, bool die_for_nomem) {
134183
size = RoundUpTo(size, GetPageSize());
@@ -144,11 +193,9 @@ static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
144193
_zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
145194
internal_strlen(mem_type));
146195

147-
// TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
148196
uintptr_t addr;
149-
status =
150-
_zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
151-
vmo, 0, size, &addr);
197+
status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
198+
/*vmar_offset=*/0, vmo, size, &addr);
152199
_zx_handle_close(vmo);
153200

154201
if (status != ZX_OK) {
@@ -243,6 +290,12 @@ void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
243290

244291
zx_status_t status =
245292
_zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
293+
if (status == ZX_ERR_INVALID_ARGS && target_vmar == gSanitizerHeapVmar) {
294+
// If there wasn't any space in the heap vmar, the fallback was the root
295+
// vmar.
296+
status = _zx_vmar_unmap(_zx_vmar_root_self(),
297+
reinterpret_cast<uintptr_t>(addr), size);
298+
}
246299
if (status != ZX_OK) {
247300
Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
248301
SanitizerToolName, size, size, addr);
@@ -308,32 +361,28 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
308361
_zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
309362
internal_strlen(mem_type));
310363

311-
// TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
312-
313364
// Map a larger size to get a chunk of address space big enough that
314365
// it surely contains an aligned region of the requested size. Then
315366
// overwrite the aligned middle portion with a mapping from the
316367
// beginning of the VMO, and unmap the excess before and after.
317368
size_t map_size = size + alignment;
318369
uintptr_t addr;
319-
status =
320-
_zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
321-
vmo, 0, map_size, &addr);
370+
status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
371+
/*vmar_offset=*/0, vmo, map_size, &addr);
322372
if (status == ZX_OK) {
323373
uintptr_t map_addr = addr;
324374
uintptr_t map_end = map_addr + map_size;
325375
addr = RoundUpTo(map_addr, alignment);
326376
uintptr_t end = addr + size;
327377
if (addr != map_addr) {
328378
zx_info_vmar_t info;
329-
status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
379+
status = _zx_object_get_info(gSanitizerHeapVmar, ZX_INFO_VMAR, &info,
330380
sizeof(info), NULL, NULL);
331381
if (status == ZX_OK) {
332382
uintptr_t new_addr;
333-
status = _zx_vmar_map(
334-
_zx_vmar_root_self(),
383+
status = TryVmoMapSanitizerVmar(
335384
ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
336-
addr - info.base, vmo, 0, size, &new_addr);
385+
addr - info.base, vmo, size, &new_addr);
337386
if (status == ZX_OK)
338387
CHECK_EQ(new_addr, addr);
339388
}
@@ -357,7 +406,7 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
357406
}
358407

// Unmaps a region previously created by the MmapOrDie family. Those mappings
// go through TryVmoMapSanitizerVmar, which prefers the sanitizer-heap VMAR,
// so try unmapping from there first; UnmapOrDieVmar retries against the root
// VMAR when the address was mapped there as a fallback.
//
// NOTE(review): if gSanitizerHeapVmar was never allocated, this passes
// ZX_HANDLE_INVALID and the unmap would fail with ZX_ERR_BAD_HANDLE — not the
// ZX_ERR_INVALID_ARGS that the fallback in UnmapOrDieVmar checks for. Confirm
// no caller can reach this before the first successful mapping.
void UnmapOrDie(void *addr, uptr size) {
  UnmapOrDieVmar(addr, size, gSanitizerHeapVmar);
}
362411

363412
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {

0 commit comments

Comments
 (0)