@@ -129,6 +129,55 @@ uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
 
 bool ErrorIsOOM(error_t err) { return err == ZX_ERR_NO_MEMORY; }
 
+// For any sanitizer internal that needs to map something which can be unmapped
+// later, first attempt to map to a pre-allocated VMAR. This helps reduce
+// fragmentation from many small anonymous mmap calls. A good value for this
+// VMAR size would be the total size of your typical sanitizer internal objects
+// allocated in an "average" process lifetime. Examples of this include:
+// FakeStack, LowLevelAllocator mappings, TwoLevelMap, InternalMmapVector,
+// StackStore, CreateAsanThread, etc.
+//
+// This is roughly equal to the total sum of sanitizer internal mappings for a
+// large test case.
+constexpr size_t kSanitizerHeapVmarSize = 13ULL << 20;
+static zx_handle_t gSanitizerHeapVmar = ZX_HANDLE_INVALID;
+
+static zx_status_t GetSanitizerHeapVmar(zx_handle_t *vmar) {
+  zx_status_t status = ZX_OK;
+  if (gSanitizerHeapVmar == ZX_HANDLE_INVALID) {
+    CHECK_EQ(kSanitizerHeapVmarSize % GetPageSizeCached(), 0);
+    uintptr_t base;
+    status = _zx_vmar_allocate(
+        _zx_vmar_root_self(),
+        ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
+        kSanitizerHeapVmarSize, &gSanitizerHeapVmar, &base);
+  }
+  *vmar = gSanitizerHeapVmar;
+  if (status == ZX_OK)
+    CHECK_NE(gSanitizerHeapVmar, ZX_HANDLE_INVALID);
+  return status;
+}
+
+static zx_status_t TryVmoMapSanitizerVmar(zx_vm_option_t options,
+                                          size_t vmar_offset, zx_handle_t vmo,
+                                          size_t size, uintptr_t *addr) {
+  zx_handle_t vmar;
+  zx_status_t status = GetSanitizerHeapVmar(&vmar);
+  if (status != ZX_OK)
+    return status;
+
+  status = _zx_vmar_map(gSanitizerHeapVmar, options, vmar_offset, vmo,
+                        /*vmo_offset=*/0, size, addr);
+  if (status == ZX_ERR_NO_RESOURCES) {
+    // This means there's no space in the heap VMAR, so fall back to the root
+    // VMAR.
+    status = _zx_vmar_map(_zx_vmar_root_self(), options, vmar_offset, vmo,
+                          /*vmo_offset=*/0, size, addr);
+  }
+
+  return status;
+}
+
 static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                   bool raw_report, bool die_for_nomem) {
   size = RoundUpTo(size, GetPageSize());
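
The pattern above is: lazily allocate one 13 MiB sub-VMAR out of the root VMAR on first use, serve sanitizer-internal mappings from it, and treat ZX_ERR_NO_RESOURCES as "sub-VMAR exhausted, fall back to the root VMAR". A minimal standalone sketch of the same pattern, using the public zx_* syscalls rather than the runtime's _zx_* aliases (helper names here are hypothetical, not part of the patch):

    #include <zircon/process.h>
    #include <zircon/syscalls.h>

    constexpr size_t kHeapVmarSize = 13ULL << 20;  // 13 MiB, matching the patch.
    static zx_handle_t g_heap_vmar = ZX_HANDLE_INVALID;

    // Map |size| bytes of |vmo|, preferring the lazily allocated sub-VMAR and
    // falling back to the root VMAR once the sub-VMAR is exhausted.
    zx_status_t MapPreferHeapVmar(zx_handle_t vmo, size_t size, uintptr_t *addr) {
      if (g_heap_vmar == ZX_HANDLE_INVALID) {
        uintptr_t base;
        zx_status_t st = zx_vmar_allocate(
            zx_vmar_root_self(),
            ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC,
            0, kHeapVmarSize, &g_heap_vmar, &base);
        if (st != ZX_OK)
          return st;
      }
      zx_status_t st =
          zx_vmar_map(g_heap_vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0, vmo,
                      /*vmo_offset=*/0, size, addr);
      if (st == ZX_ERR_NO_RESOURCES)  // Sub-VMAR is full; use the root VMAR.
        st = zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                         0, vmo, /*vmo_offset=*/0, size, addr);
      return st;
    }

ZX_VM_CAN_MAP_SPECIFIC is requested up front because the aligned-allocation path later in this patch maps with ZX_VM_SPECIFIC_OVERWRITE, which is only permitted inside a VMAR that holds that right.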
@@ -144,11 +193,9 @@ static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
   _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                           internal_strlen(mem_type));
 
-  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
   uintptr_t addr;
-  status =
-      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
-                   vmo, 0, size, &addr);
+  status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
+                                  /*vmar_offset=*/0, vmo, size, &addr);
   _zx_handle_close(vmo);
 
   if (status != ZX_OK) {
@@ -243,6 +290,12 @@ void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
 
   zx_status_t status =
       _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
+  if (status == ZX_ERR_INVALID_ARGS && target_vmar == gSanitizerHeapVmar) {
+    // If there wasn't any space in the heap vmar, the fallback was the root
+    // vmar.
+    status = _zx_vmar_unmap(_zx_vmar_root_self(),
+                            reinterpret_cast<uintptr_t>(addr), size);
+  }
   if (status != ZX_OK) {
     Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
            SanitizerToolName, size, size, addr);
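
The unmap side relies on _zx_vmar_unmap returning ZX_ERR_INVALID_ARGS when the given range does not lie inside the VMAR it is called on: if the heap VMAR rejects the range, the mapping must have been one that fell back to the root VMAR at map time. A sketch of the same two-step unmap, reusing the hypothetical names from the sketch above:

    zx_status_t UnmapPreferHeapVmar(uintptr_t addr, size_t size) {
      // Try the sub-VMAR first; ZX_ERR_INVALID_ARGS means the range is not
      // inside it, so the original mapping went into the root VMAR.
      zx_status_t st = zx_vmar_unmap(g_heap_vmar, addr, size);
      if (st == ZX_ERR_INVALID_ARGS)
        st = zx_vmar_unmap(zx_vmar_root_self(), addr, size);
      return st;
    }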
@@ -308,32 +361,28 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
   _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                           internal_strlen(mem_type));
 
-  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
-
   // Map a larger size to get a chunk of address space big enough that
   // it surely contains an aligned region of the requested size. Then
   // overwrite the aligned middle portion with a mapping from the
   // beginning of the VMO, and unmap the excess before and after.
   size_t map_size = size + alignment;
   uintptr_t addr;
-  status =
-      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
-                   vmo, 0, map_size, &addr);
+  status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
+                                  /*vmar_offset=*/0, vmo, map_size, &addr);
   if (status == ZX_OK) {
     uintptr_t map_addr = addr;
     uintptr_t map_end = map_addr + map_size;
     addr = RoundUpTo(map_addr, alignment);
     uintptr_t end = addr + size;
     if (addr != map_addr) {
       zx_info_vmar_t info;
-      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
+      status = _zx_object_get_info(gSanitizerHeapVmar, ZX_INFO_VMAR, &info,
                                    sizeof(info), NULL, NULL);
       if (status == ZX_OK) {
         uintptr_t new_addr;
-        status = _zx_vmar_map(
-            _zx_vmar_root_self(),
+        status = TryVmoMapSanitizerVmar(
             ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
-            addr - info.base, vmo, 0, size, &new_addr);
+            addr - info.base, vmo, size, &new_addr);
         if (status == ZX_OK)
           CHECK_EQ(new_addr, addr);
       }
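
With concrete (assumed) numbers, the over-map-and-trim scheme in this hunk works like this: for size = 64 KiB and alignment = 64 KiB, map_size = 128 KiB is mapped anywhere; if map_addr lands at 0x3a19000, RoundUpTo yields addr = 0x3a20000, and ZX_VM_SPECIFIC_OVERWRITE remaps the VMO at the VMAR-relative offset addr - info.base. The trimming of the leftover slack happens past the end of this hunk; a hedged sketch of that step using the variables visible above, with vmar standing for whichever VMAR the mapping actually landed in:

    // Unmap the unaligned head and tail of the over-sized mapping (sketch;
    // the actual trim code lies beyond this hunk).
    if (addr != map_addr)
      _zx_vmar_unmap(vmar, map_addr, addr - map_addr);  // head slack
    if (map_end != end)
      _zx_vmar_unmap(vmar, end, map_end - end);         // tail slack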
@@ -357,7 +406,7 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
 }
 
 void UnmapOrDie(void *addr, uptr size) {
-  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
+  UnmapOrDieVmar(addr, size, gSanitizerHeapVmar);
 }
 
 void ReleaseMemoryPagesToOS(uptr beg, uptr end) {