@@ -44,24 +44,111 @@ const char *GetEnv(const char *name) { return nullptr; }
44
44
45
45
// Emulated page size; PAGESIZE comes from the platform headers (WASI has no
// real paging, so this is only used for size rounding and alignment defaults).
uptr GetPageSize () { return PAGESIZE; }
46
46
47
+ class EmulatedMmap {
48
+
49
+ struct MmapEntry {
50
+ void *addr;
51
+ void *addr_to_free;
52
+ uptr size;
53
+ const char *mem_type;
54
+ MmapEntry *next;
55
+ } __attribute__((aligned(1 )));
56
+
57
+ SpinMutex head_lock_;
58
+ MmapEntry *head_ SANITIZER_GUARDED_BY (head_lock_);
59
+
60
+ public:
61
+ EmulatedMmap () : head_lock_(), head_(nullptr ) {}
62
+
63
+ bool Unmap (void *addr, uptr size) {
64
+ SpinMutexLock l (&head_lock_);
65
+ MmapEntry *entry = head_;
66
+ MmapEntry *prev = nullptr ;
67
+ while (entry) {
68
+ if (entry->addr == addr) {
69
+ // Remove the entry from the linked list
70
+ if (prev) {
71
+ prev->next = entry->next ;
72
+ } else {
73
+ head_ = entry->next ;
74
+ }
75
+ // Free the memory allocated for the entry
76
+ __libc_free (entry->addr_to_free );
77
+ return true ;
78
+ }
79
+ prev = entry;
80
+ entry = entry->next ;
81
+ }
82
+ return false ;
83
+ }
84
+
85
+ void *Mmap (uptr size, const char *mem_type, uptr alignment = GetPageSize()) {
86
+ uptr mmap_size = size + alignment;
87
+ uptr malloc_size = mmap_size + sizeof (MmapEntry);
88
+ void *ptr = __libc_malloc (malloc_size);
89
+ if (!ptr) {
90
+ return nullptr ;
91
+ }
92
+ MmapEntry *entry = (MmapEntry *)(((uptr)ptr) + mmap_size);
93
+ entry->addr = (void *)RoundUpTo ((uptr)ptr, alignment);
94
+ entry->addr_to_free = ptr;
95
+ entry->size = size;
96
+ entry->mem_type = mem_type;
97
+ {
98
+ // Add the entry to the linked list
99
+ SpinMutexLock l (&head_lock_);
100
+ entry->next = head_;
101
+ head_ = entry;
102
+ }
103
+ return entry->addr ;
104
+ }
105
+
106
+ void *MmapAligned (uptr size, const char *mem_type, uptr alignment) {
107
+ uptr mmap_size = size + alignment;
108
+ uptr malloc_size = mmap_size + sizeof (MmapEntry);
109
+ void *ptr = __libc_malloc (malloc_size);
110
+ if (!ptr) {
111
+ return nullptr ;
112
+ }
113
+ MmapEntry *entry = (MmapEntry *)(((uptr)ptr) + mmap_size);
114
+ entry->addr = (void *)RoundUpTo ((uptr)ptr, alignment);
115
+ entry->addr_to_free = ptr;
116
+ entry->size = size;
117
+ entry->mem_type = mem_type;
118
+ {
119
+ // Add the entry to the linked list
120
+ SpinMutexLock l (&head_lock_);
121
+ entry->next = head_;
122
+ head_ = entry;
123
+ }
124
+ return entry->addr ;
125
+ }
126
+ };
127
+
128
// File-local emulated-mmap state shared by the MmapOrDie family below.
static EmulatedMmap emulated_mmap;
129
+
47
130
// Allocates `size` bytes via the mmap emulation; calls Die() on failure.
// `raw_report` controls whether the failure is reported before dying.
void *MmapOrDie (uptr size, const char *mem_type, bool raw_report) {
  void *ptr = emulated_mmap.Mmap (size, mem_type);
  if (!ptr) {
    if (raw_report) {
      // %zu matches uptr in the sanitizer internal printf (%u would
      // mismatch on 64-bit uptr).
      Report ("MmapOrDie: failed to allocate %zu bytes\n", size);
    }
    Die ();
  }
  return ptr;
}
58
140
59
141
void UnmapOrDie (void *addr, uptr size, bool raw_report) {
60
- __libc_free (addr);
142
+ if (!emulated_mmap.Unmap (addr, size)) {
143
+ if (raw_report) {
144
+ Report (" UnmapOrDie: failed to unmap %u bytes at %p\n " , size, addr);
145
+ }
146
+ Die ();
147
+ }
61
148
}
62
149
63
150
// "NoReserve" is meaningless without real virtual memory, so this behaves
// exactly like MmapOrDie.  Delegating (rather than calling
// emulated_mmap.Mmap directly) preserves the "OrDie" contract: failure
// must terminate, not return nullptr to the caller.
void *MmapNoReserveOrDie (uptr size, const char *mem_type) {
  return MmapOrDie (size, mem_type, /*raw_report=*/false);
}
66
153
67
154
void DumpProcessMap () {
@@ -97,33 +184,21 @@ void Symbolizer::LateInitialize() {
97
184
98
185
// Additional mandatory functions
99
186
// Like MmapOrDie, but tolerates out-of-memory: the sanitizer_common
// contract is to return nullptr on allocation failure so callers (e.g.
// MmapAlignedOrDieOnFatalError, which checks for nullptr) can recover,
// rather than calling Die().
void *MmapOrDieOnFatalError (uptr size, const char *mem_type) {
  void *ptr = emulated_mmap.Mmap (size, mem_type);
  if (!ptr) {
    // %zu matches uptr in the sanitizer internal printf.
    Report ("MmapOrDieOnFatalError: failed to allocate %zu bytes\n", size);
    return nullptr;
  }
  IncreaseTotalMmap (size);
  return ptr;
}
110
195
// Allocates `size` bytes aligned to `alignment`; both must be powers of
// two (CHECKed).  Returns nullptr if the underlying allocation fails.
void *MmapAlignedOrDieOnFatalError (uptr size, uptr alignment, const char *mem_type) {
  CHECK (IsPowerOfTwo (size));
  CHECK (IsPowerOfTwo (alignment));
  void *map = emulated_mmap.MmapAligned (size, mem_type, alignment);
  if (UNLIKELY (!map))
    return nullptr;
  return map;
}
128
203
129
204
uptr ReadLongProcessName (char *buf, uptr buf_len) {
@@ -265,7 +340,7 @@ void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
265
340
}
266
341
267
342
268
- class WASISymbolizerTool : public SymbolizerTool {
343
+ class WASISymbolizerTool final : public SymbolizerTool {
269
344
public:
270
345
bool SymbolizePC (uptr addr, SymbolizedStack *stack) override ;
271
346
bool SymbolizeData (uptr addr, DataInfo *info) override {
0 commit comments