@@ -217,6 +217,8 @@ typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN]; /* 64B */
217
217
218
218
#define ZEND_MM_BINS 30
219
219
220
/* Address of the read-only companion heap: it occupies the page immediately
 * after the (page-aligned) zend_mm_heap structure.
 * NOTE: there must be NO space between the macro name and the parameter list;
 * "ZEND_MM_RO_HEAP (heap)" would define an OBJECT-like macro expanding to
 * "(heap) ((zend_mm_ro_heap*)...)" and break every use site.
 * NOTE(review): the offset uses REAL_PAGE_SIZE while the allocation sites use
 * zend_mm_get_page_size() — these must agree, TODO confirm. */
#define ZEND_MM_RO_HEAP(heap) ((zend_mm_ro_heap*)((char*)(heap) + REAL_PAGE_SIZE))
220
222
#if defined(_MSC_VER )
221
223
# if UINTPTR_MAX == UINT64_MAX
222
224
# define BSWAPPTR (u ) _byteswap_uint64(u)
@@ -263,6 +265,23 @@ typedef struct _zend_mm_huge_list zend_mm_huge_list;
263
265
264
266
static bool zend_mm_use_huge_pages = false;
265
267
268
#define ZEND_MM_DEFAULT_PAGE_SIZE 4096

/* Return the system page size, cached after the first call.
 * Falls back to ZEND_MM_DEFAULT_PAGE_SIZE when the reported size is
 * unusable, so callers always receive a non-zero power of two. */
static size_t zend_mm_get_page_size(void)
{
	static size_t cached_size = 0;
	size_t size = cached_size;

	if (size == 0) {
		size = zend_get_page_size();
		/* anyway, we have to return a valid result */
		if (size == 0 || (size & (size - 1)) != 0) {
			size = ZEND_MM_DEFAULT_PAGE_SIZE;
		}
		cached_size = size;
	}

	return size;
}
266
285
/*
267
286
* Memory is retrieved from OS by chunks of fixed size 2MB.
268
287
* Inside chunk it's managed by pages of fixed size 4096B.
@@ -306,7 +325,6 @@ struct _zend_mm_heap {
306
325
size_t size ; /* current memory usage */
307
326
size_t peak ; /* peak memory usage */
308
327
#endif
309
- uintptr_t shadow_key ; /* free slot shadow ptr xor key */
310
328
zend_mm_free_slot * free_slot [ZEND_MM_BINS ]; /* free lists for small sizes */
311
329
#if ZEND_MM_STAT || ZEND_MM_LIMIT
312
330
size_t real_size ; /* current size of allocated pages */
@@ -329,16 +347,25 @@ struct _zend_mm_heap {
329
347
double avg_chunks_count ; /* average number of chunks allocated per request */
330
348
int last_chunks_delete_boundary ; /* number of chunks after last deletion */
331
349
int last_chunks_delete_count ; /* number of deletion over the last boundary */
350
+ #if ZEND_MM_CUSTOM
351
+ HashTable * tracked_allocs ;
352
+ #endif
353
+ pid_t pid ;
354
+ zend_random_bytes_insecure_state rand_state ;
355
+ };
356
+
357
/* This contains security-sensitive data, and is thus mapped as read-only at run-time right after the _zend_mm_heap struct
 * and accessed via the ZEND_MM_RO_HEAP macro.*/
struct _zend_mm_ro_heap {
	uintptr_t shadow_key; /* free slot shadow ptr xor key */

#if ZEND_MM_CUSTOM
	/* Custom allocator entry points. Function pointers are a prime
	 * heap-overflow target, hence their placement in this read-only page. */
	struct {
		void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
		void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
		void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
	} custom_heap;
#endif
};
343
370
344
371
struct _zend_mm_chunk {
@@ -349,7 +376,6 @@ struct _zend_mm_chunk {
349
376
uint32_t free_tail ; /* number of free pages at the end of chunk */
350
377
uint32_t num ;
351
378
char reserve [64 - (sizeof (void * ) * 3 + sizeof (uint32_t ) * 3 )];
352
- zend_mm_heap heap_slot ; /* used only in main chunk */
353
379
zend_mm_page_map free_map ; /* 512 bits or 64 bytes */
354
380
zend_mm_page_info map [ZEND_MM_PAGES ]; /* 2 KB = 512 * 4 */
355
381
};
@@ -1331,18 +1357,18 @@ static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
1331
1357
/* Mask the next-free pointer with the per-heap shadow key (and byte-swap it
 * on little-endian builds) before storing it in a free slot, so that a heap
 * overflow cannot trivially forge free-list links. */
static zend_always_inline zend_mm_free_slot* zend_mm_encode_free_slot(const zend_mm_heap *heap, const zend_mm_free_slot *slot)
{
	const uintptr_t key = ZEND_MM_RO_HEAP(heap)->shadow_key;
#ifdef WORDS_BIGENDIAN
	return (zend_mm_free_slot*)((uintptr_t)slot ^ key);
#else
	return (zend_mm_free_slot*)(BSWAPPTR((uintptr_t)slot) ^ key);
#endif
}
1339
1365
1340
1366
/* Inverse of zend_mm_encode_free_slot(): recover the real next-free pointer
 * from its masked in-slot representation. */
static zend_always_inline zend_mm_free_slot* zend_mm_decode_free_slot(zend_mm_heap *heap, zend_mm_free_slot *slot)
{
	const uintptr_t key = ZEND_MM_RO_HEAP(heap)->shadow_key;
#ifdef WORDS_BIGENDIAN
	return (zend_mm_free_slot*)((uintptr_t)slot ^ key);
#else
	return (zend_mm_free_slot*)(BSWAPPTR((uintptr_t)slot ^ key));
#endif
}
1348
1374
@@ -2045,7 +2071,7 @@ static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZE
2045
2071
2046
2072
/* Re-randomize the free-slot shadow key.
 * shadow_key lives in the read-only companion page, which zend_mm_init()
 * seals with PROT_READ — so the protection must be lifted for the write and
 * restored afterwards, otherwise any refresh after init (e.g. on pid change)
 * faults on the read-only page. */
static void zend_mm_refresh_key(zend_mm_heap *heap)
{
	mprotect(ZEND_MM_RO_HEAP(heap), zend_mm_get_page_size(), PROT_READ | PROT_WRITE);
	zend_random_bytes_insecure(&heap->rand_state,
		&ZEND_MM_RO_HEAP(heap)->shadow_key, sizeof(ZEND_MM_RO_HEAP(heap)->shadow_key));
	mprotect(ZEND_MM_RO_HEAP(heap), zend_mm_get_page_size(), PROT_READ);
}
2050
2076
2051
2077
static void zend_mm_init_key (zend_mm_heap * heap )
@@ -2056,16 +2082,15 @@ static void zend_mm_init_key(zend_mm_heap *heap)
2056
2082
2057
2083
static zend_mm_heap * zend_mm_init (void )
2058
2084
{
2085
+ zend_mm_heap * heap = (zend_mm_heap * )zend_mm_chunk_alloc_int (zend_mm_get_page_size () * 2 , zend_mm_get_page_size ());
2059
2086
zend_mm_chunk * chunk = (zend_mm_chunk * )zend_mm_chunk_alloc_int (ZEND_MM_CHUNK_SIZE , ZEND_MM_CHUNK_SIZE );
2060
- zend_mm_heap * heap ;
2061
2087
2062
2088
if (UNEXPECTED (chunk == NULL )) {
2063
2089
#if ZEND_MM_ERROR
2064
2090
fprintf (stderr , "Can't initialize heap\n" );
2065
2091
#endif
2066
2092
return NULL ;
2067
2093
}
2068
- heap = & chunk -> heap_slot ;
2069
2094
chunk -> heap = heap ;
2070
2095
chunk -> next = chunk ;
2071
2096
chunk -> prev = chunk ;
@@ -2103,6 +2128,9 @@ static zend_mm_heap *zend_mm_init(void)
2103
2128
#endif
2104
2129
heap -> huge_list = NULL ;
2105
2130
heap -> pid = getpid ();
2131
+
2132
+ mprotect (ZEND_MM_RO_HEAP (heap ), zend_mm_get_page_size (), PROT_READ );
2133
+
2106
2134
return heap ;
2107
2135
}
2108
2136
@@ -2431,7 +2459,7 @@ void zend_mm_shutdown(zend_mm_heap *heap, bool full, bool silent)
2431
2459
2432
2460
#if ZEND_MM_CUSTOM
2433
2461
if (heap -> use_custom_heap ) {
2434
- if (heap -> custom_heap ._malloc == tracked_malloc ) {
2462
+ if (ZEND_MM_RO_HEAP ( heap ) -> custom_heap ._malloc == tracked_malloc ) {
2435
2463
if (silent ) {
2436
2464
tracked_free_all ();
2437
2465
}
@@ -2440,13 +2468,15 @@ void zend_mm_shutdown(zend_mm_heap *heap, bool full, bool silent)
2440
2468
zend_hash_destroy (heap -> tracked_allocs );
2441
2469
free (heap -> tracked_allocs );
2442
2470
/* Make sure the heap free below does not use tracked_free(). */
2443
- heap -> custom_heap ._free = __zend_free ;
2471
+ mprotect (ZEND_MM_RO_HEAP (heap ), zend_mm_get_page_size (), PROT_WRITE );
2472
+ ZEND_MM_RO_HEAP (heap )-> custom_heap ._free = __zend_free ;
2473
+ mprotect (ZEND_MM_RO_HEAP (heap ), zend_mm_get_page_size (), PROT_READ );
2444
2474
}
2445
2475
heap -> size = 0 ;
2446
2476
}
2447
2477
2448
2478
if (full ) {
2449
- heap -> custom_heap ._free (heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC );
2479
+ ZEND_MM_RO_HEAP ( heap ) -> custom_heap ._free (heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC );
2450
2480
}
2451
2481
return ;
2452
2482
}
@@ -2511,7 +2541,7 @@ void zend_mm_shutdown(zend_mm_heap *heap, bool full, bool silent)
2511
2541
2512
2542
/* reinitialize the first chunk and heap */
2513
2543
p = heap -> main_chunk ;
2514
- p -> heap = & p -> heap_slot ;
2544
+ /* heap_slot was removed from zend_mm_chunk: the heap now lives in its own
+  * page-aligned mapping, so p->heap is intentionally left untouched here. */
2515
2545
p -> next = p ;
2516
2546
p -> prev = p ;
2517
2547
p -> free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE ;
@@ -2575,7 +2605,7 @@ ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr
2575
2605
{
2576
2606
#if ZEND_MM_CUSTOM
2577
2607
if (UNEXPECTED (heap -> use_custom_heap )) {
2578
- if (heap -> custom_heap ._malloc == tracked_malloc ) {
2608
+ if (ZEND_MM_RO_HEAP ( heap ) -> custom_heap ._malloc == tracked_malloc ) {
2579
2609
zend_ulong h = ((uintptr_t ) ptr ) >> ZEND_MM_ALIGNMENT_LOG2 ;
2580
2610
zval * size_zv = zend_hash_index_find (heap -> tracked_allocs , h );
2581
2611
if (size_zv ) {
@@ -2618,7 +2648,7 @@ ZEND_API bool is_zend_ptr(const void *ptr)
2618
2648
{
2619
2649
#if ZEND_MM_CUSTOM
2620
2650
if (AG (mm_heap )-> use_custom_heap ) {
2621
- if (AG (mm_heap )-> custom_heap ._malloc == tracked_malloc ) {
2651
+ if (ZEND_MM_RO_HEAP ( AG (mm_heap ) )-> custom_heap ._malloc == tracked_malloc ) {
2622
2652
zend_ulong h = ((uintptr_t ) ptr ) >> ZEND_MM_ALIGNMENT_LOG2 ;
2623
2653
zval * size_zv = zend_hash_index_find (AG (mm_heap )-> tracked_allocs , h );
2624
2654
if (size_zv ) {
@@ -2661,12 +2691,12 @@ ZEND_API bool is_zend_ptr(const void *ptr)
2661
2691
#if ZEND_MM_CUSTOM
2662
2692
# define ZEND_MM_CUSTOM_ALLOCATOR (size ) do { \
2663
2693
if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2664
- return AG(mm_heap)->custom_heap._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2694
+ return ZEND_MM_RO_HEAP( AG(mm_heap) )->custom_heap._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2665
2695
} \
2666
2696
} while (0)
2667
2697
# define ZEND_MM_CUSTOM_DEALLOCATOR (ptr ) do { \
2668
2698
if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2669
- AG(mm_heap)->custom_heap._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2699
+ ZEND_MM_RO_HEAP( AG(mm_heap) )->custom_heap._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2670
2700
return; \
2671
2701
} \
2672
2702
} while (0)
@@ -2762,7 +2792,7 @@ ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LI
2762
2792
{
2763
2793
#if ZEND_MM_CUSTOM
2764
2794
if (UNEXPECTED (AG (mm_heap )-> use_custom_heap )) {
2765
- return AG (mm_heap )-> custom_heap ._malloc (size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC ); \
2795
+ return ZEND_MM_RO_HEAP ( AG (mm_heap ) )-> custom_heap ._malloc (size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC ); \
2766
2796
}
2767
2797
#endif
2768
2798
return zend_mm_alloc_heap (AG (mm_heap ), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC );
@@ -2772,7 +2802,7 @@ ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_OR
2772
2802
{
2773
2803
#if ZEND_MM_CUSTOM
2774
2804
if (UNEXPECTED (AG (mm_heap )-> use_custom_heap )) {
2775
- AG (mm_heap )-> custom_heap ._free (ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC );
2805
+ ZEND_MM_RO_HEAP ( AG (mm_heap ) )-> custom_heap ._free (ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC );
2776
2806
return ;
2777
2807
}
2778
2808
#endif
@@ -2783,7 +2813,7 @@ ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC
2783
2813
{
2784
2814
#if ZEND_MM_CUSTOM
2785
2815
if (UNEXPECTED (AG (mm_heap )-> use_custom_heap )) {
2786
- return AG (mm_heap )-> custom_heap ._realloc (ptr , size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC );
2816
+ return ZEND_MM_RO_HEAP ( AG (mm_heap ) )-> custom_heap ._realloc (ptr , size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC );
2787
2817
}
2788
2818
#endif
2789
2819
return zend_mm_realloc_heap (AG (mm_heap ), ptr , size , 0 , size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC );
@@ -2793,7 +2823,7 @@ ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size
2793
2823
{
2794
2824
#if ZEND_MM_CUSTOM
2795
2825
if (UNEXPECTED (AG (mm_heap )-> use_custom_heap )) {
2796
- return AG (mm_heap )-> custom_heap ._realloc (ptr , size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC );
2826
+ return ZEND_MM_RO_HEAP ( AG (mm_heap ) )-> custom_heap ._realloc (ptr , size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC );
2797
2827
}
2798
2828
#endif
2799
2829
return zend_mm_realloc_heap (AG (mm_heap ), ptr , size , 1 , copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC );
@@ -3057,25 +3087,28 @@ static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
3057
3087
tmp = getenv ("USE_ZEND_ALLOC" );
3058
3088
if (tmp && !ZEND_ATOL (tmp )) {
3059
3089
bool tracked = (tmp = getenv ("USE_TRACKED_ALLOC" )) && ZEND_ATOL (tmp );
3060
- zend_mm_heap * mm_heap = alloc_globals -> mm_heap = malloc (sizeof (zend_mm_heap ));
3090
+ //TODO(jvoisin) fix this
3091
+ zend_mm_heap * mm_heap = alloc_globals -> mm_heap = malloc (zend_mm_get_page_size () * 2 );
3061
3092
memset (mm_heap , 0 , sizeof (zend_mm_heap ));
3062
3093
mm_heap -> use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD ;
3063
3094
mm_heap -> limit = (size_t )Z_L (-1 ) >> 1 ;
3064
3095
mm_heap -> overflow = 0 ;
3065
3096
3097
+ mprotect (ZEND_MM_RO_HEAP (mm_heap ), zend_mm_get_page_size (), PROT_WRITE );
3066
3098
if (!tracked ) {
3067
3099
/* Use system allocator. */
3068
- mm_heap -> custom_heap ._malloc = __zend_malloc ;
3069
- mm_heap -> custom_heap ._free = __zend_free ;
3070
- mm_heap -> custom_heap ._realloc = __zend_realloc ;
3100
+ ZEND_MM_RO_HEAP ( mm_heap ) -> custom_heap ._malloc = __zend_malloc ;
3101
+ ZEND_MM_RO_HEAP ( mm_heap ) -> custom_heap ._free = __zend_free ;
3102
+ ZEND_MM_RO_HEAP ( mm_heap ) -> custom_heap ._realloc = __zend_realloc ;
3071
3103
} else {
3072
3104
/* Use system allocator and track allocations for auto-free. */
3073
- mm_heap -> custom_heap ._malloc = tracked_malloc ;
3074
- mm_heap -> custom_heap ._free = tracked_free ;
3075
- mm_heap -> custom_heap ._realloc = tracked_realloc ;
3105
+ ZEND_MM_RO_HEAP ( mm_heap ) -> custom_heap ._malloc = tracked_malloc ;
3106
+ ZEND_MM_RO_HEAP ( mm_heap ) -> custom_heap ._free = tracked_free ;
3107
+ ZEND_MM_RO_HEAP ( mm_heap ) -> custom_heap ._realloc = tracked_realloc ;
3076
3108
mm_heap -> tracked_allocs = malloc (sizeof (HashTable ));
3077
3109
zend_hash_init (mm_heap -> tracked_allocs , 1024 , NULL , NULL , 1 );
3078
3110
}
3111
+ mprotect (ZEND_MM_RO_HEAP (mm_heap ), zend_mm_get_page_size (), PROT_READ );
3079
3112
return ;
3080
3113
}
3081
3114
#endif
@@ -3145,9 +3178,11 @@ ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
3145
3178
_heap -> use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE ;
3146
3179
} else {
3147
3180
_heap -> use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD ;
3148
- _heap -> custom_heap ._malloc = _malloc ;
3149
- _heap -> custom_heap ._free = _free ;
3150
- _heap -> custom_heap ._realloc = _realloc ;
3181
+ mprotect (ZEND_MM_RO_HEAP (_heap ), zend_mm_get_page_size (), PROT_WRITE );
3182
+ ZEND_MM_RO_HEAP (_heap )-> custom_heap ._malloc = _malloc ;
3183
+ ZEND_MM_RO_HEAP (_heap )-> custom_heap ._free = _free ;
3184
+ ZEND_MM_RO_HEAP (_heap )-> custom_heap ._realloc = _realloc ;
3185
+ mprotect (ZEND_MM_RO_HEAP (_heap ), zend_mm_get_page_size (), PROT_READ );
3151
3186
}
3152
3187
#endif
3153
3188
}
@@ -3161,9 +3196,9 @@ ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
3161
3196
zend_mm_heap * _heap = (zend_mm_heap * )heap ;
3162
3197
3163
3198
if (heap -> use_custom_heap ) {
3164
- * _malloc = _heap -> custom_heap ._malloc ;
3165
- * _free = _heap -> custom_heap ._free ;
3166
- * _realloc = _heap -> custom_heap ._realloc ;
3199
+ * _malloc = ZEND_MM_RO_HEAP ( _heap ) -> custom_heap ._malloc ;
3200
+ * _free = ZEND_MM_RO_HEAP ( _heap ) -> custom_heap ._free ;
3201
+ * _realloc = ZEND_MM_RO_HEAP ( _heap ) -> custom_heap ._realloc ;
3167
3202
} else {
3168
3203
* _malloc = NULL ;
3169
3204
* _free = NULL ;
@@ -3195,7 +3230,7 @@ ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void
3195
3230
#if ZEND_MM_STORAGE
3196
3231
zend_mm_storage tmp_storage , * storage ;
3197
3232
zend_mm_chunk * chunk ;
3198
- zend_mm_heap * heap ;
3233
+ zend_mm_heap * heap = ( zend_mm_heap * ) zend_mm_chunk_alloc_int ( REAL_PAGE_SIZE * 2 , REAL_PAGE_SIZE ) ;
3199
3234
3200
3235
memcpy ((zend_mm_handlers * )& tmp_storage .handlers , handlers , sizeof (zend_mm_handlers ));
3201
3236
tmp_storage .data = data ;
@@ -3206,7 +3241,6 @@ ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void
3206
3241
#endif
3207
3242
return NULL ;
3208
3243
}
3209
- heap = & chunk -> heap_slot ;
3210
3244
chunk -> heap = heap ;
3211
3245
chunk -> next = chunk ;
3212
3246
chunk -> prev = chunk ;
0 commit comments