@@ -217,6 +217,8 @@ typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN]; /* 64B */
 
 #define ZEND_MM_BINS 30
 
+#define ZEND_MM_RO_HEAP(heap) ((zend_mm_ro_heap*)((char*)(heap) + REAL_PAGE_SIZE))
+
 #if defined(_MSC_VER)
 # if UINTPTR_MAX == UINT64_MAX
 #  define BSWAPPTR(u) _byteswap_uint64(u)
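The new ZEND_MM_RO_HEAP(heap) macro is pure pointer arithmetic: it assumes the heap occupies two adjacent pages, with the writable zend_mm_heap in the first and the read-only zend_mm_ro_heap starting exactly one page (REAL_PAGE_SIZE) later. A minimal standalone sketch of that layout, assuming POSIX mmap/mprotect (illustration only, not patch code):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t page_size = (size_t)sysconf(_SC_PAGESIZE);

    /* Two adjacent pages: [ writable heap | read-only heap ] */
    char *base = mmap(NULL, page_size * 2, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) {
        return 1;
    }

    char *ro_part = base + page_size;        /* what ZEND_MM_RO_HEAP(heap) computes */
    strcpy(ro_part, "sensitive");            /* still writable during initialization */

    mprotect(ro_part, page_size, PROT_READ); /* seal the second page */
    printf("%s\n", ro_part);                 /* reads still work */
    /* ro_part[0] = 'X'; would now fault with SIGSEGV */

    munmap(base, page_size * 2);
    return 0;
}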
@@ -263,6 +265,23 @@ typedef struct _zend_mm_huge_list zend_mm_huge_list;
 
 static bool zend_mm_use_huge_pages = false;
 
+#define ZEND_MM_DEFAULT_PAGE_SIZE 4096
+
+static size_t zend_mm_get_page_size(void)
+{
+	static size_t page_size = 0;
+
+	if (!page_size) {
+		page_size = zend_get_page_size();
+		if (!page_size || (page_size & (page_size - 1))) {
+			/* anyway, we have to return a valid result */
+			page_size = ZEND_MM_DEFAULT_PAGE_SIZE;
+		}
+	}
+
+	return page_size;
+}
+
 /*
  * Memory is retrieved from OS by chunks of fixed size 2MB.
  * Inside chunk it's managed by pages of fixed size 4096B.
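The `page_size & (page_size - 1)` test is the usual power-of-two check: a power of two has a single bit set, so ANDing it with its predecessor yields zero (4096 & 4095 == 0), while any other value leaves stray bits behind and triggers the 4 KiB fallback. The result is cached in a function-local static so the OS is queried only once.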
@@ -306,7 +325,6 @@ struct _zend_mm_heap {
 	size_t             size;                    /* current memory usage */
 	size_t             peak;                    /* peak memory usage */
 #endif
-	uintptr_t          shadow_key;              /* free slot shadow ptr xor key */
 	zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
 #if ZEND_MM_STAT || ZEND_MM_LIMIT
 	size_t             real_size;               /* current size of allocated pages */
@@ -329,16 +347,25 @@ struct _zend_mm_heap {
329
347
double avg_chunks_count ; /* average number of chunks allocated per request */
330
348
int last_chunks_delete_boundary ; /* number of chunks after last deletion */
331
349
int last_chunks_delete_count ; /* number of deletion over the last boundary */
350
+ #if ZEND_MM_CUSTOM
351
+ HashTable * tracked_allocs ;
352
+ #endif
353
+ pid_t pid ;
354
+ zend_random_bytes_insecure_state rand_state ;
355
+ };
356
+
357
+ /* This contains security-sensitive data, and is thus mapped as read-only at run-time right after the _zend_mm_heap struct
358
+ * and accessed via the ZEND_MM_RO_HEAP macro.*/
359
+ struct _zend_mm_ro_heap {
360
+ uintptr_t shadow_key ; /* free slot shadow ptr xor key */
361
+
332
362
#if ZEND_MM_CUSTOM
333
363
struct {
334
364
void * (* _malloc )(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC );
335
365
void (* _free )(void * ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC );
336
366
void * (* _realloc )(void * , size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC );
337
367
} custom_heap ;
338
- HashTable * tracked_allocs ;
339
368
#endif
340
- pid_t pid ;
341
- zend_random_bytes_insecure_state rand_state ;
342
369
};
343
370
344
371
struct _zend_mm_chunk {
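The fields moved into _zend_mm_ro_heap are precisely the ones a heap-corruption exploit targets: shadow_key protects the encoded free-list next pointers, and custom_heap holds function pointers that the allocator calls on every emalloc/efree. Once the page is sealed with PROT_READ, overwriting them through an out-of-bounds write faults immediately instead of succeeding silently. The mutable bookkeeping (tracked_allocs, pid, rand_state) stays in the writable zend_mm_heap.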
@@ -349,7 +376,6 @@ struct _zend_mm_chunk {
 	uint32_t           free_tail;               /* number of free pages at the end of chunk */
 	uint32_t           num;
 	char               reserve[64 - (sizeof(void*) * 3 + sizeof(uint32_t) * 3)];
-	zend_mm_heap       heap_slot;               /* used only in main chunk */
 	zend_mm_page_map   free_map;                /* 512 bits or 64 bytes */
 	zend_mm_page_info  map[ZEND_MM_PAGES];      /* 2 KB = 512 * 4 */
 };
@@ -1331,18 +1357,18 @@ static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
 static zend_always_inline zend_mm_free_slot* zend_mm_encode_free_slot(const zend_mm_heap *heap, const zend_mm_free_slot *slot)
 {
 #ifdef WORDS_BIGENDIAN
-	return (zend_mm_free_slot*)(((uintptr_t)slot) ^ heap->shadow_key);
+	return (zend_mm_free_slot*)(((uintptr_t)slot) ^ ZEND_MM_RO_HEAP(heap)->shadow_key);
 #else
-	return (zend_mm_free_slot*)(BSWAPPTR((uintptr_t)slot) ^ heap->shadow_key);
+	return (zend_mm_free_slot*)(BSWAPPTR((uintptr_t)slot) ^ ZEND_MM_RO_HEAP(heap)->shadow_key);
 #endif
 }
 
 static zend_always_inline zend_mm_free_slot* zend_mm_decode_free_slot(zend_mm_heap *heap, zend_mm_free_slot *slot)
 {
 #ifdef WORDS_BIGENDIAN
-	return (zend_mm_free_slot*)((uintptr_t)slot ^ heap->shadow_key);
+	return (zend_mm_free_slot*)((uintptr_t)slot ^ ZEND_MM_RO_HEAP(heap)->shadow_key);
 #else
-	return (zend_mm_free_slot*)(BSWAPPTR((uintptr_t)slot ^ heap->shadow_key));
+	return (zend_mm_free_slot*)(BSWAPPTR((uintptr_t)slot ^ ZEND_MM_RO_HEAP(heap)->shadow_key));
 #endif
 }
 
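Encode and decode stay symmetric under the new macro: on little-endian builds a free-slot pointer is byte-swapped and XORed with the (now read-only) shadow key, and decoding applies the same operations in reverse. A standalone round-trip sketch of the 64-bit little-endian path, using a local bswap64 as a stand-in for BSWAPPTR (illustration only, not patch code):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for BSWAPPTR on a 64-bit build. */
static uint64_t bswap64(uint64_t u)
{
    u = ((u & 0x00ff00ff00ff00ffULL) << 8)  | ((u >> 8)  & 0x00ff00ff00ff00ffULL);
    u = ((u & 0x0000ffff0000ffffULL) << 16) | ((u >> 16) & 0x0000ffff0000ffffULL);
    return (u << 32) | (u >> 32);
}

int main(void)
{
    uint64_t shadow_key = 0xa5a5deadbeefa5a5ULL; /* in the patch: random, sealed read-only */
    uint64_t slot       = 0x00007f0000402000ULL; /* pretend free-slot address */

    /* Mirrors the little-endian encode/decode paths in the diff above. */
    uint64_t encoded = bswap64(slot) ^ shadow_key;
    uint64_t decoded = bswap64(encoded ^ shadow_key);

    /* Round-trips cleanly, while a forged next pointer is useless without the key. */
    printf("round trip ok: %d\n", decoded == slot);
    return 0;
}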
@@ -2045,7 +2071,9 @@ static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
 
 static void zend_mm_refresh_key(zend_mm_heap *heap)
 {
-	zend_random_bytes_insecure(&heap->rand_state, &heap->shadow_key, sizeof(heap->shadow_key));
+	mprotect(ZEND_MM_RO_HEAP(heap), zend_mm_get_page_size(), PROT_WRITE);
+	zend_random_bytes_insecure(&heap->rand_state, &(ZEND_MM_RO_HEAP(heap)->shadow_key), sizeof(ZEND_MM_RO_HEAP(heap)->shadow_key));
+	mprotect(ZEND_MM_RO_HEAP(heap), zend_mm_get_page_size(), PROT_READ);
 }
 
 static void zend_mm_init_key(zend_mm_heap *heap)
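Every writer of the sealed page now follows the same unlock/write/relock dance. Two caveats: mprotect replaces the permission set rather than adding to it, so the window is briefly write-only (readable in practice on x86, where writable implies readable, but PROT_READ|PROT_WRITE would be the portable choice), and the return values go unchecked here. A helper sketch along those lines, an assumption rather than patch code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static int write_sealed(void *ro_page, size_t page_size,
                        size_t offset, const void *src, size_t len)
{
    if (mprotect(ro_page, page_size, PROT_READ | PROT_WRITE) != 0) {
        return -1;
    }
    memcpy((char *)ro_page + offset, src, len); /* only writable inside the window */
    return mprotect(ro_page, page_size, PROT_READ);
}

int main(void)
{
    size_t ps = (size_t)sysconf(_SC_PAGESIZE);
    void *page = mmap(NULL, ps, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    uint64_t key = 0x1122334455667788ULL;

    if (page == MAP_FAILED || write_sealed(page, ps, 0, &key, sizeof(key)) != 0) {
        return 1;
    }
    printf("sealed key: %llx\n", (unsigned long long)*(uint64_t *)page);
    munmap(page, ps);
    return 0;
}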
@@ -2056,16 +2084,15 @@ static void zend_mm_init_key(zend_mm_heap *heap)
 
 static zend_mm_heap *zend_mm_init(void)
 {
+	zend_mm_heap *heap = (zend_mm_heap *)zend_mm_chunk_alloc_int(zend_mm_get_page_size() * 2, zend_mm_get_page_size());
 	zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
-	zend_mm_heap *heap;
 
 	if (UNEXPECTED(chunk == NULL)) {
 #if ZEND_MM_ERROR
 		fprintf(stderr, "Can't initialize heap\n");
 #endif
 		return NULL;
 	}
-	heap = &chunk->heap_slot;
 	chunk->heap = heap;
 	chunk->next = chunk;
 	chunk->prev = chunk;
@@ -2103,6 +2130,9 @@ static zend_mm_heap *zend_mm_init(void)
 #endif
 	heap->huge_list = NULL;
 	heap->pid = getpid();
+
+	mprotect(ZEND_MM_RO_HEAP(heap), zend_mm_get_page_size(), PROT_READ);
+
 	return heap;
 }
 
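zend_mm_init now gives the heap its own page-aligned two-page block via zend_mm_chunk_alloc_int instead of borrowing the chunk's former heap_slot, and it seals the second page only after every field, shadow key included, has been initialized. Note that unlike chunk, the heap allocation's result is never checked for NULL before use.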
@@ -2431,7 +2461,7 @@ void zend_mm_shutdown(zend_mm_heap *heap, bool full, bool silent)
 
 #if ZEND_MM_CUSTOM
 	if (heap->use_custom_heap) {
-		if (heap->custom_heap._malloc == tracked_malloc) {
+		if (ZEND_MM_RO_HEAP(heap)->custom_heap._malloc == tracked_malloc) {
 			if (silent) {
 				tracked_free_all();
 			}
@@ -2440,13 +2470,15 @@ void zend_mm_shutdown(zend_mm_heap *heap, bool full, bool silent)
 			zend_hash_destroy(heap->tracked_allocs);
 			free(heap->tracked_allocs);
 			/* Make sure the heap free below does not use tracked_free(). */
-			heap->custom_heap._free = __zend_free;
+			mprotect(ZEND_MM_RO_HEAP(heap), zend_mm_get_page_size(), PROT_WRITE);
+			ZEND_MM_RO_HEAP(heap)->custom_heap._free = __zend_free;
+			mprotect(ZEND_MM_RO_HEAP(heap), zend_mm_get_page_size(), PROT_READ);
 		}
 		heap->size = 0;
 	}
 
 	if (full) {
-		heap->custom_heap._free(heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
+		ZEND_MM_RO_HEAP(heap)->custom_heap._free(heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
 	}
 	return;
 }
@@ -2511,7 +2543,7 @@ void zend_mm_shutdown(zend_mm_heap *heap, bool full, bool silent)
 
 	/* reinitialize the first chunk and heap */
 	p = heap->main_chunk;
-	p->heap = &p->heap_slot;
+	// p->heap = &p->heap_slot;
 	p->next = p;
 	p->prev = p;
 	p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
@@ -2575,7 +2607,7 @@ ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
 {
 #if ZEND_MM_CUSTOM
 	if (UNEXPECTED(heap->use_custom_heap)) {
-		if (heap->custom_heap._malloc == tracked_malloc) {
+		if (ZEND_MM_RO_HEAP(heap)->custom_heap._malloc == tracked_malloc) {
 			zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
 			zval *size_zv = zend_hash_index_find(heap->tracked_allocs, h);
 			if (size_zv) {
@@ -2618,7 +2650,7 @@ ZEND_API bool is_zend_ptr(const void *ptr)
 {
 #if ZEND_MM_CUSTOM
 	if (AG(mm_heap)->use_custom_heap) {
-		if (AG(mm_heap)->custom_heap._malloc == tracked_malloc) {
+		if (ZEND_MM_RO_HEAP(AG(mm_heap))->custom_heap._malloc == tracked_malloc) {
 			zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
 			zval *size_zv = zend_hash_index_find(AG(mm_heap)->tracked_allocs, h);
 			if (size_zv) {
@@ -2661,12 +2693,12 @@ ZEND_API bool is_zend_ptr(const void *ptr)
 #if ZEND_MM_CUSTOM
 # define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
 		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
-			return AG(mm_heap)->custom_heap._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
+			return ZEND_MM_RO_HEAP(AG(mm_heap))->custom_heap._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
 		} \
 	} while (0)
 # define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
 		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
-			AG(mm_heap)->custom_heap._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
+			ZEND_MM_RO_HEAP(AG(mm_heap))->custom_heap._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
 			return; \
 		} \
 	} while (0)
@@ -2762,7 +2794,7 @@ ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
 {
 #if ZEND_MM_CUSTOM
 	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
-		return AG(mm_heap)->custom_heap._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
+		return ZEND_MM_RO_HEAP(AG(mm_heap))->custom_heap._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
 	}
 #endif
 	return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
@@ -2772,7 +2804,7 @@ ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
 {
 #if ZEND_MM_CUSTOM
 	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
-		AG(mm_heap)->custom_heap._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
+		ZEND_MM_RO_HEAP(AG(mm_heap))->custom_heap._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
 		return;
 	}
 #endif
@@ -2783,7 +2815,7 @@ ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
 {
 #if ZEND_MM_CUSTOM
 	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
-		return AG(mm_heap)->custom_heap._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
+		return ZEND_MM_RO_HEAP(AG(mm_heap))->custom_heap._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
 	}
 #endif
 	return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
@@ -2793,7 +2825,7 @@ ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
 {
 #if ZEND_MM_CUSTOM
 	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
-		return AG(mm_heap)->custom_heap._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
+		return ZEND_MM_RO_HEAP(AG(mm_heap))->custom_heap._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
 	}
 #endif
 	return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
@@ -3057,25 +3089,28 @@ static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
 	tmp = getenv("USE_ZEND_ALLOC");
 	if (tmp && !ZEND_ATOL(tmp)) {
 		bool tracked = (tmp = getenv("USE_TRACKED_ALLOC")) && ZEND_ATOL(tmp);
-		zend_mm_heap *mm_heap = alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
+		//TODO(jvoisin) fix this
+		zend_mm_heap *mm_heap = alloc_globals->mm_heap = malloc(zend_mm_get_page_size() * 2);
 		memset(mm_heap, 0, sizeof(zend_mm_heap));
 		mm_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
 		mm_heap->limit = (size_t)Z_L(-1) >> 1;
 		mm_heap->overflow = 0;
 
+		mprotect(ZEND_MM_RO_HEAP(mm_heap), zend_mm_get_page_size(), PROT_WRITE);
 		if (!tracked) {
 			/* Use system allocator. */
-			mm_heap->custom_heap._malloc = __zend_malloc;
-			mm_heap->custom_heap._free = __zend_free;
-			mm_heap->custom_heap._realloc = __zend_realloc;
+			ZEND_MM_RO_HEAP(mm_heap)->custom_heap._malloc = __zend_malloc;
+			ZEND_MM_RO_HEAP(mm_heap)->custom_heap._free = __zend_free;
+			ZEND_MM_RO_HEAP(mm_heap)->custom_heap._realloc = __zend_realloc;
 		} else {
 			/* Use system allocator and track allocations for auto-free. */
-			mm_heap->custom_heap._malloc = tracked_malloc;
-			mm_heap->custom_heap._free = tracked_free;
-			mm_heap->custom_heap._realloc = tracked_realloc;
+			ZEND_MM_RO_HEAP(mm_heap)->custom_heap._malloc = tracked_malloc;
+			ZEND_MM_RO_HEAP(mm_heap)->custom_heap._free = tracked_free;
+			ZEND_MM_RO_HEAP(mm_heap)->custom_heap._realloc = tracked_realloc;
 			mm_heap->tracked_allocs = malloc(sizeof(HashTable));
 			zend_hash_init(mm_heap->tracked_allocs, 1024, NULL, NULL, 1);
 		}
+		mprotect(ZEND_MM_RO_HEAP(mm_heap), zend_mm_get_page_size(), PROT_READ);
 		return;
 	}
 #endif
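The TODO above flags a real problem: malloc() makes no page-alignment promise, and POSIX requires the address passed to mprotect() to be page-aligned, so the two mprotect calls in this custom-heap path can fail with EINVAL. One possible shape for a fix, assuming posix_memalign is acceptable here (an assumption, not the patch's solution):

#include <stdlib.h>
#include <unistd.h>

static void *alloc_two_aligned_pages(void)
{
    size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
    void *p = NULL;

    /* Page-aligned, so mprotect() on the second page is valid. */
    if (posix_memalign(&p, page_size, page_size * 2) != 0) {
        return NULL;
    }
    return p;
}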
@@ -3145,9 +3180,11 @@ ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
 		_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
 	} else {
 		_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
-		_heap->custom_heap._malloc = _malloc;
-		_heap->custom_heap._free = _free;
-		_heap->custom_heap._realloc = _realloc;
+		mprotect(ZEND_MM_RO_HEAP(_heap), zend_mm_get_page_size(), PROT_WRITE);
+		ZEND_MM_RO_HEAP(_heap)->custom_heap._malloc = _malloc;
+		ZEND_MM_RO_HEAP(_heap)->custom_heap._free = _free;
+		ZEND_MM_RO_HEAP(_heap)->custom_heap._realloc = _realloc;
+		mprotect(ZEND_MM_RO_HEAP(_heap), zend_mm_get_page_size(), PROT_READ);
 	}
 #endif
 }
@@ -3161,9 +3198,9 @@ ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
 	zend_mm_heap *_heap = (zend_mm_heap *)heap;
 
 	if (heap->use_custom_heap) {
-		*_malloc = _heap->custom_heap._malloc;
-		*_free = _heap->custom_heap._free;
-		*_realloc = _heap->custom_heap._realloc;
+		*_malloc = ZEND_MM_RO_HEAP(_heap)->custom_heap._malloc;
+		*_free = ZEND_MM_RO_HEAP(_heap)->custom_heap._free;
+		*_realloc = ZEND_MM_RO_HEAP(_heap)->custom_heap._realloc;
 	} else {
 		*_malloc = NULL;
 		*_free = NULL;
@@ -3195,7 +3232,7 @@ ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data)
 #if ZEND_MM_STORAGE
 	zend_mm_storage tmp_storage, *storage;
 	zend_mm_chunk *chunk;
-	zend_mm_heap *heap;
+	zend_mm_heap *heap = (zend_mm_heap *)zend_mm_chunk_alloc_int(REAL_PAGE_SIZE * 2, REAL_PAGE_SIZE);
 
 	memcpy((zend_mm_handlers*)&tmp_storage.handlers, handlers, sizeof(zend_mm_handlers));
 	tmp_storage.data = data;
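Note the mixed units: this function and the ZEND_MM_RO_HEAP macro size things with REAL_PAGE_SIZE, while zend_mm_init sizes its mapping with zend_mm_get_page_size(). The layout only holds if the two always agree; if they ever diverged, the read-only struct would sit outside the mapping allocated here.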
@@ -3206,7 +3243,6 @@ ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data)
 #endif
 		return NULL;
 	}
-	heap = &chunk->heap_slot;
 	chunk->heap = heap;
 	chunk->next = chunk;
 	chunk->prev = chunk;