@@ -195,6 +195,11 @@ typedef struct _zend_mm_free_slot zend_mm_free_slot;
 typedef struct _zend_mm_chunk zend_mm_chunk;
 typedef struct _zend_mm_huge_list zend_mm_huge_list;
 
+/*
+ * 0 means disabled
+ * 1 means huge pages
+ * 2 means transparent huge pages
+ */
 int zend_mm_use_huge_pages = 0;
 
 /*
@@ -229,6 +234,13 @@ int zend_mm_use_huge_pages = 0;
  * 2 for 5-8, 3 for 9-16 etc) see zend_alloc_sizes.h
  */
 
+/*
+ * For environments where mmap is expensive it can be
+ * worthwhile to avoid mmap/munmap churn by raising
+ * the minimum number of chunks in emalloc
+ */
+int zend_mm_min_chunks = 0;
+
 struct _zend_mm_heap {
 #if ZEND_MM_CUSTOM
 	int use_custom_heap;
@@ -462,7 +474,7 @@ static void *zend_mm_mmap(size_t size)
 	void *ptr;
 
 #ifdef MAP_HUGETLB
-	if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
+	if (zend_mm_use_huge_pages == 1 && size == ZEND_MM_CHUNK_SIZE) {
 		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
 		if (ptr != MAP_FAILED) {
 			return ptr;
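
The MAP_HUGETLB path above only applies in mode 1: it asks the kernel for an explicitly huge-page-backed mapping, which succeeds only when huge pages have been reserved (for example via vm.nr_hugepages), and on failure the allocator falls through to a normal mmap. A minimal standalone sketch of the same try-then-fall-back pattern, with an illustrative map_chunk() helper and an assumed 2 MB chunk size (not part of the patch):

/* Sketch only: try an explicit huge-page mapping, then fall back to a
 * normal anonymous mapping, mirroring zend_mm_mmap() in mode 1. */
#include <stdio.h>
#include <sys/mman.h>

#define CHUNK_SIZE (2 * 1024 * 1024) /* assumed chunk size */

static void *map_chunk(int use_huge_pages)
{
	void *ptr;
#ifdef MAP_HUGETLB
	if (use_huge_pages == 1) {
		ptr = mmap(NULL, CHUNK_SIZE, PROT_READ | PROT_WRITE,
		           MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
		if (ptr != MAP_FAILED) {
			return ptr; /* huge-page-backed chunk */
		}
		/* no reserved huge pages available: fall through to a normal mapping */
	}
#endif
	ptr = mmap(NULL, CHUNK_SIZE, PROT_READ | PROT_WRITE,
	           MAP_PRIVATE | MAP_ANON, -1, 0);
	return ptr == MAP_FAILED ? NULL : ptr;
}

int main(void)
{
	void *p = map_chunk(1);
	printf("chunk at %p\n", p);
	return p == NULL;
}
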
@@ -669,7 +681,7 @@ static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
 		return NULL;
 	} else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
 #ifdef MADV_HUGEPAGE
-		if (zend_mm_use_huge_pages) {
+		if (zend_mm_use_huge_pages == 2) {
 			madvise(ptr, size, MADV_HUGEPAGE);
 		}
 #endif
@@ -702,7 +714,7 @@ static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
 			zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
 		}
 # ifdef MADV_HUGEPAGE
-		if (zend_mm_use_huge_pages) {
+		if (zend_mm_use_huge_pages == 2) {
 			madvise(ptr, size, MADV_HUGEPAGE);
 		}
 # endif
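
Transparent huge pages (mode 2) take the opposite approach in the two hunks above: the chunk is mapped normally and madvise(MADV_HUGEPAGE) merely hints that the kernel's THP machinery may back the range with huge pages (relevant when THP is configured as "madvise"); the call is advisory and the mapping still works as ordinary small pages otherwise. A minimal sketch of that path, with an assumed 2 MB size (illustrative only):

/* Sketch only: normal anonymous mapping plus a THP hint, as in mode 2. */
#include <sys/mman.h>

int main(void)
{
	size_t size = 2 * 1024 * 1024; /* assumed chunk size */
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
	                 MAP_PRIVATE | MAP_ANON, -1, 0);
	if (ptr == MAP_FAILED) {
		return 1;
	}
#ifdef MADV_HUGEPAGE
	madvise(ptr, size, MADV_HUGEPAGE); /* advisory: kernel may use THP here */
#endif
	munmap(ptr, size);
	return 0;
}
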
@@ -2270,14 +2282,15 @@ void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
 		zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
 	} else {
 		/* free some cached chunks to keep average count */
-		heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
+		heap->avg_chunks_count = MAX((heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0, zend_mm_min_chunks);
 		while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
 		       heap->cached_chunks) {
 			p = heap->cached_chunks;
 			heap->cached_chunks = p->next;
 			zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
 			heap->cached_chunks_count--;
 		}
+
 		/* clear cached chunks */
 		p = heap->cached_chunks;
 		while (p != NULL) {
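
At request shutdown the allocator averages the previous running average with the latest peak chunk count, then frees cached chunks down to that average; wrapping the update in MAX() keeps the average, and therefore the cache, from dropping below zend_mm_min_chunks, so those chunks are not munmap'd only to be mmap'd again on the next request. A small numeric sketch of the update rule with hypothetical values (not taken from the patch):

/* Sketch only: the rolling average with a configurable floor. */
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	double avg_chunks = 1.0;     /* plays the role of heap->avg_chunks_count */
	int min_chunks = 4;          /* plays the role of zend_mm_min_chunks */
	int peaks[] = {6, 2, 1, 1};  /* hypothetical per-request peak_chunks_count */

	for (int i = 0; i < 4; i++) {
		/* same update as the patch: average of old average and latest peak,
		 * never allowed to fall below the configured minimum */
		avg_chunks = MAX((avg_chunks + (double)peaks[i]) / 2.0, min_chunks);
		printf("after request %d keep about %.1f cached chunks\n", i + 1, avg_chunks);
	}
	return 0;
}

Without the floor the average would decay toward one chunk after a few quiet requests and the cache would be released; with min_chunks = 4 it never drops below four.
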
@@ -2759,8 +2772,16 @@ static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
 #endif
 
 	tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
-	if (tmp && zend_atoi(tmp, 0)) {
-		zend_mm_use_huge_pages = 1;
+	if (tmp) {
+		zend_mm_use_huge_pages = zend_atoi(tmp, 0);
+		if (zend_mm_use_huge_pages > 2) {
+			zend_mm_use_huge_pages = 1;
+		}
+	}
+
+	tmp = getenv("USE_ZEND_MIN_CHUNKS");
+	if (tmp) {
+		zend_mm_min_chunks = zend_atoi(tmp, 0);
 	}
 	alloc_globals->mm_heap = zend_mm_init();
 }
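
With this hunk USE_ZEND_ALLOC_HUGE_PAGES is no longer treated as a plain boolean: its numeric value selects the mode directly, values above 2 fall back to 1, and the new USE_ZEND_MIN_CHUNKS variable feeds zend_mm_min_chunks. A standalone sketch of equivalent parsing using plain libc (atoi stands in for the PHP-internal zend_atoi; variable names are illustrative):

/* Sketch only: parse the two environment variables the way the patch does. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int use_huge_pages = 0; /* 0 disabled, 1 huge pages, 2 transparent huge pages */
	int min_chunks = 0;     /* minimum cached chunks to keep across requests */
	const char *tmp;

	tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
	if (tmp) {
		use_huge_pages = atoi(tmp);
		if (use_huge_pages > 2) {
			use_huge_pages = 1; /* out-of-range values fall back to explicit huge pages */
		}
	}

	tmp = getenv("USE_ZEND_MIN_CHUNKS");
	if (tmp) {
		min_chunks = atoi(tmp);
	}

	printf("huge pages mode=%d, min cached chunks=%d\n", use_huge_pages, min_chunks);
	return 0;
}

So, assuming the patch is applied, running PHP with USE_ZEND_ALLOC_HUGE_PAGES=2 and USE_ZEND_MIN_CHUNKS=8 would request THP-backed chunks and keep at least eight chunks cached between requests.
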