@@ -195,11 +195,6 @@ typedef struct _zend_mm_free_slot zend_mm_free_slot;
 typedef struct _zend_mm_chunk zend_mm_chunk;
 typedef struct _zend_mm_huge_list zend_mm_huge_list;
 
-/*
- * 0 means disabled
- * 1 means huge pages
- * 2 means transparent huge pages
- */
 int zend_mm_use_huge_pages = 0;
 
 /*
@@ -234,13 +229,6 @@ int zend_mm_use_huge_pages = 0;
  * 2 for 5-8, 3 for 9-16 etc) see zend_alloc_sizes.h
  */
 
-/*
- * For environments where mmap is expensive it can be
- * worthwhile to avoid mmap/munmap churn by raising
- * the minimum number of chunks in emalloc
- */
-int zend_mm_min_chunks = 0;
-
 struct _zend_mm_heap {
 #if ZEND_MM_CUSTOM
 	int use_custom_heap;
@@ -474,7 +462,7 @@ static void *zend_mm_mmap(size_t size)
 	void *ptr;
 
 #ifdef MAP_HUGETLB
-	if (zend_mm_use_huge_pages == 1 && size == ZEND_MM_CHUNK_SIZE) {
+	if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
 		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
 		if (ptr != MAP_FAILED) {
 			return ptr;
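
With the tri-state gone, the gate above collapses from == 1 to a plain truthiness test. For context, the surrounding function uses a common hugetlb-then-fallback mmap pattern; the following is a minimal standalone sketch of that pattern under assumed Linux headers, not the zend_alloc.c code itself (alloc_chunk is a hypothetical name):

    #include <sys/mman.h>
    #include <stddef.h>

    /* Hypothetical helper, for illustration only: try an explicit
     * huge-page mapping first, then fall back to a normal anonymous
     * mapping. */
    static void *alloc_chunk(size_t size, int use_huge_pages)
    {
    #ifdef MAP_HUGETLB
        if (use_huge_pages) {
            void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
            if (ptr != MAP_FAILED) {
                return ptr;
            }
            /* MAP_HUGETLB fails unless the kernel has hugetlb pages
             * reserved, so the fallback below is mandatory. */
        }
    #endif
        void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANON, -1, 0);
        return ptr == MAP_FAILED ? NULL : ptr;
    }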
@@ -681,7 +669,7 @@ static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
 		return NULL;
 	} else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
 #ifdef MADV_HUGEPAGE
-		if (zend_mm_use_huge_pages == 2) {
+		if (zend_mm_use_huge_pages) {
 			madvise(ptr, size, MADV_HUGEPAGE);
 		}
 #endif
@@ -714,7 +702,7 @@ static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
 			zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
 		}
 # ifdef MADV_HUGEPAGE
-		if (zend_mm_use_huge_pages == 2) {
+		if (zend_mm_use_huge_pages) {
 			madvise(ptr, size, MADV_HUGEPAGE);
 		}
 # endif
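
Both madvise hunks reduce the same way: any enabled setting now also requests transparent huge pages on chunk-aligned mappings. Unlike MAP_HUGETLB, MADV_HUGEPAGE is only a hint to the kernel's THP machinery and needs no reserved hugetlb pool, so its failure is safe to ignore. A hedged standalone sketch of that pattern, with a hypothetical helper name:

    #include <sys/mman.h>
    #include <stddef.h>

    /* Hypothetical helper: ask the kernel to back an existing mapping
     * with transparent huge pages. Advisory only; the kernel may
     * ignore the request, and an error here is non-fatal. */
    static void advise_thp(void *ptr, size_t size)
    {
    #ifdef MADV_HUGEPAGE
        (void)madvise(ptr, size, MADV_HUGEPAGE);
    #else
        (void)ptr;
        (void)size;
    #endif
    }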
@@ -2282,15 +2270,14 @@ void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
 		zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
 	} else {
 		/* free some cached chunks to keep average count */
-		heap->avg_chunks_count = MAX((heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0, zend_mm_min_chunks);
+		heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
 		while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
 		       heap->cached_chunks) {
 			p = heap->cached_chunks;
 			heap->cached_chunks = p->next;
 			zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
 			heap->cached_chunks_count--;
 		}
-
 		/* clear cached chunks */
 		p = heap->cached_chunks;
 		while (p != NULL) {
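
The retained arithmetic is an exponential moving average: after each request the average moves halfway toward the latest peak chunk count, and the loop releases cached chunks until the cache fits under that average (the + 0.9 slack keeps one extra chunk when the average has a fractional part). A standalone sketch of the same decay, with hypothetical names:

    /* Hypothetical standalone version of the trimming logic above. */
    static void trim_chunk_cache(double *avg_chunks, unsigned peak_chunks,
                                 unsigned *cached_chunks)
    {
        /* Halve the distance between the running average and this
         * request's peak chunk usage. */
        *avg_chunks = (*avg_chunks + (double)peak_chunks) / 2.0;
        /* Release cached chunks until the cache fits under the average;
         * the 0.9 slack avoids freeing a chunk the average still covers. */
        while ((double)*cached_chunks + 0.9 > *avg_chunks && *cached_chunks > 0) {
            /* ... a real implementation returns one chunk to the OS here ... */
            (*cached_chunks)--;
        }
    }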
@@ -2772,16 +2759,8 @@ static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
 #endif
 
 	tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
-	if (tmp) {
-		zend_mm_use_huge_pages = zend_atoi(tmp, 0);
-		if (zend_mm_use_huge_pages > 2) {
-			zend_mm_use_huge_pages = 1;
-		}
-	}
-
-	tmp = getenv("USE_ZEND_MIN_CHUNKS");
-	if (tmp) {
-		zend_mm_min_chunks = zend_atoi(tmp, 0);
+	if (tmp && zend_atoi(tmp, 0)) {
+		zend_mm_use_huge_pages = 1;
 	}
 	alloc_globals->mm_heap = zend_mm_init();
 }
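
Net effect of the last hunk: USE_ZEND_ALLOC_HUGE_PAGES is read as a plain boolean, so any value zend_atoi() parses as non-zero (for example USE_ZEND_ALLOC_HUGE_PAGES=1 in the environment) enables huge pages, and the separate USE_ZEND_MIN_CHUNKS variable is no longer consulted.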