@@ -367,29 +367,19 @@ struct _zend_mm_huge_list {
 #define ZEND_MM_PAGE_ADDR(chunk, page_num) \
     ((void*)(((zend_mm_page*)(chunk)) + (page_num)))

-#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) \
-    /* Need two words for free slot pointer and shadow */ \
-    (ZEND_MM_HEAP_PROTECTION ? MAX(size, sizeof(zend_mm_free_slot*)*2) : size)
-#define _BIN_DATA_SIZE_C(num, size, elements, pages, x, y) \
-    _BIN_DATA_SIZE(num, size, elements, pages, x, y),
+#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
 static const uint32_t bin_data_size[] = {
-    ZEND_MM_BINS_INFO(_BIN_DATA_SIZE_C, x, y)
+    ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
 };

-#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) \
-    /* Adjusting size requires adjusting elements */ \
-    (elements / (_BIN_DATA_SIZE(num, size, elements, pages, x, y) / size))
-#define _BIN_DATA_ELEMENTS_C(num, size, elements, pages, x, y) \
-    _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y),
+#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
 static const uint32_t bin_elements[] = {
-    ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS_C, x, y)
+    ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
 };

-#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages
-#define _BIN_DATA_PAGES_C(num, size, elements, pages, x, y) \
-    _BIN_DATA_PAGES(num, size, elements, pages, x, y),
+#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
 static const uint32_t bin_pages[] = {
-    ZEND_MM_BINS_INFO(_BIN_DATA_PAGES_C, x, y)
+    ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
 };

 #if ZEND_DEBUG
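Note: the three tables above are built with an X-macro. ZEND_MM_BINS_INFO (defined in Zend/zend_alloc_sizes.h) applies whatever selector macro it is handed to each bin row, so a selector that expands directly to "size," / "elements," / "pages," is enough to emit the whole initializer list, and the separate *_C comma-appending wrappers become unnecessary once the heap-protection size adjustment is gone. A minimal self-contained sketch of the pattern, with made-up demo rows rather than the real bin table:

    #include <stdint.h>

    /* Made-up bin rows in the same X-macro style as ZEND_MM_BINS_INFO(_, x, y);
     * the real rows and values live in Zend/zend_alloc_sizes.h. */
    #define DEMO_BINS_INFO(_, x, y) \
        _(0,  8, 512, 1, x, y) \
        _(1, 16, 256, 1, x, y) \
        _(2, 24, 170, 1, x, y)

    /* Each selector keeps one column of a row and drops the rest. */
    #define DEMO_BIN_SIZE(num, size, elements, pages, x, y)     size,
    #define DEMO_BIN_ELEMENTS(num, size, elements, pages, x, y) elements,
    #define DEMO_BIN_PAGES(num, size, elements, pages, x, y)    pages,

    static const uint32_t demo_bin_size[]     = { DEMO_BINS_INFO(DEMO_BIN_SIZE, x, y) };     /* { 8, 16, 24, }     */
    static const uint32_t demo_bin_elements[] = { DEMO_BINS_INFO(DEMO_BIN_ELEMENTS, x, y) }; /* { 512, 256, 170, } */
    static const uint32_t demo_bin_pages[]    = { DEMO_BINS_INFO(DEMO_BIN_PAGES, x, y) };    /* { 1, 1, 1, }       */

The unused x and y arguments are passed through untouched and never appear in the expansion, so the selectors can simply ignore them.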
@@ -1343,6 +1333,8 @@ static zend_always_inline zend_mm_free_slot* zend_mm_decode_free_slot(zend_mm_he

 static zend_always_inline void zend_mm_set_next_free_slot(zend_mm_heap *heap, uint32_t bin_num, zend_mm_free_slot *slot, zend_mm_free_slot *next)
 {
+    ZEND_ASSERT(bin_data_size[bin_num] >= ZEND_MM_MIN_SMALL_SIZE);
+
     slot->next_free_slot = next;
     ZEND_MM_FREE_SLOT_PTR_SHADOW(slot, bin_num) = zend_mm_encode_free_slot(heap, next);
 }
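Note: the new assertion records the invariant that makes heap protection work: a free slot must be able to hold the next-free-slot pointer plus its encoded shadow copy. The removed _BIN_DATA_SIZE macro guaranteed this by rounding small bins up to two pointer words; it is now guaranteed by a minimum request size instead. The sketch below is illustrative only, with invented demo_* names; it assumes the shadow occupies the last pointer-sized word of the slot and is XOR-encoded with a per-heap key, which this diff does not actually show for ZEND_MM_FREE_SLOT_PTR_SHADOW and zend_mm_encode_free_slot.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-ins, not the zend_alloc API: a slot keeps its next
     * pointer at offset 0 and an obfuscated copy ("shadow") in its last
     * pointer-sized word, so corruption of either copy is detectable. */
    typedef struct demo_slot { struct demo_slot *next; } demo_slot;

    #define DEMO_SHADOW(slot, slot_size) \
        (*(uintptr_t*)((char*)(slot) + (slot_size) - sizeof(uintptr_t)))

    static void demo_set_next(demo_slot *slot, size_t slot_size,
                              demo_slot *next, uintptr_t key)
    {
        /* The slot must hold two pointer-sized words: next and its shadow. */
        assert(slot_size >= 2 * sizeof(demo_slot*));
        slot->next = next;
        DEMO_SHADOW(slot, slot_size) = (uintptr_t)next ^ key;
    }

    static demo_slot *demo_get_next_checked(demo_slot *slot, size_t slot_size,
                                            uintptr_t key)
    {
        demo_slot *next = slot->next;
        /* A corrupted free list is caught here: the plain pointer and the
         * decoded shadow must agree. */
        assert((uintptr_t)next == (DEMO_SHADOW(slot, slot_size) ^ key));
        return next;
    }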
@@ -1424,6 +1416,8 @@ static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, uint

 static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
 {
+    ZEND_ASSERT(bin_data_size[bin_num] >= ZEND_MM_MIN_SMALL_SIZE);
+
 #if ZEND_MM_STAT
     do {
         size_t size = heap->size + bin_data_size[bin_num];
@@ -1444,6 +1438,8 @@ static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, int bin_

 static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
 {
+    ZEND_ASSERT(bin_data_size[bin_num] >= ZEND_MM_MIN_SMALL_SIZE);
+
     zend_mm_free_slot *p;

 #if ZEND_MM_STAT
@@ -1493,6 +1489,11 @@ static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_hea
 static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
 {
     void *ptr;
+#if ZEND_MM_HEAP_PROTECTION
+    if (size < ZEND_MM_MIN_SMALL_SIZE) {
+        size = ZEND_MM_MIN_SMALL_SIZE;
+    }
+#endif /* ZEND_MM_HEAP_PROTECTION */
 #if ZEND_DEBUG
     size_t real_size = size;
     zend_mm_debug_info *dbg;
@@ -1714,6 +1715,11 @@ static zend_always_inline void *zend_mm_realloc_heap(zend_mm_heap *heap, void *p
         zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
         int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
         zend_mm_page_info info = chunk->map[page_num];
+#if ZEND_MM_HEAP_PROTECTION
+        if (size < ZEND_MM_MIN_SMALL_SIZE) {
+            size = ZEND_MM_MIN_SMALL_SIZE;
+        }
+#endif /* ZEND_MM_HEAP_PROTECTION */
 #if ZEND_DEBUG
         size_t real_size = size;

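Note: with the bin sizes no longer adjusted, the minimum has to be enforced at the request level instead, which is what the two ZEND_MM_HEAP_PROTECTION blocks above do: zend_mm_alloc_heap() and zend_mm_realloc_heap() round any smaller request up to ZEND_MM_MIN_SMALL_SIZE before a bin is selected, so every small allocation satisfies the assertions added earlier. A minimal sketch of the clamp, using a stand-in value because the definition of ZEND_MM_MIN_SMALL_SIZE is not part of this diff (two pointer-sized words is the plausible lower bound given the free-slot shadow):

    #include <stddef.h>

    /* Stand-in for ZEND_MM_MIN_SMALL_SIZE: room for the free-list pointer plus
     * its shadow (an assumption, not taken from this diff). */
    #define DEMO_MIN_SMALL_SIZE (2 * sizeof(void*))

    static size_t demo_clamp_request(size_t size)
    {
        return size < DEMO_MIN_SMALL_SIZE ? DEMO_MIN_SMALL_SIZE : size;
    }

    /* demo_clamp_request(1) == 16 on a typical 64-bit build, so even a one-byte
     * request is served from a slot large enough for the protected metadata. */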
@@ -2677,6 +2683,7 @@ ZEND_API bool is_zend_ptr(const void *ptr)

 # define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
     ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
+        ZEND_ASSERT(_size >= ZEND_MM_MIN_SMALL_SIZE); \
         ZEND_MM_CUSTOM_ALLOCATOR(_size); \
         return zend_mm_alloc_small(AG(mm_heap), _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
     }
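Note: _ZEND_BIN_ALLOCATOR is expanded once per bin row by ZEND_MM_BINS_INFO, so the added ZEND_ASSERT is stamped into every specialized _emalloc_<size>() entry point via token pasting. Assuming an example first row of (num 0, size 8) — the real rows come from zend_alloc_sizes.h — the macro would expand roughly to the following (not standalone code; it relies on zend_alloc.c's own macros):

    ZEND_API void* ZEND_FASTCALL _emalloc_8(void) {
        ZEND_ASSERT(8 >= ZEND_MM_MIN_SMALL_SIZE);
        ZEND_MM_CUSTOM_ALLOCATOR(8);
        return zend_mm_alloc_small(AG(mm_heap), 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    }

ZEND_ASSERT only performs the runtime check in debug builds, so the specialized fast paths are unaffected in release builds.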
@@ -2698,6 +2705,7 @@ ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
 #if ZEND_DEBUG
 # define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
     ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
+        ZEND_ASSERT(_size >= ZEND_MM_MIN_SMALL_SIZE); \
         ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
         { \
             size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
@@ -2712,6 +2720,7 @@ ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
 #else
 # define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
     ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
+        ZEND_ASSERT(_size >= ZEND_MM_MIN_SMALL_SIZE); \
         ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
         { \
             zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \