
Commit e9192ea: Merge branch 'dev'
2 parents: 486d249 + 0270968

7 files changed: 183 additions, 23 deletions

ChangeLog

Lines changed: 14 additions & 0 deletions

@@ -4,6 +4,20 @@ brevity. Much more detail can be found in the git revision history:
 
     https://github.com/jemalloc/jemalloc
 
+* 4.0.3 (September 24, 2015)
+
+  This bugfix release continues the trend of xallocx() and heap profiling fixes.
+
+  Bug fixes:
+  - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large
+    allocations when --enable-cache-oblivious configure option is enabled.
+  - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations
+    when resizing from/to a size class that is not a multiple of the chunk size.
+  - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap
+    profile dumping started.
+  - Work around a potentially bad thread-specific data initialization
+    interaction with NPTL (glibc's pthreads implementation).
+
 * 4.0.2 (September 21, 2015)
 
   This bugfix release addresses a few bugs specific to heap profiling.
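For context on the contract the first two fixes restore: xallocx() resizes an allocation in place, never moving it, and returns the resulting real size; with MALLOCX_ZERO, any bytes the resize exposes must read as zero. Below is a minimal caller-side sketch of that guarantee; the sizes are illustrative, and the check only runs if in-place growth actually succeeds.

#include <assert.h>
#include <string.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    size_t oldsize = 4096, newsize = 8192;  /* illustrative sizes */
    unsigned char *p = mallocx(oldsize, MALLOCX_ZERO);
    size_t i;

    assert(p != NULL);
    memset(p, 0x5a, oldsize);   /* dirty the original bytes */

    /* Attempt in-place growth; xallocx() never moves the allocation. */
    if (xallocx(p, newsize, 0, MALLOCX_ZERO) >= newsize) {
        /* The 4.0.3 fixes ensure every extended byte reads as zero. */
        for (i = oldsize; i < newsize; i++)
            assert(p[i] == 0);
    }
    dallocx(p, 0);
    return (0);
}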

src/arena.c

Lines changed: 10 additions & 0 deletions

@@ -2679,6 +2679,16 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     if (arena_run_split_large(arena, run, splitsize, zero))
         goto label_fail;
 
+    if (config_cache_oblivious && zero) {
+        /*
+         * Zero the trailing bytes of the original allocation's
+         * last page, since they are in an indeterminate state.
+         */
+        assert(PAGE_CEILING(oldsize) == oldsize);
+        memset((void *)((uintptr_t)ptr + oldsize), 0,
+            PAGE_CEILING((uintptr_t)ptr) - (uintptr_t)ptr);
+    }
+
     size = oldsize + splitsize;
     npages = (size + large_pad) >> LG_PAGE;
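A note on the memset length above: with --enable-cache-oblivious, large allocations start at a randomized cache-line offset within their first page, so ptr is generally not page-aligned. Because oldsize is page-aligned (hence the assert), ptr + oldsize sits at the same sub-page offset as ptr, and the indeterminate tail runs from there to the next page boundary, i.e. PAGE_CEILING((uintptr_t)ptr) - (uintptr_t)ptr bytes. A standalone sketch of that arithmetic, assuming a 4 KiB page and a hypothetical pointer value:

#include <stdint.h>
#include <stdio.h>

#define LG_PAGE         12      /* assume 4 KiB pages */
#define PAGE            ((uintptr_t)1 << LG_PAGE)
/* Same rounding jemalloc's PAGE_CEILING() performs. */
#define PAGE_CEILING(a) (((a) + PAGE - 1) & ~(PAGE - 1))

int
main(void)
{
    uintptr_t ptr = 0x7f0000000740; /* hypothetical offset 0x740 into the page */
    uintptr_t oldsize = 2 * PAGE;   /* page-aligned, as the assert requires */
    uintptr_t tail = PAGE_CEILING(ptr) - ptr;   /* 0x8c0 == 2240 bytes */

    /* The commit zeroes [ptr + oldsize, ptr + oldsize + tail). */
    printf("zero %#lx..%#lx (%lu bytes)\n", (unsigned long)(ptr + oldsize),
        (unsigned long)(ptr + oldsize + tail), (unsigned long)tail);
    return (0);
}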

src/huge.c

Lines changed: 16 additions & 14 deletions

@@ -133,7 +133,7 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
     extent_node_t *node;
     arena_t *arena;
     chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
-    bool zeroed;
+    bool pre_zeroed, post_zeroed;
 
     /* Increase usize to incorporate extra. */
     for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
@@ -145,34 +145,35 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
 
     node = huge_node_get(ptr);
     arena = extent_node_arena_get(node);
+    pre_zeroed = extent_node_zeroed_get(node);
 
     /* Fill if necessary (shrinking). */
     if (oldsize > usize) {
         size_t sdiff = oldsize - usize;
         if (config_fill && unlikely(opt_junk_free)) {
             memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
-            zeroed = false;
+            post_zeroed = false;
         } else {
-            zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, ptr,
-                CHUNK_CEILING(oldsize), usize, sdiff);
+            post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
+                ptr, CHUNK_CEILING(oldsize), usize, sdiff);
         }
     } else
-        zeroed = true;
+        post_zeroed = pre_zeroed;
 
     malloc_mutex_lock(&arena->huge_mtx);
     /* Update the size of the huge allocation. */
     assert(extent_node_size_get(node) != usize);
     extent_node_size_set(node, usize);
-    /* Clear node's zeroed field if zeroing failed above. */
-    extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
+    /* Update zeroed. */
+    extent_node_zeroed_set(node, post_zeroed);
     malloc_mutex_unlock(&arena->huge_mtx);
 
     arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
 
     /* Fill if necessary (growing). */
     if (oldsize < usize) {
         if (zero || (config_fill && unlikely(opt_zero))) {
-            if (!zeroed) {
+            if (!pre_zeroed) {
                 memset((void *)((uintptr_t)ptr + oldsize), 0,
                     usize - oldsize);
             }
@@ -190,10 +191,11 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
     arena_t *arena;
     chunk_hooks_t chunk_hooks;
     size_t cdiff;
-    bool zeroed;
+    bool pre_zeroed, post_zeroed;
 
     node = huge_node_get(ptr);
     arena = extent_node_arena_get(node);
+    pre_zeroed = extent_node_zeroed_get(node);
     chunk_hooks = chunk_hooks_get(arena);
 
     assert(oldsize > usize);
@@ -209,21 +211,21 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
         if (config_fill && unlikely(opt_junk_free)) {
             huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
                 sdiff);
-            zeroed = false;
+            post_zeroed = false;
         } else {
-            zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
+            post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
                 CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
                 CHUNK_CEILING(oldsize),
                 CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
         }
     } else
-        zeroed = true;
+        post_zeroed = pre_zeroed;
 
     malloc_mutex_lock(&arena->huge_mtx);
     /* Update the size of the huge allocation. */
     extent_node_size_set(node, usize);
-    /* Clear node's zeroed field if zeroing failed above. */
-    extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
+    /* Update zeroed. */
+    extent_node_zeroed_set(node, post_zeroed);
     malloc_mutex_unlock(&arena->huge_mtx);
 
     /* Zap the excess chunks. */
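The rename carries the fix: the old single zeroed flag conflated the memory's state before the resize with its state after. When no shrink occurred it was set to true unconditionally, so the grow path's if (!zeroed) test could skip zeroing a never-zeroed extent's newly exposed bytes. A toy sketch of the corrected grow-path decision, not jemalloc source:

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Toy analogue of extent_node_t's zeroed flag. */
typedef struct {
    int zeroed;
} node_t;

/*
 * Whether the new tail needs an explicit memset depends on the memory's
 * state before the resize (pre_zeroed), which the fix reads from the
 * node rather than from a local flag that was always true on this path.
 */
static void
grow_fill(node_t *node, unsigned char *ptr, size_t oldsize, size_t usize)
{
    int pre_zeroed = node->zeroed;

    if (!pre_zeroed)
        memset(ptr + oldsize, 0, usize - oldsize);
}

int
main(void)
{
    unsigned char buf[256];
    node_t node = {0};  /* the extent's memory was never zeroed */

    memset(buf, 0x5a, sizeof(buf));
    grow_fill(&node, buf, 128, sizeof(buf));
    assert(buf[128] == 0 && buf[255] == 0);
    return (0);
}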

src/prof.c

Lines changed: 17 additions & 5 deletions

@@ -1102,11 +1102,23 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
 {
     bool propagate_err = *(bool *)arg;
 
-    if (prof_dump_printf(propagate_err,
-        "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
-        tctx->thr_uid, tctx->dump_cnts.curobjs, tctx->dump_cnts.curbytes,
-        tctx->dump_cnts.accumobjs, tctx->dump_cnts.accumbytes))
-        return (tctx);
+    switch (tctx->state) {
+    case prof_tctx_state_initializing:
+    case prof_tctx_state_nominal:
+        /* Not captured by this dump. */
+        break;
+    case prof_tctx_state_dumping:
+    case prof_tctx_state_purgatory:
+        if (prof_dump_printf(propagate_err,
+            "  t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
+            "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
+            tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
+            tctx->dump_cnts.accumbytes))
+            return (tctx);
+        break;
+    default:
+        not_reached();
+    }
     return (NULL);
 }
 

src/tsd.c

Lines changed: 3 additions & 0 deletions

@@ -73,6 +73,9 @@ tsd_cleanup(void *arg)
     tsd_t *tsd = (tsd_t *)arg;
 
     switch (tsd->state) {
+    case tsd_state_uninitialized:
+        /* Do nothing. */
+        break;
     case tsd_state_nominal:
 #define O(n, t)                                                         \
         n##_cleanup(tsd);
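Context for the workaround: tsd_cleanup() runs as a thread-exit destructor, and under NPTL (glibc's pthreads) it can be invoked for a tsd that never progressed past the uninitialized state; tearing it down would touch state that was never set up. A simplified sketch of the defensive pattern, not jemalloc source; the key name and struct are hypothetical:

#include <pthread.h>
#include <stdlib.h>

typedef enum {
    STATE_UNINITIALIZED,
    STATE_NOMINAL
} state_t;

typedef struct {
    state_t state;
    /* ... per-thread data ... */
} tsd_t;

static pthread_key_t tsd_key;

static void
tsd_cleanup_sketch(void *arg)
{
    tsd_t *tsd = (tsd_t *)arg;

    /*
     * Mirror of the commit's guard: if the destructor fires for a tsd
     * that never reached the nominal state, do nothing rather than
     * tear down state that does not exist.
     */
    if (tsd->state == STATE_UNINITIALIZED)
        return;
    free(tsd);  /* normal per-thread teardown */
}

int
main(void)
{
    return (pthread_key_create(&tsd_key, tsd_cleanup_sketch) != 0);
}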

test/integration/mallocx.c

Lines changed: 14 additions & 3 deletions

@@ -52,9 +52,20 @@ TEST_BEGIN(test_oom)
 
     hugemax = get_huge_size(get_nhuge()-1);
 
-    /* In practice hugemax is too large to be allocated. */
-    assert_ptr_null(mallocx(hugemax, 0),
-        "Expected OOM for mallocx(size=%#zx, 0)", hugemax);
+    /*
+     * It should be impossible to allocate two objects that each consume
+     * more than half the virtual address space.
+     */
+    {
+        void *p;
+
+        p = mallocx(hugemax, 0);
+        if (p != NULL) {
+            assert_ptr_null(mallocx(hugemax, 0),
+                "Expected OOM for mallocx(size=%#zx, 0)", hugemax);
+            dallocx(p, 0);
+        }
+    }
 
 #if LG_SIZEOF_PTR == 3
     size = ZU(0x8000000000000000);

test/integration/xallocx.c

Lines changed: 109 additions & 1 deletion

@@ -347,6 +347,112 @@ TEST_BEGIN(test_extra_huge)
 }
 TEST_END
 
+static void
+print_filled_extents(const void *p, uint8_t c, size_t len)
+{
+    const uint8_t *pc = (const uint8_t *)p;
+    size_t i, range0;
+    uint8_t c0;
+
+    malloc_printf("  p=%p, c=%#x, len=%zu:", p, c, len);
+    range0 = 0;
+    c0 = pc[0];
+    for (i = 0; i < len; i++) {
+        if (pc[i] != c0) {
+            malloc_printf(" %#x[%zu..%zu)", c0, range0, i);
+            range0 = i;
+            c0 = pc[i];
+        }
+    }
+    malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i);
+}
+
+static bool
+validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
+{
+    const uint8_t *pc = (const uint8_t *)p;
+    bool err;
+    size_t i;
+
+    for (i = offset, err = false; i < offset+len; i++) {
+        if (pc[i] != c)
+            err = true;
+    }
+
+    if (err)
+        print_filled_extents(p, c, offset + len);
+
+    return (err);
+}
+
+static void
+test_zero(size_t szmin, size_t szmax)
+{
+    size_t sz, nsz;
+    void *p;
+#define FILL_BYTE 0x7aU
+
+    sz = szmax;
+    p = mallocx(sz, MALLOCX_ZERO);
+    assert_ptr_not_null(p, "Unexpected mallocx() error");
+    assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu",
+        sz);
+
+    /*
+     * Fill with non-zero so that non-debug builds are more likely to detect
+     * errors.
+     */
+    memset(p, FILL_BYTE, sz);
+    assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+        "Memory not filled: sz=%zu", sz);
+
+    /* Shrink in place so that we can expect growing in place to succeed. */
+    sz = szmin;
+    assert_zu_eq(xallocx(p, sz, 0, MALLOCX_ZERO), sz,
+        "Unexpected xallocx() error");
+    assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+        "Memory not filled: sz=%zu", sz);
+
+    for (sz = szmin; sz < szmax; sz = nsz) {
+        nsz = nallocx(sz+1, MALLOCX_ZERO);
+        assert_zu_eq(xallocx(p, sz+1, 0, MALLOCX_ZERO), nsz,
+            "Unexpected xallocx() failure");
+        assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+            "Memory not filled: sz=%zu", sz);
+        assert_false(validate_fill(p, 0x00, sz, nsz-sz),
+            "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz);
+        memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz);
+        assert_false(validate_fill(p, FILL_BYTE, 0, nsz),
+            "Memory not filled: nsz=%zu", nsz);
+    }
+
+    dallocx(p, 0);
+}
+
+TEST_BEGIN(test_zero_large)
+{
+    size_t large0, largemax;
+
+    /* Get size classes. */
+    large0 = get_large_size(0);
+    largemax = get_large_size(get_nlarge()-1);
+
+    test_zero(large0, largemax);
+}
+TEST_END
+
+TEST_BEGIN(test_zero_huge)
+{
+    size_t huge0, huge1;
+
+    /* Get size classes. */
+    huge0 = get_huge_size(0);
+    huge1 = get_huge_size(1);
+
+    test_zero(huge1, huge0 * 2);
+}
+TEST_END
+
 int
 main(void)
 {
@@ -359,5 +465,7 @@ main(void)
         test_size_extra_overflow,
         test_extra_small,
         test_extra_large,
-        test_extra_huge));
+        test_extra_huge,
+        test_zero_large,
+        test_zero_huge));
 }
