Commit 3de0353

Merge branch.

2 parents: f70a254 + 5271b67

9 files changed (+96, -38 lines)

ChangeLog

Lines changed: 11 additions & 0 deletions
@@ -4,6 +4,17 @@ brevity. Much more detail can be found in the git revision history:

     https://github.com/jemalloc/jemalloc

+* 4.2.1 (June 8, 2016)
+
+  Bug fixes:
+  - Fix bootstrapping issues for configurations that require allocation during
+    tsd initialization (e.g. --disable-tls).  (@cferris1000, @jasone)
+  - Fix gettimeofday() version of nstime_update().  (@ronawho)
+  - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper().  (@ronawho)
+  - Fix potential VM map fragmentation regression.  (@jasone)
+  - Fix opt_zero-triggered in-place huge reallocation zeroing.  (@jasone)
+  - Fix heap profiling context leaks in reallocation edge cases.  (@jasone)
+
 * 4.2.0 (May 12, 2016)

   New features:

include/jemalloc/internal/jemalloc_internal.h.in

Lines changed: 2 additions & 2 deletions
@@ -754,7 +754,7 @@ sa2u(size_t size, size_t alignment)
	 * Calculate the size of the over-size run that arena_palloc()
	 * would need to allocate in order to guarantee the alignment.
	 */
-	if (usize + large_pad + alignment <= arena_maxrun)
+	if (usize + large_pad + alignment - PAGE <= arena_maxrun)
		return (usize);
 }

@@ -784,7 +784,7 @@ sa2u(size_t size, size_t alignment)
	 * Calculate the multi-chunk mapping that huge_palloc() would need in
	 * order to guarantee the alignment.
	 */
-	if (usize + alignment < usize) {
+	if (usize + alignment - PAGE < usize) {
		/* size_t overflow. */
		return (0);
	}
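The two hunks above (and the matching changes in src/arena.c and src/chunk_mmap.c below) shave one page off the worst-case over-allocation used to guarantee alignment, which is the "VM map fragmentation regression" fix from the ChangeLog. The key fact is that the underlying mapping is already page-aligned, so the pad needed to reach an alignment boundary is a multiple of PAGE and at most alignment - PAGE. The following standalone C program is not jemalloc code; it hardwires PAGE to 4 KiB and checks that bound exhaustively for one size/alignment pair:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE ((size_t)4096)	/* assumed page size for the demo */

int
main(void)
{
	size_t alignment = 8 * PAGE;
	size_t usize = 5 * PAGE;
	size_t base;

	/* Try every page-aligned base address modulo alignment. */
	for (base = 0; base < alignment; base += PAGE) {
		/* Pad needed to reach the next alignment boundary. */
		size_t pad = (alignment - base % alignment) % alignment;

		/* The worst case is alignment - PAGE, never alignment. */
		assert(pad <= alignment - PAGE);
		/* So usize + alignment - PAGE always covers pad + usize. */
		assert(pad + usize <= usize + alignment - PAGE);
	}
	printf("over-allocating alignment - PAGE bytes always suffices\n");
	return (0);
}

A side benefit is that the overflow guard `usize + alignment - PAGE < usize` trips one page later than the old form, so huge requests with large alignments are slightly less likely to fail spuriously.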

include/jemalloc/internal/prof.h

Lines changed: 1 addition & 0 deletions
@@ -513,6 +513,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
		 * though its actual usize was insufficient to cross the
		 * sample threshold.
		 */
+		prof_alloc_rollback(tsd, tctx, true);
		tctx = (prof_tctx_t *)(uintptr_t)1U;
	}
 }
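This one-line addition corresponds to the "heap profiling context leaks in reallocation edge cases" entry in the ChangeLog: when a sampled tctx is discarded because the actual usize never crossed the sample threshold, the speculative accounting done at prep time has to be unwound before tctx is overwritten with the non-sampled sentinel. A rough, self-contained sketch of the prep/rollback pattern follows; the counter and function names are hypothetical, not jemalloc's actual prof machinery:

#include <stdbool.h>
#include <stdio.h>

static long bytes_until_sample = 4096;	/* hypothetical threshold state */

static bool
sample_prep(long est_size)
{
	bytes_until_sample -= est_size;	/* speculative decrement */
	return (bytes_until_sample <= 0);
}

static void
sample_rollback(long est_size)
{
	bytes_until_sample += est_size;	/* undo the speculation */
}

int
main(void)
{
	long est = 8192, actual = 512;

	if (sample_prep(est) && actual < est) {
		/* The actual size never crossed the threshold: roll back. */
		sample_rollback(est);
	}
	printf("bytes_until_sample = %ld\n", bytes_until_sample);
	return (0);
}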

src/arena.c

Lines changed: 1 addition & 1 deletion
@@ -2687,7 +2687,7 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
		return (NULL);

	alignment = PAGE_CEILING(alignment);
-	alloc_size = usize + large_pad + alignment;
+	alloc_size = usize + large_pad + alignment - PAGE;

	malloc_mutex_lock(tsdn, &arena->lock);
	run = arena_run_alloc_large(tsdn, arena, alloc_size, false);

src/chunk.c

Lines changed: 66 additions & 22 deletions
@@ -421,15 +421,11 @@ chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
 }

 static void *
-chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
-    bool *commit, unsigned arena_ind)
+chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+    size_t size, size_t alignment, bool *zero, bool *commit)
 {
	void *ret;
-	tsdn_t *tsdn;
-	arena_t *arena;

-	tsdn = tsdn_fetch();
-	arena = chunk_arena_get(tsdn, arena_ind);
	ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
	    commit, arena->dss_prec);
	if (ret == NULL)

@@ -440,6 +436,20 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
	return (ret);
 }

+static void *
+chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool *commit, unsigned arena_ind)
+{
+	tsdn_t *tsdn;
+	arena_t *arena;
+
+	tsdn = tsdn_fetch();
+	arena = chunk_arena_get(tsdn, arena_ind);
+
+	return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
+	    zero, commit));
+}
+
 static void *
 chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)

@@ -472,14 +482,23 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
	ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
	    alignment, zero, commit);
	if (ret == NULL) {
-		ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
-		    commit, arena->ind);
+		if (chunk_hooks->alloc == chunk_alloc_default) {
+			/* Call directly to propagate tsdn. */
+			ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
+			    size, alignment, zero, commit);
+		} else {
+			ret = chunk_hooks->alloc(new_addr, size, alignment,
+			    zero, commit, arena->ind);
+		}
+
		if (ret == NULL)
			return (NULL);
+
+		if (config_valgrind && chunk_hooks->alloc !=
+		    chunk_alloc_default)
+			JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
	}

-	if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
	return (ret);
 }

@@ -591,19 +610,30 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 }

 static bool
-chunk_dalloc_default(void *chunk, size_t size, bool committed,
-    unsigned arena_ind)
+chunk_dalloc_default_impl(tsdn_t *tsdn, void *chunk, size_t size)
 {

-	if (!have_dss || !chunk_in_dss(tsdn_fetch(), chunk))
+	if (!have_dss || !chunk_in_dss(tsdn, chunk))
		return (chunk_dalloc_mmap(chunk, size));
	return (true);
 }

+static bool
+chunk_dalloc_default(void *chunk, size_t size, bool committed,
+    unsigned arena_ind)
+{
+	tsdn_t *tsdn;
+
+	tsdn = tsdn_fetch();
+
+	return (chunk_dalloc_default_impl(tsdn, chunk, size));
+}
+
 void
 chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *chunk, size_t size, bool zeroed, bool committed)
 {
+	bool err;

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);

@@ -612,7 +642,13 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,

	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
	/* Try to deallocate. */
-	if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
+	if (chunk_hooks->dalloc == chunk_dalloc_default) {
+		/* Call directly to propagate tsdn. */
+		err = chunk_dalloc_default_impl(tsdn, chunk, size);
+	} else
+		err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
+
+	if (!err)
		return;
	/* Try to decommit; purge if that fails. */
	if (committed) {

@@ -681,26 +717,34 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
 }

 static bool
-chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
-    bool committed, unsigned arena_ind)
+chunk_merge_default_impl(tsdn_t *tsdn, void *chunk_a, void *chunk_b)
 {

	if (!maps_coalesce)
		return (true);
-	if (have_dss) {
-		tsdn_t *tsdn = tsdn_fetch();
-		if (chunk_in_dss(tsdn, chunk_a) != chunk_in_dss(tsdn, chunk_b))
-			return (true);
-	}
+	if (have_dss && chunk_in_dss(tsdn, chunk_a) != chunk_in_dss(tsdn,
+	    chunk_b))
+		return (true);

	return (false);
 }

+static bool
+chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+    bool committed, unsigned arena_ind)
+{
+	tsdn_t *tsdn;
+
+	tsdn = tsdn_fetch();
+
+	return (chunk_merge_default_impl(tsdn, chunk_a, chunk_b));
+}
+
 static rtree_node_elm_t *
 chunks_rtree_node_alloc(size_t nelms)
 {

-	return ((rtree_node_elm_t *)base_alloc(tsdn_fetch(), nelms *
+	return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
	    sizeof(rtree_node_elm_t)));
 }
src/chunk_mmap.c

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
	void *ret;
	size_t alloc_size;

-	alloc_size = size + alignment;
+	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);

src/huge.c

Lines changed: 5 additions & 5 deletions
@@ -262,19 +262,19 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	/*
-	 * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
-	 * that it is possible to make correct junk/zero fill decisions below.
+	 * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
+	 * update extent's zeroed field, and zero as necessary.
	 */
-	is_zeroed_chunk = zero;
-
+	is_zeroed_chunk = false;
	if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
	    &is_zeroed_chunk))
		return (true);

	malloc_mutex_lock(tsdn, &arena->huge_mtx);
-	/* Update the size of the huge allocation. */
	huge_node_unset(ptr, node);
	extent_node_size_set(node, usize);
+	extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
+	    is_zeroed_chunk);
	huge_node_reset(tsdn, ptr, node);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
src/jemalloc.c

Lines changed: 4 additions & 4 deletions
@@ -1739,7 +1739,7 @@ je_calloc(size_t num, size_t size)
		ret = ialloc_body(num_size, true, &tsdn, &usize, true);
		ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
		UTRACE(0, num_size, ret);
-		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
+		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
	}

	return (ret);

@@ -2222,7 +2222,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
-	tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
+	tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
		    alignment, zero, tcache, arena, tctx);

@@ -2231,7 +2231,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
		    tcache, arena);
	}
	if (unlikely(p == NULL)) {
-		prof_alloc_rollback(tsd, tctx, true);
+		prof_alloc_rollback(tsd, tctx, false);
		return (NULL);
	}

@@ -2246,7 +2246,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
		 */
		*usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
-	prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
+	prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
	    old_usize, old_tctx);

	return (p);
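The je_calloc() change flips the final argument of JEMALLOC_VALGRIND_MALLOC to true, telling Valgrind that calloc'ed memory is zeroed and therefore defined; with false, Memcheck flagged reads of freshly calloc'ed memory as uninitialized. The sketch below shows the equivalent raw client request, VALGRIND_MALLOCLIKE_BLOCK, whose last parameter is is_zeroed. The HAVE_VALGRIND guard and my_calloc wrapper are assumptions for the demo, and a real allocator would issue this request only for memory Valgrind does not already track:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#ifdef HAVE_VALGRIND
#include <valgrind/valgrind.h>
#else
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) ((void)0)
#endif

static void *
my_calloc(size_t num, size_t size)
{
	void *ret;
	size_t total;

	/* Beware size_t wrap-around. */
	if (size != 0 && num > SIZE_MAX / size)
		return (NULL);
	total = num * size;

	ret = malloc(total);
	if (ret != NULL) {
		memset(ret, 0, total);
		/* is_zeroed must be 1: this memory is defined, not junk. */
		VALGRIND_MALLOCLIKE_BLOCK(ret, total, 0, 1);
	}
	return (ret);
}

int
main(void)
{
	free(my_calloc(4, 16));
	return (0);
}

The irallocx_prof() hunks are part of the same context-leak fix as the prof.h change: the "updated" argument passed to prof_alloc_prep(), prof_alloc_rollback(), and prof_realloc() becomes false, since at those points the bytes-until-sample state has not yet been committed for the new allocation.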

src/nstime.c

Lines changed: 5 additions & 3 deletions
@@ -128,9 +128,11 @@ nstime_update(nstime_t *time)
		time->ns = ts.tv_sec * BILLION + ts.tv_nsec;
	}
#else
-	struct timeval tv;
-	gettimeofday(&tv, NULL);
-	time->ns = tv.tv_sec * BILLION + tv.tv_usec * 1000;
+	{
+		struct timeval tv;
+		gettimeofday(&tv, NULL);
+		time->ns = tv.tv_sec * BILLION + tv.tv_usec * 1000;
+	}
#endif

	/* Handle non-monotonic clocks. */
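The fix wraps the gettimeofday() fallback in its own braced block, so its declaration gets a proper scope matching the other preprocessor branches of nstime_update(). For reference, a self-contained version of that fallback path; BILLION, the nstime_t layout, and the explicit uint64_t casts are assumptions for this demo, not necessarily jemalloc's exact definitions:

#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

#define BILLION UINT64_C(1000000000)

typedef struct { uint64_t ns; } nstime_t;

static void
nstime_update_gettimeofday(nstime_t *time)
{
	{
		struct timeval tv;

		gettimeofday(&tv, NULL);
		/* Seconds to ns, plus microseconds to ns. */
		time->ns = (uint64_t)tv.tv_sec * BILLION +
		    (uint64_t)tv.tv_usec * 1000;
	}
}

int
main(void)
{
	nstime_t t;

	nstime_update_gettimeofday(&t);
	printf("%llu ns since the epoch\n", (unsigned long long)t.ns);
	return (0);
}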
