Update jemalloc to version 3.4.0.

FreeBSD - jasone [FreeBSD.org] - 3 June 2013 09:36 UTC




diff --git a/contrib/jemalloc/ChangeLog b/contrib/jemalloc/ChangeLog
index 6578225..8ab8848 100644
--- a/contrib/jemalloc/ChangeLog
+++ b/contrib/jemalloc/ChangeLog
@@ -6,6 +6,47 @@ found in the git revision history:
http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
git://canonware.com/jemalloc.git

+* 3.4.0 (June 2, 2013)
+
+ This version is essentially a small bugfix release, but the addition of
+ aarch64 support requires that the minor version be incremented.
+
+ Bug fixes:
+ - Fix race-triggered deadlocks in chunk_record(). These deadlocks were
+ typically triggered by multiple threads concurrently deallocating huge
+ objects.
+
+ New features:
+ - Add support for the aarch64 architecture.
+
+* 3.3.1 (March 6, 2013)
+
+ This version fixes bugs that are typically encountered only when utilizing
+ custom run-time options.
+
+ Bug fixes:
+ - Fix a locking order bug that could cause deadlock during fork if heap
+ profiling were enabled.
+ - Fix a chunk recycling bug that could cause the allocator to lose track of
+ whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause
+ corruption if allocating via sbrk(2) (unlikely unless running with the
+ "dss:primary" option specified). This was completely harmless on Linux
+ unless using mlockall(2) (and unlikely even then, unless the
+ --disable-munmap configure option or the "dss:primary" option was
+ specified). This regression was introduced in 3.1.0 by the
+ mlockall(2)/madvise(2) interaction fix.
+ - Fix TLS-related memory corruption that could occur during thread exit if the
+ thread never allocated memory. Only the quarantine and prof facilities were
+ susceptible.
+ - Fix two quarantine bugs:
+ + Internal reallocation of the quarantined object array leaked the old
+ array.
+ + Reallocation failure for internal reallocation of the quarantined object
+ array (very unlikely) resulted in memory corruption.
+ - Fix Valgrind integration to annotate all internally allocated memory in a
+ way that keeps Valgrind happy about internal data structure access.
+ - Fix building for s390 systems.
+
* 3.3.0 (January 23, 2013)

This version includes a few minor performance improvements in addition to the
diff --git a/contrib/jemalloc/FREEBSD-diffs b/contrib/jemalloc/FREEBSD-diffs
index de2549a..499fd0f 100644
--- a/contrib/jemalloc/FREEBSD-diffs
+++ b/contrib/jemalloc/FREEBSD-diffs
@@ -1,5 +1,5 @@
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index 0930580..d45fa3d 100644
+index abd5e6f..1d7491a 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -51,12 +51,23 @@
@@ -27,7 +27,7 @@ index 0930580..d45fa3d 100644

Standard API

-@@ -2173,4 +2184,16 @@ malloc_conf = "lg_chunk:24";]]>
+@@ -2180,4 +2191,16 @@ malloc_conf = "lg_chunk:24";]]>
The posix_memalign function conforms
to IEEE Std 1003.1-2001 (“POSIX.1”).


@@ -45,7 +45,7 @@ index 0930580..d45fa3d 100644
+

diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
-index c606c12..0d46d9d 100644
+index e46ac54..527449d 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -1,5 +1,8 @@
@@ -97,7 +97,7 @@ index de44e14..564d604 100644

bool malloc_mutex_init(malloc_mutex_t *mutex);
diff --git a/include/jemalloc/internal/private_namespace.h b/include/jemalloc/internal/private_namespace.h
-index 903fb4d..d6638df 100644
+index 65de316..366676b 100644
--- a/include/jemalloc/internal/private_namespace.h
+++ b/include/jemalloc/internal/private_namespace.h
@@ -216,7 +216,6 @@
@@ -122,10 +122,10 @@ index 31b1304..c3ef2f5 100644
#define ALLOCM_LG_ALIGN(la) (la)
diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h
new file mode 100644
-index 0000000..9c97a13
+index 0000000..e6c8407
--- /dev/null
+++ b/include/jemalloc/jemalloc_FreeBSD.h
-@@ -0,0 +1,76 @@
+@@ -0,0 +1,117 @@
+/*
+ * Override settings that were generated in jemalloc_defs.h as necessary.
+ */
@@ -196,14 +196,55 @@ index 0000000..9c97a13
+#define isthreaded ((bool)__isthreaded)
+
+/* Mangle. */
++#undef je_malloc
++#undef je_calloc
++#undef je_realloc
++#undef je_free
++#undef je_posix_memalign
++#undef je_malloc_usable_size
++#undef je_allocm
++#undef je_rallocm
++#undef je_sallocm
++#undef je_dallocm
++#undef je_nallocm
++#define je_malloc __malloc
++#define je_calloc __calloc
++#define je_realloc __realloc
++#define je_free __free
++#define je_posix_memalign __posix_memalign
++#define je_malloc_usable_size __malloc_usable_size
++#define je_allocm __allocm
++#define je_rallocm __rallocm
++#define je_sallocm __sallocm
++#define je_dallocm __dallocm
++#define je_nallocm __nallocm
+#define open _open
+#define read _read
+#define write _write
+#define close _close
+#define pthread_mutex_lock _pthread_mutex_lock
+#define pthread_mutex_unlock _pthread_mutex_unlock
++
++#ifdef JEMALLOC_C_
++/*
++ * Define 'weak' symbols so that an application can have its own versions
++ * of malloc, calloc, realloc, free, et al.
++ */
++__weak_reference(__malloc, malloc);
++__weak_reference(__calloc, calloc);
++__weak_reference(__realloc, realloc);
++__weak_reference(__free, free);
++__weak_reference(__posix_memalign, posix_memalign);
++__weak_reference(__malloc_usable_size, malloc_usable_size);
++__weak_reference(__allocm, allocm);
++__weak_reference(__rallocm, rallocm);
++__weak_reference(__sallocm, sallocm);
++__weak_reference(__dallocm, dallocm);
++__weak_reference(__nallocm, nallocm);
++#endif
++
diff --git a/src/jemalloc.c b/src/jemalloc.c
-index c117685..665d98f 100644
+index bc350ed..352c98e 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -8,6 +8,10 @@ malloc_tsd_data(, arenas, arena_t *, NULL)
@@ -217,7 +258,7 @@ index c117685..665d98f 100644
/* Runtime configuration options. */
const char *je_malloc_conf;
bool opt_abort =
-@@ -453,7 +457,8 @@ malloc_conf_init(void)
+@@ -471,7 +475,8 @@ malloc_conf_init(void)
#endif
;
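
The __weak_reference() block added in jemalloc_FreeBSD.h is what lets a FreeBSD program interpose its own malloc while libc's jemalloc remains the default: the real implementations live under reserved __-prefixed names and the public names are only weak aliases. A minimal standalone sketch of the same arrangement, using the GCC/Clang alias attribute that FreeBSD's __weak_reference() macro (from <sys/cdefs.h>) wraps; the names __my_alloc/my_alloc are hypothetical, not libc symbols:

#include <stddef.h>

/* The "real" implementation lives under a reserved name ... */
void *
__my_alloc(size_t size)
{

	return (NULL);		/* placeholder body */
}

/* ... and the public name is only a weak alias, so a program that
 * defines its own strong my_alloc() wins at link time. */
void *my_alloc(size_t size) __attribute__((weak, alias("__my_alloc")));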

diff --git a/contrib/jemalloc/VERSION b/contrib/jemalloc/VERSION
index 43f96d9..84c9c55 100644
--- a/contrib/jemalloc/VERSION
+++ b/contrib/jemalloc/VERSION
@@ -1 +1 @@
-3.3.0-0-g83789f45307379e096c4e8be81d9e9a51e3f5a4a
+3.4.0-0-g0ed518e5dab789ad2171bb38977a8927e2a26775
diff --git a/contrib/jemalloc/doc/jemalloc.3 b/contrib/jemalloc/doc/jemalloc.3
index b0122ee..4444505 100644
--- a/contrib/jemalloc/doc/jemalloc.3
+++ b/contrib/jemalloc/doc/jemalloc.3
@@ -2,12 +2,12 @@
.\" Title: JEMALLOC
.\" Author: Jason Evans
.\" Generator: DocBook XSL Stylesheets v1.76.1
-.\" Date: 01/23/2013
+.\" Date: 06/02/2013
.\" Manual: User Manual
-.\" Source: jemalloc 3.3.0-0-g83789f45307379e096c4e8be81d9e9a51e3f5a4a
+.\" Source: jemalloc 3.4.0-0-g0ed518e5dab789ad2171bb38977a8927e2a26775
.\" Language: English
.\"
-.TH "JEMALLOC" "3" "01/23/2013" "jemalloc 3.3.0-0-g83789f453073" "User Manual"
+.TH "JEMALLOC" "3" "06/02/2013" "jemalloc 3.4.0-0-g0ed518e5dab7" "User Manual"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
jemalloc \- general purpose memory allocation functions
.SH "LIBRARY"
.PP
-This manual describes jemalloc 3\&.3\&.0\-0\-g83789f45307379e096c4e8be81d9e9a51e3f5a4a\&. More information can be found at the
+This manual describes jemalloc 3\&.4\&.0\-0\-g0ed518e5dab789ad2171bb38977a8927e2a26775\&. More information can be found at the
\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
.PP
The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
@@ -392,7 +392,19 @@ Once, when the first call is made to one of the memory allocation routines, the
The string pointed to by the global variable
\fImalloc_conf\fR, the \(lqname\(rq of the file referenced by the symbolic link named
/etc/malloc\&.conf, and the value of the environment variable
-\fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&.
+\fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&. Note that
+\fImalloc_conf\fR
+may be read before
+\fBmain\fR\fB\fR
+is entered, so the declaration of
+\fImalloc_conf\fR
+should specify an initializer that contains the final value to be read by jemalloc\&.
+\fImalloc_conf\fR
+is a compile\-time setting, whereas
+/etc/malloc\&.conf
+and
+\fBMALLOC_CONF\fR
+can be safely set any time prior to program invocation\&.
.PP
An options string is a comma\-separated list of option:value pairs\&. There is one key corresponding to each
"opt\&.*"
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena.h b/contrib/jemalloc/include/jemalloc/internal/arena.h
index 8fdee93..f2c18f4 100644
--- a/contrib/jemalloc/include/jemalloc/internal/arena.h
+++ b/contrib/jemalloc/include/jemalloc/internal/arena.h
@@ -463,9 +463,9 @@ void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
size_t runind, size_t binind, size_t flags);
void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
size_t unzeroed);
-void arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
-void arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
-void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
+bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
+bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
+bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
@@ -663,7 +663,7 @@ arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
*mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
}

-JEMALLOC_INLINE void
+JEMALLOC_INLINE bool
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{

@@ -672,33 +672,40 @@ arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)

arena->prof_accumbytes += accumbytes;
if (arena->prof_accumbytes >= prof_interval) {
- prof_idump();
arena->prof_accumbytes -= prof_interval;
+ return (true);
}
+ return (false);
}

-JEMALLOC_INLINE void
+JEMALLOC_INLINE bool
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{

cassert(config_prof);

if (prof_interval == 0)
- return;
- arena_prof_accum_impl(arena, accumbytes);
+ return (false);
+ return (arena_prof_accum_impl(arena, accumbytes));
}

-JEMALLOC_INLINE void
+JEMALLOC_INLINE bool
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{

cassert(config_prof);

if (prof_interval == 0)
- return;
- malloc_mutex_lock(&arena->lock);
- arena_prof_accum_impl(arena, accumbytes);
- malloc_mutex_unlock(&arena->lock);
+ return (false);
+
+ {
+ bool ret;
+
+ malloc_mutex_lock(&arena->lock);
+ ret = arena_prof_accum_impl(arena, accumbytes);
+ malloc_mutex_unlock(&arena->lock);
+ return (ret);
+ }
}

JEMALLOC_ALWAYS_INLINE size_t
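
The void-to-bool signature change above follows a common locking pattern: a helper called with a mutex held only records that a threshold fired, and the caller performs the side effect (here prof_idump()) after the lock is dropped. A compressed, self-contained sketch of the pattern with pthreads; interval, do_dump(), and on_alloc() are illustrative names, not jemalloc's:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t accumbytes;
static const uint64_t interval = UINT64_C(1) << 20;

static void
do_dump(void)
{

	printf("dump\n");	/* stands in for prof_idump() */
}

/* The locked helper only updates the counter and reports whether the
 * threshold fired; it never calls out while holding the lock. */
static bool
accum(uint64_t n)
{
	bool fire;

	pthread_mutex_lock(&lock);
	accumbytes += n;
	fire = (accumbytes >= interval);
	if (fire)
		accumbytes -= interval;
	pthread_mutex_unlock(&lock);
	return (fire);
}

void
on_alloc(uint64_t n)
{

	if (accum(n))
		do_dump();	/* side effect runs with no locks held */
}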
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
index 9647ffe..d5829f5 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
@@ -278,6 +278,9 @@ static const bool config_ivsalloc =
# ifdef __arm__
# define LG_QUANTUM 3
# endif
+# ifdef __aarch64__
+# define LG_QUANTUM 4
+# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
@@ -287,7 +290,7 @@ static const bool config_ivsalloc =
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
-# ifdef __s390x__
+# ifdef __s390__
# define LG_QUANTUM 4
# endif
# ifdef __SH4__
@@ -440,15 +443,18 @@ static const bool config_ivsalloc =
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
-#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
-#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
-#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
-#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
-#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
-#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
+ do {} while (0)
+#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
+ do {} while (0)
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
+#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
+#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
+#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
+#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
- old_rzsize, zero)
-#define JEMALLOC_VALGRIND_FREE(ptr, rzsize)
+ old_rzsize, zero) do {} while (0)
+#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif

#include "jemalloc/internal/util.h"
diff --git a/contrib/jemalloc/include/jemalloc/internal/private_namespace.h b/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
index d6638df..366676b 100644
--- a/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
+++ b/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
@@ -305,7 +305,13 @@
#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper)
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
#define quarantine JEMALLOC_N(quarantine)
+#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook)
#define quarantine_boot JEMALLOC_N(quarantine_boot)
+#define quarantine_booted JEMALLOC_N(quarantine_booted)
+#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup)
+#define quarantine_init JEMALLOC_N(quarantine_init)
+#define quarantine_tls JEMALLOC_N(quarantine_tls)
+#define quarantine_tsd JEMALLOC_N(quarantine_tsd)
#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof.h b/contrib/jemalloc/include/jemalloc/internal/prof.h
index 47f22ad..119a5b1 100644
--- a/contrib/jemalloc/include/jemalloc/internal/prof.h
+++ b/contrib/jemalloc/include/jemalloc/internal/prof.h
@@ -237,7 +237,7 @@ void prof_postfork_child(void);
\
assert(size == s2u(size)); \
\
- prof_tdata = prof_tdata_get(); \
+ prof_tdata = prof_tdata_get(true); \
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
if (prof_tdata != NULL) \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
@@ -286,7 +286,7 @@ void prof_postfork_child(void);
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)

-prof_tdata_t *prof_tdata_get(void);
+prof_tdata_t *prof_tdata_get(bool create);
void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
prof_ctx_t *prof_ctx_get(const void *ptr);
void prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
@@ -304,17 +304,15 @@ malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
prof_tdata_cleanup)

JEMALLOC_INLINE prof_tdata_t *
-prof_tdata_get(void)
+prof_tdata_get(bool create)
{
prof_tdata_t *prof_tdata;

cassert(config_prof);

prof_tdata = *prof_tdata_tsd_get();
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) {
- if (prof_tdata == NULL)
- prof_tdata = prof_tdata_init();
- }
+ if (create && prof_tdata == NULL)
+ prof_tdata = prof_tdata_init();

return (prof_tdata);
}
@@ -397,7 +395,7 @@ prof_sample_accum_update(size_t size)
/* Sampling logic is unnecessary if the interval is 1. */
assert(opt_lg_prof_sample != 0);

- prof_tdata = *prof_tdata_tsd_get();
+ prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return (true);

diff --git a/contrib/jemalloc/include/jemalloc/internal/quarantine.h b/contrib/jemalloc/include/jemalloc/internal/quarantine.h
index 38f3d69..16f677f 100644
--- a/contrib/jemalloc/include/jemalloc/internal/quarantine.h
+++ b/contrib/jemalloc/include/jemalloc/internal/quarantine.h
@@ -1,6 +1,9 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

+typedef struct quarantine_obj_s quarantine_obj_t;
+typedef struct quarantine_s quarantine_t;
+
/* Default per thread quarantine size if valgrind is enabled. */
#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)

@@ -8,17 +11,57 @@
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

+struct quarantine_obj_s {
+ void *ptr;
+ size_t usize;
+};
+
+struct quarantine_s {
+ size_t curbytes;
+ size_t curobjs;
+ size_t first;
+#define LG_MAXOBJS_INIT 10
+ size_t lg_maxobjs;
+ quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
+};
+
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

+quarantine_t *quarantine_init(size_t lg_maxobjs);
void quarantine(void *ptr);
+void quarantine_cleanup(void *arg);
bool quarantine_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

+#ifndef JEMALLOC_ENABLE_INLINE
+malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *)
+
+void quarantine_alloc_hook(void);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
+malloc_tsd_externs(quarantine, quarantine_t *)
+malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL,
+ quarantine_cleanup)
+
+JEMALLOC_ALWAYS_INLINE void
+quarantine_alloc_hook(void)
+{
+ quarantine_t *quarantine;
+
+ assert(config_fill && opt_quarantine);
+
+ quarantine = *quarantine_tsd_get();
+ if (quarantine == NULL)
+ quarantine_init(LG_MAXOBJS_INIT);
+}
+#endif
+
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
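
quarantine_s, now exported in the header above, keeps its object array a power of two in size (lg_maxobjs) so that ring-buffer wraparound is a bit mask rather than a modulo, as in the "first = (first + 1) & ((ZU(1) << lg_maxobjs) - 1)" stepping seen later in quarantine.c. A standalone sketch of that indexing:

#include <assert.h>
#include <stddef.h>

/* Advance an index around a ring of capacity 1 << lg_max; masking
 * with (capacity - 1) replaces the modulo. */
static size_t
ring_next(size_t i, size_t lg_max)
{

	return ((i + 1) & (((size_t)1 << lg_max) - 1));
}

int
main(void)
{

	assert(ring_next(6, 3) == 7);	/* capacity 8 */
	assert(ring_next(7, 3) == 0);	/* wraps to slot 0 */
	return (0);
}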

diff --git a/contrib/jemalloc/include/jemalloc/internal/tcache.h b/contrib/jemalloc/include/jemalloc/internal/tcache.h
index 71900c2..ba36204 100644
--- a/contrib/jemalloc/include/jemalloc/internal/tcache.h
+++ b/contrib/jemalloc/include/jemalloc/internal/tcache.h
@@ -320,8 +320,8 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

if (config_stats)
tbin->tstats.nrequests++;
@@ -371,8 +371,8 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
} else {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

if (config_stats)
tbin->tstats.nrequests++;
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc.h b/contrib/jemalloc/include/jemalloc/jemalloc.h
index 43c1a38..1c1e8a5 100644
--- a/contrib/jemalloc/include/jemalloc/jemalloc.h
+++ b/contrib/jemalloc/include/jemalloc/jemalloc.h
@@ -7,12 +7,12 @@ extern "C" {
#include <limits.h>
#include <strings.h>

-#define JEMALLOC_VERSION "3.3.0-0-g83789f45307379e096c4e8be81d9e9a51e3f5a4a"
+#define JEMALLOC_VERSION "3.4.0-0-g0ed518e5dab789ad2171bb38977a8927e2a26775"
#define JEMALLOC_VERSION_MAJOR 3
-#define JEMALLOC_VERSION_MINOR 3
+#define JEMALLOC_VERSION_MINOR 4
#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 0
-#define JEMALLOC_VERSION_GID "83789f45307379e096c4e8be81d9e9a51e3f5a4a"
+#define JEMALLOC_VERSION_GID "0ed518e5dab789ad2171bb38977a8927e2a26775"

#include "jemalloc_defs.h"
#include "jemalloc_FreeBSD.h"
diff --git a/contrib/jemalloc/src/arena.c b/contrib/jemalloc/src/arena.c
index 8d50f4d..05a787f 100644
--- a/contrib/jemalloc/src/arena.c
+++ b/contrib/jemalloc/src/arena.c
@@ -366,8 +366,6 @@ arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
LG_PAGE)), (npages << LG_PAGE));
memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
(npages << LG_PAGE));
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), (npages << LG_PAGE));
}

static inline void
@@ -380,8 +378,6 @@ arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
LG_PAGE)), PAGE);
for (i = 0; i < PAGE / sizeof(size_t); i++)
assert(p[i] == 0);
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), PAGE);
}

static void
@@ -513,6 +509,8 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
run_ind+need_pages-1);
}
}
+ VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
+ LG_PAGE)), (need_pages << LG_PAGE));
}

static arena_chunk_t *
@@ -574,6 +572,11 @@ arena_chunk_alloc(arena_t *arena)
for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_unzeroed_set(chunk, i, unzeroed);
} else if (config_debug) {
+ VALGRIND_MAKE_MEM_DEFINED(
+ (void *)arena_mapp_get(chunk, map_bias+1),
+ (void *)((uintptr_t)
+ arena_mapp_get(chunk, chunk_npages-1)
+ - (uintptr_t)arena_mapp_get(chunk, map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
unzeroed);
@@ -1246,8 +1249,6 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
(uintptr_t)bin_info->bitmap_offset);

/* Initialize run internals. */
- VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset -
- bin_info->redzone_size);
run->bin = bin;
run->nextind = 0;
run->nfree = bin_info->nregs;
@@ -1337,8 +1338,8 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,

assert(tbin->ncached == 0);

- if (config_prof)
- arena_prof_accum(arena, prof_accumbytes);
+ if (config_prof && arena_prof_accum(arena, prof_accumbytes))
+ prof_idump();
bin = &arena->bins[binind];
malloc_mutex_lock(&bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
@@ -1446,8 +1447,8 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
bin->stats.nrequests++;
}
malloc_mutex_unlock(&bin->lock);
- if (config_prof && isthreaded == false)
- arena_prof_accum(arena, size);
+ if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
+ prof_idump();

if (zero == false) {
if (config_fill) {
@@ -1464,8 +1465,8 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

return (ret);
}
@@ -1474,6 +1475,7 @@ void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
void *ret;
+ UNUSED bool idump;

/* Large allocation. */
size = PAGE_CEILING(size);
@@ -1492,8 +1494,10 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
}
if (config_prof)
- arena_prof_accum_locked(arena, size);
+ idump = arena_prof_accum_locked(arena, size);
malloc_mutex_unlock(&arena->lock);
+ if (config_prof && idump)
+ prof_idump();

if (zero == false) {
if (config_fill) {
diff --git a/contrib/jemalloc/src/base.c b/contrib/jemalloc/src/base.c
index b1a5945..4e62e8f 100644
--- a/contrib/jemalloc/src/base.c
+++ b/contrib/jemalloc/src/base.c
@@ -63,6 +63,7 @@ base_alloc(size_t size)
ret = base_next_addr;
base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
malloc_mutex_unlock(&base_mtx);
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);

return (ret);
}
@@ -88,6 +89,7 @@ base_node_alloc(void)
ret = base_nodes;
base_nodes = *(extent_node_t **)ret;
malloc_mutex_unlock(&base_mtx);
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
} else {
malloc_mutex_unlock(&base_mtx);
ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
@@ -100,6 +102,7 @@ void
base_node_dealloc(extent_node_t *node)
{

+ VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
malloc_mutex_lock(&base_mtx);
*(extent_node_t **)node = base_nodes;
base_nodes = node;
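
base_node_dealloc() threads freed nodes into an intrusive free list by storing the next pointer over the node's first field, and the VALGRIND_MAKE_MEM_UNDEFINED calls added here mark the recycled bytes as undefined so stale reads get reported. A minimal sketch of the push/pop halves of such a list; node_t is a stand-in, not jemalloc's extent_node_t:

#include <stddef.h>

/* Any type at least one pointer wide can carry the intrusive link. */
typedef struct {
	void	*addr;
	size_t	size;
} node_t;

static node_t *free_nodes;

static void
node_push(node_t *node)
{

	*(node_t **)node = free_nodes;	/* link overwrites node->addr */
	free_nodes = node;
}

static node_t *
node_pop(void)
{
	node_t *node = free_nodes;

	if (node != NULL)
		free_nodes = *(node_t **)node;
	return (node);
}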
diff --git a/contrib/jemalloc/src/chunk.c b/contrib/jemalloc/src/chunk.c
index 46e387e..aef3fed 100644
--- a/contrib/jemalloc/src/chunk.c
+++ b/contrib/jemalloc/src/chunk.c
@@ -111,6 +111,7 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
}
node->addr = (void *)((uintptr_t)(ret) + size);
node->size = trailsize;
+ node->zeroed = zeroed;
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
node = NULL;
@@ -119,7 +120,6 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,

if (node != NULL)
base_node_dealloc(node);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
if (*zero) {
if (zeroed == false)
memset(ret, 0, size);
@@ -130,7 +130,6 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
}
return (ret);
@@ -179,27 +178,32 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
/* All strategies for allocation failed. */
ret = NULL;
label_return:
- if (config_ivsalloc && base == false && ret != NULL) {
- if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
- chunk_dealloc(ret, size, true);
- return (NULL);
+ if (ret != NULL) {
+ if (config_ivsalloc && base == false) {
+ if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
+ chunk_dealloc(ret, size, true);
+ return (NULL);
+ }
}
- }
- if ((config_stats || config_prof) && ret != NULL) {
- bool gdump;
- malloc_mutex_lock(&chunks_mtx);
- if (config_stats)
- stats_chunks.nchunks += (size / chunksize);
- stats_chunks.curchunks += (size / chunksize);
- if (stats_chunks.curchunks > stats_chunks.highchunks) {
- stats_chunks.highchunks = stats_chunks.curchunks;
- if (config_prof)
- gdump = true;
- } else if (config_prof)
- gdump = false;
- malloc_mutex_unlock(&chunks_mtx);
- if (config_prof && opt_prof && opt_prof_gdump && gdump)
- prof_gdump();
+ if (config_stats || config_prof) {
+ bool gdump;
+ malloc_mutex_lock(&chunks_mtx);
+ if (config_stats)
+ stats_chunks.nchunks += (size / chunksize);
+ stats_chunks.curchunks += (size / chunksize);
+ if (stats_chunks.curchunks > stats_chunks.highchunks) {
+ stats_chunks.highchunks =
+ stats_chunks.curchunks;
+ if (config_prof)
+ gdump = true;
+ } else if (config_prof)
+ gdump = false;
+ malloc_mutex_unlock(&chunks_mtx);
+ if (config_prof && opt_prof && opt_prof_gdump && gdump)
+ prof_gdump();
+ }
+ if (config_valgrind)
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
@@ -210,9 +214,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
size_t size)
{
bool unzeroed;
- extent_node_t *xnode, *node, *prev, key;
+ extent_node_t *xnode, *node, *prev, *xprev, key;

unzeroed = pages_purge(chunk, size);
+ VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

/*
* Allocate a node before acquiring chunks_mtx even though it might not
@@ -221,6 +226,8 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
* held.
*/
xnode = base_node_alloc();
+ /* Use xprev to implement conditional deferred deallocation of prev. */
+ xprev = NULL;

malloc_mutex_lock(&chunks_mtx);
key.addr = (void *)((uintptr_t)chunk + size);
@@ -237,8 +244,6 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
node->size += size;
node->zeroed = (node->zeroed && (unzeroed == false));
extent_tree_szad_insert(chunks_szad, node);
- if (xnode != NULL)
- base_node_dealloc(xnode);
} else {
/* Coalescing forward failed, so insert a new node. */
if (xnode == NULL) {
@@ -248,10 +253,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
* already been purged, so this is only a virtual
* memory leak.
*/
- malloc_mutex_unlock(&chunks_mtx);
- return;
+ goto label_return;
}
node = xnode;
+ xnode = NULL; /* Prevent deallocation below. */
node->addr = chunk;
node->size = size;
node->zeroed = (unzeroed == false);
@@ -277,9 +282,19 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
node->zeroed = (node->zeroed && prev->zeroed);
extent_tree_szad_insert(chunks_szad, node);

- base_node_dealloc(prev);
+ xprev = prev;
}
+
+label_return:
malloc_mutex_unlock(&chunks_mtx);
+ /*
+ * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
+ * avoid potential deadlock.
+ */
+ if (xnode != NULL)
+ base_node_dealloc(xnode);
+ if (xprev != NULL)
+ base_node_dealloc(prev);
}

void
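
The chunk_record() rework above is the race-triggered deadlock fix from the 3.4.0 ChangeLog: instead of calling base_node_dealloc() with chunks_mtx held, the victim nodes are parked in xnode/xprev and freed only after the unlock, so the allocator's own locks can never be acquired in inverted order. A reduced sketch of the pattern; node_t, head, and remove_head() are illustrative names:

#include <pthread.h>
#include <stdlib.h>

typedef struct node_s node_t;
struct node_s {
	node_t	*next;
};

static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;
static node_t *head;

/* Stand-in for base_node_dealloc(), which takes its own lock. */
static void
node_dealloc(node_t *node)
{

	free(node);
}

static void
remove_head(void)
{
	node_t *dead;

	pthread_mutex_lock(&list_mtx);
	dead = head;			/* unlink under the lock ... */
	if (dead != NULL)
		head = dead->next;
	pthread_mutex_unlock(&list_mtx);
	if (dead != NULL)
		node_dealloc(dead);	/* ... free after dropping it */
}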
diff --git a/contrib/jemalloc/src/chunk_dss.c b/contrib/jemalloc/src/chunk_dss.c
index d1aea93..24781cc 100644
--- a/contrib/jemalloc/src/chunk_dss.c
+++ b/contrib/jemalloc/src/chunk_dss.c
@@ -127,7 +127,6 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
if (*zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
return (ret);
}
diff --git a/contrib/jemalloc/src/jemalloc.c b/contrib/jemalloc/src/jemalloc.c
index 665d98f..352c98e 100644
--- a/contrib/jemalloc/src/jemalloc.c
+++ b/contrib/jemalloc/src/jemalloc.c
@@ -286,12 +286,30 @@ arenas_cleanup(void *arg)
malloc_mutex_unlock(&arenas_lock);
}

+static JEMALLOC_ATTR(always_inline) void
+malloc_thread_init(void)
+{
+
+ /*
+ * TSD initialization can't be safely done as a side effect of
+ * deallocation, because it is possible for a thread to do nothing but
+ * deallocate its TLS data via free(), in which case writing to TLS
+ * would cause write-after-free memory corruption. The quarantine
+ * facility *only* gets used as a side effect of deallocation, so make
+ * a best effort attempt at initializing its TSD by hooking all
+ * allocation events.
+ */
+ if (config_fill && opt_quarantine)
+ quarantine_alloc_hook();
+}
+
static JEMALLOC_ATTR(always_inline) bool
malloc_init(void)
{

- if (malloc_initialized == false)
- return (malloc_init_hard());
+ if (malloc_initialized == false && malloc_init_hard())
+ return (true);
+ malloc_thread_init();

return (false);
}
@@ -1100,6 +1118,7 @@ je_realloc(void *ptr, size_t size)
if (size == 0) {
if (ptr != NULL) {
/* realloc(ptr, 0) is equivalent to free(p). */
+ assert(malloc_initialized || IS_INITIALIZER);
if (config_prof) {
old_size = isalloc(ptr, true);
if (config_valgrind && opt_valgrind)
@@ -1125,6 +1144,7 @@ je_realloc(void *ptr, size_t size)

if (ptr != NULL) {
assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();

if (config_prof) {
old_size = isalloc(ptr, true);
@@ -1328,6 +1348,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
size_t ret;

assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();

if (config_ivsalloc)
ret = ivsalloc(ptr, config_prof);
@@ -1502,6 +1523,7 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();

if (arena_ind != UINT_MAX) {
arena_chunk_t *chunk;
@@ -1616,6 +1638,7 @@ je_sallocm(const void *ptr, size_t *rsize, int flags)
size_t sz;

assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();

if (config_ivsalloc)
sz = ivsalloc(ptr, config_prof);
@@ -1735,12 +1758,12 @@ _malloc_prefork(void)

/* Acquire all mutexes in a safe order. */
ctl_prefork();
+ prof_prefork();
malloc_mutex_prefork(&arenas_lock);
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_prefork(arenas[i]);
}
- prof_prefork();
chunk_prefork();
base_prefork();
huge_prefork();
@@ -1766,12 +1789,12 @@ _malloc_postfork(void)
huge_postfork_parent();
base_postfork_parent();
chunk_postfork_parent();
- prof_postfork_parent();
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_parent(arenas[i]);
}
malloc_mutex_postfork_parent(&arenas_lock);
+ prof_postfork_parent();
ctl_postfork_parent();
}

@@ -1786,12 +1809,12 @@ jemalloc_postfork_child(void)
huge_postfork_child();
base_postfork_child();
chunk_postfork_child();
- prof_postfork_child();
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_child(arenas[i]);
}
malloc_mutex_postfork_child(&arenas_lock);
+ prof_postfork_child();
ctl_postfork_child();
}

diff --git a/contrib/jemalloc/src/prof.c b/contrib/jemalloc/src/prof.c
index b9f03a0..c133b95 100644
--- a/contrib/jemalloc/src/prof.c
+++ b/contrib/jemalloc/src/prof.c
@@ -438,7 +438,7 @@ prof_lookup(prof_bt_t *bt)

cassert(config_prof);

- prof_tdata = prof_tdata_get();
+ prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return (NULL);

@@ -684,7 +684,7 @@ prof_ctx_destroy(prof_ctx_t *ctx)
* avoid a race between the main body of prof_ctx_merge() and entry
* into this function.
*/
- prof_tdata = *prof_tdata_tsd_get();
+ prof_tdata = prof_tdata_get(false);
assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
prof_enter(prof_tdata);
malloc_mutex_lock(ctx->lock);
@@ -844,7 +844,7 @@ prof_dump(bool propagate_err, const char *filename, bool leakcheck)

cassert(config_prof);

- prof_tdata = prof_tdata_get();
+ prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return (true);
prof_enter(prof_tdata);
@@ -966,11 +966,7 @@ prof_idump(void)

if (prof_booted == false)
return;
- /*
- * Don't call prof_tdata_get() here, because it could cause recursive
- * allocation.
- */
- prof_tdata = *prof_tdata_tsd_get();
+ prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return;
if (prof_tdata->enq) {
@@ -1020,11 +1016,7 @@ prof_gdump(void)

if (prof_booted == false)
return;
- /*
- * Don't call prof_tdata_get() here, because it could cause recursive
- * allocation.
- */
- prof_tdata = *prof_tdata_tsd_get();
+ prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return;
if (prof_tdata->enq) {
diff --git a/contrib/jemalloc/src/quarantine.c b/contrib/jemalloc/src/quarantine.c
index 9005ab3..f96a948 100644
--- a/contrib/jemalloc/src/quarantine.c
+++ b/contrib/jemalloc/src/quarantine.c
@@ -1,3 +1,4 @@
+#define JEMALLOC_QUARANTINE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/*
@@ -11,39 +12,18 @@
/******************************************************************************/
/* Data. */

-typedef struct quarantine_obj_s quarantine_obj_t;
-typedef struct quarantine_s quarantine_t;
-
-struct quarantine_obj_s {
- void *ptr;
- size_t usize;
-};
-
-struct quarantine_s {
- size_t curbytes;
- size_t curobjs;
- size_t first;
-#define LG_MAXOBJS_INIT 10
- size_t lg_maxobjs;
- quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
-};
-
-static void quarantine_cleanup(void *arg);
-
-malloc_tsd_data(static, quarantine, quarantine_t *, NULL)
-malloc_tsd_funcs(JEMALLOC_INLINE, quarantine, quarantine_t *, NULL,
- quarantine_cleanup)
+malloc_tsd_data(, quarantine, quarantine_t *, NULL)

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

-static quarantine_t *quarantine_init(size_t lg_maxobjs);
static quarantine_t *quarantine_grow(quarantine_t *quarantine);
+static void quarantine_drain_one(quarantine_t *quarantine);
static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);

/******************************************************************************/

-static quarantine_t *
+quarantine_t *
quarantine_init(size_t lg_maxobjs)
{
quarantine_t *quarantine;
@@ -68,8 +48,10 @@ quarantine_grow(quarantine_t *quarantine)
quarantine_t *ret;

ret = quarantine_init(quarantine->lg_maxobjs + 1);
- if (ret == NULL)
+ if (ret == NULL) {
+ quarantine_drain_one(quarantine);
return (quarantine);
+ }

ret->curbytes = quarantine->curbytes;
ret->curobjs = quarantine->curobjs;
@@ -89,23 +71,29 @@ quarantine_grow(quarantine_t *quarantine)
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
+ idalloc(quarantine);

return (ret);
}

static void
+quarantine_drain_one(quarantine_t *quarantine)
+{
+ quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
+ assert(obj->usize == isalloc(obj->ptr, config_prof));
+ idalloc(obj->ptr);
+ quarantine->curbytes -= obj->usize;
+ quarantine->curobjs--;
+ quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
+ quarantine->lg_maxobjs) - 1);
+}
+
+static void
quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
{

- while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) {
- quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
- assert(obj->usize == isalloc(obj->ptr, config_prof));
- idalloc(obj->ptr);
- quarantine->curbytes -= obj->usize;
- quarantine->curobjs--;
- quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
- quarantine->lg_maxobjs) - 1);
- }
+ while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
+ quarantine_drain_one(quarantine);
}

void
@@ -119,24 +107,16 @@ quarantine(void *ptr)

quarantine = *quarantine_tsd_get();
if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
- if (quarantine == NULL) {
- if ((quarantine = quarantine_init(LG_MAXOBJS_INIT)) ==
- NULL) {
- idalloc(ptr);
- return;
- }
- } else {
- if (quarantine == QUARANTINE_STATE_PURGATORY) {
- /*
- * Make a note that quarantine() was called
- * after quarantine_cleanup() was called.
- */
- quarantine = QUARANTINE_STATE_REINCARNATED;
- quarantine_tsd_set(&quarantine);
- }
- idalloc(ptr);
- return;
+ if (quarantine == QUARANTINE_STATE_PURGATORY) {
+ /*
+ * Make a note that quarantine() was called after
+ * quarantine_cleanup() was called.
+ */
+ quarantine = QUARANTINE_STATE_REINCARNATED;
+ quarantine_tsd_set(&quarantine);
}
+ idalloc(ptr);
+ return;
}
/*
* Drain one or more objects if the quarantine size limit would be
@@ -169,7 +149,7 @@ quarantine(void *ptr)
}
}

-static void
+void
quarantine_cleanup(void *arg)
{
quarantine_t *quarantine = *(quarantine_t **)arg;
diff --git a/contrib/jemalloc/src/tcache.c b/contrib/jemalloc/src/tcache.c
index 7befdc8..98ed19e 100644
--- a/contrib/jemalloc/src/tcache.c
+++ b/contrib/jemalloc/src/tcache.c
@@ -97,7 +97,8 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
arena_bin_t *bin = &arena->bins[binind];

if (config_prof && arena == tcache->arena) {
- arena_prof_accum(arena, tcache->prof_accumbytes);
+ if (arena_prof_accum(arena, tcache->prof_accumbytes))
+ prof_idump();
tcache->prof_accumbytes = 0;
}

@@ -174,11 +175,14 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
arena_t *arena = chunk->arena;
+ UNUSED bool idump;

+ if (config_prof)
+ idump = false;
malloc_mutex_lock(&arena->lock);
if ((config_prof || config_stats) && arena == tcache->arena) {
if (config_prof) {
- arena_prof_accum_locked(arena,
+ idump = arena_prof_accum_locked(arena,
tcache->prof_accumbytes);
tcache->prof_accumbytes = 0;
}
@@ -210,6 +214,8 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
}
}
malloc_mutex_unlock(&arena->lock);
+ if (config_prof && idump)
+ prof_idump();
}
if (config_stats && merged_stats == false) {
/*
@@ -341,8 +347,9 @@ tcache_destroy(tcache_t *tcache)
}
}

- if (config_prof && tcache->prof_accumbytes > 0)
- arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
+ if (config_prof && tcache->prof_accumbytes > 0 &&
+ arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
+ prof_idump();

tcache_size = arena_salloc(tcache, false);
if (tcache_size <= SMALL_MAXCLASS) {

e1000ee Update jemalloc to version 3.4.0.
contrib/jemalloc/ChangeLog | 41 ++++++++++
contrib/jemalloc/FREEBSD-diffs | 57 +++++++++++--
contrib/jemalloc/VERSION | 2 +-
contrib/jemalloc/doc/jemalloc.3 | 22 +++--
contrib/jemalloc/include/jemalloc/internal/arena.h | 33 +++++---
.../include/jemalloc/internal/jemalloc_internal.h | 24 +++---
.../include/jemalloc/internal/private_namespace.h | 6 ++
contrib/jemalloc/include/jemalloc/internal/prof.h | 14 ++--
.../include/jemalloc/internal/quarantine.h | 43 ++++++++++
.../jemalloc/include/jemalloc/internal/tcache.h | 4 +-
contrib/jemalloc/include/jemalloc/jemalloc.h | 6 +-
contrib/jemalloc/src/arena.c | 28 ++++---
contrib/jemalloc/src/base.c | 3 +
contrib/jemalloc/src/chunk.c | 71 ++++++++++-------
contrib/jemalloc/src/chunk_dss.c | 1 -
contrib/jemalloc/src/jemalloc.c | 33 ++++++--
contrib/jemalloc/src/prof.c | 18 ++---
contrib/jemalloc/src/quarantine.c | 84 ++++++++------------
contrib/jemalloc/src/tcache.c | 15 +++-
19 files changed, 341 insertions(+), 164 deletions(-)

Upstream: github.com

