diff --git a/src/rt/jemalloc/ChangeLog b/src/rt/jemalloc/ChangeLog
index fc096d8f42fde..8ab884875b0d6 100644
--- a/src/rt/jemalloc/ChangeLog
+++ b/src/rt/jemalloc/ChangeLog
@@ -6,6 +6,19 @@ found in the git revision history:
http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
git://canonware.com/jemalloc.git
+* 3.4.0 (June 2, 2013)
+
+ This version is essentially a small bugfix release, but the addition of
+ aarch64 support requires that the minor version be incremented.
+
+ Bug fixes:
+ - Fix race-triggered deadlocks in chunk_record(). These deadlocks were
+ typically triggered by multiple threads concurrently deallocating huge
+ objects.
+
+ New features:
+ - Add support for the aarch64 architecture.
+
* 3.3.1 (March 6, 2013)
This version fixes bugs that are typically encountered only when utilizing
@@ -15,7 +28,7 @@ found in the git revision history:
- Fix a locking order bug that could cause deadlock during fork if heap
profiling were enabled.
- Fix a chunk recycling bug that could cause the allocator to lose track of
- whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause
+ whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause
corruption if allocating via sbrk(2) (unlikely unless running with the
"dss:primary" option specified). This was completely harmless on Linux
unless using mlockall(2) (and unlikely even then, unless the
diff --git a/src/rt/jemalloc/VERSION b/src/rt/jemalloc/VERSION
index 900c82d1043de..84c9c557ac9c7 100644
--- a/src/rt/jemalloc/VERSION
+++ b/src/rt/jemalloc/VERSION
@@ -1 +1 @@
-3.3.1-0-g9ef9d9e8c271cdf14f664b871a8f98c827714784
+3.4.0-0-g0ed518e5dab789ad2171bb38977a8927e2a26775
diff --git a/src/rt/jemalloc/bin/pprof b/src/rt/jemalloc/bin/pprof
index 46f4f3f70a005..727eb43704f8c 100755
--- a/src/rt/jemalloc/bin/pprof
+++ b/src/rt/jemalloc/bin/pprof
@@ -2,11 +2,11 @@
# Copyright (c) 1998-2007, Google Inc.
# All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
-#
+#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
@@ -16,7 +16,7 @@
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
-#
+#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -1683,23 +1683,23 @@ sub PrintSource {
HtmlPrintNumber($c2),
UnparseAddress($offset, $e->[0]),
CleanDisassembly($e->[3]));
-
+
# Append the most specific source line associated with this instruction
if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) };
$dis = HtmlEscape($dis);
my $f = $e->[5];
my $l = $e->[6];
if ($f ne $last_dis_filename) {
- $dis .= sprintf("%s:%d",
+ $dis .= sprintf("%s:%d",
HtmlEscape(CleanFileName($f)), $l);
} elsif ($l ne $last_dis_linenum) {
# De-emphasize the unchanged file name portion
$dis .= sprintf("%s" .
- ":%d",
+ ":%d",
HtmlEscape(CleanFileName($f)), $l);
} else {
# De-emphasize the entire location
- $dis .= sprintf("%s:%d",
+ $dis .= sprintf("%s:%d",
HtmlEscape(CleanFileName($f)), $l);
}
$last_dis_filename = $f;
@@ -1788,8 +1788,8 @@ sub PrintSource {
if (defined($dis) && $dis ne '') {
$asm = "" . $dis . "";
}
- my $source_class = (($n1 + $n2 > 0)
- ? "livesrc"
+ my $source_class = (($n1 + $n2 > 0)
+ ? "livesrc"
: (($asm ne "") ? "deadsrc" : "nop"));
printf $output (
"%5d " .
@@ -4723,7 +4723,7 @@ sub MapToSymbols {
}
}
}
-
+
# Prepend to accumulated symbols for pcstr
# (so that caller comes before callee)
my $sym = $symbols->{$pcstr};
@@ -4917,7 +4917,7 @@ sub ConfigureTool {
my $dirname = $`; # this is everything up to and including the last slash
if (-x "$dirname$tool") {
$path = "$dirname$tool";
- } else {
+ } else {
$path = $tool;
}
}
diff --git a/src/rt/jemalloc/configure.ac b/src/rt/jemalloc/configure.ac
index 1189942c90fdf..882d3b3f3b02a 100644
--- a/src/rt/jemalloc/configure.ac
+++ b/src/rt/jemalloc/configure.ac
@@ -26,7 +26,7 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
])
dnl JE_COMPILABLE(label, hcode, mcode, rvar)
-dnl
+dnl
dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors
dnl cause failure.
AC_DEFUN([JE_COMPILABLE],
@@ -232,7 +232,7 @@ CC_MM=1
dnl Platform-specific settings. abi and RPATH can probably be determined
dnl programmatically, but doing so is error-prone, which makes it generally
dnl not worth the trouble.
-dnl
+dnl
dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
dnl definitions need to be seen before any headers are included, which is a pain
dnl to make happen otherwise.
@@ -965,7 +965,7 @@ fi
dnl ============================================================================
dnl jemalloc configuration.
-dnl
+dnl
dnl Set VERSION if source directory has an embedded git repository.
if test -d "${srcroot}.git" ; then
diff --git a/src/rt/jemalloc/doc/jemalloc.3 b/src/rt/jemalloc/doc/jemalloc.3
index 1462e2c2b34b9..d0e0a23bab328 100644
--- a/src/rt/jemalloc/doc/jemalloc.3
+++ b/src/rt/jemalloc/doc/jemalloc.3
@@ -2,12 +2,12 @@
.\" Title: JEMALLOC
.\" Author: Jason Evans
.\" Generator: DocBook XSL Stylesheets v1.76.1
-.\" Date: 03/06/2013
+.\" Date: 06/02/2013
.\" Manual: User Manual
-.\" Source: jemalloc 3.3.1-0-g9ef9d9e8c271cdf14f664b871a8f98c827714784
+.\" Source: jemalloc 3.4.0-0-g0ed518e5dab789ad2171bb38977a8927e2a26775
.\" Language: English
.\"
-.TH "JEMALLOC" "3" "03/06/2013" "jemalloc 3.3.1-0-g9ef9d9e8c271" "User Manual"
+.TH "JEMALLOC" "3" "06/02/2013" "jemalloc 3.4.0-0-g0ed518e5dab7" "User Manual"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
jemalloc \- general purpose memory allocation functions
.SH "LIBRARY"
.PP
-This manual describes jemalloc 3\&.3\&.1\-0\-g9ef9d9e8c271cdf14f664b871a8f98c827714784\&. More information can be found at the
+This manual describes jemalloc 3\&.4\&.0\-0\-g0ed518e5dab789ad2171bb38977a8927e2a26775\&. More information can be found at the
\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
.SH "SYNOPSIS"
.sp
@@ -376,7 +376,19 @@ Once, when the first call is made to one of the memory allocation routines, the
The string pointed to by the global variable
\fImalloc_conf\fR, the \(lqname\(rq of the file referenced by the symbolic link named
/etc/malloc\&.conf, and the value of the environment variable
-\fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&.
+\fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&. Note that
+\fImalloc_conf\fR
+may be read before
+\fBmain\fR\fB\fR
+is entered, so the declaration of
+\fImalloc_conf\fR
+should specify an initializer that contains the final value to be read by jemalloc\&.
+\fImalloc_conf\fR
+is a compile\-time setting, whereas
+/etc/malloc\&.conf
+and
+\fBMALLOC_CONF\fR
+can be safely set any time prior to program invocation\&.
.PP
An options string is a comma\-separated list of option:value pairs\&. There is one key corresponding to each
"opt\&.*"
diff --git a/src/rt/jemalloc/doc/jemalloc.xml.in b/src/rt/jemalloc/doc/jemalloc.xml.in
index 09305801babfc..abd5e6fcd0f29 100644
--- a/src/rt/jemalloc/doc/jemalloc.xml.in
+++ b/src/rt/jemalloc/doc/jemalloc.xml.in
@@ -432,7 +432,14 @@ for (i = 0; i < nbins; i++) {
referenced by the symbolic link named /etc/malloc.conf, and the value of the
environment variable MALLOC_CONF, will be interpreted, in
- that order, from left to right as options.
+ that order, from left to right as options. Note that
+ malloc_conf may be read before
+ main is entered, so the declaration of
+ malloc_conf should specify an initializer that contains
+ the final value to be read by jemalloc. malloc_conf is
+ a compile-time setting, whereas /etc/malloc.conf and MALLOC_CONF
+ can be safely set any time prior to program invocation.
An options string is a comma-separated list of option:value pairs.
       There is one key corresponding to each
diff --git a/src/rt/jemalloc/include/jemalloc/internal/arena.h b/src/rt/jemalloc/include/jemalloc/internal/arena.h
--- a/src/rt/jemalloc/include/jemalloc/internal/arena.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/arena.h
-         assert(pageind >= map_bias);
- assert(pageind < chunk_npages);
+ assert(pageind >= map_bias);
+ assert(pageind < chunk_npages);
- return (&chunk->map[pageind-map_bias]);
+ return (&chunk->map[pageind-map_bias]);
}
JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{
- return (&arena_mapp_get(chunk, pageind)->bits);
+ return (&arena_mapp_get(chunk, pageind)->bits);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{
- return (*arena_mapbitsp_get(chunk, pageind));
+ return (*arena_mapbitsp_get(chunk, pageind));
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
- size_t mapbits;
+ size_t mapbits;
- mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
- return (mapbits & ~PAGE_MASK);
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
+ return (mapbits & ~PAGE_MASK);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
- size_t mapbits;
+ size_t mapbits;
- mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
- (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
- return (mapbits & ~PAGE_MASK);
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
+ (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
+ return (mapbits & ~PAGE_MASK);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
{
- size_t mapbits;
+ size_t mapbits;
- mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
- CHUNK_MAP_ALLOCATED);
- return (mapbits >> LG_PAGE);
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
+ CHUNK_MAP_ALLOCATED);
+ return (mapbits >> LG_PAGE);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
- size_t mapbits;
- size_t binind;
+ size_t mapbits;
+ size_t binind;
- mapbits = arena_mapbits_get(chunk, pageind);
- binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
- assert(binind < NBINS || binind == BININD_INVALID);
- return (binind);
+ mapbits = arena_mapbits_get(chunk, pageind);
+ binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
+ assert(binind < NBINS || binind == BININD_INVALID);
+ return (binind);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
- size_t mapbits;
+ size_t mapbits;
- mapbits = arena_mapbits_get(chunk, pageind);
- return (mapbits & CHUNK_MAP_DIRTY);
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return (mapbits & CHUNK_MAP_DIRTY);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
{
- size_t mapbits;
+ size_t mapbits;
- mapbits = arena_mapbits_get(chunk, pageind);
- return (mapbits & CHUNK_MAP_UNZEROED);
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return (mapbits & CHUNK_MAP_UNZEROED);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
- size_t mapbits;
+ size_t mapbits;
- mapbits = arena_mapbits_get(chunk, pageind);
- return (mapbits & CHUNK_MAP_LARGE);
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return (mapbits & CHUNK_MAP_LARGE);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
{
- size_t mapbits;
+ size_t mapbits;
- mapbits = arena_mapbits_get(chunk, pageind);
- return (mapbits & CHUNK_MAP_ALLOCATED);
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return (mapbits & CHUNK_MAP_ALLOCATED);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp;
+ size_t *mapbitsp;
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
- assert((size & PAGE_MASK) == 0);
- assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
- assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
- *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags;
+ mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ assert((size & PAGE_MASK) == 0);
+ assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
+ assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
+ *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags;
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size)
{
- size_t *mapbitsp;
+ size_t *mapbitsp;
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
- assert((size & PAGE_MASK) == 0);
- assert((*mapbitsp & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
- *mapbitsp = size | (*mapbitsp & PAGE_MASK);
+ mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ assert((size & PAGE_MASK) == 0);
+ assert((*mapbitsp & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
+ *mapbitsp = size | (*mapbitsp & PAGE_MASK);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp;
- size_t unzeroed;
-
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
- assert((size & PAGE_MASK) == 0);
- assert((flags & CHUNK_MAP_DIRTY) == flags);
- unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
- *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | unzeroed |
- CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+ size_t *mapbitsp;
+ size_t unzeroed;
+
+ mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ assert((size & PAGE_MASK) == 0);
+ assert((flags & CHUNK_MAP_DIRTY) == flags);
+ unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
+ *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | unzeroed |
+ CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
size_t binind)
{
- size_t *mapbitsp;
+ size_t *mapbitsp;
- assert(binind <= BININD_INVALID);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
- assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
- *mapbitsp = (*mapbitsp & ~CHUNK_MAP_BININD_MASK) | (binind <<
- CHUNK_MAP_BININD_SHIFT);
+ assert(binind <= BININD_INVALID);
+ mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
+ *mapbitsp = (*mapbitsp & ~CHUNK_MAP_BININD_MASK) | (binind <<
+ CHUNK_MAP_BININD_SHIFT);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
size_t binind, size_t flags)
{
- size_t *mapbitsp;
- size_t unzeroed;
-
- assert(binind < BININD_INVALID);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
- assert(pageind - runind >= map_bias);
- assert((flags & CHUNK_MAP_DIRTY) == flags);
- unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
- *mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
- flags | unzeroed | CHUNK_MAP_ALLOCATED;
+ size_t *mapbitsp;
+ size_t unzeroed;
+
+ assert(binind < BININD_INVALID);
+ mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ assert(pageind - runind >= map_bias);
+ assert((flags & CHUNK_MAP_DIRTY) == flags);
+ unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
+ *mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
+ flags | unzeroed | CHUNK_MAP_ALLOCATED;
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
size_t unzeroed)
{
- size_t *mapbitsp;
+ size_t *mapbitsp;
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
- *mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
+ mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ *mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
}
JEMALLOC_INLINE bool
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{
- cassert(config_prof);
- assert(prof_interval != 0);
+ cassert(config_prof);
+ assert(prof_interval != 0);
- arena->prof_accumbytes += accumbytes;
- if (arena->prof_accumbytes >= prof_interval) {
- arena->prof_accumbytes -= prof_interval;
- return (true);
- }
- return (false);
+ arena->prof_accumbytes += accumbytes;
+ if (arena->prof_accumbytes >= prof_interval) {
+ arena->prof_accumbytes -= prof_interval;
+ return (true);
+ }
+ return (false);
}
JEMALLOC_INLINE bool
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{
- cassert(config_prof);
+ cassert(config_prof);
- if (prof_interval == 0)
- return (false);
- return (arena_prof_accum_impl(arena, accumbytes));
+ if (prof_interval == 0)
+ return (false);
+ return (arena_prof_accum_impl(arena, accumbytes));
}
JEMALLOC_INLINE bool
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{
- cassert(config_prof);
+ cassert(config_prof);
- if (prof_interval == 0)
- return (false);
+ if (prof_interval == 0)
+ return (false);
- {
- bool ret;
+ {
+ bool ret;
- malloc_mutex_lock(&arena->lock);
- ret = arena_prof_accum_impl(arena, accumbytes);
- malloc_mutex_unlock(&arena->lock);
- return (ret);
- }
+ malloc_mutex_lock(&arena->lock);
+ ret = arena_prof_accum_impl(arena, accumbytes);
+ malloc_mutex_unlock(&arena->lock);
+ return (ret);
+ }
}
JEMALLOC_ALWAYS_INLINE size_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
- size_t binind;
-
- binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
-
- if (config_debug) {
- arena_chunk_t *chunk;
- arena_t *arena;
- size_t pageind;
- size_t actual_mapbits;
- arena_run_t *run;
- arena_bin_t *bin;
- size_t actual_binind;
- arena_bin_info_t *bin_info;
-
- assert(binind != BININD_INVALID);
- assert(binind < NBINS);
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena = chunk->arena;
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- actual_mapbits = arena_mapbits_get(chunk, pageind);
- assert(mapbits == actual_mapbits);
- assert(arena_mapbits_large_get(chunk, pageind) == 0);
- assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- (actual_mapbits >> LG_PAGE)) << LG_PAGE));
- bin = run->bin;
- actual_binind = bin - arena->bins;
- assert(binind == actual_binind);
- bin_info = &arena_bin_info[actual_binind];
- assert(((uintptr_t)ptr - ((uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
- == 0);
- }
-
- return (binind);
+ size_t binind;
+
+ binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
+
+ if (config_debug) {
+ arena_chunk_t *chunk;
+ arena_t *arena;
+ size_t pageind;
+ size_t actual_mapbits;
+ arena_run_t *run;
+ arena_bin_t *bin;
+ size_t actual_binind;
+ arena_bin_info_t *bin_info;
+
+ assert(binind != BININD_INVALID);
+ assert(binind < NBINS);
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ arena = chunk->arena;
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ actual_mapbits = arena_mapbits_get(chunk, pageind);
+ assert(mapbits == actual_mapbits);
+ assert(arena_mapbits_large_get(chunk, pageind) == 0);
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+ run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
+ (actual_mapbits >> LG_PAGE)) << LG_PAGE));
+ bin = run->bin;
+ actual_binind = bin - arena->bins;
+ assert(binind == actual_binind);
+ bin_info = &arena_bin_info[actual_binind];
+ assert(((uintptr_t)ptr - ((uintptr_t)run +
+ (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
+ == 0);
+ }
+
+ return (binind);
}
# endif /* JEMALLOC_ARENA_INLINE_A */
@@ -753,267 +753,267 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
JEMALLOC_INLINE size_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
- size_t binind = bin - arena->bins;
- assert(binind < NBINS);
- return (binind);
+ size_t binind = bin - arena->bins;
+ assert(binind < NBINS);
+ return (binind);
}
JEMALLOC_INLINE unsigned
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
- unsigned shift, diff, regind;
- size_t interval;
-
- /*
- * Freeing a pointer lower than region zero can cause assertion
- * failure.
- */
- assert((uintptr_t)ptr >= (uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset);
-
- /*
- * Avoid doing division with a variable divisor if possible. Using
- * actual division here can reduce allocator throughput by over 20%!
- */
- diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run -
- bin_info->reg0_offset);
-
- /* Rescale (factor powers of 2 out of the numerator and denominator). */
- interval = bin_info->reg_interval;
- shift = ffs(interval) - 1;
- diff >>= shift;
- interval >>= shift;
-
- if (interval == 1) {
- /* The divisor was a power of 2. */
- regind = diff;
- } else {
- /*
- * To divide by a number D that is not a power of two we
- * multiply by (2^21 / D) and then right shift by 21 positions.
- *
- * X / D
- *
- * becomes
- *
- * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
- *
- * We can omit the first three elements, because we never
- * divide by 0, and 1 and 2 are both powers of two, which are
- * handled above.
- */
+ unsigned shift, diff, regind;
+ size_t interval;
+
+ /*
+ * Freeing a pointer lower than region zero can cause assertion
+ * failure.
+ */
+ assert((uintptr_t)ptr >= (uintptr_t)run +
+ (uintptr_t)bin_info->reg0_offset);
+
+ /*
+ * Avoid doing division with a variable divisor if possible. Using
+ * actual division here can reduce allocator throughput by over 20%!
+ */
+ diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run -
+ bin_info->reg0_offset);
+
+ /* Rescale (factor powers of 2 out of the numerator and denominator). */
+ interval = bin_info->reg_interval;
+ shift = ffs(interval) - 1;
+ diff >>= shift;
+ interval >>= shift;
+
+ if (interval == 1) {
+ /* The divisor was a power of 2. */
+ regind = diff;
+ } else {
+ /*
+ * To divide by a number D that is not a power of two we
+ * multiply by (2^21 / D) and then right shift by 21 positions.
+ *
+ * X / D
+ *
+ * becomes
+ *
+ * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
+ *
+ * We can omit the first three elements, because we never
+ * divide by 0, and 1 and 2 are both powers of two, which are
+ * handled above.
+ */
#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1)
- static const unsigned interval_invs[] = {
- SIZE_INV(3),
- SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
- SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
- SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
- SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
- SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
- SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
- SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
- };
-
- if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) +
- 2)) {
- regind = (diff * interval_invs[interval - 3]) >>
- SIZE_INV_SHIFT;
- } else
- regind = diff / interval;
+ static const unsigned interval_invs[] = {
+ SIZE_INV(3),
+ SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
+ SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
+ SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
+ SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
+ SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
+ SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
+ SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
+ };
+
+ if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) +
+ 2)) {
+ regind = (diff * interval_invs[interval - 3]) >>
+ SIZE_INV_SHIFT;
+ } else
+ regind = diff / interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
- }
- assert(diff == regind * interval);
- assert(regind < bin_info->nregs);
+ }
+ assert(diff == regind * interval);
+ assert(regind < bin_info->nregs);
- return (regind);
+ return (regind);
}
JEMALLOC_INLINE prof_ctx_t *
arena_prof_ctx_get(const void *ptr)
{
- prof_ctx_t *ret;
- arena_chunk_t *chunk;
- size_t pageind, mapbits;
-
- cassert(config_prof);
- assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
- if ((mapbits & CHUNK_MAP_LARGE) == 0) {
- if (prof_promote)
- ret = (prof_ctx_t *)(uintptr_t)1U;
- else {
- arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
- (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
- LG_PAGE));
- size_t binind = arena_ptr_small_binind_get(ptr,
- mapbits);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
- unsigned regind;
-
- regind = arena_run_regind(run, bin_info, ptr);
- ret = *(prof_ctx_t **)((uintptr_t)run +
- bin_info->ctx0_offset + (regind *
- sizeof(prof_ctx_t *)));
- }
- } else
- ret = arena_mapp_get(chunk, pageind)->prof_ctx;
-
- return (ret);
+ prof_ctx_t *ret;
+ arena_chunk_t *chunk;
+ size_t pageind, mapbits;
+
+ cassert(config_prof);
+ assert(ptr != NULL);
+ assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+ if ((mapbits & CHUNK_MAP_LARGE) == 0) {
+ if (prof_promote)
+ ret = (prof_ctx_t *)(uintptr_t)1U;
+ else {
+ arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
+ (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
+ LG_PAGE));
+ size_t binind = arena_ptr_small_binind_get(ptr,
+ mapbits);
+ arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ unsigned regind;
+
+ regind = arena_run_regind(run, bin_info, ptr);
+ ret = *(prof_ctx_t **)((uintptr_t)run +
+ bin_info->ctx0_offset + (regind *
+ sizeof(prof_ctx_t *)));
+ }
+ } else
+ ret = arena_mapp_get(chunk, pageind)->prof_ctx;
+
+ return (ret);
}
JEMALLOC_INLINE void
arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
- arena_chunk_t *chunk;
- size_t pageind, mapbits;
-
- cassert(config_prof);
- assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
- if ((mapbits & CHUNK_MAP_LARGE) == 0) {
- if (prof_promote == false) {
- arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
- (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
- LG_PAGE));
- size_t binind;
- arena_bin_info_t *bin_info;
- unsigned regind;
-
- binind = arena_ptr_small_binind_get(ptr, mapbits);
- bin_info = &arena_bin_info[binind];
- regind = arena_run_regind(run, bin_info, ptr);
-
- *((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset
- + (regind * sizeof(prof_ctx_t *)))) = ctx;
- } else
- assert((uintptr_t)ctx == (uintptr_t)1U);
- } else
- arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
+ arena_chunk_t *chunk;
+ size_t pageind, mapbits;
+
+ cassert(config_prof);
+ assert(ptr != NULL);
+ assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+ if ((mapbits & CHUNK_MAP_LARGE) == 0) {
+ if (prof_promote == false) {
+ arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
+ (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
+ LG_PAGE));
+ size_t binind;
+ arena_bin_info_t *bin_info;
+ unsigned regind;
+
+ binind = arena_ptr_small_binind_get(ptr, mapbits);
+ bin_info = &arena_bin_info[binind];
+ regind = arena_run_regind(run, bin_info, ptr);
+
+ *((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset
+ + (regind * sizeof(prof_ctx_t *)))) = ctx;
+ } else
+ assert((uintptr_t)ctx == (uintptr_t)1U);
+ } else
+ arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
}
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
{
- tcache_t *tcache;
-
- assert(size != 0);
- assert(size <= arena_maxclass);
-
- if (size <= SMALL_MAXCLASS) {
- if (try_tcache && (tcache = tcache_get(true)) != NULL)
- return (tcache_alloc_small(tcache, size, zero));
- else {
- return (arena_malloc_small(choose_arena(arena), size,
- zero));
- }
- } else {
- /*
- * Initialize tcache after checking size in order to avoid
- * infinite recursion during tcache initialization.
- */
- if (try_tcache && size <= tcache_maxclass && (tcache =
- tcache_get(true)) != NULL)
- return (tcache_alloc_large(tcache, size, zero));
- else {
- return (arena_malloc_large(choose_arena(arena), size,
- zero));
- }
- }
+ tcache_t *tcache;
+
+ assert(size != 0);
+ assert(size <= arena_maxclass);
+
+ if (size <= SMALL_MAXCLASS) {
+ if (try_tcache && (tcache = tcache_get(true)) != NULL)
+ return (tcache_alloc_small(tcache, size, zero));
+ else {
+ return (arena_malloc_small(choose_arena(arena), size,
+ zero));
+ }
+ } else {
+ /*
+ * Initialize tcache after checking size in order to avoid
+ * infinite recursion during tcache initialization.
+ */
+ if (try_tcache && size <= tcache_maxclass && (tcache =
+ tcache_get(true)) != NULL)
+ return (tcache_alloc_large(tcache, size, zero));
+ else {
+ return (arena_malloc_large(choose_arena(arena), size,
+ zero));
+ }
+ }
}
/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(const void *ptr, bool demote)
{
- size_t ret;
- arena_chunk_t *chunk;
- size_t pageind, binind;
-
- assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
- binind = arena_mapbits_binind_get(chunk, pageind);
- if (binind == BININD_INVALID || (config_prof && demote == false &&
- prof_promote && arena_mapbits_large_get(chunk, pageind) != 0)) {
- /*
- * Large allocation. In the common case (demote == true), and
- * as this is an inline function, most callers will only end up
- * looking at binind to determine that ptr is a small
- * allocation.
- */
- assert(((uintptr_t)ptr & PAGE_MASK) == 0);
- ret = arena_mapbits_large_size_get(chunk, pageind);
- assert(ret != 0);
- assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
- assert(ret == PAGE || arena_mapbits_large_size_get(chunk,
- pageind+(ret>>LG_PAGE)-1) == 0);
- assert(binind == arena_mapbits_binind_get(chunk,
- pageind+(ret>>LG_PAGE)-1));
- assert(arena_mapbits_dirty_get(chunk, pageind) ==
- arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
- } else {
- /*
- * Small allocation (possibly promoted to a large object due to
- * prof_promote).
- */
- assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
- arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
- pageind)) == binind);
- ret = arena_bin_info[binind].reg_size;
- }
-
- return (ret);
+ size_t ret;
+ arena_chunk_t *chunk;
+ size_t pageind, binind;
+
+ assert(ptr != NULL);
+ assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+ binind = arena_mapbits_binind_get(chunk, pageind);
+ if (binind == BININD_INVALID || (config_prof && demote == false &&
+ prof_promote && arena_mapbits_large_get(chunk, pageind) != 0)) {
+ /*
+ * Large allocation. In the common case (demote == true), and
+ * as this is an inline function, most callers will only end up
+ * looking at binind to determine that ptr is a small
+ * allocation.
+ */
+ assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+ ret = arena_mapbits_large_size_get(chunk, pageind);
+ assert(ret != 0);
+ assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
+ assert(ret == PAGE || arena_mapbits_large_size_get(chunk,
+ pageind+(ret>>LG_PAGE)-1) == 0);
+ assert(binind == arena_mapbits_binind_get(chunk,
+ pageind+(ret>>LG_PAGE)-1));
+ assert(arena_mapbits_dirty_get(chunk, pageind) ==
+ arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
+ } else {
+ /*
+ * Small allocation (possibly promoted to a large object due to
+ * prof_promote).
+ */
+ assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
+ arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
+ pageind)) == binind);
+ ret = arena_bin_info[binind].reg_size;
+ }
+
+ return (ret);
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
{
- size_t pageind, mapbits;
- tcache_t *tcache;
-
- assert(arena != NULL);
- assert(chunk->arena == arena);
- assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- mapbits = arena_mapbits_get(chunk, pageind);
- assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
- if ((mapbits & CHUNK_MAP_LARGE) == 0) {
- /* Small allocation. */
- if (try_tcache && (tcache = tcache_get(false)) != NULL) {
- size_t binind;
-
- binind = arena_ptr_small_binind_get(ptr, mapbits);
- tcache_dalloc_small(tcache, ptr, binind);
- } else
- arena_dalloc_small(arena, chunk, ptr, pageind);
- } else {
- size_t size = arena_mapbits_large_size_get(chunk, pageind);
-
- assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-
- if (try_tcache && size <= tcache_maxclass && (tcache =
- tcache_get(false)) != NULL) {
- tcache_dalloc_large(tcache, ptr, size);
- } else
- arena_dalloc_large(arena, chunk, ptr);
- }
+ size_t pageind, mapbits;
+ tcache_t *tcache;
+
+ assert(arena != NULL);
+ assert(chunk->arena == arena);
+ assert(ptr != NULL);
+ assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+ if ((mapbits & CHUNK_MAP_LARGE) == 0) {
+ /* Small allocation. */
+ if (try_tcache && (tcache = tcache_get(false)) != NULL) {
+ size_t binind;
+
+ binind = arena_ptr_small_binind_get(ptr, mapbits);
+ tcache_dalloc_small(tcache, ptr, binind);
+ } else
+ arena_dalloc_small(arena, chunk, ptr, pageind);
+ } else {
+ size_t size = arena_mapbits_large_size_get(chunk, pageind);
+
+ assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+
+ if (try_tcache && size <= tcache_maxclass && (tcache =
+ tcache_get(false)) != NULL) {
+ tcache_dalloc_large(tcache, ptr, size);
+ } else
+ arena_dalloc_large(arena, chunk, ptr);
+ }
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif
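The long comment in arena_run_regind() above justifies replacing a variable-divisor division with a multiply-and-shift. A worked sketch of that identity, assuming a 21-bit shift (32-bit unsigned with LG_RUN_MAXREGS == 11, which is what the "2^21 / D" in the comment corresponds to):

/* Reciprocal-multiplication division as used by arena_run_regind():
 * X / D becomes (X * SIZE_INV(D)) >> SIZE_INV_SHIFT for small X. */
#include <assert.h>
#include <stdio.h>

#define	SIZE_INV_SHIFT	21			/* assumed: 32 - LG_RUN_MAXREGS */
#define	SIZE_INV(s)	(((1U << SIZE_INV_SHIFT) / (s)) + 1)

int
main(void)
{
	unsigned diff = 48, interval = 3;	/* rescaled offset and region size */
	unsigned regind = (diff * SIZE_INV(interval)) >> SIZE_INV_SHIFT;

	assert(regind == diff / interval);	/* 16 == 48 / 3 */
	printf("regind = %u\n", regind);
	return (0);
}

Because diff is always an exact multiple of the rescaled interval, the rounded-up reciprocal never lands on the wrong side, which is what the assert(diff == regind * interval) in the real function double-checks.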
diff --git a/src/rt/jemalloc/include/jemalloc/internal/atomic.h b/src/rt/jemalloc/include/jemalloc/internal/atomic.h
index 76e7a350fc11b..11a7b47fe0f99 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/atomic.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/atomic.h
@@ -38,105 +38,105 @@ JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
- return (__sync_add_and_fetch(p, x));
+ return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
- return (__sync_sub_and_fetch(p, x));
+ return (__sync_sub_and_fetch(p, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
- return (InterlockedExchangeAdd64(p, x));
+ return (InterlockedExchangeAdd64(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
- return (InterlockedExchangeAdd64(p, -((int64_t)x)));
+ return (InterlockedExchangeAdd64(p, -((int64_t)x)));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
- return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
+ return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
- return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
+ return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
# elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
- asm volatile (
- "lock; xaddq %0, %1;"
- : "+r" (x), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
+ asm volatile (
+ "lock; xaddq %0, %1;"
+ : "+r" (x), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
- return (x);
+ return (x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
- x = (uint64_t)(-(int64_t)x);
- asm volatile (
- "lock; xaddq %0, %1;"
- : "+r" (x), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
+ x = (uint64_t)(-(int64_t)x);
+ asm volatile (
+ "lock; xaddq %0, %1;"
+ : "+r" (x), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
- return (x);
+ return (x);
}
# elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
- /*
- * atomic_fetchadd_64() doesn't exist, but we only ever use this
- * function on LP64 systems, so atomic_fetchadd_long() will do.
- */
- assert(sizeof(uint64_t) == sizeof(unsigned long));
+ /*
+ * atomic_fetchadd_64() doesn't exist, but we only ever use this
+ * function on LP64 systems, so atomic_fetchadd_long() will do.
+ */
+ assert(sizeof(uint64_t) == sizeof(unsigned long));
- return (atomic_fetchadd_long(p, (unsigned long)x) + x);
+ return (atomic_fetchadd_long(p, (unsigned long)x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
- assert(sizeof(uint64_t) == sizeof(unsigned long));
+ assert(sizeof(uint64_t) == sizeof(unsigned long));
- return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
+ return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
}
# elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
- return (__sync_add_and_fetch(p, x));
+ return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
- return (__sync_sub_and_fetch(p, x));
+ return (__sync_sub_and_fetch(p, x));
}
# else
# error "Missing implementation for 64-bit atomic operations"
@@ -150,97 +150,97 @@ JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
- return (__sync_add_and_fetch(p, x));
+ return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
- return (__sync_sub_and_fetch(p, x));
+ return (__sync_sub_and_fetch(p, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
- return (InterlockedExchangeAdd(p, x));
+ return (InterlockedExchangeAdd(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
- return (InterlockedExchangeAdd(p, -((int32_t)x)));
+ return (InterlockedExchangeAdd(p, -((int32_t)x)));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
- return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
+ return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
- return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
+ return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
- asm volatile (
- "lock; xaddl %0, %1;"
- : "+r" (x), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
+ asm volatile (
+ "lock; xaddl %0, %1;"
+ : "+r" (x), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
- return (x);
+ return (x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
- x = (uint32_t)(-(int32_t)x);
- asm volatile (
- "lock; xaddl %0, %1;"
- : "+r" (x), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
+ x = (uint32_t)(-(int32_t)x);
+ asm volatile (
+ "lock; xaddl %0, %1;"
+ : "+r" (x), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
- return (x);
+ return (x);
}
#elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
- return (atomic_fetchadd_32(p, x) + x);
+ return (atomic_fetchadd_32(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
- return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
+ return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
}
#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
- return (__sync_add_and_fetch(p, x));
+ return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
- return (__sync_sub_and_fetch(p, x));
+ return (__sync_sub_and_fetch(p, x));
}
#else
# error "Missing implementation for 32-bit atomic operations"
@@ -253,9 +253,9 @@ atomic_add_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
- return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
+ return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
- return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
+ return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
@@ -264,11 +264,11 @@ atomic_sub_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
- return ((size_t)atomic_add_uint64((uint64_t *)p,
- (uint64_t)-((int64_t)x)));
+ return ((size_t)atomic_add_uint64((uint64_t *)p,
+ (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
- return ((size_t)atomic_add_uint32((uint32_t *)p,
- (uint32_t)-((int32_t)x)));
+ return ((size_t)atomic_add_uint32((uint32_t *)p,
+ (uint32_t)-((int32_t)x)));
#endif
}
@@ -279,9 +279,9 @@ atomic_add_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
- return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
+ return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_INT == 2)
- return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
+ return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
@@ -290,11 +290,11 @@ atomic_sub_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
- return ((unsigned)atomic_add_uint64((uint64_t *)p,
- (uint64_t)-((int64_t)x)));
+ return ((unsigned)atomic_add_uint64((uint64_t *)p,
+ (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_INT == 2)
- return ((unsigned)atomic_add_uint32((uint32_t *)p,
- (uint32_t)-((int32_t)x)));
+ return ((unsigned)atomic_add_uint32((uint32_t *)p,
+ (uint32_t)-((int32_t)x)));
#endif
}
/******************************************************************************/
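The GCC branch above maps atomic_add_uint64/atomic_sub_uint64 (and their 32-bit variants) straight onto the __sync builtins. A tiny, self-contained usage sketch of that path, with a hypothetical shared counter (GCC/Clang only, link with -pthread):

/* Hypothetical stats counter bumped from several threads via the same
 * __sync builtins used in the GCC branch above. */
#include <pthread.h>
#include <stdio.h>

static unsigned long counter;

static void *
worker(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < 100000; i++)
		__sync_add_and_fetch(&counter, 1);
	return (NULL);
}

int
main(void)
{
	pthread_t threads[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, worker, NULL);
	for (i = 0; i < 4; i++)
		pthread_join(&threads[i], NULL);
	printf("%lu\n", counter);	/* 400000 */
	return (0);
}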
diff --git a/src/rt/jemalloc/include/jemalloc/internal/bitmap.h b/src/rt/jemalloc/include/jemalloc/internal/bitmap.h
index 90064183c22ba..605ebac58c17a 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/bitmap.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/bitmap.h
@@ -24,22 +24,22 @@ typedef unsigned long bitmap_t;
#ifdef JEMALLOC_H_STRUCTS
struct bitmap_level_s {
- /* Offset of this level's groups within the array of groups. */
- size_t group_offset;
+ /* Offset of this level's groups within the array of groups. */
+ size_t group_offset;
};
struct bitmap_info_s {
- /* Logical number of bits in bitmap (stored at bottom level). */
- size_t nbits;
+ /* Logical number of bits in bitmap (stored at bottom level). */
+ size_t nbits;
- /* Number of levels necessary for nbits. */
- unsigned nlevels;
+ /* Number of levels necessary for nbits. */
+ unsigned nlevels;
- /*
- * Only the first (nlevels+1) elements are used, and levels are ordered
- * bottom to top (e.g. the bottom level is stored in levels[0]).
- */
- bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
+ /*
+ * Only the first (nlevels+1) elements are used, and levels are ordered
+ * bottom to top (e.g. the bottom level is stored in levels[0]).
+ */
+ bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
};
#endif /* JEMALLOC_H_STRUCTS */
@@ -67,115 +67,115 @@ void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
JEMALLOC_INLINE bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
- unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
- bitmap_t rg = bitmap[rgoff];
- /* The bitmap is full iff the root group is 0. */
- return (rg == 0);
+ unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
+ bitmap_t rg = bitmap[rgoff];
+ /* The bitmap is full iff the root group is 0. */
+ return (rg == 0);
}
JEMALLOC_INLINE bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
- size_t goff;
- bitmap_t g;
+ size_t goff;
+ bitmap_t g;
- assert(bit < binfo->nbits);
- goff = bit >> LG_BITMAP_GROUP_NBITS;
- g = bitmap[goff];
- return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
+ assert(bit < binfo->nbits);
+ goff = bit >> LG_BITMAP_GROUP_NBITS;
+ g = bitmap[goff];
+ return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
}
JEMALLOC_INLINE void
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
- size_t goff;
- bitmap_t *gp;
- bitmap_t g;
-
- assert(bit < binfo->nbits);
- assert(bitmap_get(bitmap, binfo, bit) == false);
- goff = bit >> LG_BITMAP_GROUP_NBITS;
- gp = &bitmap[goff];
- g = *gp;
- assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
- g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
- *gp = g;
- assert(bitmap_get(bitmap, binfo, bit));
- /* Propagate group state transitions up the tree. */
- if (g == 0) {
- unsigned i;
- for (i = 1; i < binfo->nlevels; i++) {
- bit = goff;
- goff = bit >> LG_BITMAP_GROUP_NBITS;
- gp = &bitmap[binfo->levels[i].group_offset + goff];
- g = *gp;
- assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
- g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
- *gp = g;
- if (g != 0)
- break;
- }
- }
+ size_t goff;
+ bitmap_t *gp;
+ bitmap_t g;
+
+ assert(bit < binfo->nbits);
+ assert(bitmap_get(bitmap, binfo, bit) == false);
+ goff = bit >> LG_BITMAP_GROUP_NBITS;
+ gp = &bitmap[goff];
+ g = *gp;
+ assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
+ g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+ *gp = g;
+ assert(bitmap_get(bitmap, binfo, bit));
+ /* Propagate group state transitions up the tree. */
+ if (g == 0) {
+ unsigned i;
+ for (i = 1; i < binfo->nlevels; i++) {
+ bit = goff;
+ goff = bit >> LG_BITMAP_GROUP_NBITS;
+ gp = &bitmap[binfo->levels[i].group_offset + goff];
+ g = *gp;
+ assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
+ g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+ *gp = g;
+ if (g != 0)
+ break;
+ }
+ }
}
/* sfu: set first unset. */
JEMALLOC_INLINE size_t
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
- size_t bit;
- bitmap_t g;
- unsigned i;
-
- assert(bitmap_full(bitmap, binfo) == false);
-
- i = binfo->nlevels - 1;
- g = bitmap[binfo->levels[i].group_offset];
- bit = ffsl(g) - 1;
- while (i > 0) {
- i--;
- g = bitmap[binfo->levels[i].group_offset + bit];
- bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1);
- }
-
- bitmap_set(bitmap, binfo, bit);
- return (bit);
+ size_t bit;
+ bitmap_t g;
+ unsigned i;
+
+ assert(bitmap_full(bitmap, binfo) == false);
+
+ i = binfo->nlevels - 1;
+ g = bitmap[binfo->levels[i].group_offset];
+ bit = ffsl(g) - 1;
+ while (i > 0) {
+ i--;
+ g = bitmap[binfo->levels[i].group_offset + bit];
+ bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1);
+ }
+
+ bitmap_set(bitmap, binfo, bit);
+ return (bit);
}
JEMALLOC_INLINE void
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
- size_t goff;
- bitmap_t *gp;
- bitmap_t g;
- bool propagate;
-
- assert(bit < binfo->nbits);
- assert(bitmap_get(bitmap, binfo, bit));
- goff = bit >> LG_BITMAP_GROUP_NBITS;
- gp = &bitmap[goff];
- g = *gp;
- propagate = (g == 0);
- assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
- g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
- *gp = g;
- assert(bitmap_get(bitmap, binfo, bit) == false);
- /* Propagate group state transitions up the tree. */
- if (propagate) {
- unsigned i;
- for (i = 1; i < binfo->nlevels; i++) {
- bit = goff;
- goff = bit >> LG_BITMAP_GROUP_NBITS;
- gp = &bitmap[binfo->levels[i].group_offset + goff];
- g = *gp;
- propagate = (g == 0);
- assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
- == 0);
- g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
- *gp = g;
- if (propagate == false)
- break;
- }
- }
+ size_t goff;
+ bitmap_t *gp;
+ bitmap_t g;
+ bool propagate;
+
+ assert(bit < binfo->nbits);
+ assert(bitmap_get(bitmap, binfo, bit));
+ goff = bit >> LG_BITMAP_GROUP_NBITS;
+ gp = &bitmap[goff];
+ g = *gp;
+ propagate = (g == 0);
+ assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
+ g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+ *gp = g;
+ assert(bitmap_get(bitmap, binfo, bit) == false);
+ /* Propagate group state transitions up the tree. */
+ if (propagate) {
+ unsigned i;
+ for (i = 1; i < binfo->nlevels; i++) {
+ bit = goff;
+ goff = bit >> LG_BITMAP_GROUP_NBITS;
+ gp = &bitmap[binfo->levels[i].group_offset + goff];
+ g = *gp;
+ propagate = (g == 0);
+ assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
+ == 0);
+ g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
+ *gp = g;
+ if (propagate == false)
+ break;
+ }
+ }
}
#endif
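bitmap_sfu() above walks a tree of group words, but its core step is just "find the lowest set bit, then clear it", under the inverted convention that a 1 bit means the slot is free. A flat, single-level sketch of that step, substituting the GCC/Clang __builtin_ffsl for ffsl:

/* Single-level sketch of "set first unset": a 1 bit in the group word means
 * the slot is still free.  The real bitmap layers summary groups on top so a
 * full group can be skipped without scanning it. */
#include <assert.h>
#include <stddef.h>

static unsigned long group = ~0UL;	/* all slots free initially */

static size_t
sfu_flat(void)
{
	size_t bit;

	assert(group != 0);		/* i.e. bitmap_full() == false */
	bit = (size_t)(__builtin_ffsl((long)group) - 1);
	group ^= 1UL << bit;		/* mark the slot allocated */
	return (bit);
}

int
main(void)
{
	assert(sfu_flat() == 0);	/* first free slot */
	assert(sfu_flat() == 1);	/* next one */
	return (0);
}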
diff --git a/src/rt/jemalloc/include/jemalloc/internal/chunk.h b/src/rt/jemalloc/include/jemalloc/internal/chunk.h
index ffd96ea01a80b..87d8700dac8ad 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/chunk.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/chunk.h
@@ -9,15 +9,15 @@
/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \
- ((void *)((uintptr_t)(a) & ~chunksize_mask))
+ ((void *)((uintptr_t)(a) & ~chunksize_mask))
/* Return the chunk offset of address a. */
#define CHUNK_ADDR2OFFSET(a) \
- ((size_t)((uintptr_t)(a) & chunksize_mask))
+ ((size_t)((uintptr_t)(a) & chunksize_mask))
/* Return the smallest chunk multiple that is >= s. */
#define CHUNK_CEILING(s) \
- (((s) + chunksize_mask) & ~chunksize_mask)
+ (((s) + chunksize_mask) & ~chunksize_mask)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
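The three macros above are pure bit masking against a power-of-two chunk size. A quick self-check, assuming a 4 MiB chunk purely for illustration (the real chunksize/chunksize_mask are runtime globals):

/* Illustration of the chunk address macros, with an assumed 4 MiB chunk. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define	CHUNKSIZE	((uintptr_t)1 << 22)	/* 4 MiB, illustrative only */
#define	CHUNKSIZE_MASK	(CHUNKSIZE - 1)

#define	CHUNK_ADDR2BASE(a)	((void *)((uintptr_t)(a) & ~CHUNKSIZE_MASK))
#define	CHUNK_ADDR2OFFSET(a)	((size_t)((uintptr_t)(a) & CHUNKSIZE_MASK))
#define	CHUNK_CEILING(s)	(((s) + CHUNKSIZE_MASK) & ~CHUNKSIZE_MASK)

int
main(void)
{
	uintptr_t a = (5 * CHUNKSIZE) + 0x1234;	/* address inside chunk #5 */

	assert((uintptr_t)CHUNK_ADDR2BASE(a) == 5 * CHUNKSIZE);
	assert(CHUNK_ADDR2OFFSET(a) == 0x1234);
	assert(CHUNK_CEILING((size_t)1) == CHUNKSIZE);
	return (0);
}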
diff --git a/src/rt/jemalloc/include/jemalloc/internal/chunk_dss.h b/src/rt/jemalloc/include/jemalloc/internal/chunk_dss.h
index ebe501ba289ce..6585f071bbecd 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/chunk_dss.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/chunk_dss.h
@@ -2,11 +2,11 @@
#ifdef JEMALLOC_H_TYPES
typedef enum {
- dss_prec_disabled = 0,
- dss_prec_primary = 1,
- dss_prec_secondary = 2,
+ dss_prec_disabled = 0,
+ dss_prec_primary = 1,
+ dss_prec_secondary = 2,
- dss_prec_limit = 3
+ dss_prec_limit = 3
} dss_prec_t ;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"
diff --git a/src/rt/jemalloc/include/jemalloc/internal/ckh.h b/src/rt/jemalloc/include/jemalloc/internal/ckh.h
index f58630a65a266..50c39ed958192 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/ckh.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/ckh.h
@@ -25,41 +25,41 @@ typedef bool ckh_keycomp_t (const void *, const void *);
/* Hash table cell. */
struct ckhc_s {
- const void *key;
- const void *data;
+ const void *key;
+ const void *data;
};
struct ckh_s {
#ifdef CKH_COUNT
- /* Counters used to get an idea of performance. */
- uint64_t ngrows;
- uint64_t nshrinks;
- uint64_t nshrinkfails;
- uint64_t ninserts;
- uint64_t nrelocs;
+ /* Counters used to get an idea of performance. */
+ uint64_t ngrows;
+ uint64_t nshrinks;
+ uint64_t nshrinkfails;
+ uint64_t ninserts;
+ uint64_t nrelocs;
#endif
- /* Used for pseudo-random number generation. */
+ /* Used for pseudo-random number generation. */
#define CKH_A 1103515241
#define CKH_C 12347
- uint32_t prng_state;
+ uint32_t prng_state;
- /* Total number of items. */
- size_t count;
+ /* Total number of items. */
+ size_t count;
- /*
- * Minimum and current number of hash table buckets. There are
- * 2^LG_CKH_BUCKET_CELLS cells per bucket.
- */
- unsigned lg_minbuckets;
- unsigned lg_curbuckets;
+ /*
+ * Minimum and current number of hash table buckets. There are
+ * 2^LG_CKH_BUCKET_CELLS cells per bucket.
+ */
+ unsigned lg_minbuckets;
+ unsigned lg_curbuckets;
- /* Hash and comparison functions. */
- ckh_hash_t *hash;
- ckh_keycomp_t *keycomp;
+ /* Hash and comparison functions. */
+ ckh_hash_t *hash;
+ ckh_keycomp_t *keycomp;
- /* Hash table with 2^lg_curbuckets buckets. */
- ckhc_t *tab;
+ /* Hash table with 2^lg_curbuckets buckets. */
+ ckhc_t *tab;
};
#endif /* JEMALLOC_H_STRUCTS */
diff --git a/src/rt/jemalloc/include/jemalloc/internal/ctl.h b/src/rt/jemalloc/include/jemalloc/internal/ctl.h
index cc17461d840b9..0ffecc5f2a23f 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/ctl.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/ctl.h
@@ -12,58 +12,58 @@ typedef struct ctl_stats_s ctl_stats_t;
#ifdef JEMALLOC_H_STRUCTS
struct ctl_node_s {
- bool named;
+ bool named;
};
struct ctl_named_node_s {
- struct ctl_node_s node;
- const char *name;
- /* If (nchildren == 0), this is a terminal node. */
- unsigned nchildren;
- const ctl_node_t *children;
- int (*ctl)(const size_t *, size_t, void *, size_t *,
- void *, size_t);
+ struct ctl_node_s node;
+ const char *name;
+ /* If (nchildren == 0), this is a terminal node. */
+ unsigned nchildren;
+ const ctl_node_t *children;
+ int (*ctl)(const size_t *, size_t, void *, size_t *,
+ void *, size_t);
};
struct ctl_indexed_node_s {
- struct ctl_node_s node;
- const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
+ struct ctl_node_s node;
+ const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
};
struct ctl_arena_stats_s {
- bool initialized;
- unsigned nthreads;
- const char *dss;
- size_t pactive;
- size_t pdirty;
- arena_stats_t astats;
-
- /* Aggregate stats for small size classes, based on bin stats. */
- size_t allocated_small;
- uint64_t nmalloc_small;
- uint64_t ndalloc_small;
- uint64_t nrequests_small;
-
- malloc_bin_stats_t bstats[NBINS];
- malloc_large_stats_t *lstats; /* nlclasses elements. */
+ bool initialized;
+ unsigned nthreads;
+ const char *dss;
+ size_t pactive;
+ size_t pdirty;
+ arena_stats_t astats;
+
+ /* Aggregate stats for small size classes, based on bin stats. */
+ size_t allocated_small;
+ uint64_t nmalloc_small;
+ uint64_t ndalloc_small;
+ uint64_t nrequests_small;
+
+ malloc_bin_stats_t bstats[NBINS];
+ malloc_large_stats_t *lstats; /* nlclasses elements. */
};
struct ctl_stats_s {
- size_t allocated;
- size_t active;
- size_t mapped;
- struct {
- size_t current; /* stats_chunks.curchunks */
- uint64_t total; /* stats_chunks.nchunks */
- size_t high; /* stats_chunks.highchunks */
- } chunks;
- struct {
- size_t allocated; /* huge_allocated */
- uint64_t nmalloc; /* huge_nmalloc */
- uint64_t ndalloc; /* huge_ndalloc */
- } huge;
- unsigned narenas;
- ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
+ size_t allocated;
+ size_t active;
+ size_t mapped;
+ struct {
+ size_t current; /* stats_chunks.curchunks */
+ uint64_t total; /* stats_chunks.nchunks */
+ size_t high; /* stats_chunks.highchunks */
+ } chunks;
+ struct {
+ size_t allocated; /* huge_allocated */
+ uint64_t nmalloc; /* huge_nmalloc */
+ uint64_t ndalloc; /* huge_ndalloc */
+ } huge;
+ unsigned narenas;
+ ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
#endif /* JEMALLOC_H_STRUCTS */
@@ -82,30 +82,30 @@ void ctl_postfork_parent(void);
void ctl_postfork_child(void);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
- if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
- != 0) { \
- malloc_printf( \
- ": Failure in xmallctl(\"%s\", ...)\n", \
- name); \
- abort(); \
- } \
+ if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
+ != 0) { \
+ malloc_printf( \
+ ": Failure in xmallctl(\"%s\", ...)\n", \
+ name); \
+ abort(); \
+ } \
} while (0)
#define xmallctlnametomib(name, mibp, miblenp) do { \
- if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
- malloc_printf(": Failure in " \
- "xmallctlnametomib(\"%s\", ...)\n", name); \
- abort(); \
- } \
+ if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
+ malloc_printf(": Failure in " \
+ "xmallctlnametomib(\"%s\", ...)\n", name); \
+ abort(); \
+ } \
} while (0)
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
- if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
- newlen) != 0) { \
- malloc_write( \
- ": Failure in xmallctlbymib()\n"); \
- abort(); \
- } \
+ if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
+ newlen) != 0) { \
+ malloc_write( \
+ ": Failure in xmallctlbymib()\n"); \
+ abort(); \
+ } \
} while (0)
#endif /* JEMALLOC_H_EXTERNS */
@@ -114,3 +114,4 @@ void ctl_postfork_child(void);
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
+
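The xmallctl wrappers above are abort-on-failure conveniences around the public mallctl() entry point. For comparison, a plain non-aborting use of that interface; the "epoch" and "stats.allocated" names come from the documented mallctl namespace, and the snippet assumes a statistics-enabled, unprefixed jemalloc build:

/* Refresh and read an allocator statistic through mallctl().  Assumes
 * jemalloc was configured with --enable-stats and public (unprefixed)
 * symbol names. */
#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	uint64_t epoch = 1;
	size_t sz, allocated;

	/* Writing "epoch" refreshes the cached statistics. */
	sz = sizeof(epoch);
	if (mallctl("epoch", &epoch, &sz, &epoch, sz) != 0)
		return (1);

	sz = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0)
		return (1);

	printf("allocated: %zu\n", allocated);
	return (0);
}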
diff --git a/src/rt/jemalloc/include/jemalloc/internal/extent.h b/src/rt/jemalloc/include/jemalloc/internal/extent.h
index b79c1e0c9044c..ba95ca816bd9a 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/extent.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/extent.h
@@ -9,23 +9,23 @@ typedef struct extent_node_s extent_node_t;
/* Tree of extents. */
struct extent_node_s {
- /* Linkage for the size/address-ordered tree. */
- rb_node(extent_node_t) link_szad;
+ /* Linkage for the size/address-ordered tree. */
+ rb_node(extent_node_t) link_szad;
- /* Linkage for the address-ordered tree. */
- rb_node(extent_node_t) link_ad;
+ /* Linkage for the address-ordered tree. */
+ rb_node(extent_node_t) link_ad;
- /* Profile counters, used for huge objects. */
- prof_ctx_t *prof_ctx;
+ /* Profile counters, used for huge objects. */
+ prof_ctx_t *prof_ctx;
- /* Pointer to the extent that this tree node is responsible for. */
- void *addr;
+ /* Pointer to the extent that this tree node is responsible for. */
+ void *addr;
- /* Total region size. */
- size_t size;
+ /* Total region size. */
+ size_t size;
- /* True if zero-filled; used by chunk recycling code. */
- bool zeroed;
+ /* True if zero-filled; used by chunk recycling code. */
+ bool zeroed;
};
typedef rb_tree(extent_node_t) extent_tree_t;
@@ -43,3 +43,4 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
+
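[editor's note] Each extent_node_t carries two rb_node links so the same node can sit simultaneously in a size/address-ordered tree and an address-ordered tree. The szad ordering makes best-fit recycling a single tree query: fill in a key node with the requested size and a NULL address, then take the next-or-equal match. A hedged sketch of that lookup (the helper and variable names are illustrative; extent_tree_szad_nsearch() is one of the functions declared by the rb_proto() lines in this header):

    /* Illustrative best-fit lookup: smallest tracked extent with size >= want. */
    static extent_node_t *
    extent_best_fit(extent_tree_t *chunks_szad, size_t want)
    {
        extent_node_t key;

        key.addr = NULL;    /* NULL sorts lowest among extents of equal size. */
        key.size = want;
        return (extent_tree_szad_nsearch(chunks_szad, &key));
    }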
diff --git a/src/rt/jemalloc/include/jemalloc/internal/hash.h b/src/rt/jemalloc/include/jemalloc/internal/hash.h
index d59c45a5fb31c..56ecc793b365e 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/hash.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/hash.h
@@ -30,284 +30,284 @@ JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int8_t r)
{
- return (x << r) | (x >> (32 - r));
+ return (x << r) | (x >> (32 - r));
}
JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int8_t r)
{
- return (x << r) | (x >> (64 - r));
+ return (x << r) | (x >> (64 - r));
}
JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{
- return p[i];
+ return p[i];
}
JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{
- return p[i];
+ return p[i];
}
JEMALLOC_INLINE uint32_t
hash_fmix_32(uint32_t h)
{
- h ^= h >> 16;
- h *= 0x85ebca6b;
- h ^= h >> 13;
- h *= 0xc2b2ae35;
- h ^= h >> 16;
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
- return h;
+ return h;
}
JEMALLOC_INLINE uint64_t
hash_fmix_64(uint64_t k)
{
- k ^= k >> 33;
- k *= QU(0xff51afd7ed558ccdLLU);
- k ^= k >> 33;
- k *= QU(0xc4ceb9fe1a85ec53LLU);
- k ^= k >> 33;
+ k ^= k >> 33;
+ k *= QU(0xff51afd7ed558ccdLLU);
+ k ^= k >> 33;
+ k *= QU(0xc4ceb9fe1a85ec53LLU);
+ k ^= k >> 33;
- return k;
+ return k;
}
JEMALLOC_INLINE uint32_t
hash_x86_32(const void *key, int len, uint32_t seed)
{
- const uint8_t *data = (const uint8_t *) key;
- const int nblocks = len / 4;
+ const uint8_t *data = (const uint8_t *) key;
+ const int nblocks = len / 4;
- uint32_t h1 = seed;
+ uint32_t h1 = seed;
- const uint32_t c1 = 0xcc9e2d51;
- const uint32_t c2 = 0x1b873593;
+ const uint32_t c1 = 0xcc9e2d51;
+ const uint32_t c2 = 0x1b873593;
- /* body */
- {
- const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
- int i;
+ /* body */
+ {
+ const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
+ int i;
- for (i = -nblocks; i; i++) {
- uint32_t k1 = hash_get_block_32(blocks, i);
+ for (i = -nblocks; i; i++) {
+ uint32_t k1 = hash_get_block_32(blocks, i);
- k1 *= c1;
- k1 = hash_rotl_32(k1, 15);
- k1 *= c2;
+ k1 *= c1;
+ k1 = hash_rotl_32(k1, 15);
+ k1 *= c2;
- h1 ^= k1;
- h1 = hash_rotl_32(h1, 13);
- h1 = h1*5 + 0xe6546b64;
- }
- }
+ h1 ^= k1;
+ h1 = hash_rotl_32(h1, 13);
+ h1 = h1*5 + 0xe6546b64;
+ }
+ }
- /* tail */
- {
- const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
+ /* tail */
+ {
+ const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
- uint32_t k1 = 0;
+ uint32_t k1 = 0;
- switch (len & 3) {
- case 3: k1 ^= tail[2] << 16;
- case 2: k1 ^= tail[1] << 8;
- case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
- k1 *= c2; h1 ^= k1;
- }
- }
+ switch (len & 3) {
+ case 3: k1 ^= tail[2] << 16;
+ case 2: k1 ^= tail[1] << 8;
+ case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
+ k1 *= c2; h1 ^= k1;
+ }
+ }
- /* finalization */
- h1 ^= len;
+ /* finalization */
+ h1 ^= len;
- h1 = hash_fmix_32(h1);
+ h1 = hash_fmix_32(h1);
- return h1;
+ return h1;
}
UNUSED JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2])
{
- const uint8_t * data = (const uint8_t *) key;
- const int nblocks = len / 16;
-
- uint32_t h1 = seed;
- uint32_t h2 = seed;
- uint32_t h3 = seed;
- uint32_t h4 = seed;
-
- const uint32_t c1 = 0x239b961b;
- const uint32_t c2 = 0xab0e9789;
- const uint32_t c3 = 0x38b34ae5;
- const uint32_t c4 = 0xa1e38b93;
-
- /* body */
- {
- const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
- int i;
-
- for (i = -nblocks; i; i++) {
- uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
- uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
- uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
- uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
-
- k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
-
- h1 = hash_rotl_32(h1, 19); h1 += h2;
- h1 = h1*5 + 0x561ccd1b;
-
- k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
-
- h2 = hash_rotl_32(h2, 17); h2 += h3;
- h2 = h2*5 + 0x0bcaa747;
-
- k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
-
- h3 = hash_rotl_32(h3, 15); h3 += h4;
- h3 = h3*5 + 0x96cd1c35;
-
- k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
-
- h4 = hash_rotl_32(h4, 13); h4 += h1;
- h4 = h4*5 + 0x32ac3b17;
- }
- }
-
- /* tail */
- {
- const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
- uint32_t k1 = 0;
- uint32_t k2 = 0;
- uint32_t k3 = 0;
- uint32_t k4 = 0;
-
- switch (len & 15) {
- case 15: k4 ^= tail[14] << 16;
- case 14: k4 ^= tail[13] << 8;
- case 13: k4 ^= tail[12] << 0;
- k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
-
- case 12: k3 ^= tail[11] << 24;
- case 11: k3 ^= tail[10] << 16;
- case 10: k3 ^= tail[ 9] << 8;
- case 9: k3 ^= tail[ 8] << 0;
- k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
-
- case 8: k2 ^= tail[ 7] << 24;
- case 7: k2 ^= tail[ 6] << 16;
- case 6: k2 ^= tail[ 5] << 8;
- case 5: k2 ^= tail[ 4] << 0;
- k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
-
- case 4: k1 ^= tail[ 3] << 24;
- case 3: k1 ^= tail[ 2] << 16;
- case 2: k1 ^= tail[ 1] << 8;
- case 1: k1 ^= tail[ 0] << 0;
- k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
- }
- }
-
- /* finalization */
- h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
-
- h1 += h2; h1 += h3; h1 += h4;
- h2 += h1; h3 += h1; h4 += h1;
-
- h1 = hash_fmix_32(h1);
- h2 = hash_fmix_32(h2);
- h3 = hash_fmix_32(h3);
- h4 = hash_fmix_32(h4);
-
- h1 += h2; h1 += h3; h1 += h4;
- h2 += h1; h3 += h1; h4 += h1;
-
- r_out[0] = (((uint64_t) h2) << 32) | h1;
- r_out[1] = (((uint64_t) h4) << 32) | h3;
+ const uint8_t * data = (const uint8_t *) key;
+ const int nblocks = len / 16;
+
+ uint32_t h1 = seed;
+ uint32_t h2 = seed;
+ uint32_t h3 = seed;
+ uint32_t h4 = seed;
+
+ const uint32_t c1 = 0x239b961b;
+ const uint32_t c2 = 0xab0e9789;
+ const uint32_t c3 = 0x38b34ae5;
+ const uint32_t c4 = 0xa1e38b93;
+
+ /* body */
+ {
+ const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
+ int i;
+
+ for (i = -nblocks; i; i++) {
+ uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
+ uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
+ uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
+ uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
+
+ k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
+
+ h1 = hash_rotl_32(h1, 19); h1 += h2;
+ h1 = h1*5 + 0x561ccd1b;
+
+ k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
+
+ h2 = hash_rotl_32(h2, 17); h2 += h3;
+ h2 = h2*5 + 0x0bcaa747;
+
+ k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
+
+ h3 = hash_rotl_32(h3, 15); h3 += h4;
+ h3 = h3*5 + 0x96cd1c35;
+
+ k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
+
+ h4 = hash_rotl_32(h4, 13); h4 += h1;
+ h4 = h4*5 + 0x32ac3b17;
+ }
+ }
+
+ /* tail */
+ {
+ const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
+ uint32_t k1 = 0;
+ uint32_t k2 = 0;
+ uint32_t k3 = 0;
+ uint32_t k4 = 0;
+
+ switch (len & 15) {
+ case 15: k4 ^= tail[14] << 16;
+ case 14: k4 ^= tail[13] << 8;
+ case 13: k4 ^= tail[12] << 0;
+ k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
+
+ case 12: k3 ^= tail[11] << 24;
+ case 11: k3 ^= tail[10] << 16;
+ case 10: k3 ^= tail[ 9] << 8;
+ case 9: k3 ^= tail[ 8] << 0;
+ k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
+
+ case 8: k2 ^= tail[ 7] << 24;
+ case 7: k2 ^= tail[ 6] << 16;
+ case 6: k2 ^= tail[ 5] << 8;
+ case 5: k2 ^= tail[ 4] << 0;
+ k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
+
+ case 4: k1 ^= tail[ 3] << 24;
+ case 3: k1 ^= tail[ 2] << 16;
+ case 2: k1 ^= tail[ 1] << 8;
+ case 1: k1 ^= tail[ 0] << 0;
+ k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
+ }
+ }
+
+ /* finalization */
+ h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
+
+ h1 += h2; h1 += h3; h1 += h4;
+ h2 += h1; h3 += h1; h4 += h1;
+
+ h1 = hash_fmix_32(h1);
+ h2 = hash_fmix_32(h2);
+ h3 = hash_fmix_32(h3);
+ h4 = hash_fmix_32(h4);
+
+ h1 += h2; h1 += h3; h1 += h4;
+ h2 += h1; h3 += h1; h4 += h1;
+
+ r_out[0] = (((uint64_t) h2) << 32) | h1;
+ r_out[1] = (((uint64_t) h4) << 32) | h3;
}
UNUSED JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2])
{
- const uint8_t *data = (const uint8_t *) key;
- const int nblocks = len / 16;
-
- uint64_t h1 = seed;
- uint64_t h2 = seed;
-
- const uint64_t c1 = QU(0x87c37b91114253d5LLU);
- const uint64_t c2 = QU(0x4cf5ad432745937fLLU);
-
- /* body */
- {
- const uint64_t *blocks = (const uint64_t *) (data);
- int i;
-
- for (i = 0; i < nblocks; i++) {
- uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
- uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
-
- k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
-
- h1 = hash_rotl_64(h1, 27); h1 += h2;
- h1 = h1*5 + 0x52dce729;
-
- k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
-
- h2 = hash_rotl_64(h2, 31); h2 += h1;
- h2 = h2*5 + 0x38495ab5;
- }
- }
-
- /* tail */
- {
- const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
- uint64_t k1 = 0;
- uint64_t k2 = 0;
-
- switch (len & 15) {
- case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
- case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
- case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
- case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
- case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
- case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
- case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
- k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
-
- case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
- case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
- case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
- case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
- case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
- case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
- case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
- case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
- k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
- }
- }
-
- /* finalization */
- h1 ^= len; h2 ^= len;
-
- h1 += h2;
- h2 += h1;
-
- h1 = hash_fmix_64(h1);
- h2 = hash_fmix_64(h2);
-
- h1 += h2;
- h2 += h1;
-
- r_out[0] = h1;
- r_out[1] = h2;
+ const uint8_t *data = (const uint8_t *) key;
+ const int nblocks = len / 16;
+
+ uint64_t h1 = seed;
+ uint64_t h2 = seed;
+
+ const uint64_t c1 = QU(0x87c37b91114253d5LLU);
+ const uint64_t c2 = QU(0x4cf5ad432745937fLLU);
+
+ /* body */
+ {
+ const uint64_t *blocks = (const uint64_t *) (data);
+ int i;
+
+ for (i = 0; i < nblocks; i++) {
+ uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
+ uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
+
+ k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
+
+ h1 = hash_rotl_64(h1, 27); h1 += h2;
+ h1 = h1*5 + 0x52dce729;
+
+ k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
+
+ h2 = hash_rotl_64(h2, 31); h2 += h1;
+ h2 = h2*5 + 0x38495ab5;
+ }
+ }
+
+ /* tail */
+ {
+ const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
+ uint64_t k1 = 0;
+ uint64_t k2 = 0;
+
+ switch (len & 15) {
+ case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
+ case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
+ case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
+ case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
+ case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
+ case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
+ case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
+ k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
+
+ case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
+ case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
+ case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
+ case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
+ case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
+ case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
+ case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
+ case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
+ k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
+ }
+ }
+
+ /* finalization */
+ h1 ^= len; h2 ^= len;
+
+ h1 += h2;
+ h2 += h1;
+
+ h1 = hash_fmix_64(h1);
+ h2 = hash_fmix_64(h2);
+
+ h1 += h2;
+ h2 += h1;
+
+ r_out[0] = h1;
+ r_out[1] = h2;
}
@@ -317,12 +317,12 @@ JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
#if (LG_SIZEOF_PTR == 3)
- hash_x64_128(key, len, seed, (uint64_t *)r_hash);
+ hash_x64_128(key, len, seed, (uint64_t *)r_hash);
#else
- uint64_t hashes[2];
- hash_x86_128(key, len, seed, hashes);
- r_hash[0] = (size_t)hashes[0];
- r_hash[1] = (size_t)hashes[1];
+ uint64_t hashes[2];
+ hash_x86_128(key, len, seed, hashes);
+ r_hash[0] = (size_t)hashes[0];
+ r_hash[1] = (size_t)hashes[1];
#endif
}
#endif
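[editor's note] These are the MurmurHash3 kernels; the hash() wrapper at the bottom picks the x64 or x86 128-bit variant based on LG_SIZEOF_PTR and hands back two size_t words. The avalanche behavior comes from the xor-shift/multiply finalizers. As a standalone illustration, the 64-bit finalizer can be lifted out verbatim (the constants are copied from hash_fmix_64() above; the main() harness is only for demonstration):

    #include <stdint.h>
    #include <stdio.h>

    /* Same finalizer as hash_fmix_64() above. */
    static uint64_t
    fmix64(uint64_t k)
    {
        k ^= k >> 33;
        k *= 0xff51afd7ed558ccdULL;
        k ^= k >> 33;
        k *= 0xc4ceb9fe1a85ec53ULL;
        k ^= k >> 33;
        return (k);
    }

    int
    main(void)
    {
        /* Adjacent inputs land far apart after mixing. */
        printf("%016llx\n", (unsigned long long)fmix64(1));
        printf("%016llx\n", (unsigned long long)fmix64(2));
        return (0);
    }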
diff --git a/src/rt/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in b/src/rt/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
index 50d84cabf6954..e46ac5440f290 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/src/rt/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
@@ -278,6 +278,9 @@ static const bool config_ivsalloc =
# ifdef __arm__
# define LG_QUANTUM 3
# endif
+# ifdef __aarch64__
+# define LG_QUANTUM 4
+# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
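[editor's note] The aarch64 hunk is the one substantive (non-whitespace) change in this header: LG_QUANTUM becomes 4, so the allocation quantum is 16 bytes, versus the 8 bytes used for 32-bit __arm__. The rest of the header derives the quantum-rounding helpers from this constant; roughly (a paraphrase for illustration, not a copy of the file):

    #define QUANTUM             ((size_t)(1U << LG_QUANTUM))  /* 16 on aarch64. */
    #define QUANTUM_MASK        (QUANTUM - 1)
    /* Round a size up to a multiple of the quantum. */
    #define QUANTUM_CEILING(a)  (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

    /* e.g. QUANTUM_CEILING(20) is 32 with LG_QUANTUM == 4, but 24 with LG_QUANTUM == 3. */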
diff --git a/src/rt/jemalloc/include/jemalloc/internal/mb.h b/src/rt/jemalloc/include/jemalloc/internal/mb.h
index c60413e7e68f4..3cfa7872942b5 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/mb.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/mb.h
@@ -33,25 +33,25 @@ mb_write(void)
{
# if 0
- /* This is a true memory barrier. */
- asm volatile ("pusha;"
- "xor %%eax,%%eax;"
- "cpuid;"
- "popa;"
- : /* Outputs. */
- : /* Inputs. */
- : "memory" /* Clobbers. */
- );
+ /* This is a true memory barrier. */
+ asm volatile ("pusha;"
+ "xor %%eax,%%eax;"
+ "cpuid;"
+ "popa;"
+ : /* Outputs. */
+ : /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
#else
- /*
- * This is hopefully enough to keep the compiler from reordering
- * instructions around this one.
- */
- asm volatile ("nop;"
- : /* Outputs. */
- : /* Inputs. */
- : "memory" /* Clobbers. */
- );
+ /*
+ * This is hopefully enough to keep the compiler from reordering
+ * instructions around this one.
+ */
+ asm volatile ("nop;"
+ : /* Outputs. */
+ : /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
#endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
@@ -59,40 +59,40 @@ JEMALLOC_INLINE void
mb_write(void)
{
- asm volatile ("sfence"
- : /* Outputs. */
- : /* Inputs. */
- : "memory" /* Clobbers. */
- );
+ asm volatile ("sfence"
+ : /* Outputs. */
+ : /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
}
#elif defined(__powerpc__)
JEMALLOC_INLINE void
mb_write(void)
{
- asm volatile ("eieio"
- : /* Outputs. */
- : /* Inputs. */
- : "memory" /* Clobbers. */
- );
+ asm volatile ("eieio"
+ : /* Outputs. */
+ : /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
}
#elif defined(__sparc64__)
JEMALLOC_INLINE void
mb_write(void)
{
- asm volatile ("membar #StoreStore"
- : /* Outputs. */
- : /* Inputs. */
- : "memory" /* Clobbers. */
- );
+ asm volatile ("membar #StoreStore"
+ : /* Outputs. */
+ : /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
}
#elif defined(__tile__)
JEMALLOC_INLINE void
mb_write(void)
{
- __sync_synchronize();
+ __sync_synchronize();
}
#else
/*
@@ -102,11 +102,11 @@ mb_write(void)
JEMALLOC_INLINE void
mb_write(void)
{
- malloc_mutex_t mtx;
+ malloc_mutex_t mtx;
- malloc_mutex_init(&mtx);
- malloc_mutex_lock(&mtx);
- malloc_mutex_unlock(&mtx);
+ malloc_mutex_init(&mtx);
+ malloc_mutex_lock(&mtx);
+ malloc_mutex_unlock(&mtx);
}
#endif
#endif
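[editor's note] mb_write() only needs to order stores, hence sfence on x86-64, eieio on PowerPC, and, as a last resort, a lock/unlock pair on a throwaway mutex. On a compiler with C11 atomics the same store-ordering intent could be spelled as a release fence; a hedged equivalent, not jemalloc code:

    #include <stdatomic.h>

    /* Not part of jemalloc: C11 spelling of a store/store barrier. */
    static inline void
    mb_write_c11(void)
    {
        atomic_thread_fence(memory_order_release);
    }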
diff --git a/src/rt/jemalloc/include/jemalloc/internal/mutex.h b/src/rt/jemalloc/include/jemalloc/internal/mutex.h
index 6a40432cd3cd1..de44e1435ad31 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/mutex.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/mutex.h
@@ -26,14 +26,14 @@ typedef struct malloc_mutex_s malloc_mutex_t;
struct malloc_mutex_s {
#ifdef _WIN32
- CRITICAL_SECTION lock;
+ CRITICAL_SECTION lock;
#elif (defined(JEMALLOC_OSSPIN))
- OSSpinLock lock;
+ OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
- pthread_mutex_t lock;
- malloc_mutex_t *postponed_next;
+ pthread_mutex_t lock;
+ malloc_mutex_t *postponed_next;
#else
- pthread_mutex_t lock;
+ pthread_mutex_t lock;
#endif
};
@@ -68,30 +68,30 @@ JEMALLOC_INLINE void
malloc_mutex_lock(malloc_mutex_t *mutex)
{
- if (isthreaded) {
+ if (isthreaded) {
#ifdef _WIN32
- EnterCriticalSection(&mutex->lock);
+ EnterCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
- OSSpinLockLock(&mutex->lock);
+ OSSpinLockLock(&mutex->lock);
#else
- pthread_mutex_lock(&mutex->lock);
+ pthread_mutex_lock(&mutex->lock);
#endif
- }
+ }
}
JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex)
{
- if (isthreaded) {
+ if (isthreaded) {
#ifdef _WIN32
- LeaveCriticalSection(&mutex->lock);
+ LeaveCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
- OSSpinLockUnlock(&mutex->lock);
+ OSSpinLockUnlock(&mutex->lock);
#else
- pthread_mutex_unlock(&mutex->lock);
+ pthread_mutex_unlock(&mutex->lock);
#endif
- }
+ }
}
#endif
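[editor's note] malloc_mutex_lock()/malloc_mutex_unlock() dispatch to the platform primitive and are no-ops until isthreaded becomes true, so single-threaded programs pay nothing for locking. Internal callers just bracket their critical sections; a trivial sketch (the merged counter is a placeholder):

    /* Illustrative critical section guarding shared state. */
    static void
    counter_merge(malloc_mutex_t *lock, uint64_t *shared, uint64_t delta)
    {
        malloc_mutex_lock(lock);
        *shared += delta;
        malloc_mutex_unlock(lock);
    }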
diff --git a/src/rt/jemalloc/include/jemalloc/internal/prng.h b/src/rt/jemalloc/include/jemalloc/internal/prng.h
index 89fbfa9535f2c..83a5462b4dd0d 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/prng.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/prng.h
@@ -26,22 +26,22 @@
* const uint32_t a, c : See above discussion.
*/
#define prng32(r, lg_range, state, a, c) do { \
- assert(lg_range > 0); \
- assert(lg_range <= 32); \
- \
- r = (state * (a)) + (c); \
- state = r; \
- r >>= (32 - lg_range); \
+ assert(lg_range > 0); \
+ assert(lg_range <= 32); \
+ \
+ r = (state * (a)) + (c); \
+ state = r; \
+ r >>= (32 - lg_range); \
} while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \
- assert(lg_range > 0); \
- assert(lg_range <= 64); \
- \
- r = (state * (a)) + (c); \
- state = r; \
- r >>= (64 - lg_range); \
+ assert(lg_range > 0); \
+ assert(lg_range <= 64); \
+ \
+ r = (state * (a)) + (c); \
+ state = r; \
+ r >>= (64 - lg_range); \
} while (false)
#endif /* JEMALLOC_H_TYPES */
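[editor's note] prng32()/prng64() are one step of a linear congruential generator that keeps only the high lg_range bits of the new state, since the low bits of an LCG are the weakest. A function form of the 64-bit macro, fixed to the constants that prof.h passes in below (the wrapper itself is illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Same recurrence as prng64(r, lg_range, state, a, c). */
    static uint64_t
    prng64_step(uint64_t *state, unsigned lg_range)
    {
        uint64_t r;

        assert(lg_range > 0 && lg_range <= 64);
        r = (*state * UINT64_C(6364136223846793005)) +
            UINT64_C(1442695040888963407);
        *state = r;
        return (r >> (64 - lg_range));
    }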
diff --git a/src/rt/jemalloc/include/jemalloc/internal/prof.h b/src/rt/jemalloc/include/jemalloc/internal/prof.h
index 2c4576f97e343..119a5b1bcb7bf 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/prof.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/prof.h
@@ -50,131 +50,131 @@ typedef struct prof_tdata_s prof_tdata_t;
#ifdef JEMALLOC_H_STRUCTS
struct prof_bt_s {
- /* Backtrace, stored as len program counters. */
- void **vec;
- unsigned len;
+ /* Backtrace, stored as len program counters. */
+ void **vec;
+ unsigned len;
};
#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
- prof_bt_t *bt;
- unsigned nignore;
- unsigned max;
+ prof_bt_t *bt;
+ unsigned nignore;
+ unsigned max;
} prof_unwind_data_t;
#endif
struct prof_cnt_s {
- /*
- * Profiling counters. An allocation/deallocation pair can operate on
- * different prof_thr_cnt_t objects that are linked into the same
- * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
- * negative. In principle it is possible for the *bytes counters to
- * overflow/underflow, but a general solution would require something
- * like 128-bit counters; this implementation doesn't bother to solve
- * that problem.
- */
- int64_t curobjs;
- int64_t curbytes;
- uint64_t accumobjs;
- uint64_t accumbytes;
+ /*
+ * Profiling counters. An allocation/deallocation pair can operate on
+ * different prof_thr_cnt_t objects that are linked into the same
+ * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
+ * negative. In principle it is possible for the *bytes counters to
+ * overflow/underflow, but a general solution would require something
+ * like 128-bit counters; this implementation doesn't bother to solve
+ * that problem.
+ */
+ int64_t curobjs;
+ int64_t curbytes;
+ uint64_t accumobjs;
+ uint64_t accumbytes;
};
struct prof_thr_cnt_s {
- /* Linkage into prof_ctx_t's cnts_ql. */
- ql_elm(prof_thr_cnt_t) cnts_link;
-
- /* Linkage into thread's LRU. */
- ql_elm(prof_thr_cnt_t) lru_link;
-
- /*
- * Associated context. If a thread frees an object that it did not
- * allocate, it is possible that the context is not cached in the
- * thread's hash table, in which case it must be able to look up the
- * context, insert a new prof_thr_cnt_t into the thread's hash table,
- * and link it into the prof_ctx_t's cnts_ql.
- */
- prof_ctx_t *ctx;
-
- /*
- * Threads use memory barriers to update the counters. Since there is
- * only ever one writer, the only challenge is for the reader to get a
- * consistent read of the counters.
- *
- * The writer uses this series of operations:
- *
- * 1) Increment epoch to an odd number.
- * 2) Update counters.
- * 3) Increment epoch to an even number.
- *
- * The reader must assure 1) that the epoch is even while it reads the
- * counters, and 2) that the epoch doesn't change between the time it
- * starts and finishes reading the counters.
- */
- unsigned epoch;
-
- /* Profiling counters. */
- prof_cnt_t cnts;
+ /* Linkage into prof_ctx_t's cnts_ql. */
+ ql_elm(prof_thr_cnt_t) cnts_link;
+
+ /* Linkage into thread's LRU. */
+ ql_elm(prof_thr_cnt_t) lru_link;
+
+ /*
+ * Associated context. If a thread frees an object that it did not
+ * allocate, it is possible that the context is not cached in the
+ * thread's hash table, in which case it must be able to look up the
+ * context, insert a new prof_thr_cnt_t into the thread's hash table,
+ * and link it into the prof_ctx_t's cnts_ql.
+ */
+ prof_ctx_t *ctx;
+
+ /*
+ * Threads use memory barriers to update the counters. Since there is
+ * only ever one writer, the only challenge is for the reader to get a
+ * consistent read of the counters.
+ *
+ * The writer uses this series of operations:
+ *
+ * 1) Increment epoch to an odd number.
+ * 2) Update counters.
+ * 3) Increment epoch to an even number.
+ *
+ * The reader must assure 1) that the epoch is even while it reads the
+ * counters, and 2) that the epoch doesn't change between the time it
+ * starts and finishes reading the counters.
+ */
+ unsigned epoch;
+
+ /* Profiling counters. */
+ prof_cnt_t cnts;
};
struct prof_ctx_s {
- /* Associated backtrace. */
- prof_bt_t *bt;
-
- /* Protects nlimbo, cnt_merged, and cnts_ql. */
- malloc_mutex_t *lock;
-
- /*
- * Number of threads that currently cause this ctx to be in a state of
- * limbo due to one of:
- * - Initializing per thread counters associated with this ctx.
- * - Preparing to destroy this ctx.
- * nlimbo must be 1 (single destroyer) in order to safely destroy the
- * ctx.
- */
- unsigned nlimbo;
-
- /* Temporary storage for summation during dump. */
- prof_cnt_t cnt_summed;
-
- /* When threads exit, they merge their stats into cnt_merged. */
- prof_cnt_t cnt_merged;
-
- /*
- * List of profile counters, one for each thread that has allocated in
- * this context.
- */
- ql_head(prof_thr_cnt_t) cnts_ql;
+ /* Associated backtrace. */
+ prof_bt_t *bt;
+
+ /* Protects nlimbo, cnt_merged, and cnts_ql. */
+ malloc_mutex_t *lock;
+
+ /*
+ * Number of threads that currently cause this ctx to be in a state of
+ * limbo due to one of:
+ * - Initializing per thread counters associated with this ctx.
+ * - Preparing to destroy this ctx.
+ * nlimbo must be 1 (single destroyer) in order to safely destroy the
+ * ctx.
+ */
+ unsigned nlimbo;
+
+ /* Temporary storage for summation during dump. */
+ prof_cnt_t cnt_summed;
+
+ /* When threads exit, they merge their stats into cnt_merged. */
+ prof_cnt_t cnt_merged;
+
+ /*
+ * List of profile counters, one for each thread that has allocated in
+ * this context.
+ */
+ ql_head(prof_thr_cnt_t) cnts_ql;
};
struct prof_tdata_s {
- /*
- * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a
- * cache of backtraces, with associated thread-specific prof_thr_cnt_t
- * objects. Other threads may read the prof_thr_cnt_t contents, but no
- * others will ever write them.
- *
- * Upon thread exit, the thread must merge all the prof_thr_cnt_t
- * counter data into the associated prof_ctx_t objects, and unlink/free
- * the prof_thr_cnt_t objects.
- */
- ckh_t bt2cnt;
-
- /* LRU for contents of bt2cnt. */
- ql_head(prof_thr_cnt_t) lru_ql;
-
- /* Backtrace vector, used for calls to prof_backtrace(). */
- void **vec;
-
- /* Sampling state. */
- uint64_t prng_state;
- uint64_t threshold;
- uint64_t accum;
-
- /* State used to avoid dumping while operating on prof internals. */
- bool enq;
- bool enq_idump;
- bool enq_gdump;
+ /*
+ * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a
+ * cache of backtraces, with associated thread-specific prof_thr_cnt_t
+ * objects. Other threads may read the prof_thr_cnt_t contents, but no
+ * others will ever write them.
+ *
+ * Upon thread exit, the thread must merge all the prof_thr_cnt_t
+ * counter data into the associated prof_ctx_t objects, and unlink/free
+ * the prof_thr_cnt_t objects.
+ */
+ ckh_t bt2cnt;
+
+ /* LRU for contents of bt2cnt. */
+ ql_head(prof_thr_cnt_t) lru_ql;
+
+ /* Backtrace vector, used for calls to prof_backtrace(). */
+ void **vec;
+
+ /* Sampling state. */
+ uint64_t prng_state;
+ uint64_t threshold;
+ uint64_t accum;
+
+ /* State used to avoid dumping while operating on prof internals. */
+ bool enq;
+ bool enq_idump;
+ bool enq_gdump;
};
#endif /* JEMALLOC_H_STRUCTS */
@@ -232,55 +232,55 @@ void prof_postfork_child(void);
#ifdef JEMALLOC_H_INLINES
#define PROF_ALLOC_PREP(nignore, size, ret) do { \
- prof_tdata_t *prof_tdata; \
- prof_bt_t bt; \
- \
- assert(size == s2u(size)); \
- \
- prof_tdata = prof_tdata_get(true); \
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
- if (prof_tdata != NULL) \
- ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
- else \
- ret = NULL; \
- break; \
- } \
- \
- if (opt_prof_active == false) { \
- /* Sampling is currently inactive, so avoid sampling. */\
- ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
- } else if (opt_lg_prof_sample == 0) { \
- /* Don't bother with sampling logic, since sampling */\
- /* interval is 1. */\
- bt_init(&bt, prof_tdata->vec); \
- prof_backtrace(&bt, nignore); \
- ret = prof_lookup(&bt); \
- } else { \
- if (prof_tdata->threshold == 0) { \
- /* Initialize. Seed the prng differently for */\
- /* each thread. */\
- prof_tdata->prng_state = \
- (uint64_t)(uintptr_t)&size; \
- prof_sample_threshold_update(prof_tdata); \
- } \
- \
- /* Determine whether to capture a backtrace based on */\
- /* whether size is enough for prof_accum to reach */\
- /* prof_tdata->threshold. However, delay updating */\
- /* these variables until prof_{m,re}alloc(), because */\
- /* we don't know for sure that the allocation will */\
- /* succeed. */\
- /* */\
- /* Use subtraction rather than addition to avoid */\
- /* potential integer overflow. */\
- if (size >= prof_tdata->threshold - \
- prof_tdata->accum) { \
- bt_init(&bt, prof_tdata->vec); \
- prof_backtrace(&bt, nignore); \
- ret = prof_lookup(&bt); \
- } else \
- ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
- } \
+ prof_tdata_t *prof_tdata; \
+ prof_bt_t bt; \
+ \
+ assert(size == s2u(size)); \
+ \
+ prof_tdata = prof_tdata_get(true); \
+ if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
+ if (prof_tdata != NULL) \
+ ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
+ else \
+ ret = NULL; \
+ break; \
+ } \
+ \
+ if (opt_prof_active == false) { \
+ /* Sampling is currently inactive, so avoid sampling. */\
+ ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
+ } else if (opt_lg_prof_sample == 0) { \
+ /* Don't bother with sampling logic, since sampling */\
+ /* interval is 1. */\
+ bt_init(&bt, prof_tdata->vec); \
+ prof_backtrace(&bt, nignore); \
+ ret = prof_lookup(&bt); \
+ } else { \
+ if (prof_tdata->threshold == 0) { \
+ /* Initialize. Seed the prng differently for */\
+ /* each thread. */\
+ prof_tdata->prng_state = \
+ (uint64_t)(uintptr_t)&size; \
+ prof_sample_threshold_update(prof_tdata); \
+ } \
+ \
+ /* Determine whether to capture a backtrace based on */\
+ /* whether size is enough for prof_accum to reach */\
+ /* prof_tdata->threshold. However, delay updating */\
+ /* these variables until prof_{m,re}alloc(), because */\
+ /* we don't know for sure that the allocation will */\
+ /* succeed. */\
+ /* */\
+ /* Use subtraction rather than addition to avoid */\
+ /* potential integer overflow. */\
+ if (size >= prof_tdata->threshold - \
+ prof_tdata->accum) { \
+ bt_init(&bt, prof_tdata->vec); \
+ prof_backtrace(&bt, nignore); \
+ ret = prof_lookup(&bt); \
+ } else \
+ ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
+ } \
} while (0)
#ifndef JEMALLOC_ENABLE_INLINE
@@ -306,272 +306,272 @@ malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
JEMALLOC_INLINE prof_tdata_t *
prof_tdata_get(bool create)
{
- prof_tdata_t *prof_tdata;
+ prof_tdata_t *prof_tdata;
- cassert(config_prof);
+ cassert(config_prof);
- prof_tdata = *prof_tdata_tsd_get();
- if (create && prof_tdata == NULL)
- prof_tdata = prof_tdata_init();
+ prof_tdata = *prof_tdata_tsd_get();
+ if (create && prof_tdata == NULL)
+ prof_tdata = prof_tdata_init();
- return (prof_tdata);
+ return (prof_tdata);
}
JEMALLOC_INLINE void
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
{
- uint64_t r;
- double u;
-
- cassert(config_prof);
-
- /*
- * Compute sample threshold as a geometrically distributed random
- * variable with mean (2^opt_lg_prof_sample).
- *
- * __ __
- * | log(u) | 1
- * prof_tdata->threshold = | -------- |, where p = -------------------
- * | log(1-p) | opt_lg_prof_sample
- * 2
- *
- * For more information on the math, see:
- *
- * Non-Uniform Random Variate Generation
- * Luc Devroye
- * Springer-Verlag, New York, 1986
- * pp 500
- * (http://cg.scs.carleton.ca/~luc/rnbookindex.html)
- */
- prng64(r, 53, prof_tdata->prng_state,
- UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
- u = (double)r * (1.0/9007199254740992.0L);
- prof_tdata->threshold = (uint64_t)(log(u) /
- log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
- + (uint64_t)1U;
+ uint64_t r;
+ double u;
+
+ cassert(config_prof);
+
+ /*
+ * Compute sample threshold as a geometrically distributed random
+ * variable with mean (2^opt_lg_prof_sample).
+ *
+ * __ __
+ * | log(u) | 1
+ * prof_tdata->threshold = | -------- |, where p = -------------------
+ * | log(1-p) | opt_lg_prof_sample
+ * 2
+ *
+ * For more information on the math, see:
+ *
+ * Non-Uniform Random Variate Generation
+ * Luc Devroye
+ * Springer-Verlag, New York, 1986
+ * pp 500
+ * (http://cg.scs.carleton.ca/~luc/rnbookindex.html)
+ */
+ prng64(r, 53, prof_tdata->prng_state,
+ UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
+ u = (double)r * (1.0/9007199254740992.0L);
+ prof_tdata->threshold = (uint64_t)(log(u) /
+ log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
+ + (uint64_t)1U;
}
JEMALLOC_INLINE prof_ctx_t *
prof_ctx_get(const void *ptr)
{
- prof_ctx_t *ret;
- arena_chunk_t *chunk;
+ prof_ctx_t *ret;
+ arena_chunk_t *chunk;
- cassert(config_prof);
- assert(ptr != NULL);
+ cassert(config_prof);
+ assert(ptr != NULL);
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk != ptr) {
- /* Region. */
- ret = arena_prof_ctx_get(ptr);
- } else
- ret = huge_prof_ctx_get(ptr);
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (chunk != ptr) {
+ /* Region. */
+ ret = arena_prof_ctx_get(ptr);
+ } else
+ ret = huge_prof_ctx_get(ptr);
- return (ret);
+ return (ret);
}
JEMALLOC_INLINE void
prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
- arena_chunk_t *chunk;
+ arena_chunk_t *chunk;
- cassert(config_prof);
- assert(ptr != NULL);
+ cassert(config_prof);
+ assert(ptr != NULL);
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk != ptr) {
- /* Region. */
- arena_prof_ctx_set(ptr, ctx);
- } else
- huge_prof_ctx_set(ptr, ctx);
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (chunk != ptr) {
+ /* Region. */
+ arena_prof_ctx_set(ptr, ctx);
+ } else
+ huge_prof_ctx_set(ptr, ctx);
}
JEMALLOC_INLINE bool
prof_sample_accum_update(size_t size)
{
- prof_tdata_t *prof_tdata;
-
- cassert(config_prof);
- /* Sampling logic is unnecessary if the interval is 1. */
- assert(opt_lg_prof_sample != 0);
-
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
- return (true);
-
- /* Take care to avoid integer overflow. */
- if (size >= prof_tdata->threshold - prof_tdata->accum) {
- prof_tdata->accum -= (prof_tdata->threshold - size);
- /* Compute new sample threshold. */
- prof_sample_threshold_update(prof_tdata);
- while (prof_tdata->accum >= prof_tdata->threshold) {
- prof_tdata->accum -= prof_tdata->threshold;
- prof_sample_threshold_update(prof_tdata);
- }
- return (false);
- } else {
- prof_tdata->accum += size;
- return (true);
- }
+ prof_tdata_t *prof_tdata;
+
+ cassert(config_prof);
+ /* Sampling logic is unnecessary if the interval is 1. */
+ assert(opt_lg_prof_sample != 0);
+
+ prof_tdata = prof_tdata_get(false);
+ if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ return (true);
+
+ /* Take care to avoid integer overflow. */
+ if (size >= prof_tdata->threshold - prof_tdata->accum) {
+ prof_tdata->accum -= (prof_tdata->threshold - size);
+ /* Compute new sample threshold. */
+ prof_sample_threshold_update(prof_tdata);
+ while (prof_tdata->accum >= prof_tdata->threshold) {
+ prof_tdata->accum -= prof_tdata->threshold;
+ prof_sample_threshold_update(prof_tdata);
+ }
+ return (false);
+ } else {
+ prof_tdata->accum += size;
+ return (true);
+ }
}
JEMALLOC_INLINE void
prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
{
- cassert(config_prof);
- assert(ptr != NULL);
- assert(size == isalloc(ptr, true));
-
- if (opt_lg_prof_sample != 0) {
- if (prof_sample_accum_update(size)) {
- /*
- * Don't sample. For malloc()-like allocation, it is
- * always possible to tell in advance how large an
- * object's usable size will be, so there should never
- * be a difference between the size passed to
- * PROF_ALLOC_PREP() and prof_malloc().
- */
- assert((uintptr_t)cnt == (uintptr_t)1U);
- }
- }
-
- if ((uintptr_t)cnt > (uintptr_t)1U) {
- prof_ctx_set(ptr, cnt->ctx);
-
- cnt->epoch++;
- /*********/
- mb_write();
- /*********/
- cnt->cnts.curobjs++;
- cnt->cnts.curbytes += size;
- if (opt_prof_accum) {
- cnt->cnts.accumobjs++;
- cnt->cnts.accumbytes += size;
- }
- /*********/
- mb_write();
- /*********/
- cnt->epoch++;
- /*********/
- mb_write();
- /*********/
- } else
- prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
+ cassert(config_prof);
+ assert(ptr != NULL);
+ assert(size == isalloc(ptr, true));
+
+ if (opt_lg_prof_sample != 0) {
+ if (prof_sample_accum_update(size)) {
+ /*
+ * Don't sample. For malloc()-like allocation, it is
+ * always possible to tell in advance how large an
+ * object's usable size will be, so there should never
+ * be a difference between the size passed to
+ * PROF_ALLOC_PREP() and prof_malloc().
+ */
+ assert((uintptr_t)cnt == (uintptr_t)1U);
+ }
+ }
+
+ if ((uintptr_t)cnt > (uintptr_t)1U) {
+ prof_ctx_set(ptr, cnt->ctx);
+
+ cnt->epoch++;
+ /*********/
+ mb_write();
+ /*********/
+ cnt->cnts.curobjs++;
+ cnt->cnts.curbytes += size;
+ if (opt_prof_accum) {
+ cnt->cnts.accumobjs++;
+ cnt->cnts.accumbytes += size;
+ }
+ /*********/
+ mb_write();
+ /*********/
+ cnt->epoch++;
+ /*********/
+ mb_write();
+ /*********/
+ } else
+ prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
}
JEMALLOC_INLINE void
prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
size_t old_size, prof_ctx_t *old_ctx)
{
- prof_thr_cnt_t *told_cnt;
-
- cassert(config_prof);
- assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
-
- if (ptr != NULL) {
- assert(size == isalloc(ptr, true));
- if (opt_lg_prof_sample != 0) {
- if (prof_sample_accum_update(size)) {
- /*
- * Don't sample. The size passed to
- * PROF_ALLOC_PREP() was larger than what
- * actually got allocated, so a backtrace was
- * captured for this allocation, even though
- * its actual size was insufficient to cross
- * the sample threshold.
- */
- cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
- }
- }
- }
-
- if ((uintptr_t)old_ctx > (uintptr_t)1U) {
- told_cnt = prof_lookup(old_ctx->bt);
- if (told_cnt == NULL) {
- /*
- * It's too late to propagate OOM for this realloc(),
- * so operate directly on old_cnt->ctx->cnt_merged.
- */
- malloc_mutex_lock(old_ctx->lock);
- old_ctx->cnt_merged.curobjs--;
- old_ctx->cnt_merged.curbytes -= old_size;
- malloc_mutex_unlock(old_ctx->lock);
- told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
- }
- } else
- told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
-
- if ((uintptr_t)told_cnt > (uintptr_t)1U)
- told_cnt->epoch++;
- if ((uintptr_t)cnt > (uintptr_t)1U) {
- prof_ctx_set(ptr, cnt->ctx);
- cnt->epoch++;
- } else if (ptr != NULL)
- prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
- /*********/
- mb_write();
- /*********/
- if ((uintptr_t)told_cnt > (uintptr_t)1U) {
- told_cnt->cnts.curobjs--;
- told_cnt->cnts.curbytes -= old_size;
- }
- if ((uintptr_t)cnt > (uintptr_t)1U) {
- cnt->cnts.curobjs++;
- cnt->cnts.curbytes += size;
- if (opt_prof_accum) {
- cnt->cnts.accumobjs++;
- cnt->cnts.accumbytes += size;
- }
- }
- /*********/
- mb_write();
- /*********/
- if ((uintptr_t)told_cnt > (uintptr_t)1U)
- told_cnt->epoch++;
- if ((uintptr_t)cnt > (uintptr_t)1U)
- cnt->epoch++;
- /*********/
- mb_write(); /* Not strictly necessary. */
+ prof_thr_cnt_t *told_cnt;
+
+ cassert(config_prof);
+ assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
+
+ if (ptr != NULL) {
+ assert(size == isalloc(ptr, true));
+ if (opt_lg_prof_sample != 0) {
+ if (prof_sample_accum_update(size)) {
+ /*
+ * Don't sample. The size passed to
+ * PROF_ALLOC_PREP() was larger than what
+ * actually got allocated, so a backtrace was
+ * captured for this allocation, even though
+ * its actual size was insufficient to cross
+ * the sample threshold.
+ */
+ cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
+ }
+ }
+ }
+
+ if ((uintptr_t)old_ctx > (uintptr_t)1U) {
+ told_cnt = prof_lookup(old_ctx->bt);
+ if (told_cnt == NULL) {
+ /*
+ * It's too late to propagate OOM for this realloc(),
+ * so operate directly on old_cnt->ctx->cnt_merged.
+ */
+ malloc_mutex_lock(old_ctx->lock);
+ old_ctx->cnt_merged.curobjs--;
+ old_ctx->cnt_merged.curbytes -= old_size;
+ malloc_mutex_unlock(old_ctx->lock);
+ told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
+ }
+ } else
+ told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
+
+ if ((uintptr_t)told_cnt > (uintptr_t)1U)
+ told_cnt->epoch++;
+ if ((uintptr_t)cnt > (uintptr_t)1U) {
+ prof_ctx_set(ptr, cnt->ctx);
+ cnt->epoch++;
+ } else if (ptr != NULL)
+ prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
+ /*********/
+ mb_write();
+ /*********/
+ if ((uintptr_t)told_cnt > (uintptr_t)1U) {
+ told_cnt->cnts.curobjs--;
+ told_cnt->cnts.curbytes -= old_size;
+ }
+ if ((uintptr_t)cnt > (uintptr_t)1U) {
+ cnt->cnts.curobjs++;
+ cnt->cnts.curbytes += size;
+ if (opt_prof_accum) {
+ cnt->cnts.accumobjs++;
+ cnt->cnts.accumbytes += size;
+ }
+ }
+ /*********/
+ mb_write();
+ /*********/
+ if ((uintptr_t)told_cnt > (uintptr_t)1U)
+ told_cnt->epoch++;
+ if ((uintptr_t)cnt > (uintptr_t)1U)
+ cnt->epoch++;
+ /*********/
+ mb_write(); /* Not strictly necessary. */
}
JEMALLOC_INLINE void
prof_free(const void *ptr, size_t size)
{
- prof_ctx_t *ctx = prof_ctx_get(ptr);
-
- cassert(config_prof);
-
- if ((uintptr_t)ctx > (uintptr_t)1) {
- prof_thr_cnt_t *tcnt;
- assert(size == isalloc(ptr, true));
- tcnt = prof_lookup(ctx->bt);
-
- if (tcnt != NULL) {
- tcnt->epoch++;
- /*********/
- mb_write();
- /*********/
- tcnt->cnts.curobjs--;
- tcnt->cnts.curbytes -= size;
- /*********/
- mb_write();
- /*********/
- tcnt->epoch++;
- /*********/
- mb_write();
- /*********/
- } else {
- /*
- * OOM during free() cannot be propagated, so operate
- * directly on cnt->ctx->cnt_merged.
- */
- malloc_mutex_lock(ctx->lock);
- ctx->cnt_merged.curobjs--;
- ctx->cnt_merged.curbytes -= size;
- malloc_mutex_unlock(ctx->lock);
- }
- }
+ prof_ctx_t *ctx = prof_ctx_get(ptr);
+
+ cassert(config_prof);
+
+ if ((uintptr_t)ctx > (uintptr_t)1) {
+ prof_thr_cnt_t *tcnt;
+ assert(size == isalloc(ptr, true));
+ tcnt = prof_lookup(ctx->bt);
+
+ if (tcnt != NULL) {
+ tcnt->epoch++;
+ /*********/
+ mb_write();
+ /*********/
+ tcnt->cnts.curobjs--;
+ tcnt->cnts.curbytes -= size;
+ /*********/
+ mb_write();
+ /*********/
+ tcnt->epoch++;
+ /*********/
+ mb_write();
+ /*********/
+ } else {
+ /*
+ * OOM during free() cannot be propagated, so operate
+ * directly on cnt->ctx->cnt_merged.
+ */
+ malloc_mutex_lock(ctx->lock);
+ ctx->cnt_merged.curobjs--;
+ ctx->cnt_merged.curbytes -= size;
+ malloc_mutex_unlock(ctx->lock);
+ }
+ }
}
#endif
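[editor's note] Two things in prof.h deserve a callout. First, prof_sample_threshold_update() draws the bytes-until-next-sample from a geometric distribution whose mean is 2^opt_lg_prof_sample, which keeps the sampling unbiased with respect to allocation size. Second, prof_thr_cnt_s uses a seqlock-style protocol: the single writer brackets counter updates with epoch increments and mb_write() calls, so a reader may accept a snapshot only if it observed the same even epoch before and after copying. A sketch of the matching reader, which is not part of this header (field names follow prof_thr_cnt_s above; read-side barriers are omitted for brevity):

    /* Illustrative reader for the epoch protocol described above. */
    static void
    prof_cnt_read(const prof_thr_cnt_t *cnt, prof_cnt_t *r_cnts)
    {
        unsigned e0, e1;

        do {
            e0 = cnt->epoch;
            *r_cnts = cnt->cnts;    /* Copy the counters. */
            e1 = cnt->epoch;
            /* Retry if a write was in progress (odd) or raced the copy. */
        } while ((e0 & 1) != 0 || e0 != e1);
    }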
diff --git a/src/rt/jemalloc/include/jemalloc/internal/ql.h b/src/rt/jemalloc/include/jemalloc/internal/ql.h
index 60613379ec16b..a9ed2393f0c2e 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/ql.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/ql.h
@@ -3,7 +3,7 @@
*/
#define ql_head(a_type) \
struct { \
- a_type *qlh_first; \
+ a_type *qlh_first; \
}
#define ql_head_initializer(a_head) {NULL}
@@ -12,7 +12,7 @@ struct { \
/* List functions. */
#define ql_new(a_head) do { \
- (a_head)->qlh_first = NULL; \
+ (a_head)->qlh_first = NULL; \
} while (0)
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
@@ -20,64 +20,64 @@ struct { \
#define ql_first(a_head) ((a_head)->qlh_first)
#define ql_last(a_head, a_field) \
- ((ql_first(a_head) != NULL) \
- ? qr_prev(ql_first(a_head), a_field) : NULL)
+ ((ql_first(a_head) != NULL) \
+ ? qr_prev(ql_first(a_head), a_field) : NULL)
#define ql_next(a_head, a_elm, a_field) \
- ((ql_last(a_head, a_field) != (a_elm)) \
- ? qr_next((a_elm), a_field) : NULL)
+ ((ql_last(a_head, a_field) != (a_elm)) \
+ ? qr_next((a_elm), a_field) : NULL)
#define ql_prev(a_head, a_elm, a_field) \
- ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
- : NULL)
+ ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
+ : NULL)
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
- qr_before_insert((a_qlelm), (a_elm), a_field); \
- if (ql_first(a_head) == (a_qlelm)) { \
- ql_first(a_head) = (a_elm); \
- } \
+ qr_before_insert((a_qlelm), (a_elm), a_field); \
+ if (ql_first(a_head) == (a_qlelm)) { \
+ ql_first(a_head) = (a_elm); \
+ } \
} while (0)
#define ql_after_insert(a_qlelm, a_elm, a_field) \
- qr_after_insert((a_qlelm), (a_elm), a_field)
+ qr_after_insert((a_qlelm), (a_elm), a_field)
#define ql_head_insert(a_head, a_elm, a_field) do { \
- if (ql_first(a_head) != NULL) { \
- qr_before_insert(ql_first(a_head), (a_elm), a_field); \
- } \
- ql_first(a_head) = (a_elm); \
+ if (ql_first(a_head) != NULL) { \
+ qr_before_insert(ql_first(a_head), (a_elm), a_field); \
+ } \
+ ql_first(a_head) = (a_elm); \
} while (0)
#define ql_tail_insert(a_head, a_elm, a_field) do { \
- if (ql_first(a_head) != NULL) { \
- qr_before_insert(ql_first(a_head), (a_elm), a_field); \
- } \
- ql_first(a_head) = qr_next((a_elm), a_field); \
+ if (ql_first(a_head) != NULL) { \
+ qr_before_insert(ql_first(a_head), (a_elm), a_field); \
+ } \
+ ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
#define ql_remove(a_head, a_elm, a_field) do { \
- if (ql_first(a_head) == (a_elm)) { \
- ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
- } \
- if (ql_first(a_head) != (a_elm)) { \
- qr_remove((a_elm), a_field); \
- } else { \
- ql_first(a_head) = NULL; \
- } \
+ if (ql_first(a_head) == (a_elm)) { \
+ ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
+ } \
+ if (ql_first(a_head) != (a_elm)) { \
+ qr_remove((a_elm), a_field); \
+ } else { \
+ ql_first(a_head) = NULL; \
+ } \
} while (0)
#define ql_head_remove(a_head, a_type, a_field) do { \
- a_type *t = ql_first(a_head); \
- ql_remove((a_head), t, a_field); \
+ a_type *t = ql_first(a_head); \
+ ql_remove((a_head), t, a_field); \
} while (0)
#define ql_tail_remove(a_head, a_type, a_field) do { \
- a_type *t = ql_last(a_head, a_field); \
- ql_remove((a_head), t, a_field); \
+ a_type *t = ql_last(a_head, a_field); \
+ ql_remove((a_head), t, a_field); \
} while (0)
#define ql_foreach(a_var, a_head, a_field) \
- qr_foreach((a_var), ql_first(a_head), a_field)
+ qr_foreach((a_var), ql_first(a_head), a_field)
#define ql_reverse_foreach(a_var, a_head, a_field) \
- qr_reverse_foreach((a_var), ql_first(a_head), a_field)
+ qr_reverse_foreach((a_var), ql_first(a_head), a_field)
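[editor's note] The ql_* macros implement an intrusive circular doubly-linked list on top of the qr_* ring macros in the next file: the head only stores the first element, and "last" is simply the first element's ring predecessor. A small usage sketch; widget_t and sum_widgets() are hypothetical, everything else comes from ql.h:

    typedef struct widget_s widget_t;
    struct widget_s {
        int              val;
        ql_elm(widget_t) link;    /* Intrusive linkage. */
    };

    static int
    sum_widgets(void)
    {
        static widget_t a = {1}, b = {2};
        ql_head(widget_t) list;
        widget_t *w;
        int sum = 0;

        ql_new(&list);
        ql_elm_new(&a, link);
        ql_elm_new(&b, link);
        ql_tail_insert(&list, &a, link);
        ql_tail_insert(&list, &b, link);
        ql_foreach(w, &list, link)
            sum += w->val;
        return (sum);             /* 1 + 2 == 3 */
    }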
diff --git a/src/rt/jemalloc/include/jemalloc/internal/qr.h b/src/rt/jemalloc/include/jemalloc/internal/qr.h
index f7af73e30b6d2..fe22352feddc4 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/qr.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/qr.h
@@ -1,14 +1,14 @@
/* Ring definitions. */
#define qr(a_type) \
struct { \
- a_type *qre_next; \
- a_type *qre_prev; \
+ a_type *qre_next; \
+ a_type *qre_prev; \
}
/* Ring functions. */
#define qr_new(a_qr, a_field) do { \
- (a_qr)->a_field.qre_next = (a_qr); \
- (a_qr)->a_field.qre_prev = (a_qr); \
+ (a_qr)->a_field.qre_next = (a_qr); \
+ (a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
@@ -16,52 +16,52 @@ struct { \
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
- (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
- (a_qr)->a_field.qre_next = (a_qrelm); \
- (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
- (a_qrelm)->a_field.qre_prev = (a_qr); \
+ (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
+ (a_qr)->a_field.qre_next = (a_qrelm); \
+ (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
+ (a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_after_insert(a_qrelm, a_qr, a_field) \
do \
{ \
- (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
- (a_qr)->a_field.qre_prev = (a_qrelm); \
- (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
- (a_qrelm)->a_field.qre_next = (a_qr); \
+ (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
+ (a_qr)->a_field.qre_prev = (a_qrelm); \
+ (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
+ (a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
- void *t; \
- (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
- (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
- t = (a_qr_a)->a_field.qre_prev; \
- (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
- (a_qr_b)->a_field.qre_prev = t; \
+ void *t; \
+ (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
+ (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
+ t = (a_qr_a)->a_field.qre_prev; \
+ (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
+ (a_qr_b)->a_field.qre_prev = t; \
} while (0)
/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code. */
#define qr_split(a_qr_a, a_qr_b, a_field) \
- qr_meld((a_qr_a), (a_qr_b), a_field)
+ qr_meld((a_qr_a), (a_qr_b), a_field)
#define qr_remove(a_qr, a_field) do { \
- (a_qr)->a_field.qre_prev->a_field.qre_next \
- = (a_qr)->a_field.qre_next; \
- (a_qr)->a_field.qre_next->a_field.qre_prev \
- = (a_qr)->a_field.qre_prev; \
- (a_qr)->a_field.qre_next = (a_qr); \
- (a_qr)->a_field.qre_prev = (a_qr); \
+ (a_qr)->a_field.qre_prev->a_field.qre_next \
+ = (a_qr)->a_field.qre_next; \
+ (a_qr)->a_field.qre_next->a_field.qre_prev \
+ = (a_qr)->a_field.qre_prev; \
+ (a_qr)->a_field.qre_next = (a_qr); \
+ (a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_foreach(var, a_qr, a_field) \
- for ((var) = (a_qr); \
- (var) != NULL; \
- (var) = (((var)->a_field.qre_next != (a_qr)) \
- ? (var)->a_field.qre_next : NULL))
+ for ((var) = (a_qr); \
+ (var) != NULL; \
+ (var) = (((var)->a_field.qre_next != (a_qr)) \
+ ? (var)->a_field.qre_next : NULL))
#define qr_reverse_foreach(var, a_qr, a_field) \
- for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
- (var) != NULL; \
- (var) = (((var) != (a_qr)) \
- ? (var)->a_field.qre_prev : NULL))
+ for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
+ (var) != NULL; \
+ (var) = (((var) != (a_qr)) \
+ ? (var)->a_field.qre_prev : NULL))
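[editor's note] qr_meld() works by swapping the two prev pointers (and patching the corresponding next pointers), and applying the identical swaps a second time undoes them, which is why qr_split() can expand to exactly the same code. A worked example under a hypothetical node_t (only the qr_* macros come from this header):

    typedef struct node_s node_t;
    struct node_s {
        int        id;
        qr(node_t) link;
    };

    static void
    ring_demo(void)
    {
        static node_t a = {0}, b = {1}, c = {2};

        qr_new(&a, link);
        qr_new(&b, link);
        qr_new(&c, link);
        qr_meld(&a, &b, link);    /* Ring is now a-b. */
        qr_meld(&a, &c, link);    /* Ring is now a-b-c. */
        qr_split(&a, &b, link);   /* Two rings again: {a} and {b, c}. */
    }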
diff --git a/src/rt/jemalloc/include/jemalloc/internal/quarantine.h b/src/rt/jemalloc/include/jemalloc/internal/quarantine.h
index 7a0b7c32cabc2..16f677f73da09 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/quarantine.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/quarantine.h
@@ -12,17 +12,17 @@ typedef struct quarantine_s quarantine_t;
#ifdef JEMALLOC_H_STRUCTS
struct quarantine_obj_s {
- void *ptr;
- size_t usize;
+ void *ptr;
+ size_t usize;
};
struct quarantine_s {
- size_t curbytes;
- size_t curobjs;
- size_t first;
+ size_t curbytes;
+ size_t curobjs;
+ size_t first;
#define LG_MAXOBJS_INIT 10
- size_t lg_maxobjs;
- quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
+ size_t lg_maxobjs;
+ quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
};
#endif /* JEMALLOC_H_STRUCTS */
@@ -52,15 +52,16 @@ malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL,
JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
- quarantine_t *quarantine;
+ quarantine_t *quarantine;
- assert(config_fill && opt_quarantine);
+ assert(config_fill && opt_quarantine);
- quarantine = *quarantine_tsd_get();
- if (quarantine == NULL)
- quarantine_init(LG_MAXOBJS_INIT);
+ quarantine = *quarantine_tsd_get();
+ if (quarantine == NULL)
+ quarantine_init(LG_MAXOBJS_INIT);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
+
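[editor's note] quarantine_s is a power-of-two ring buffer of recently freed regions: first indexes the oldest entry, curobjs/curbytes track occupancy, and lg_maxobjs fixes the capacity, so both the next free slot and wrap-around reduce to masking. A sketch of that index arithmetic (the helper name is illustrative; the real logic lives in quarantine.c):

    /* Slot that the next quarantined region would occupy. */
    static size_t
    quarantine_next_slot(const quarantine_t *q)
    {
        size_t mask = (((size_t)1) << q->lg_maxobjs) - 1;

        return ((q->first + q->curobjs) & mask);
    }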
diff --git a/src/rt/jemalloc/include/jemalloc/internal/rb.h b/src/rt/jemalloc/include/jemalloc/internal/rb.h
index cdc89d73859e1..7b675f09051e5 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/rb.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/rb.h
@@ -123,20 +123,20 @@ struct { \
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != &(a_rbt)->rbt_nil) { \
- for (; \
- rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\
- (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
- } \
+ for (; \
+ rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\
+ (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
+ } \
} \
} while (0)
#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != &(a_rbt)->rbt_nil) { \
- for (; rbtn_right_get(a_type, a_field, (r_node)) != \
- &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \
- (r_node))) { \
- } \
+ for (; rbtn_right_get(a_type, a_field, (r_node)) != \
+ &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \
+ (r_node))) { \
+ } \
} \
} while (0)
@@ -318,7 +318,7 @@ a_prefix##first(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
if (ret == &rbtree->rbt_nil) { \
- ret = NULL; \
+ ret = NULL; \
} \
return (ret); \
} \
@@ -327,7 +327,7 @@ a_prefix##last(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
if (ret == &rbtree->rbt_nil) { \
- ret = NULL; \
+ ret = NULL; \
} \
return (ret); \
} \
@@ -335,27 +335,27 @@ a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
- rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
- a_field, node), ret); \
+ rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
+ a_field, node), ret); \
} else { \
- a_type *tnode = rbtree->rbt_root; \
- assert(tnode != &rbtree->rbt_nil); \
- ret = &rbtree->rbt_nil; \
- while (true) { \
- int cmp = (a_cmp)(node, tnode); \
- if (cmp < 0) { \
- ret = tnode; \
- tnode = rbtn_left_get(a_type, a_field, tnode); \
- } else if (cmp > 0) { \
- tnode = rbtn_right_get(a_type, a_field, tnode); \
- } else { \
- break; \
- } \
- assert(tnode != &rbtree->rbt_nil); \
- } \
+ a_type *tnode = rbtree->rbt_root; \
+ assert(tnode != &rbtree->rbt_nil); \
+ ret = &rbtree->rbt_nil; \
+ while (true) { \
+ int cmp = (a_cmp)(node, tnode); \
+ if (cmp < 0) { \
+ ret = tnode; \
+ tnode = rbtn_left_get(a_type, a_field, tnode); \
+ } else if (cmp > 0) { \
+ tnode = rbtn_right_get(a_type, a_field, tnode); \
+ } else { \
+ break; \
+ } \
+ assert(tnode != &rbtree->rbt_nil); \
+ } \
} \
if (ret == &rbtree->rbt_nil) { \
- ret = (NULL); \
+ ret = (NULL); \
} \
return (ret); \
} \
@@ -363,27 +363,27 @@ a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
- rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
- a_field, node), ret); \
+ rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
+ a_field, node), ret); \
} else { \
- a_type *tnode = rbtree->rbt_root; \
- assert(tnode != &rbtree->rbt_nil); \
- ret = &rbtree->rbt_nil; \
- while (true) { \
- int cmp = (a_cmp)(node, tnode); \
- if (cmp < 0) { \
- tnode = rbtn_left_get(a_type, a_field, tnode); \
- } else if (cmp > 0) { \
- ret = tnode; \
- tnode = rbtn_right_get(a_type, a_field, tnode); \
- } else { \
- break; \
- } \
- assert(tnode != &rbtree->rbt_nil); \
- } \
+ a_type *tnode = rbtree->rbt_root; \
+ assert(tnode != &rbtree->rbt_nil); \
+ ret = &rbtree->rbt_nil; \
+ while (true) { \
+ int cmp = (a_cmp)(node, tnode); \
+ if (cmp < 0) { \
+ tnode = rbtn_left_get(a_type, a_field, tnode); \
+ } else if (cmp > 0) { \
+ ret = tnode; \
+ tnode = rbtn_right_get(a_type, a_field, tnode); \
+ } else { \
+ break; \
+ } \
+ assert(tnode != &rbtree->rbt_nil); \
+ } \
} \
if (ret == &rbtree->rbt_nil) { \
- ret = (NULL); \
+ ret = (NULL); \
} \
return (ret); \
} \
@@ -394,14 +394,14 @@ a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
ret = rbtree->rbt_root; \
while (ret != &rbtree->rbt_nil \
&& (cmp = (a_cmp)(key, ret)) != 0) { \
- if (cmp < 0) { \
- ret = rbtn_left_get(a_type, a_field, ret); \
- } else { \
- ret = rbtn_right_get(a_type, a_field, ret); \
- } \
+ if (cmp < 0) { \
+ ret = rbtn_left_get(a_type, a_field, ret); \
+ } else { \
+ ret = rbtn_right_get(a_type, a_field, ret); \
+ } \
} \
if (ret == &rbtree->rbt_nil) { \
- ret = (NULL); \
+ ret = (NULL); \
} \
return (ret); \
} \
@@ -411,19 +411,19 @@ a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
a_type *tnode = rbtree->rbt_root; \
ret = &rbtree->rbt_nil; \
while (tnode != &rbtree->rbt_nil) { \
- int cmp = (a_cmp)(key, tnode); \
- if (cmp < 0) { \
- ret = tnode; \
- tnode = rbtn_left_get(a_type, a_field, tnode); \
- } else if (cmp > 0) { \
- tnode = rbtn_right_get(a_type, a_field, tnode); \
- } else { \
- ret = tnode; \
- break; \
- } \
+ int cmp = (a_cmp)(key, tnode); \
+ if (cmp < 0) { \
+ ret = tnode; \
+ tnode = rbtn_left_get(a_type, a_field, tnode); \
+ } else if (cmp > 0) { \
+ tnode = rbtn_right_get(a_type, a_field, tnode); \
+ } else { \
+ ret = tnode; \
+ break; \
+ } \
} \
if (ret == &rbtree->rbt_nil) { \
- ret = (NULL); \
+ ret = (NULL); \
} \
return (ret); \
} \
@@ -433,85 +433,85 @@ a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \
a_type *tnode = rbtree->rbt_root; \
ret = &rbtree->rbt_nil; \
while (tnode != &rbtree->rbt_nil) { \
- int cmp = (a_cmp)(key, tnode); \
- if (cmp < 0) { \
- tnode = rbtn_left_get(a_type, a_field, tnode); \
- } else if (cmp > 0) { \
- ret = tnode; \
- tnode = rbtn_right_get(a_type, a_field, tnode); \
- } else { \
- ret = tnode; \
- break; \
- } \
+ int cmp = (a_cmp)(key, tnode); \
+ if (cmp < 0) { \
+ tnode = rbtn_left_get(a_type, a_field, tnode); \
+ } else if (cmp > 0) { \
+ ret = tnode; \
+ tnode = rbtn_right_get(a_type, a_field, tnode); \
+ } else { \
+ ret = tnode; \
+ break; \
+ } \
} \
if (ret == &rbtree->rbt_nil) { \
- ret = (NULL); \
+ ret = (NULL); \
} \
return (ret); \
} \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
struct { \
- a_type *node; \
- int cmp; \
+ a_type *node; \
+ int cmp; \
} path[sizeof(void *) << 4], *pathp; \
rbt_node_new(a_type, a_field, rbtree, node); \
/* Wind. */ \
path->node = rbtree->rbt_root; \
for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
- int cmp = pathp->cmp = a_cmp(node, pathp->node); \
- assert(cmp != 0); \
- if (cmp < 0) { \
- pathp[1].node = rbtn_left_get(a_type, a_field, \
- pathp->node); \
- } else { \
- pathp[1].node = rbtn_right_get(a_type, a_field, \
- pathp->node); \
- } \
+ int cmp = pathp->cmp = a_cmp(node, pathp->node); \
+ assert(cmp != 0); \
+ if (cmp < 0) { \
+ pathp[1].node = rbtn_left_get(a_type, a_field, \
+ pathp->node); \
+ } else { \
+ pathp[1].node = rbtn_right_get(a_type, a_field, \
+ pathp->node); \
+ } \
} \
pathp->node = node; \
/* Unwind. */ \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
- a_type *cnode = pathp->node; \
- if (pathp->cmp < 0) { \
- a_type *left = pathp[1].node; \
- rbtn_left_set(a_type, a_field, cnode, left); \
- if (rbtn_red_get(a_type, a_field, left)) { \
- a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
- if (rbtn_red_get(a_type, a_field, leftleft)) { \
- /* Fix up 4-node. */ \
- a_type *tnode; \
- rbtn_black_set(a_type, a_field, leftleft); \
- rbtn_rotate_right(a_type, a_field, cnode, tnode); \
- cnode = tnode; \
- } \
- } else { \
- return; \
- } \
- } else { \
- a_type *right = pathp[1].node; \
- rbtn_right_set(a_type, a_field, cnode, right); \
- if (rbtn_red_get(a_type, a_field, right)) { \
- a_type *left = rbtn_left_get(a_type, a_field, cnode); \
- if (rbtn_red_get(a_type, a_field, left)) { \
- /* Split 4-node. */ \
- rbtn_black_set(a_type, a_field, left); \
- rbtn_black_set(a_type, a_field, right); \
- rbtn_red_set(a_type, a_field, cnode); \
- } else { \
- /* Lean left. */ \
- a_type *tnode; \
- bool tred = rbtn_red_get(a_type, a_field, cnode); \
- rbtn_rotate_left(a_type, a_field, cnode, tnode); \
- rbtn_color_set(a_type, a_field, tnode, tred); \
- rbtn_red_set(a_type, a_field, cnode); \
- cnode = tnode; \
- } \
- } else { \
- return; \
- } \
- } \
- pathp->node = cnode; \
+ a_type *cnode = pathp->node; \
+ if (pathp->cmp < 0) { \
+ a_type *left = pathp[1].node; \
+ rbtn_left_set(a_type, a_field, cnode, left); \
+ if (rbtn_red_get(a_type, a_field, left)) { \
+ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
+ if (rbtn_red_get(a_type, a_field, leftleft)) { \
+ /* Fix up 4-node. */ \
+ a_type *tnode; \
+ rbtn_black_set(a_type, a_field, leftleft); \
+ rbtn_rotate_right(a_type, a_field, cnode, tnode); \
+ cnode = tnode; \
+ } \
+ } else { \
+ return; \
+ } \
+ } else { \
+ a_type *right = pathp[1].node; \
+ rbtn_right_set(a_type, a_field, cnode, right); \
+ if (rbtn_red_get(a_type, a_field, right)) { \
+ a_type *left = rbtn_left_get(a_type, a_field, cnode); \
+ if (rbtn_red_get(a_type, a_field, left)) { \
+ /* Split 4-node. */ \
+ rbtn_black_set(a_type, a_field, left); \
+ rbtn_black_set(a_type, a_field, right); \
+ rbtn_red_set(a_type, a_field, cnode); \
+ } else { \
+ /* Lean left. */ \
+ a_type *tnode; \
+ bool tred = rbtn_red_get(a_type, a_field, cnode); \
+ rbtn_rotate_left(a_type, a_field, cnode, tnode); \
+ rbtn_color_set(a_type, a_field, tnode, tred); \
+ rbtn_red_set(a_type, a_field, cnode); \
+ cnode = tnode; \
+ } \
+ } else { \
+ return; \
+ } \
+ } \
+ pathp->node = cnode; \
} \
/* Set root, and make it black. */ \
rbtree->rbt_root = path->node; \
@@ -520,336 +520,336 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
a_attr void \
a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
struct { \
- a_type *node; \
- int cmp; \
+ a_type *node; \
+ int cmp; \
} *pathp, *nodep, path[sizeof(void *) << 4]; \
/* Wind. */ \
nodep = NULL; /* Silence compiler warning. */ \
path->node = rbtree->rbt_root; \
for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
- int cmp = pathp->cmp = a_cmp(node, pathp->node); \
- if (cmp < 0) { \
- pathp[1].node = rbtn_left_get(a_type, a_field, \
- pathp->node); \
- } else { \
- pathp[1].node = rbtn_right_get(a_type, a_field, \
- pathp->node); \
- if (cmp == 0) { \
- /* Find node's successor, in preparation for swap. */ \
- pathp->cmp = 1; \
- nodep = pathp; \
- for (pathp++; pathp->node != &rbtree->rbt_nil; \
- pathp++) { \
- pathp->cmp = -1; \
- pathp[1].node = rbtn_left_get(a_type, a_field, \
- pathp->node); \
- } \
- break; \
- } \
- } \
+ int cmp = pathp->cmp = a_cmp(node, pathp->node); \
+ if (cmp < 0) { \
+ pathp[1].node = rbtn_left_get(a_type, a_field, \
+ pathp->node); \
+ } else { \
+ pathp[1].node = rbtn_right_get(a_type, a_field, \
+ pathp->node); \
+ if (cmp == 0) { \
+ /* Find node's successor, in preparation for swap. */ \
+ pathp->cmp = 1; \
+ nodep = pathp; \
+ for (pathp++; pathp->node != &rbtree->rbt_nil; \
+ pathp++) { \
+ pathp->cmp = -1; \
+ pathp[1].node = rbtn_left_get(a_type, a_field, \
+ pathp->node); \
+ } \
+ break; \
+ } \
+ } \
} \
assert(nodep->node == node); \
pathp--; \
if (pathp->node != node) { \
- /* Swap node with its successor. */ \
- bool tred = rbtn_red_get(a_type, a_field, pathp->node); \
- rbtn_color_set(a_type, a_field, pathp->node, \
- rbtn_red_get(a_type, a_field, node)); \
- rbtn_left_set(a_type, a_field, pathp->node, \
- rbtn_left_get(a_type, a_field, node)); \
- /* If node's successor is its right child, the following code */\
- /* will do the wrong thing for the right child pointer. */\
- /* However, it doesn't matter, because the pointer will be */\
- /* properly set when the successor is pruned. */\
- rbtn_right_set(a_type, a_field, pathp->node, \
- rbtn_right_get(a_type, a_field, node)); \
- rbtn_color_set(a_type, a_field, node, tred); \
- /* The pruned leaf node's child pointers are never accessed */\
- /* again, so don't bother setting them to nil. */\
- nodep->node = pathp->node; \
- pathp->node = node; \
- if (nodep == path) { \
- rbtree->rbt_root = nodep->node; \
- } else { \
- if (nodep[-1].cmp < 0) { \
- rbtn_left_set(a_type, a_field, nodep[-1].node, \
- nodep->node); \
- } else { \
- rbtn_right_set(a_type, a_field, nodep[-1].node, \
- nodep->node); \
- } \
- } \
+ /* Swap node with its successor. */ \
+ bool tred = rbtn_red_get(a_type, a_field, pathp->node); \
+ rbtn_color_set(a_type, a_field, pathp->node, \
+ rbtn_red_get(a_type, a_field, node)); \
+ rbtn_left_set(a_type, a_field, pathp->node, \
+ rbtn_left_get(a_type, a_field, node)); \
+ /* If node's successor is its right child, the following code */\
+ /* will do the wrong thing for the right child pointer. */\
+ /* However, it doesn't matter, because the pointer will be */\
+ /* properly set when the successor is pruned. */\
+ rbtn_right_set(a_type, a_field, pathp->node, \
+ rbtn_right_get(a_type, a_field, node)); \
+ rbtn_color_set(a_type, a_field, node, tred); \
+ /* The pruned leaf node's child pointers are never accessed */\
+ /* again, so don't bother setting them to nil. */\
+ nodep->node = pathp->node; \
+ pathp->node = node; \
+ if (nodep == path) { \
+ rbtree->rbt_root = nodep->node; \
+ } else { \
+ if (nodep[-1].cmp < 0) { \
+ rbtn_left_set(a_type, a_field, nodep[-1].node, \
+ nodep->node); \
+ } else { \
+ rbtn_right_set(a_type, a_field, nodep[-1].node, \
+ nodep->node); \
+ } \
+ } \
} else { \
- a_type *left = rbtn_left_get(a_type, a_field, node); \
- if (left != &rbtree->rbt_nil) { \
- /* node has no successor, but it has a left child. */\
- /* Splice node out, without losing the left child. */\
- assert(rbtn_red_get(a_type, a_field, node) == false); \
- assert(rbtn_red_get(a_type, a_field, left)); \
- rbtn_black_set(a_type, a_field, left); \
- if (pathp == path) { \
- rbtree->rbt_root = left; \
- } else { \
- if (pathp[-1].cmp < 0) { \
- rbtn_left_set(a_type, a_field, pathp[-1].node, \
- left); \
- } else { \
- rbtn_right_set(a_type, a_field, pathp[-1].node, \
- left); \
- } \
- } \
- return; \
- } else if (pathp == path) { \
- /* The tree only contained one node. */ \
- rbtree->rbt_root = &rbtree->rbt_nil; \
- return; \
- } \
+ a_type *left = rbtn_left_get(a_type, a_field, node); \
+ if (left != &rbtree->rbt_nil) { \
+ /* node has no successor, but it has a left child. */\
+ /* Splice node out, without losing the left child. */\
+ assert(rbtn_red_get(a_type, a_field, node) == false); \
+ assert(rbtn_red_get(a_type, a_field, left)); \
+ rbtn_black_set(a_type, a_field, left); \
+ if (pathp == path) { \
+ rbtree->rbt_root = left; \
+ } else { \
+ if (pathp[-1].cmp < 0) { \
+ rbtn_left_set(a_type, a_field, pathp[-1].node, \
+ left); \
+ } else { \
+ rbtn_right_set(a_type, a_field, pathp[-1].node, \
+ left); \
+ } \
+ } \
+ return; \
+ } else if (pathp == path) { \
+ /* The tree only contained one node. */ \
+ rbtree->rbt_root = &rbtree->rbt_nil; \
+ return; \
+ } \
} \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
- /* Prune red node, which requires no fixup. */ \
- assert(pathp[-1].cmp < 0); \
- rbtn_left_set(a_type, a_field, pathp[-1].node, \
- &rbtree->rbt_nil); \
- return; \
+ /* Prune red node, which requires no fixup. */ \
+ assert(pathp[-1].cmp < 0); \
+ rbtn_left_set(a_type, a_field, pathp[-1].node, \
+ &rbtree->rbt_nil); \
+ return; \
} \
/* The node to be pruned is black, so unwind until balance is */\
/* restored. */\
pathp->node = &rbtree->rbt_nil; \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
- assert(pathp->cmp != 0); \
- if (pathp->cmp < 0) { \
- rbtn_left_set(a_type, a_field, pathp->node, \
- pathp[1].node); \
- assert(rbtn_red_get(a_type, a_field, pathp[1].node) \
- == false); \
- if (rbtn_red_get(a_type, a_field, pathp->node)) { \
- a_type *right = rbtn_right_get(a_type, a_field, \
- pathp->node); \
- a_type *rightleft = rbtn_left_get(a_type, a_field, \
- right); \
- a_type *tnode; \
- if (rbtn_red_get(a_type, a_field, rightleft)) { \
- /* In the following diagrams, ||, //, and \\ */\
- /* indicate the path to the removed node. */\
- /* */\
- /* || */\
- /* pathp(r) */\
- /* // \ */\
- /* (b) (b) */\
- /* / */\
- /* (r) */\
- /* */\
- rbtn_black_set(a_type, a_field, pathp->node); \
- rbtn_rotate_right(a_type, a_field, right, tnode); \
- rbtn_right_set(a_type, a_field, pathp->node, tnode);\
- rbtn_rotate_left(a_type, a_field, pathp->node, \
- tnode); \
- } else { \
- /* || */\
- /* pathp(r) */\
- /* // \ */\
- /* (b) (b) */\
- /* / */\
- /* (b) */\
- /* */\
- rbtn_rotate_left(a_type, a_field, pathp->node, \
- tnode); \
- } \
- /* Balance restored, but rotation modified subtree */\
- /* root. */\
- assert((uintptr_t)pathp > (uintptr_t)path); \
- if (pathp[-1].cmp < 0) { \
- rbtn_left_set(a_type, a_field, pathp[-1].node, \
- tnode); \
- } else { \
- rbtn_right_set(a_type, a_field, pathp[-1].node, \
- tnode); \
- } \
- return; \
- } else { \
- a_type *right = rbtn_right_get(a_type, a_field, \
- pathp->node); \
- a_type *rightleft = rbtn_left_get(a_type, a_field, \
- right); \
- if (rbtn_red_get(a_type, a_field, rightleft)) { \
- /* || */\
- /* pathp(b) */\
- /* // \ */\
- /* (b) (b) */\
- /* / */\
- /* (r) */\
- a_type *tnode; \
- rbtn_black_set(a_type, a_field, rightleft); \
- rbtn_rotate_right(a_type, a_field, right, tnode); \
- rbtn_right_set(a_type, a_field, pathp->node, tnode);\
- rbtn_rotate_left(a_type, a_field, pathp->node, \
- tnode); \
- /* Balance restored, but rotation modified */\
- /* subree root, which may actually be the tree */\
- /* root. */\
- if (pathp == path) { \
- /* Set root. */ \
- rbtree->rbt_root = tnode; \
- } else { \
- if (pathp[-1].cmp < 0) { \
- rbtn_left_set(a_type, a_field, \
- pathp[-1].node, tnode); \
- } else { \
- rbtn_right_set(a_type, a_field, \
- pathp[-1].node, tnode); \
- } \
- } \
- return; \
- } else { \
- /* || */\
- /* pathp(b) */\
- /* // \ */\
- /* (b) (b) */\
- /* / */\
- /* (b) */\
- a_type *tnode; \
- rbtn_red_set(a_type, a_field, pathp->node); \
- rbtn_rotate_left(a_type, a_field, pathp->node, \
- tnode); \
- pathp->node = tnode; \
- } \
- } \
- } else { \
- a_type *left; \
- rbtn_right_set(a_type, a_field, pathp->node, \
- pathp[1].node); \
- left = rbtn_left_get(a_type, a_field, pathp->node); \
- if (rbtn_red_get(a_type, a_field, left)) { \
- a_type *tnode; \
- a_type *leftright = rbtn_right_get(a_type, a_field, \
- left); \
- a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
- leftright); \
- if (rbtn_red_get(a_type, a_field, leftrightleft)) { \
- /* || */\
- /* pathp(b) */\
- /* / \\ */\
- /* (r) (b) */\
- /* \ */\
- /* (b) */\
- /* / */\
- /* (r) */\
- a_type *unode; \
- rbtn_black_set(a_type, a_field, leftrightleft); \
- rbtn_rotate_right(a_type, a_field, pathp->node, \
- unode); \
- rbtn_rotate_right(a_type, a_field, pathp->node, \
- tnode); \
- rbtn_right_set(a_type, a_field, unode, tnode); \
- rbtn_rotate_left(a_type, a_field, unode, tnode); \
- } else { \
- /* || */\
- /* pathp(b) */\
- /* / \\ */\
- /* (r) (b) */\
- /* \ */\
- /* (b) */\
- /* / */\
- /* (b) */\
- assert(leftright != &rbtree->rbt_nil); \
- rbtn_red_set(a_type, a_field, leftright); \
- rbtn_rotate_right(a_type, a_field, pathp->node, \
- tnode); \
- rbtn_black_set(a_type, a_field, tnode); \
- } \
- /* Balance restored, but rotation modified subtree */\
- /* root, which may actually be the tree root. */\
- if (pathp == path) { \
- /* Set root. */ \
- rbtree->rbt_root = tnode; \
- } else { \
- if (pathp[-1].cmp < 0) { \
- rbtn_left_set(a_type, a_field, pathp[-1].node, \
- tnode); \
- } else { \
- rbtn_right_set(a_type, a_field, pathp[-1].node, \
- tnode); \
- } \
- } \
- return; \
- } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
- a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
- if (rbtn_red_get(a_type, a_field, leftleft)) { \
- /* || */\
- /* pathp(r) */\
- /* / \\ */\
- /* (b) (b) */\
- /* / */\
- /* (r) */\
- a_type *tnode; \
- rbtn_black_set(a_type, a_field, pathp->node); \
- rbtn_red_set(a_type, a_field, left); \
- rbtn_black_set(a_type, a_field, leftleft); \
- rbtn_rotate_right(a_type, a_field, pathp->node, \
- tnode); \
- /* Balance restored, but rotation modified */\
- /* subtree root. */\
- assert((uintptr_t)pathp > (uintptr_t)path); \
- if (pathp[-1].cmp < 0) { \
- rbtn_left_set(a_type, a_field, pathp[-1].node, \
- tnode); \
- } else { \
- rbtn_right_set(a_type, a_field, pathp[-1].node, \
- tnode); \
- } \
- return; \
- } else { \
- /* || */\
- /* pathp(r) */\
- /* / \\ */\
- /* (b) (b) */\
- /* / */\
- /* (b) */\
- rbtn_red_set(a_type, a_field, left); \
- rbtn_black_set(a_type, a_field, pathp->node); \
- /* Balance restored. */ \
- return; \
- } \
- } else { \
- a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
- if (rbtn_red_get(a_type, a_field, leftleft)) { \
- /* || */\
- /* pathp(b) */\
- /* / \\ */\
- /* (b) (b) */\
- /* / */\
- /* (r) */\
- a_type *tnode; \
- rbtn_black_set(a_type, a_field, leftleft); \
- rbtn_rotate_right(a_type, a_field, pathp->node, \
- tnode); \
- /* Balance restored, but rotation modified */\
- /* subtree root, which may actually be the tree */\
- /* root. */\
- if (pathp == path) { \
- /* Set root. */ \
- rbtree->rbt_root = tnode; \
- } else { \
- if (pathp[-1].cmp < 0) { \
- rbtn_left_set(a_type, a_field, \
- pathp[-1].node, tnode); \
- } else { \
- rbtn_right_set(a_type, a_field, \
- pathp[-1].node, tnode); \
- } \
- } \
- return; \
- } else { \
- /* || */\
- /* pathp(b) */\
- /* / \\ */\
- /* (b) (b) */\
- /* / */\
- /* (b) */\
- rbtn_red_set(a_type, a_field, left); \
- } \
- } \
- } \
+ assert(pathp->cmp != 0); \
+ if (pathp->cmp < 0) { \
+ rbtn_left_set(a_type, a_field, pathp->node, \
+ pathp[1].node); \
+ assert(rbtn_red_get(a_type, a_field, pathp[1].node) \
+ == false); \
+ if (rbtn_red_get(a_type, a_field, pathp->node)) { \
+ a_type *right = rbtn_right_get(a_type, a_field, \
+ pathp->node); \
+ a_type *rightleft = rbtn_left_get(a_type, a_field, \
+ right); \
+ a_type *tnode; \
+ if (rbtn_red_get(a_type, a_field, rightleft)) { \
+ /* In the following diagrams, ||, //, and \\ */\
+ /* indicate the path to the removed node. */\
+ /* */\
+ /* || */\
+ /* pathp(r) */\
+ /* // \ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (r) */\
+ /* */\
+ rbtn_black_set(a_type, a_field, pathp->node); \
+ rbtn_rotate_right(a_type, a_field, right, tnode); \
+ rbtn_right_set(a_type, a_field, pathp->node, tnode);\
+ rbtn_rotate_left(a_type, a_field, pathp->node, \
+ tnode); \
+ } else { \
+ /* || */\
+ /* pathp(r) */\
+ /* // \ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (b) */\
+ /* */\
+ rbtn_rotate_left(a_type, a_field, pathp->node, \
+ tnode); \
+ } \
+ /* Balance restored, but rotation modified subtree */\
+ /* root. */\
+ assert((uintptr_t)pathp > (uintptr_t)path); \
+ if (pathp[-1].cmp < 0) { \
+ rbtn_left_set(a_type, a_field, pathp[-1].node, \
+ tnode); \
+ } else { \
+ rbtn_right_set(a_type, a_field, pathp[-1].node, \
+ tnode); \
+ } \
+ return; \
+ } else { \
+ a_type *right = rbtn_right_get(a_type, a_field, \
+ pathp->node); \
+ a_type *rightleft = rbtn_left_get(a_type, a_field, \
+ right); \
+ if (rbtn_red_get(a_type, a_field, rightleft)) { \
+ /* || */\
+ /* pathp(b) */\
+ /* // \ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (r) */\
+ a_type *tnode; \
+ rbtn_black_set(a_type, a_field, rightleft); \
+ rbtn_rotate_right(a_type, a_field, right, tnode); \
+ rbtn_right_set(a_type, a_field, pathp->node, tnode);\
+ rbtn_rotate_left(a_type, a_field, pathp->node, \
+ tnode); \
+ /* Balance restored, but rotation modified */\
+ /* subtree root, which may actually be the tree */\
+ /* root. */\
+ if (pathp == path) { \
+ /* Set root. */ \
+ rbtree->rbt_root = tnode; \
+ } else { \
+ if (pathp[-1].cmp < 0) { \
+ rbtn_left_set(a_type, a_field, \
+ pathp[-1].node, tnode); \
+ } else { \
+ rbtn_right_set(a_type, a_field, \
+ pathp[-1].node, tnode); \
+ } \
+ } \
+ return; \
+ } else { \
+ /* || */\
+ /* pathp(b) */\
+ /* // \ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (b) */\
+ a_type *tnode; \
+ rbtn_red_set(a_type, a_field, pathp->node); \
+ rbtn_rotate_left(a_type, a_field, pathp->node, \
+ tnode); \
+ pathp->node = tnode; \
+ } \
+ } \
+ } else { \
+ a_type *left; \
+ rbtn_right_set(a_type, a_field, pathp->node, \
+ pathp[1].node); \
+ left = rbtn_left_get(a_type, a_field, pathp->node); \
+ if (rbtn_red_get(a_type, a_field, left)) { \
+ a_type *tnode; \
+ a_type *leftright = rbtn_right_get(a_type, a_field, \
+ left); \
+ a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
+ leftright); \
+ if (rbtn_red_get(a_type, a_field, leftrightleft)) { \
+ /* || */\
+ /* pathp(b) */\
+ /* / \\ */\
+ /* (r) (b) */\
+ /* \ */\
+ /* (b) */\
+ /* / */\
+ /* (r) */\
+ a_type *unode; \
+ rbtn_black_set(a_type, a_field, leftrightleft); \
+ rbtn_rotate_right(a_type, a_field, pathp->node, \
+ unode); \
+ rbtn_rotate_right(a_type, a_field, pathp->node, \
+ tnode); \
+ rbtn_right_set(a_type, a_field, unode, tnode); \
+ rbtn_rotate_left(a_type, a_field, unode, tnode); \
+ } else { \
+ /* || */\
+ /* pathp(b) */\
+ /* / \\ */\
+ /* (r) (b) */\
+ /* \ */\
+ /* (b) */\
+ /* / */\
+ /* (b) */\
+ assert(leftright != &rbtree->rbt_nil); \
+ rbtn_red_set(a_type, a_field, leftright); \
+ rbtn_rotate_right(a_type, a_field, pathp->node, \
+ tnode); \
+ rbtn_black_set(a_type, a_field, tnode); \
+ } \
+ /* Balance restored, but rotation modified subtree */\
+ /* root, which may actually be the tree root. */\
+ if (pathp == path) { \
+ /* Set root. */ \
+ rbtree->rbt_root = tnode; \
+ } else { \
+ if (pathp[-1].cmp < 0) { \
+ rbtn_left_set(a_type, a_field, pathp[-1].node, \
+ tnode); \
+ } else { \
+ rbtn_right_set(a_type, a_field, pathp[-1].node, \
+ tnode); \
+ } \
+ } \
+ return; \
+ } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
+ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
+ if (rbtn_red_get(a_type, a_field, leftleft)) { \
+ /* || */\
+ /* pathp(r) */\
+ /* / \\ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (r) */\
+ a_type *tnode; \
+ rbtn_black_set(a_type, a_field, pathp->node); \
+ rbtn_red_set(a_type, a_field, left); \
+ rbtn_black_set(a_type, a_field, leftleft); \
+ rbtn_rotate_right(a_type, a_field, pathp->node, \
+ tnode); \
+ /* Balance restored, but rotation modified */\
+ /* subtree root. */\
+ assert((uintptr_t)pathp > (uintptr_t)path); \
+ if (pathp[-1].cmp < 0) { \
+ rbtn_left_set(a_type, a_field, pathp[-1].node, \
+ tnode); \
+ } else { \
+ rbtn_right_set(a_type, a_field, pathp[-1].node, \
+ tnode); \
+ } \
+ return; \
+ } else { \
+ /* || */\
+ /* pathp(r) */\
+ /* / \\ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (b) */\
+ rbtn_red_set(a_type, a_field, left); \
+ rbtn_black_set(a_type, a_field, pathp->node); \
+ /* Balance restored. */ \
+ return; \
+ } \
+ } else { \
+ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
+ if (rbtn_red_get(a_type, a_field, leftleft)) { \
+ /* || */\
+ /* pathp(b) */\
+ /* / \\ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (r) */\
+ a_type *tnode; \
+ rbtn_black_set(a_type, a_field, leftleft); \
+ rbtn_rotate_right(a_type, a_field, pathp->node, \
+ tnode); \
+ /* Balance restored, but rotation modified */\
+ /* subtree root, which may actually be the tree */\
+ /* root. */\
+ if (pathp == path) { \
+ /* Set root. */ \
+ rbtree->rbt_root = tnode; \
+ } else { \
+ if (pathp[-1].cmp < 0) { \
+ rbtn_left_set(a_type, a_field, \
+ pathp[-1].node, tnode); \
+ } else { \
+ rbtn_right_set(a_type, a_field, \
+ pathp[-1].node, tnode); \
+ } \
+ } \
+ return; \
+ } else { \
+ /* || */\
+ /* pathp(b) */\
+ /* / \\ */\
+ /* (b) (b) */\
+ /* / */\
+ /* (b) */\
+ rbtn_red_set(a_type, a_field, left); \
+ } \
+ } \
+ } \
} \
/* Set root. */ \
rbtree->rbt_root = path->node; \
@@ -859,16 +859,16 @@ a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == &rbtree->rbt_nil) { \
- return (&rbtree->rbt_nil); \
+ return (&rbtree->rbt_nil); \
} else { \
- a_type *ret; \
- if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
- a_field, node), cb, arg)) != &rbtree->rbt_nil \
- || (ret = cb(rbtree, node, arg)) != NULL) { \
- return (ret); \
- } \
- return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
- a_field, node), cb, arg)); \
+ a_type *ret; \
+ if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
+ a_field, node), cb, arg)) != &rbtree->rbt_nil \
+ || (ret = cb(rbtree, node, arg)) != NULL) { \
+ return (ret); \
+ } \
+ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+ a_field, node), cb, arg)); \
} \
} \
a_attr a_type * \
@@ -876,24 +876,24 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
int cmp = a_cmp(start, node); \
if (cmp < 0) { \
- a_type *ret; \
- if ((ret = a_prefix##iter_start(rbtree, start, \
- rbtn_left_get(a_type, a_field, node), cb, arg)) != \
- &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
- return (ret); \
- } \
- return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
- a_field, node), cb, arg)); \
+ a_type *ret; \
+ if ((ret = a_prefix##iter_start(rbtree, start, \
+ rbtn_left_get(a_type, a_field, node), cb, arg)) != \
+ &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
+ return (ret); \
+ } \
+ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+ a_field, node), cb, arg)); \
} else if (cmp > 0) { \
- return (a_prefix##iter_start(rbtree, start, \
- rbtn_right_get(a_type, a_field, node), cb, arg)); \
+ return (a_prefix##iter_start(rbtree, start, \
+ rbtn_right_get(a_type, a_field, node), cb, arg)); \
} else { \
- a_type *ret; \
- if ((ret = cb(rbtree, node, arg)) != NULL) { \
- return (ret); \
- } \
- return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
- a_field, node), cb, arg)); \
+ a_type *ret; \
+ if ((ret = cb(rbtree, node, arg)) != NULL) { \
+ return (ret); \
+ } \
+ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+ a_field, node), cb, arg)); \
} \
} \
a_attr a_type * \
@@ -901,13 +901,13 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
a_rbt_type *, a_type *, void *), void *arg) { \
a_type *ret; \
if (start != NULL) { \
- ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \
- cb, arg); \
+ ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \
+ cb, arg); \
} else { \
- ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
+ ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
} \
if (ret == &rbtree->rbt_nil) { \
- ret = NULL; \
+ ret = NULL; \
} \
return (ret); \
} \
@@ -915,16 +915,16 @@ a_attr a_type * \
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == &rbtree->rbt_nil) { \
- return (&rbtree->rbt_nil); \
+ return (&rbtree->rbt_nil); \
} else { \
- a_type *ret; \
- if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_right_get(a_type, a_field, node), cb, arg)) != \
- &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
- return (ret); \
- } \
- return (a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_left_get(a_type, a_field, node), cb, arg)); \
+ a_type *ret; \
+ if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
+ rbtn_right_get(a_type, a_field, node), cb, arg)) != \
+ &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
+ return (ret); \
+ } \
+ return (a_prefix##reverse_iter_recurse(rbtree, \
+ rbtn_left_get(a_type, a_field, node), cb, arg)); \
} \
} \
a_attr a_type * \
@@ -933,24 +933,24 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
void *arg) { \
int cmp = a_cmp(start, node); \
if (cmp > 0) { \
- a_type *ret; \
- if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
- rbtn_right_get(a_type, a_field, node), cb, arg)) != \
- &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
- return (ret); \
- } \
- return (a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_left_get(a_type, a_field, node), cb, arg)); \
+ a_type *ret; \
+ if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
+ rbtn_right_get(a_type, a_field, node), cb, arg)) != \
+ &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
+ return (ret); \
+ } \
+ return (a_prefix##reverse_iter_recurse(rbtree, \
+ rbtn_left_get(a_type, a_field, node), cb, arg)); \
} else if (cmp < 0) { \
- return (a_prefix##reverse_iter_start(rbtree, start, \
- rbtn_left_get(a_type, a_field, node), cb, arg)); \
+ return (a_prefix##reverse_iter_start(rbtree, start, \
+ rbtn_left_get(a_type, a_field, node), cb, arg)); \
} else { \
- a_type *ret; \
- if ((ret = cb(rbtree, node, arg)) != NULL) { \
- return (ret); \
- } \
- return (a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_left_get(a_type, a_field, node), cb, arg)); \
+ a_type *ret; \
+ if ((ret = cb(rbtree, node, arg)) != NULL) { \
+ return (ret); \
+ } \
+ return (a_prefix##reverse_iter_recurse(rbtree, \
+ rbtn_left_get(a_type, a_field, node), cb, arg)); \
} \
} \
a_attr a_type * \
@@ -958,14 +958,14 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
a_type *ret; \
if (start != NULL) { \
- ret = a_prefix##reverse_iter_start(rbtree, start, \
- rbtree->rbt_root, cb, arg); \
+ ret = a_prefix##reverse_iter_start(rbtree, start, \
+ rbtree->rbt_root, cb, arg); \
} else { \
- ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
- cb, arg); \
+ ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
+ cb, arg); \
} \
if (ret == &rbtree->rbt_nil) { \
- ret = NULL; \
+ ret = NULL; \
} \
return (ret); \
}
diff --git a/src/rt/jemalloc/include/jemalloc/internal/rtree.h b/src/rt/jemalloc/include/jemalloc/internal/rtree.h
index 196d02717e967..9bd98548cfed7 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/rtree.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/rtree.h
@@ -25,10 +25,10 @@ typedef struct rtree_s rtree_t;
#ifdef JEMALLOC_H_STRUCTS
struct rtree_s {
- malloc_mutex_t mutex;
- void **root;
- unsigned height;
- unsigned level2bits[1]; /* Dynamically sized. */
+ malloc_mutex_t mutex;
+ void **root;
+ unsigned height;
+ unsigned level2bits[1]; /* Dynamically sized. */
};
#endif /* JEMALLOC_H_STRUCTS */
@@ -58,37 +58,37 @@ bool rtree_set(rtree_t *rtree, uintptr_t key, void *val);
JEMALLOC_INLINE void * \
f(rtree_t *rtree, uintptr_t key) \
{ \
- void *ret; \
- uintptr_t subkey; \
- unsigned i, lshift, height, bits; \
- void **node, **child; \
- \
- RTREE_LOCK(&rtree->mutex); \
- for (i = lshift = 0, height = rtree->height, node = rtree->root;\
- i < height - 1; \
- i++, lshift += bits, node = child) { \
- bits = rtree->level2bits[i]; \
- subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
- 3)) - bits); \
- child = (void**)node[subkey]; \
- if (child == NULL) { \
- RTREE_UNLOCK(&rtree->mutex); \
- return (NULL); \
- } \
- } \
- \
- /* \
- * node is a leaf, so it contains values rather than node \
- * pointers. \
- */ \
- bits = rtree->level2bits[i]; \
- subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
- bits); \
- ret = node[subkey]; \
- RTREE_UNLOCK(&rtree->mutex); \
- \
- RTREE_GET_VALIDATE \
- return (ret); \
+ void *ret; \
+ uintptr_t subkey; \
+ unsigned i, lshift, height, bits; \
+ void **node, **child; \
+ \
+ RTREE_LOCK(&rtree->mutex); \
+ for (i = lshift = 0, height = rtree->height, node = rtree->root;\
+ i < height - 1; \
+ i++, lshift += bits, node = child) { \
+ bits = rtree->level2bits[i]; \
+ subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
+ 3)) - bits); \
+ child = (void**)node[subkey]; \
+ if (child == NULL) { \
+ RTREE_UNLOCK(&rtree->mutex); \
+ return (NULL); \
+ } \
+ } \
+ \
+ /* \
+ * node is a leaf, so it contains values rather than node \
+ * pointers. \
+ */ \
+ bits = rtree->level2bits[i]; \
+ subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
+ bits); \
+ ret = node[subkey]; \
+ RTREE_UNLOCK(&rtree->mutex); \
+ \
+ RTREE_GET_VALIDATE \
+ return (ret); \
}
#ifdef JEMALLOC_DEBUG
@@ -113,7 +113,7 @@ RTREE_GET_GENERATE(rtree_get_locked)
* seems impossible, but the following assertion is a prudent sanity check.
*/
# define RTREE_GET_VALIDATE \
- assert(rtree_get_locked(rtree, key) == ret);
+ assert(rtree_get_locked(rtree, key) == ret);
#else
# define RTREE_GET_VALIDATE
#endif
@@ -125,38 +125,38 @@ RTREE_GET_GENERATE(rtree_get)
JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, void *val)
{
- uintptr_t subkey;
- unsigned i, lshift, height, bits;
- void **node, **child;
-
- malloc_mutex_lock(&rtree->mutex);
- for (i = lshift = 0, height = rtree->height, node = rtree->root;
- i < height - 1;
- i++, lshift += bits, node = child) {
- bits = rtree->level2bits[i];
- subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
- bits);
- child = (void**)node[subkey];
- if (child == NULL) {
- child = (void**)base_alloc(sizeof(void *) <<
- rtree->level2bits[i+1]);
- if (child == NULL) {
- malloc_mutex_unlock(&rtree->mutex);
- return (true);
- }
- memset(child, 0, sizeof(void *) <<
- rtree->level2bits[i+1]);
- node[subkey] = child;
- }
- }
-
- /* node is a leaf, so it contains values rather than node pointers. */
- bits = rtree->level2bits[i];
- subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
- node[subkey] = val;
- malloc_mutex_unlock(&rtree->mutex);
-
- return (false);
+ uintptr_t subkey;
+ unsigned i, lshift, height, bits;
+ void **node, **child;
+
+ malloc_mutex_lock(&rtree->mutex);
+ for (i = lshift = 0, height = rtree->height, node = rtree->root;
+ i < height - 1;
+ i++, lshift += bits, node = child) {
+ bits = rtree->level2bits[i];
+ subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
+ bits);
+ child = (void**)node[subkey];
+ if (child == NULL) {
+ child = (void**)base_alloc(sizeof(void *) <<
+ rtree->level2bits[i+1]);
+ if (child == NULL) {
+ malloc_mutex_unlock(&rtree->mutex);
+ return (true);
+ }
+ memset(child, 0, sizeof(void *) <<
+ rtree->level2bits[i+1]);
+ node[subkey] = child;
+ }
+ }
+
+ /* node is a leaf, so it contains values rather than node pointers. */
+ bits = rtree->level2bits[i];
+ subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
+ node[subkey] = val;
+ malloc_mutex_unlock(&rtree->mutex);
+
+ return (false);
}
#endif
diff --git a/src/rt/jemalloc/include/jemalloc/internal/stats.h b/src/rt/jemalloc/include/jemalloc/internal/stats.h
index 302c809195bb2..27f68e3681cfa 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/stats.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/stats.h
@@ -12,117 +12,117 @@ typedef struct chunk_stats_s chunk_stats_t;
#ifdef JEMALLOC_H_STRUCTS
struct tcache_bin_stats_s {
- /*
- * Number of allocation requests that corresponded to the size of this
- * bin.
- */
- uint64_t nrequests;
+ /*
+ * Number of allocation requests that corresponded to the size of this
+ * bin.
+ */
+ uint64_t nrequests;
};
struct malloc_bin_stats_s {
- /*
- * Current number of bytes allocated, including objects currently
- * cached by tcache.
- */
- size_t allocated;
-
- /*
- * Total number of allocation/deallocation requests served directly by
- * the bin. Note that tcache may allocate an object, then recycle it
- * many times, resulting many increments to nrequests, but only one
- * each to nmalloc and ndalloc.
- */
- uint64_t nmalloc;
- uint64_t ndalloc;
-
- /*
- * Number of allocation requests that correspond to the size of this
- * bin. This includes requests served by tcache, though tcache only
- * periodically merges into this counter.
- */
- uint64_t nrequests;
-
- /* Number of tcache fills from this bin. */
- uint64_t nfills;
-
- /* Number of tcache flushes to this bin. */
- uint64_t nflushes;
-
- /* Total number of runs created for this bin's size class. */
- uint64_t nruns;
-
- /*
- * Total number of runs reused by extracting them from the runs tree for
- * this bin's size class.
- */
- uint64_t reruns;
-
- /* Current number of runs in this bin. */
- size_t curruns;
+ /*
+ * Current number of bytes allocated, including objects currently
+ * cached by tcache.
+ */
+ size_t allocated;
+
+ /*
+ * Total number of allocation/deallocation requests served directly by
+ * the bin. Note that tcache may allocate an object, then recycle it
+ * many times, resulting in many increments to nrequests, but only one
+ * each to nmalloc and ndalloc.
+ */
+ uint64_t nmalloc;
+ uint64_t ndalloc;
+
+ /*
+ * Number of allocation requests that correspond to the size of this
+ * bin. This includes requests served by tcache, though tcache only
+ * periodically merges into this counter.
+ */
+ uint64_t nrequests;
+
+ /* Number of tcache fills from this bin. */
+ uint64_t nfills;
+
+ /* Number of tcache flushes to this bin. */
+ uint64_t nflushes;
+
+ /* Total number of runs created for this bin's size class. */
+ uint64_t nruns;
+
+ /*
+ * Total number of runs reused by extracting them from the runs tree for
+ * this bin's size class.
+ */
+ uint64_t reruns;
+
+ /* Current number of runs in this bin. */
+ size_t curruns;
};
struct malloc_large_stats_s {
- /*
- * Total number of allocation/deallocation requests served directly by
- * the arena. Note that tcache may allocate an object, then recycle it
- * many times, resulting many increments to nrequests, but only one
- * each to nmalloc and ndalloc.
- */
- uint64_t nmalloc;
- uint64_t ndalloc;
-
- /*
- * Number of allocation requests that correspond to this size class.
- * This includes requests served by tcache, though tcache only
- * periodically merges into this counter.
- */
- uint64_t nrequests;
-
- /* Current number of runs of this size class. */
- size_t curruns;
+ /*
+ * Total number of allocation/deallocation requests served directly by
+ * the arena. Note that tcache may allocate an object, then recycle it
+ * many times, resulting in many increments to nrequests, but only one
+ * each to nmalloc and ndalloc.
+ */
+ uint64_t nmalloc;
+ uint64_t ndalloc;
+
+ /*
+ * Number of allocation requests that correspond to this size class.
+ * This includes requests served by tcache, though tcache only
+ * periodically merges into this counter.
+ */
+ uint64_t nrequests;
+
+ /* Current number of runs of this size class. */
+ size_t curruns;
};
struct arena_stats_s {
- /* Number of bytes currently mapped. */
- size_t mapped;
-
- /*
- * Total number of purge sweeps, total number of madvise calls made,
- * and total pages purged in order to keep dirty unused memory under
- * control.
- */
- uint64_t npurge;
- uint64_t nmadvise;
- uint64_t purged;
-
- /* Per-size-category statistics. */
- size_t allocated_large;
- uint64_t nmalloc_large;
- uint64_t ndalloc_large;
- uint64_t nrequests_large;
-
- /*
- * One element for each possible size class, including sizes that
- * overlap with bin size classes. This is necessary because ipalloc()
- * sometimes has to use such large objects in order to assure proper
- * alignment.
- */
- malloc_large_stats_t *lstats;
+ /* Number of bytes currently mapped. */
+ size_t mapped;
+
+ /*
+ * Total number of purge sweeps, total number of madvise calls made,
+ * and total pages purged in order to keep dirty unused memory under
+ * control.
+ */
+ uint64_t npurge;
+ uint64_t nmadvise;
+ uint64_t purged;
+
+ /* Per-size-category statistics. */
+ size_t allocated_large;
+ uint64_t nmalloc_large;
+ uint64_t ndalloc_large;
+ uint64_t nrequests_large;
+
+ /*
+ * One element for each possible size class, including sizes that
+ * overlap with bin size classes. This is necessary because ipalloc()
+ * sometimes has to use such large objects in order to assure proper
+ * alignment.
+ */
+ malloc_large_stats_t *lstats;
};
struct chunk_stats_s {
- /* Number of chunks that were allocated. */
- uint64_t nchunks;
-
- /* High-water mark for number of chunks allocated. */
- size_t highchunks;
-
- /*
- * Current number of chunks allocated. This value isn't maintained for
- * any other purpose, so keep track of it in order to be able to set
- * highchunks.
- */
- size_t curchunks;
+ /* Number of chunks that were allocated. */
+ uint64_t nchunks;
+
+ /* High-water mark for number of chunks allocated. */
+ size_t highchunks;
+
+ /*
+ * Current number of chunks allocated. This value isn't maintained for
+ * any other purpose, so keep track of it in order to be able to set
+ * highchunks.
+ */
+ size_t curchunks;
};
#endif /* JEMALLOC_H_STRUCTS */
@@ -151,21 +151,21 @@ JEMALLOC_INLINE size_t
stats_cactive_get(void)
{
- return (atomic_read_z(&stats_cactive));
+ return (atomic_read_z(&stats_cactive));
}
JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{
- atomic_add_z(&stats_cactive, size);
+ atomic_add_z(&stats_cactive, size);
}
JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{
- atomic_sub_z(&stats_cactive, size);
+ atomic_sub_z(&stats_cactive, size);
}
#endif
diff --git a/src/rt/jemalloc/include/jemalloc/internal/tcache.h b/src/rt/jemalloc/include/jemalloc/internal/tcache.h
index 686412fb115c7..ba36204ff210b 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/tcache.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/tcache.h
@@ -46,9 +46,9 @@ typedef struct tcache_s tcache_t;
#ifdef JEMALLOC_H_STRUCTS
typedef enum {
- tcache_enabled_false = 0, /* Enable cast to/from bool. */
- tcache_enabled_true = 1,
- tcache_enabled_default = 2
+ tcache_enabled_false = 0, /* Enable cast to/from bool. */
+ tcache_enabled_true = 1,
+ tcache_enabled_default = 2
} tcache_enabled_t;
/*
@@ -56,30 +56,30 @@ typedef enum {
* is stored separately, mainly to reduce memory usage.
*/
struct tcache_bin_info_s {
- unsigned ncached_max; /* Upper limit on ncached. */
+ unsigned ncached_max; /* Upper limit on ncached. */
};
struct tcache_bin_s {
- tcache_bin_stats_t tstats;
- int low_water; /* Min # cached since last GC. */
- unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
- unsigned ncached; /* # of cached objects. */
- void **avail; /* Stack of available objects. */
+ tcache_bin_stats_t tstats;
+ int low_water; /* Min # cached since last GC. */
+ unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
+ unsigned ncached; /* # of cached objects. */
+ void **avail; /* Stack of available objects. */
};
struct tcache_s {
- ql_elm(tcache_t) link; /* Used for aggregating stats. */
- uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */
- arena_t *arena; /* This thread's arena. */
- unsigned ev_cnt; /* Event count since incremental GC. */
- unsigned next_gc_bin; /* Next bin to GC. */
- tcache_bin_t tbins[1]; /* Dynamically sized. */
- /*
- * The pointer stacks associated with tbins follow as a contiguous
- * array. During tcache initialization, the avail pointer in each
- * element of tbins is initialized to point to the proper offset within
- * this array.
- */
+ ql_elm(tcache_t) link; /* Used for aggregating stats. */
+ uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */
+ arena_t *arena; /* This thread's arena. */
+ unsigned ev_cnt; /* Event count since incremental GC. */
+ unsigned next_gc_bin; /* Next bin to GC. */
+ tcache_bin_t tbins[1]; /* Dynamically sized. */
+ /*
+ * The pointer stacks associated with tbins follow as a contiguous
+ * array. During tcache initialization, the avail pointer in each
+ * element of tbins is initialized to point to the proper offset within
+ * this array.
+ */
};
#endif /* JEMALLOC_H_STRUCTS */
@@ -150,291 +150,291 @@ malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
JEMALLOC_INLINE void
tcache_flush(void)
{
- tcache_t *tcache;
+ tcache_t *tcache;
- cassert(config_tcache);
+ cassert(config_tcache);
- tcache = *tcache_tsd_get();
- if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
- return;
- tcache_destroy(tcache);
- tcache = NULL;
- tcache_tsd_set(&tcache);
+ tcache = *tcache_tsd_get();
+ if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
+ return;
+ tcache_destroy(tcache);
+ tcache = NULL;
+ tcache_tsd_set(&tcache);
}
JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
- tcache_enabled_t tcache_enabled;
+ tcache_enabled_t tcache_enabled;
- cassert(config_tcache);
+ cassert(config_tcache);
- tcache_enabled = *tcache_enabled_tsd_get();
- if (tcache_enabled == tcache_enabled_default) {
- tcache_enabled = (tcache_enabled_t)opt_tcache;
- tcache_enabled_tsd_set(&tcache_enabled);
- }
+ tcache_enabled = *tcache_enabled_tsd_get();
+ if (tcache_enabled == tcache_enabled_default) {
+ tcache_enabled = (tcache_enabled_t)opt_tcache;
+ tcache_enabled_tsd_set(&tcache_enabled);
+ }
- return ((bool)tcache_enabled);
+ return ((bool)tcache_enabled);
}
JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
- tcache_enabled_t tcache_enabled;
- tcache_t *tcache;
-
- cassert(config_tcache);
-
- tcache_enabled = (tcache_enabled_t)enabled;
- tcache_enabled_tsd_set(&tcache_enabled);
- tcache = *tcache_tsd_get();
- if (enabled) {
- if (tcache == TCACHE_STATE_DISABLED) {
- tcache = NULL;
- tcache_tsd_set(&tcache);
- }
- } else /* disabled */ {
- if (tcache > TCACHE_STATE_MAX) {
- tcache_destroy(tcache);
- tcache = NULL;
- }
- if (tcache == NULL) {
- tcache = TCACHE_STATE_DISABLED;
- tcache_tsd_set(&tcache);
- }
- }
+ tcache_enabled_t tcache_enabled;
+ tcache_t *tcache;
+
+ cassert(config_tcache);
+
+ tcache_enabled = (tcache_enabled_t)enabled;
+ tcache_enabled_tsd_set(&tcache_enabled);
+ tcache = *tcache_tsd_get();
+ if (enabled) {
+ if (tcache == TCACHE_STATE_DISABLED) {
+ tcache = NULL;
+ tcache_tsd_set(&tcache);
+ }
+ } else /* disabled */ {
+ if (tcache > TCACHE_STATE_MAX) {
+ tcache_destroy(tcache);
+ tcache = NULL;
+ }
+ if (tcache == NULL) {
+ tcache = TCACHE_STATE_DISABLED;
+ tcache_tsd_set(&tcache);
+ }
+ }
}
JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(bool create)
{
- tcache_t *tcache;
-
- if (config_tcache == false)
- return (NULL);
- if (config_lazy_lock && isthreaded == false)
- return (NULL);
-
- tcache = *tcache_tsd_get();
- if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
- if (tcache == TCACHE_STATE_DISABLED)
- return (NULL);
- if (tcache == NULL) {
- if (create == false) {
- /*
- * Creating a tcache here would cause
- * allocation as a side effect of free().
- * Ordinarily that would be okay since
- * tcache_create() failure is a soft failure
- * that doesn't propagate. However, if TLS
- * data are freed via free() as in glibc,
- * subtle corruption could result from setting
- * a TLS variable after its backing memory is
- * freed.
- */
- return (NULL);
- }
- if (tcache_enabled_get() == false) {
- tcache_enabled_set(false); /* Memoize. */
- return (NULL);
- }
- return (tcache_create(choose_arena(NULL)));
- }
- if (tcache == TCACHE_STATE_PURGATORY) {
- /*
- * Make a note that an allocator function was called
- * after tcache_thread_cleanup() was called.
- */
- tcache = TCACHE_STATE_REINCARNATED;
- tcache_tsd_set(&tcache);
- return (NULL);
- }
- if (tcache == TCACHE_STATE_REINCARNATED)
- return (NULL);
- not_reached();
- }
-
- return (tcache);
+ tcache_t *tcache;
+
+ if (config_tcache == false)
+ return (NULL);
+ if (config_lazy_lock && isthreaded == false)
+ return (NULL);
+
+ tcache = *tcache_tsd_get();
+ if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
+ if (tcache == TCACHE_STATE_DISABLED)
+ return (NULL);
+ if (tcache == NULL) {
+ if (create == false) {
+ /*
+ * Creating a tcache here would cause
+ * allocation as a side effect of free().
+ * Ordinarily that would be okay since
+ * tcache_create() failure is a soft failure
+ * that doesn't propagate. However, if TLS
+ * data are freed via free() as in glibc,
+ * subtle corruption could result from setting
+ * a TLS variable after its backing memory is
+ * freed.
+ */
+ return (NULL);
+ }
+ if (tcache_enabled_get() == false) {
+ tcache_enabled_set(false); /* Memoize. */
+ return (NULL);
+ }
+ return (tcache_create(choose_arena(NULL)));
+ }
+ if (tcache == TCACHE_STATE_PURGATORY) {
+ /*
+ * Make a note that an allocator function was called
+ * after tcache_thread_cleanup() was called.
+ */
+ tcache = TCACHE_STATE_REINCARNATED;
+ tcache_tsd_set(&tcache);
+ return (NULL);
+ }
+ if (tcache == TCACHE_STATE_REINCARNATED)
+ return (NULL);
+ not_reached();
+ }
+
+ return (tcache);
}
JEMALLOC_ALWAYS_INLINE void
tcache_event(tcache_t *tcache)
{
- if (TCACHE_GC_INCR == 0)
- return;
+ if (TCACHE_GC_INCR == 0)
+ return;
- tcache->ev_cnt++;
- assert(tcache->ev_cnt <= TCACHE_GC_INCR);
- if (tcache->ev_cnt == TCACHE_GC_INCR)
- tcache_event_hard(tcache);
+ tcache->ev_cnt++;
+ assert(tcache->ev_cnt <= TCACHE_GC_INCR);
+ if (tcache->ev_cnt == TCACHE_GC_INCR)
+ tcache_event_hard(tcache);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
- void *ret;
-
- if (tbin->ncached == 0) {
- tbin->low_water = -1;
- return (NULL);
- }
- tbin->ncached--;
- if ((int)tbin->ncached < tbin->low_water)
- tbin->low_water = tbin->ncached;
- ret = tbin->avail[tbin->ncached];
- return (ret);
+ void *ret;
+
+ if (tbin->ncached == 0) {
+ tbin->low_water = -1;
+ return (NULL);
+ }
+ tbin->ncached--;
+ if ((int)tbin->ncached < tbin->low_water)
+ tbin->low_water = tbin->ncached;
+ ret = tbin->avail[tbin->ncached];
+ return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
- void *ret;
- size_t binind;
- tcache_bin_t *tbin;
-
- binind = SMALL_SIZE2BIN(size);
- assert(binind < NBINS);
- tbin = &tcache->tbins[binind];
- ret = tcache_alloc_easy(tbin);
- if (ret == NULL) {
- ret = tcache_alloc_small_hard(tcache, tbin, binind);
- if (ret == NULL)
- return (NULL);
- }
- assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size);
-
- if (zero == false) {
- if (config_fill) {
- if (opt_junk) {
- arena_alloc_junk_small(ret,
- &arena_bin_info[binind], false);
- } else if (opt_zero)
- memset(ret, 0, size);
- }
- } else {
- if (config_fill && opt_junk) {
- arena_alloc_junk_small(ret, &arena_bin_info[binind],
- true);
- }
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- memset(ret, 0, size);
- }
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
-
- if (config_stats)
- tbin->tstats.nrequests++;
- if (config_prof)
- tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
- tcache_event(tcache);
- return (ret);
+ void *ret;
+ size_t binind;
+ tcache_bin_t *tbin;
+
+ binind = SMALL_SIZE2BIN(size);
+ assert(binind < NBINS);
+ tbin = &tcache->tbins[binind];
+ ret = tcache_alloc_easy(tbin);
+ if (ret == NULL) {
+ ret = tcache_alloc_small_hard(tcache, tbin, binind);
+ if (ret == NULL)
+ return (NULL);
+ }
+ assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size);
+
+ if (zero == false) {
+ if (config_fill) {
+ if (opt_junk) {
+ arena_alloc_junk_small(ret,
+ &arena_bin_info[binind], false);
+ } else if (opt_zero)
+ memset(ret, 0, size);
+ }
+ } else {
+ if (config_fill && opt_junk) {
+ arena_alloc_junk_small(ret, &arena_bin_info[binind],
+ true);
+ }
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ memset(ret, 0, size);
+ }
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+
+ if (config_stats)
+ tbin->tstats.nrequests++;
+ if (config_prof)
+ tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
+ tcache_event(tcache);
+ return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
- void *ret;
- size_t binind;
- tcache_bin_t *tbin;
-
- size = PAGE_CEILING(size);
- assert(size <= tcache_maxclass);
- binind = NBINS + (size >> LG_PAGE) - 1;
- assert(binind < nhbins);
- tbin = &tcache->tbins[binind];
- ret = tcache_alloc_easy(tbin);
- if (ret == NULL) {
- /*
- * Only allocate one large object at a time, because it's quite
- * expensive to create one and not use it.
- */
- ret = arena_malloc_large(tcache->arena, size, zero);
- if (ret == NULL)
- return (NULL);
- } else {
- if (config_prof && prof_promote && size == PAGE) {
- arena_chunk_t *chunk =
- (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
- size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
- LG_PAGE);
- arena_mapbits_large_binind_set(chunk, pageind,
- BININD_INVALID);
- }
- if (zero == false) {
- if (config_fill) {
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
- }
- } else {
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- memset(ret, 0, size);
- }
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
-
- if (config_stats)
- tbin->tstats.nrequests++;
- if (config_prof)
- tcache->prof_accumbytes += size;
- }
-
- tcache_event(tcache);
- return (ret);
+ void *ret;
+ size_t binind;
+ tcache_bin_t *tbin;
+
+ size = PAGE_CEILING(size);
+ assert(size <= tcache_maxclass);
+ binind = NBINS + (size >> LG_PAGE) - 1;
+ assert(binind < nhbins);
+ tbin = &tcache->tbins[binind];
+ ret = tcache_alloc_easy(tbin);
+ if (ret == NULL) {
+ /*
+ * Only allocate one large object at a time, because it's quite
+ * expensive to create one and not use it.
+ */
+ ret = arena_malloc_large(tcache->arena, size, zero);
+ if (ret == NULL)
+ return (NULL);
+ } else {
+ if (config_prof && prof_promote && size == PAGE) {
+ arena_chunk_t *chunk =
+ (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
+ size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
+ LG_PAGE);
+ arena_mapbits_large_binind_set(chunk, pageind,
+ BININD_INVALID);
+ }
+ if (zero == false) {
+ if (config_fill) {
+ if (opt_junk)
+ memset(ret, 0xa5, size);
+ else if (opt_zero)
+ memset(ret, 0, size);
+ }
+ } else {
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ memset(ret, 0, size);
+ }
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+
+ if (config_stats)
+ tbin->tstats.nrequests++;
+ if (config_prof)
+ tcache->prof_accumbytes += size;
+ }
+
+ tcache_event(tcache);
+ return (ret);
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
- tcache_bin_t *tbin;
- tcache_bin_info_t *tbin_info;
+ tcache_bin_t *tbin;
+ tcache_bin_info_t *tbin_info;
- assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
+ assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
- if (config_fill && opt_junk)
- arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
+ if (config_fill && opt_junk)
+ arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
- tbin = &tcache->tbins[binind];
- tbin_info = &tcache_bin_info[binind];
- if (tbin->ncached == tbin_info->ncached_max) {
- tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
- 1), tcache);
- }
- assert(tbin->ncached < tbin_info->ncached_max);
- tbin->avail[tbin->ncached] = ptr;
- tbin->ncached++;
+ tbin = &tcache->tbins[binind];
+ tbin_info = &tcache_bin_info[binind];
+ if (tbin->ncached == tbin_info->ncached_max) {
+ tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
+ 1), tcache);
+ }
+ assert(tbin->ncached < tbin_info->ncached_max);
+ tbin->avail[tbin->ncached] = ptr;
+ tbin->ncached++;
- tcache_event(tcache);
+ tcache_event(tcache);
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
- size_t binind;
- tcache_bin_t *tbin;
- tcache_bin_info_t *tbin_info;
-
- assert((size & PAGE_MASK) == 0);
- assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
- assert(tcache_salloc(ptr) <= tcache_maxclass);
-
- binind = NBINS + (size >> LG_PAGE) - 1;
-
- if (config_fill && opt_junk)
- memset(ptr, 0x5a, size);
-
- tbin = &tcache->tbins[binind];
- tbin_info = &tcache_bin_info[binind];
- if (tbin->ncached == tbin_info->ncached_max) {
- tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
- 1), tcache);
- }
- assert(tbin->ncached < tbin_info->ncached_max);
- tbin->avail[tbin->ncached] = ptr;
- tbin->ncached++;
-
- tcache_event(tcache);
+ size_t binind;
+ tcache_bin_t *tbin;
+ tcache_bin_info_t *tbin_info;
+
+ assert((size & PAGE_MASK) == 0);
+ assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
+ assert(tcache_salloc(ptr) <= tcache_maxclass);
+
+ binind = NBINS + (size >> LG_PAGE) - 1;
+
+ if (config_fill && opt_junk)
+ memset(ptr, 0x5a, size);
+
+ tbin = &tcache->tbins[binind];
+ tbin_info = &tcache_bin_info[binind];
+ if (tbin->ncached == tbin_info->ncached_max) {
+ tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
+ 1), tcache);
+ }
+ assert(tbin->ncached < tbin_info->ncached_max);
+ tbin->avail[tbin->ncached] = ptr;
+ tbin->ncached++;
+
+ tcache_event(tcache);
}
#endif
diff --git a/src/rt/jemalloc/include/jemalloc/internal/tsd.h b/src/rt/jemalloc/include/jemalloc/internal/tsd.h
index 5ac96c92d4c11..0037cf35e7031 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/tsd.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/tsd.h
@@ -116,40 +116,40 @@ a_attr bool a_name##_booted = false;
a_attr bool \
a_name##_tsd_cleanup_wrapper(void) \
{ \
- \
- if (a_name##_initialized) { \
- a_name##_initialized = false; \
- a_cleanup(&a_name##_tls); \
- } \
- return (a_name##_initialized); \
+ \
+ if (a_name##_initialized) { \
+ a_name##_initialized = false; \
+ a_cleanup(&a_name##_tls); \
+ } \
+ return (a_name##_initialized); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
- \
- if (a_cleanup != malloc_tsd_no_cleanup) { \
- malloc_tsd_cleanup_register( \
- &a_name##_tsd_cleanup_wrapper); \
- } \
- a_name##_booted = true; \
- return (false); \
+ \
+ if (a_cleanup != malloc_tsd_no_cleanup) { \
+ malloc_tsd_cleanup_register( \
+ &a_name##_tsd_cleanup_wrapper); \
+ } \
+ a_name##_booted = true; \
+ return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
- \
- assert(a_name##_booted); \
- return (&a_name##_tls); \
+ \
+ assert(a_name##_booted); \
+ return (&a_name##_tls); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
- \
- assert(a_name##_booted); \
- a_name##_tls = (*val); \
- if (a_cleanup != malloc_tsd_no_cleanup) \
- a_name##_initialized = true; \
+ \
+ assert(a_name##_booted); \
+ a_name##_tls = (*val); \
+ if (a_cleanup != malloc_tsd_no_cleanup) \
+ a_name##_initialized = true; \
}
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
@@ -158,220 +158,220 @@ a_name##_tsd_set(a_type *val) \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
- \
- if (a_cleanup != malloc_tsd_no_cleanup) { \
- if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \
- return (true); \
- } \
- a_name##_booted = true; \
- return (false); \
+ \
+ if (a_cleanup != malloc_tsd_no_cleanup) { \
+ if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \
+ return (true); \
+ } \
+ a_name##_booted = true; \
+ return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
- \
- assert(a_name##_booted); \
- return (&a_name##_tls); \
+ \
+ assert(a_name##_booted); \
+ return (&a_name##_tls); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
- \
- assert(a_name##_booted); \
- a_name##_tls = (*val); \
- if (a_cleanup != malloc_tsd_no_cleanup) { \
- if (pthread_setspecific(a_name##_tsd, \
- (void *)(&a_name##_tls))) { \
- malloc_write(": Error" \
- " setting TSD for "#a_name"\n"); \
- if (opt_abort) \
- abort(); \
- } \
- } \
+ \
+ assert(a_name##_booted); \
+ a_name##_tls = (*val); \
+ if (a_cleanup != malloc_tsd_no_cleanup) { \
+ if (pthread_setspecific(a_name##_tsd, \
+ (void *)(&a_name##_tls))) { \
+ malloc_write(": Error" \
+ " setting TSD for "#a_name"\n"); \
+ if (opt_abort) \
+ abort(); \
+ } \
+ } \
}
#elif (defined(_WIN32))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Data structure. */ \
typedef struct { \
- bool initialized; \
- a_type val; \
+ bool initialized; \
+ a_type val; \
} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_cleanup_wrapper(void) \
{ \
- a_name##_tsd_wrapper_t *wrapper; \
- \
- wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \
- if (wrapper == NULL) \
- return (false); \
- if (a_cleanup != malloc_tsd_no_cleanup && \
- wrapper->initialized) { \
- a_type val = wrapper->val; \
- a_type tsd_static_data = a_initializer; \
- wrapper->initialized = false; \
- wrapper->val = tsd_static_data; \
- a_cleanup(&val); \
- if (wrapper->initialized) { \
- /* Trigger another cleanup round. */ \
- return (true); \
- } \
- } \
- malloc_tsd_dalloc(wrapper); \
- return (false); \
+ a_name##_tsd_wrapper_t *wrapper; \
+ \
+ wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \
+ if (wrapper == NULL) \
+ return (false); \
+ if (a_cleanup != malloc_tsd_no_cleanup && \
+ wrapper->initialized) { \
+ a_type val = wrapper->val; \
+ a_type tsd_static_data = a_initializer; \
+ wrapper->initialized = false; \
+ wrapper->val = tsd_static_data; \
+ a_cleanup(&val); \
+ if (wrapper->initialized) { \
+ /* Trigger another cleanup round. */ \
+ return (true); \
+ } \
+ } \
+ malloc_tsd_dalloc(wrapper); \
+ return (false); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
- \
- a_name##_tsd = TlsAlloc(); \
- if (a_name##_tsd == TLS_OUT_OF_INDEXES) \
- return (true); \
- if (a_cleanup != malloc_tsd_no_cleanup) { \
- malloc_tsd_cleanup_register( \
- &a_name##_tsd_cleanup_wrapper); \
- } \
- a_name##_booted = true; \
- return (false); \
+ \
+ a_name##_tsd = TlsAlloc(); \
+ if (a_name##_tsd == TLS_OUT_OF_INDEXES) \
+ return (true); \
+ if (a_cleanup != malloc_tsd_no_cleanup) { \
+ malloc_tsd_cleanup_register( \
+ &a_name##_tsd_cleanup_wrapper); \
+ } \
+ a_name##_booted = true; \
+ return (false); \
} \
/* Get/set. */ \
a_attr a_name##_tsd_wrapper_t * \
a_name##_tsd_get_wrapper(void) \
{ \
- a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
- TlsGetValue(a_name##_tsd); \
- \
- if (wrapper == NULL) { \
- wrapper = (a_name##_tsd_wrapper_t *) \
- malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
- if (wrapper == NULL) { \
- malloc_write("<jemalloc>: Error allocating" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } else { \
- static a_type tsd_static_data = a_initializer; \
- wrapper->initialized = false; \
- wrapper->val = tsd_static_data; \
- } \
- if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \
- malloc_write("<jemalloc>: Error setting" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } \
- } \
- return (wrapper); \
+ a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
+ TlsGetValue(a_name##_tsd); \
+ \
+ if (wrapper == NULL) { \
+ wrapper = (a_name##_tsd_wrapper_t *) \
+ malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
+ if (wrapper == NULL) { \
+ malloc_write("<jemalloc>: Error allocating" \
+ " TSD for "#a_name"\n"); \
+ abort(); \
+ } else { \
+ static a_type tsd_static_data = a_initializer; \
+ wrapper->initialized = false; \
+ wrapper->val = tsd_static_data; \
+ } \
+ if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \
+ malloc_write("<jemalloc>: Error setting" \
+ " TSD for "#a_name"\n"); \
+ abort(); \
+ } \
+ } \
+ return (wrapper); \
} \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
- a_name##_tsd_wrapper_t *wrapper; \
- \
- assert(a_name##_booted); \
- wrapper = a_name##_tsd_get_wrapper(); \
- return (&wrapper->val); \
+ a_name##_tsd_wrapper_t *wrapper; \
+ \
+ assert(a_name##_booted); \
+ wrapper = a_name##_tsd_get_wrapper(); \
+ return (&wrapper->val); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
- a_name##_tsd_wrapper_t *wrapper; \
- \
- assert(a_name##_booted); \
- wrapper = a_name##_tsd_get_wrapper(); \
- wrapper->val = *(val); \
- if (a_cleanup != malloc_tsd_no_cleanup) \
- wrapper->initialized = true; \
+ a_name##_tsd_wrapper_t *wrapper; \
+ \
+ assert(a_name##_booted); \
+ wrapper = a_name##_tsd_get_wrapper(); \
+ wrapper->val = *(val); \
+ if (a_cleanup != malloc_tsd_no_cleanup) \
+ wrapper->initialized = true; \
}
#else
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Data structure. */ \
typedef struct { \
- bool initialized; \
- a_type val; \
+ bool initialized; \
+ a_type val; \
} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr void \
a_name##_tsd_cleanup_wrapper(void *arg) \
{ \
- a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\
- \
- if (a_cleanup != malloc_tsd_no_cleanup && \
- wrapper->initialized) { \
- wrapper->initialized = false; \
- a_cleanup(&wrapper->val); \
- if (wrapper->initialized) { \
- /* Trigger another cleanup round. */ \
- if (pthread_setspecific(a_name##_tsd, \
- (void *)wrapper)) { \
- malloc_write("<jemalloc>: Error" \
- " setting TSD for "#a_name"\n"); \
- if (opt_abort) \
- abort(); \
- } \
- return; \
- } \
- } \
- malloc_tsd_dalloc(wrapper); \
+ a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\
+ \
+ if (a_cleanup != malloc_tsd_no_cleanup && \
+ wrapper->initialized) { \
+ wrapper->initialized = false; \
+ a_cleanup(&wrapper->val); \
+ if (wrapper->initialized) { \
+ /* Trigger another cleanup round. */ \
+ if (pthread_setspecific(a_name##_tsd, \
+ (void *)wrapper)) { \
+ malloc_write("<jemalloc>: Error" \
+ " setting TSD for "#a_name"\n"); \
+ if (opt_abort) \
+ abort(); \
+ } \
+ return; \
+ } \
+ } \
+ malloc_tsd_dalloc(wrapper); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
- \
- if (pthread_key_create(&a_name##_tsd, \
- a_name##_tsd_cleanup_wrapper) != 0) \
- return (true); \
- a_name##_booted = true; \
- return (false); \
+ \
+ if (pthread_key_create(&a_name##_tsd, \
+ a_name##_tsd_cleanup_wrapper) != 0) \
+ return (true); \
+ a_name##_booted = true; \
+ return (false); \
} \
/* Get/set. */ \
a_attr a_name##_tsd_wrapper_t * \
a_name##_tsd_get_wrapper(void) \
{ \
- a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
- pthread_getspecific(a_name##_tsd); \
- \
- if (wrapper == NULL) { \
- wrapper = (a_name##_tsd_wrapper_t *) \
- malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
- if (wrapper == NULL) { \
- malloc_write("<jemalloc>: Error allocating" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } else { \
- static a_type tsd_static_data = a_initializer; \
- wrapper->initialized = false; \
- wrapper->val = tsd_static_data; \
- } \
- if (pthread_setspecific(a_name##_tsd, \
- (void *)wrapper)) { \
- malloc_write("<jemalloc>: Error setting" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } \
- } \
- return (wrapper); \
+ a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
+ pthread_getspecific(a_name##_tsd); \
+ \
+ if (wrapper == NULL) { \
+ wrapper = (a_name##_tsd_wrapper_t *) \
+ malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
+ if (wrapper == NULL) { \
+ malloc_write("<jemalloc>: Error allocating" \
+ " TSD for "#a_name"\n"); \
+ abort(); \
+ } else { \
+ static a_type tsd_static_data = a_initializer; \
+ wrapper->initialized = false; \
+ wrapper->val = tsd_static_data; \
+ } \
+ if (pthread_setspecific(a_name##_tsd, \
+ (void *)wrapper)) { \
+ malloc_write("<jemalloc>: Error setting" \
+ " TSD for "#a_name"\n"); \
+ abort(); \
+ } \
+ } \
+ return (wrapper); \
} \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
- a_name##_tsd_wrapper_t *wrapper; \
- \
- assert(a_name##_booted); \
- wrapper = a_name##_tsd_get_wrapper(); \
- return (&wrapper->val); \
+ a_name##_tsd_wrapper_t *wrapper; \
+ \
+ assert(a_name##_booted); \
+ wrapper = a_name##_tsd_get_wrapper(); \
+ return (&wrapper->val); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
- a_name##_tsd_wrapper_t *wrapper; \
- \
- assert(a_name##_booted); \
- wrapper = a_name##_tsd_get_wrapper(); \
- wrapper->val = *(val); \
- if (a_cleanup != malloc_tsd_no_cleanup) \
- wrapper->initialized = true; \
+ a_name##_tsd_wrapper_t *wrapper; \
+ \
+ assert(a_name##_booted); \
+ wrapper = a_name##_tsd_get_wrapper(); \
+ wrapper->val = *(val); \
+ if (a_cleanup != malloc_tsd_no_cleanup) \
+ wrapper->initialized = true; \
}
#endif
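
All of the malloc_tsd_funcs() variants above expand to the same shape: a boot function that creates the key, a lazily allocated per-thread wrapper fetched on first use, and an initialized flag that the destructor consults (and that can trigger another cleanup round). The following is a minimal sketch of the pthread fallback shape only, with hypothetical demo_-prefixed names standing in for the macro-generated ones; it is not the macro expansion itself.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	int	initialized;
	long	val;		/* stands in for a_type */
} demo_tsd_wrapper_t;

static pthread_key_t demo_tsd;

static void
demo_tsd_cleanup(void *arg)
{
	demo_tsd_wrapper_t *wrapper = arg;

	if (wrapper->initialized) {
		wrapper->initialized = 0;
		/* Per-thread cleanup of wrapper->val would run here. */
	}
	free(wrapper);
}

static int
demo_tsd_boot(void)
{
	/* Nonzero means boot failed, mirroring the macros' bool return. */
	return (pthread_key_create(&demo_tsd, demo_tsd_cleanup) != 0);
}

static demo_tsd_wrapper_t *
demo_tsd_get_wrapper(void)
{
	demo_tsd_wrapper_t *wrapper = pthread_getspecific(demo_tsd);

	if (wrapper == NULL) {
		/* First access on this thread: allocate the wrapper lazily. */
		wrapper = calloc(1, sizeof(*wrapper));
		if (wrapper == NULL ||
		    pthread_setspecific(demo_tsd, wrapper) != 0) {
			fprintf(stderr, "demo: TSD setup failed\n");
			abort();
		}
	}
	return (wrapper);
}

static void
demo_tsd_set(long val)
{
	demo_tsd_wrapper_t *wrapper = demo_tsd_get_wrapper();

	wrapper->val = val;
	wrapper->initialized = 1;
}

As with the generated _tsd_boot()/_tsd_set() pairs, demo_tsd_boot() would run once before any thread calls demo_tsd_set().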
diff --git a/src/rt/jemalloc/include/jemalloc/internal/util.h b/src/rt/jemalloc/include/jemalloc/internal/util.h
index 2fdf756667d1b..8479693631abd 100644
--- a/src/rt/jemalloc/include/jemalloc/internal/util.h
+++ b/src/rt/jemalloc/include/jemalloc/internal/util.h
@@ -33,45 +33,45 @@
*/
#ifndef assert
#define assert(e) do { \
- if (config_debug && !(e)) { \
- malloc_printf( \
- "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
- __FILE__, __LINE__, #e); \
- abort(); \
- } \
+ if (config_debug && !(e)) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
+ __FILE__, __LINE__, #e); \
+ abort(); \
+ } \
} while (0)
#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
- if ((c) == false) \
- assert(false); \
+ if ((c) == false) \
+ assert(false); \
} while (0)
#ifndef not_reached
#define not_reached() do { \
- if (config_debug) { \
- malloc_printf( \
- "<jemalloc>: %s:%d: Unreachable code reached\n", \
- __FILE__, __LINE__); \
- abort(); \
- } \
+ if (config_debug) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Unreachable code reached\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+ } \
} while (0)
#endif
#ifndef not_implemented
#define not_implemented() do { \
- if (config_debug) { \
- malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
- __FILE__, __LINE__); \
- abort(); \
- } \
+ if (config_debug) { \
+ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+ } \
} while (0)
#endif
#define assert_not_implemented(e) do { \
- if (config_debug && !(e)) \
- not_implemented(); \
+ if (config_debug && !(e)) \
+ not_implemented(); \
} while (0)
#endif /* JEMALLOC_H_TYPES */
@@ -118,17 +118,17 @@ JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{
- x--;
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
+ x--;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
- x |= x >> 32;
+ x |= x >> 32;
#endif
- x++;
- return (x);
+ x++;
+ return (x);
}
/* Sets error code */
@@ -137,9 +137,9 @@ set_errno(int errnum)
{
#ifdef _WIN32
- SetLastError(errnum);
+ SetLastError(errnum);
#else
- errno = errnum;
+ errno = errnum;
#endif
}
@@ -149,9 +149,9 @@ get_errno(void)
{
#ifdef _WIN32
- return (GetLastError());
+ return (GetLastError());
#else
- return (errno);
+ return (errno);
#endif
}
#endif
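
pow2_ceil() above rounds a size up to the next power of two by smearing the highest set bit of x-1 into every lower bit and then adding one; for example 13 maps to 16, and 16 maps to itself. The restatement below is illustrative only: demo_pow2_ceil is a hypothetical name, and the SIZE_MAX test stands in for the LG_SIZEOF_PTR == 3 check.

#include <stddef.h>
#include <stdint.h>

static size_t
demo_pow2_ceil(size_t x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
#if SIZE_MAX > 0xffffffffUL	/* 64-bit size_t, as with LG_SIZEOF_PTR == 3 */
	x |= x >> 32;
#endif
	x++;
	return (x);
}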
diff --git a/src/rt/jemalloc/include/msvc_compat/inttypes.h b/src/rt/jemalloc/include/msvc_compat/inttypes.h
index e7a9b35281ec0..a4e6b75cb9146 100644
--- a/src/rt/jemalloc/include/msvc_compat/inttypes.h
+++ b/src/rt/jemalloc/include/msvc_compat/inttypes.h
@@ -1,32 +1,32 @@
// ISO C9x compliant inttypes.h for Microsoft Visual Studio
-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
-//
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
// Copyright (c) 2006 Alexander Chemeris
-//
+//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
-//
+//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
diff --git a/src/rt/jemalloc/include/msvc_compat/stdint.h b/src/rt/jemalloc/include/msvc_compat/stdint.h
index c66fbb817c0d1..d02608a597264 100644
--- a/src/rt/jemalloc/include/msvc_compat/stdint.h
+++ b/src/rt/jemalloc/include/msvc_compat/stdint.h
@@ -1,32 +1,32 @@
// ISO C9x compliant stdint.h for Microsoft Visual Studio
-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
-//
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
// Copyright (c) 2006-2008 Alexander Chemeris
-//
+//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
-//
+//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
-//
+//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
-//
+//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
+//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
diff --git a/src/rt/jemalloc/include/msvc_compat/strings.h b/src/rt/jemalloc/include/msvc_compat/strings.h
index 1ca24fa0f9ee6..c84975b6b8e1d 100644
--- a/src/rt/jemalloc/include/msvc_compat/strings.h
+++ b/src/rt/jemalloc/include/msvc_compat/strings.h
@@ -7,17 +7,17 @@
#pragma intrinsic(_BitScanForward)
static __forceinline int ffsl(long x)
{
- unsigned long i;
+ unsigned long i;
- if (_BitScanForward(&i, x))
- return (i + 1);
- return (0);
+ if (_BitScanForward(&i, x))
+ return (i + 1);
+ return (0);
}
static __forceinline int ffs(int x)
{
- return (ffsl(x));
+ return (ffsl(x));
}
#endif
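
The strings.h shim above maps ffs()/ffsl() onto _BitScanForward() for MSVC. The contract both implement is: return the 1-based index of the least significant set bit, or 0 when the argument is 0. The portable reference below is only a quick illustration of that contract; demo_ffsl and the checks are hypothetical, not part of the shim.

#include <assert.h>

static int
demo_ffsl(long x)
{
	unsigned long v = (unsigned long)x;
	int i;

	if (v == 0)
		return (0);
	for (i = 1; (v & 1) == 0; i++)
		v >>= 1;
	return (i);
}

int
main(void)
{
	assert(demo_ffsl(0) == 0);
	assert(demo_ffsl(1) == 1);
	assert(demo_ffsl(0x80) == 8);	/* bit 7 set -> 1-based index 8 */
	assert(demo_ffsl(12) == 3);	/* 0b1100 -> lowest set bit is bit 2 */
	return (0);
}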
diff --git a/src/rt/jemalloc/install-sh b/src/rt/jemalloc/install-sh
index 058b26c82d243..ebc66913e9401 100755
--- a/src/rt/jemalloc/install-sh
+++ b/src/rt/jemalloc/install-sh
@@ -115,7 +115,7 @@ fi
if [ x"$dir_arg" != x ]; then
dst=$src
src=""
-
+
if [ -d $dst ]; then
instcmd=:
else
@@ -124,7 +124,7 @@ if [ x"$dir_arg" != x ]; then
else
# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
-# might cause directories to be created, which would be especially bad
+# might cause directories to be created, which would be especially bad
# if $src (and thus $dsttmp) contains '*'.
if [ -f $src -o -d $src ]
@@ -134,7 +134,7 @@ else
echo "install: $src does not exist"
exit 1
fi
-
+
if [ x"$dst" = x ]
then
echo "install: no destination specified"
@@ -162,7 +162,7 @@ dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
# Skip lots of stat calls in the usual case.
if [ ! -d "$dstdir" ]; then
-defaultIFS='
+defaultIFS='
'
IFS="${IFS-${defaultIFS}}"
@@ -201,17 +201,17 @@ else
# If we're going to rename the final executable, determine the name now.
- if [ x"$transformarg" = x ]
+ if [ x"$transformarg" = x ]
then
dstfile=`basename $dst`
else
- dstfile=`basename $dst $transformbasename |
+ dstfile=`basename $dst $transformbasename |
sed $transformarg`$transformbasename
fi
# don't allow the sed command to completely eliminate the filename
- if [ x"$dstfile" = x ]
+ if [ x"$dstfile" = x ]
then
dstfile=`basename $dst`
else
@@ -242,7 +242,7 @@ else
# Now rename the file to the real destination.
$doit $rmcmd -f $dstdir/$dstfile &&
- $doit $mvcmd $dsttmp $dstdir/$dstfile
+ $doit $mvcmd $dsttmp $dstdir/$dstfile
fi &&
diff --git a/src/rt/jemalloc/src/chunk.c b/src/rt/jemalloc/src/chunk.c
index 044f76be96c29..aef3fede6dcee 100644
--- a/src/rt/jemalloc/src/chunk.c
+++ b/src/rt/jemalloc/src/chunk.c
@@ -214,7 +214,7 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
size_t size)
{
bool unzeroed;
- extent_node_t *xnode, *node, *prev, key;
+ extent_node_t *xnode, *node, *prev, *xprev, key;
unzeroed = pages_purge(chunk, size);
VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
@@ -226,6 +226,8 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
* held.
*/
xnode = base_node_alloc();
+ /* Use xprev to implement conditional deferred deallocation of prev. */
+ xprev = NULL;
malloc_mutex_lock(&chunks_mtx);
key.addr = (void *)((uintptr_t)chunk + size);
@@ -242,8 +244,6 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
node->size += size;
node->zeroed = (node->zeroed && (unzeroed == false));
extent_tree_szad_insert(chunks_szad, node);
- if (xnode != NULL)
- base_node_dealloc(xnode);
} else {
/* Coalescing forward failed, so insert a new node. */
if (xnode == NULL) {
@@ -253,10 +253,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
* already been purged, so this is only a virtual
* memory leak.
*/
- malloc_mutex_unlock(&chunks_mtx);
- return;
+ goto label_return;
}
node = xnode;
+ xnode = NULL; /* Prevent deallocation below. */
node->addr = chunk;
node->size = size;
node->zeroed = (unzeroed == false);
@@ -282,9 +282,19 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
node->zeroed = (node->zeroed && prev->zeroed);
extent_tree_szad_insert(chunks_szad, node);
- base_node_dealloc(prev);
+ xprev = prev;
}
+
+label_return:
malloc_mutex_unlock(&chunks_mtx);
+ /*
+ * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
+ * avoid potential deadlock.
+ */
+ if (xnode != NULL)
+ base_node_dealloc(xnode);
+ if (xprev != NULL)
+ base_node_dealloc(xprev);
}
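
Per the comment added above, chunk_record() now only records xnode and xprev while chunks_mtx is held and deallocates them after the unlock, since calling base_node_dealloc() under chunks_mtx could deadlock. The sketch below shows that defer-then-free pattern in isolation; every name in it (demo_node_t, demo_mtx, demo_node_dealloc, demo_pop_and_free) is hypothetical.

#include <pthread.h>
#include <stdlib.h>

typedef struct demo_node_s demo_node_t;
struct demo_node_s { demo_node_t *next; };

static pthread_mutex_t demo_mtx = PTHREAD_MUTEX_INITIALIZER;
static demo_node_t *demo_list;

/* Stand-in for base_node_dealloc(); assume it may take other locks. */
static void
demo_node_dealloc(demo_node_t *node)
{
	free(node);
}

static void
demo_pop_and_free(void)
{
	demo_node_t *victim = NULL;

	pthread_mutex_lock(&demo_mtx);
	if (demo_list != NULL) {
		victim = demo_list;	/* park it; do not free it yet */
		demo_list = demo_list->next;
	}
	pthread_mutex_unlock(&demo_mtx);

	/* Deallocate only after dropping demo_mtx, as chunk_record() now does. */
	if (victim != NULL)
		demo_node_dealloc(victim);
}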
void