[Gnash-commit] gnash configure.ac ChangeLog libbase/Makefile.a...
From: Rob Savoye
Subject: [Gnash-commit] gnash configure.ac ChangeLog libbase/Makefile.a...
Date: Tue, 29 Apr 2008 16:50:51 +0000
CVSROOT: /sources/gnash
Module name: gnash
Changes by: Rob Savoye <rsavoye> 08/04/29 16:50:50
Modified files:
. : configure.ac ChangeLog
libbase : Makefile.am
libnet : http.cpp
Added files:
libbase : jemalloc.c jemtree.h
Log message:
* configure.ac: Add --enable-jemalloc option to use our own copy of
jemalloc() instead of the system malloc. Test for local thread
storage via __thread. Expand OS specific tests.
* libbase/Makefile.am: Optionally build jemalloc.
* libbase/jemalloc.c, jemtree.h: Add the jemalloc memory allocator
from Mozilla/Firefox/FreeBSD, tweaked to configure and build the
way the rest of Gnash does.
* libnet/http.cpp: Include unistd.h to keep OpenBSD happy looking
for ::read() and ::close().
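
The local thread storage test mentioned above comes down to whether the
compiler accepts a GCC-style __thread variable. A minimal standalone sketch
of what that probe exercises (the file and variable names here are
illustrative, not taken from the tree):

/* tls-probe.c: if this compiles, the toolchain supports __thread,
 * which jemalloc uses for its per-thread arena map. */
static __thread int per_thread_counter;

int
main(void)
{
    per_thread_counter++;   /* each thread would see its own copy */
    return 0;
}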
CVSWeb URLs:
http://cvs.savannah.gnu.org/viewcvs/gnash/configure.ac?cvsroot=gnash&r1=1.523&r2=1.524
http://cvs.savannah.gnu.org/viewcvs/gnash/ChangeLog?cvsroot=gnash&r1=1.6446&r2=1.6447
http://cvs.savannah.gnu.org/viewcvs/gnash/libbase/Makefile.am?cvsroot=gnash&r1=1.111&r2=1.112
http://cvs.savannah.gnu.org/viewcvs/gnash/libbase/jemalloc.c?cvsroot=gnash&rev=1.1
http://cvs.savannah.gnu.org/viewcvs/gnash/libbase/jemtree.h?cvsroot=gnash&rev=1.1
http://cvs.savannah.gnu.org/viewcvs/gnash/libnet/http.cpp?cvsroot=gnash&r1=1.7&r2=1.8
Patches:
Index: configure.ac
===================================================================
RCS file: /sources/gnash/gnash/configure.ac,v
retrieving revision 1.523
retrieving revision 1.524
diff -u -b -r1.523 -r1.524
--- configure.ac 28 Apr 2008 22:34:04 -0000 1.523
+++ configure.ac 29 Apr 2008 16:50:48 -0000 1.524
@@ -118,20 +118,45 @@
dnl Some things you can only do by looking at the platform name.
case "${host}" in
powerpc-apple-darwin*)
- AC_DEFINE([__powerpc64__], [], [this is a 64 bit powerpc])
+ AC_DEFINE([__powerpc64__], [1], [this is a 64 bit powerpc])
darwin=yes
;;
- i*86-apple-darwin*)
+ *-apple-darwin*)
darwin=yes
+ AC_DEFINE([DARWIN], [1], [this is a Darwin platform])
;;
- i*86-*-openbsd*)
- openbsd_os=openbsd
+ dnl Unfortunately, all BSD distributions are not identical, so as tacky as it is
+ dnl to look for the distribution name, we don't have much choice. The use of these
+ dnl should be avoid as much as possible.
+ *-openbsd*)
+ bsd=yes
+ openbsd=yes
+ AC_DEFINE([OPENBSD], [1], [this is an OpenBSD platform])
+ ;;
+ *-freebsd*)
+ bsd=yes
+ freebsd=yes
+ AC_DEFINE([FREEBSD], [1], [this is a FreeBSD platform])
+ ;;
+ *-netbsd*)
+ bsd=yes
+ netbsd=yes
+ AC_DEFINE([NETBSD], [1], [this is a NetBSD platform])
+ ;;
+ *-*solaris*)
+ solaris=yes
+ AC_DEFINE([SOLARIS], [1], [this is a Solaris platform])
+ ;;
+ *-*linux*)
+ linux=yes
+ AC_DEFINE([LINUX], [1], [this is a Linux platform])
;;
*-cygwin* | *-mingw* | *-pw32*)
windows=yes
+ AC_DEFINE([WIN32], [1], [this is a Win32 platform])
;;
- *64-*-openbsd*)
- openbsd_os=openbsd
+ *64-*-*bsd*)
+ bsd_os=bsd
AC_DEFINE([WORDSIZE], [64], [this is a 64 platform])
;;
esac
@@ -234,6 +259,38 @@
LC_KEY=${lckey}
AC_SUBST(LC_KEY)
+# Maybe use jemalloc, which handles memory fragmentation for
+# ECAMscript languages better than the regular system malloc.
+# This seems like a good idea, as both the other player and
+# Mozilla/Firefox both recently switched to using jemalloc.
+AC_ARG_ENABLE(jemalloc,
+ AC_HELP_STRING([--enable-jemalloc],[Enable jemalloc instead of system malloc]),
+[case "${enableval}" in
+ yes) jemalloc=yes ;;
+ no) jemalloc=no ;;
+ *) AC_MSG_ERROR([bad value ${enableval} for --enable-jemalloc option]) ;;
+esac],jemalloc=no)
+
+dnl There is some weird stuff going on with NetBSD and jemalloc, so don't built it for now.
+if test x"${netbsd}" = x"yes"; then
+ jemalloc=no
+fi
+dnl If the compiler doesn't have local thread storage enabled, don't try to
+dnl use jemalloc.
+if test x"${jemalloc}" = x"yes"; then
+ AC_TRY_COMPILE([], [
+ extern __thread int global_i; ],
+ has_local_thread_storage=yes
+ )
+ if test x"${has_local_thread_storage}" = x"yes"; then
+ AC_DEFINE([HAVE_LOCAL_THREAD_STORAGE], [1], [Has __thread (local thread storage) support])
+ AC_DEFINE([USE_JEMALLOC], [], [Use jemalloc instead of system malloc])
+ else
+ jemalloc=no
+ fi
+fi
+AM_CONDITIONAL(JEMALLOC, test x$jemalloc = xyes)
+
AC_ARG_ENABLE(debugger,
AC_HELP_STRING([--enable-debugger],[Enable the Flash debugger]),
[case "${enableval}" in
@@ -386,21 +443,6 @@
esac],cygnal=no)
AM_CONDITIONAL(CYGNAL, test x$cygnal = xyes)
-dnl Build adding statistics collecting on both memory and performance.
-dnl Warning: this can be a performance hit by itself, due to the overhead
-dnl of collecting and storing the information.
-AC_ARG_ENABLE(statistics,
- AC_HELP_STRING([--enable-statistics], [Enable statistics gathering]),
-[case "${enableval}" in
- yes) statistics=yes
- AC_DEFINE(USE_STATISTICS, [1], [Collect statistics on memopry and performance])
- ;;
- no) statistics=no ;;
- *) AC_MSG_ERROR([bad value ${enableval} for enable-statistics option]) ;;
-esac],statistics=no)
-
-AM_CONDITIONAL(STATISTICS, test x$statistics = xyes)
-
dnl Fix the Intel 810 LOD bias problem
AC_ARG_ENABLE(i810-lod-bias,
AC_HELP_STRING([--enable-i810-lod-bias], [Enable fix for Intel 810 LOD bias problem]),
@@ -825,7 +867,7 @@
dnl primarily only used for tuning the queueing API in Gnash. Memory is the same, it's only used
dnl by developers for tuning performance of memory allocations in Gnash.
buffers=no
-que=yes
+que=no
memory=no
AC_ARG_WITH(statistics,
AC_HELP_STRING([--with-statistics=], [Specify which statistics features to enable]),
@@ -1218,6 +1260,7 @@
AC_CHECK_HEADERS(libgen.h)
AC_CHECK_HEADERS(pwd.h)
AC_CHECK_HEADERS(sys/utsname.h)
+AC_CHECK_LIB(lber, der_free)
AC_CHECK_LIB(m, sqrt)
AC_CHECK_LIB(c, getpwnam, AC_DEFINE(HAVE_GETPWNAM, 1, [Has getpwnam] ))
@@ -1241,7 +1284,7 @@
struct mallinfo x = mallinfo(); ],
AC_DEFINE(HAVE_MALLINFO, [1], [Has mallinfo()])
)
-AM_CONDITIONAL(HAVE_MALLINFO, [test x$HAVE_MALLINFO = xyes])
+AM_CONDITIONAL(HAVE_MALLINFO, [test x$HAVE_MALLINFO = xyes -a x$jemalloc = xyes])
AC_CHECK_LIB(rt, shm_unlink)
AC_CHECK_FUNCS(shm_open shm_unlink)
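
The HAVE_MALLINFO conditional above is now only enabled together with
jemalloc, since the bundled jemalloc.c supplies a mallinfo()-compatible
interface for Gnash's memory profiling, which only reads a couple of
fields. A sketch of that usage pattern (illustrative only, not the actual
Gnash profiling code; on glibc the struct comes from <malloc.h>, with
jemalloc it comes from the bundled allocator):

#ifdef HAVE_MALLINFO
#include <stdio.h>
#include <malloc.h>    /* struct mallinfo and mallinfo() on glibc */

static void
print_heap_usage(void)
{
    struct mallinfo mi = mallinfo();
    /* Gnash's profiling only reads the total allocated and free space. */
    printf("allocated: %d bytes, free: %d bytes\n", mi.uordblks, mi.fordblks);
}
#endif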
Index: ChangeLog
===================================================================
RCS file: /sources/gnash/gnash/ChangeLog,v
retrieving revision 1.6446
retrieving revision 1.6447
diff -u -b -r1.6446 -r1.6447
--- ChangeLog 29 Apr 2008 14:47:24 -0000 1.6446
+++ ChangeLog 29 Apr 2008 16:50:48 -0000 1.6447
@@ -1,3 +1,15 @@
+2008-04-29 Rob Savoye <address@hidden>
+
+ * configure.ac: Add --enable-jemalloc option to use our own copy of
+ jemalloc() instead of the system malloc. Test for local thread
+ storage via __thread. Expand OS specific tests.
+ * libbase/Makefile.am: Optionally build jemalloc.
+ * libbase/jamalloc.c, jemtree.h: Add the jemalloc memory allocator
+ from Mozilla/Firefox/FreeBSD, tweaked to configure and build the
+ way the rest of Gnash does.
+ * libnet/http.cpp: Include unistd.h to keep OpenBSD happy looking
+ for ::read() and ::close().
+
2008-04-29 Benjamin Wolsey <address@hidden>
* server/namedStrings.{cpp,h}: add onFullScreen.
Index: libbase/Makefile.am
===================================================================
RCS file: /sources/gnash/gnash/libbase/Makefile.am,v
retrieving revision 1.111
retrieving revision 1.112
diff -u -b -r1.111 -r1.112
--- libbase/Makefile.am 27 Apr 2008 14:11:48 -0000 1.111
+++ libbase/Makefile.am 29 Apr 2008 16:50:50 -0000 1.112
@@ -92,10 +92,21 @@
EXTRA_DIST += tu_file_SDL.cpp
endif
+# Maybe use jemalloc, which handles memory fragmentation for
+# ECAMscript languages better than the regular system malloc.
+# This is controlled by the --enable-jemalloc (disabled by default)
+# configure option.
+if JEMALLOC
+MALLOC = jemalloc.c
+else
+MALLOC =
+endif
+
libgnashbase_la_SOURCES = \
$(DMALLOC_FILE) \
extension.cpp \
image.cpp \
+ $(MALLOC) \
jpeg.cpp \
log.cpp \
memory.cpp \
@@ -129,6 +140,7 @@
gettext.h \
grid_index.h \
image.h \
+ jemtree.h \
jpeg.h \
gmemory.h \
log.h \
Index: libnet/http.cpp
===================================================================
RCS file: /sources/gnash/gnash/libnet/http.cpp,v
retrieving revision 1.7
retrieving revision 1.8
diff -u -b -r1.7 -r1.8
--- libnet/http.cpp 7 Apr 2008 14:16:51 -0000 1.7
+++ libnet/http.cpp 29 Apr 2008 16:50:50 -0000 1.8
@@ -35,6 +35,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <algorithm>
+#include <unistd.h>
#include "amf.h"
#include "http.h"
@@ -1276,7 +1277,7 @@
st.st_size, filefd);
do {
amf::Buffer *buf = new amf::Buffer;
- ret = read(filefd, buf->reference(), buf->size());
+ ret = ::read(filefd, buf->reference(), buf->size());
if (ret == 0) { // the file is done
delete buf;
break;
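
On OpenBSD the ::read() and ::close() calls in this loop only resolve once
<unistd.h> is included, since that header carries the POSIX prototypes. A
small C sketch of the same read-until-EOF shape (the helper name and chunk
size are illustrative only):

#include <fcntl.h>
#include <unistd.h>   /* declares read() and close() */

/* Illustrative only: drain a file in fixed-size chunks, the same
 * shape as the loop above that fills amf::Buffer objects. */
static int
drain_file(const char *path)
{
    unsigned char chunk[8192];
    ssize_t ret;
    int fd = open(path, O_RDONLY);

    if (fd < 0)
        return -1;
    do {
        ret = read(fd, chunk, sizeof chunk);
        /* ret == 0 means end of file, ret < 0 means an error */
    } while (ret > 0);
    close(fd);
    return (ret < 0) ? -1 : 0;
}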
Index: libbase/jemalloc.c
===================================================================
RCS file: libbase/jemalloc.c
diff -N libbase/jemalloc.c
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ libbase/jemalloc.c 29 Apr 2008 16:50:49 -0000 1.1
@@ -0,0 +1,6239 @@
+/*-
+ * Copyright (C) 2006-2008 Jason Evans <address@hidden>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *******************************************************************************
+ *
+ * This allocator implementation is designed to provide scalable performance
+ * for multi-threaded programs on multi-processor systems. The following
+ * features are included for this purpose:
+ *
+ * + Multiple arenas are used if there are multiple CPUs, which reduces lock
+ * contention and cache sloshing.
+ *
+ * + Cache line sharing between arenas is avoided for internal data
+ * structures.
+ *
+ * + Memory is managed in chunks and runs (chunks can be split into runs),
+ * rather than as individual pages. This provides a constant-time
+ * mechanism for associating allocations with particular arenas.
+ *
+ * Allocation requests are rounded up to the nearest size class, and no record
+ * of the original request size is maintained. Allocations are broken into
+ * categories according to size class. Assuming runtime defaults, 4 kB pages
+ * and a 16 byte quantum, the size classes in each category are as follows:
+ *
+ * |=====================================|
+ * | Category | Subcategory | Size |
+ * |=====================================|
+ * | Small | Tiny | 2 |
+ * | | | 4 |
+ * | | | 8 |
+ * | |----------------+---------|
+ * | | Quantum-spaced | 16 |
+ * | | | 32 |
+ * | | | 48 |
+ * | | | ... |
+ * | | | 480 |
+ * | | | 496 |
+ * | | | 512 |
+ * | |----------------+---------|
+ * | | Sub-page | 1 kB |
+ * | | | 2 kB |
+ * |=====================================|
+ * | Large | 4 kB |
+ * | | 8 kB |
+ * | | 12 kB |
+ * | | ... |
+ * | | 1012 kB |
+ * | | 1016 kB |
+ * | | 1020 kB |
+ * |=====================================|
+ * | Huge | 1 MB |
+ * | | 2 MB |
+ * | | 3 MB |
+ * | | ... |
+ * |=====================================|
+ *
+ * A different mechanism is used for each category:
+ *
+ * Small : Each size class is segregated into its own set of runs. Each run
+ * maintains a bitmap of which regions are free/allocated.
+ *
+ * Large : Each allocation is backed by a dedicated run. Metadata are stored
+ * in the associated arena chunk header maps.
+ *
+ * Huge : Each allocation is backed by a dedicated contiguous set of chunks.
+ * Metadata are stored in a separate red-black tree.
+ *
+ *******************************************************************************
+ */
+
+/*
+ * This has been hacked on heavily by Rob Savoye, so it compiles
+ * within Gnash, using the same configuration settings as
+ * everything else in Gnash.
+ */
+#ifdef HAVE_CONFIG_H
+# include "gnashconfig.h"
+#endif
+
+#include "dsodefs.h"
+
+/*
+ * MALLOC_PRODUCTION disables assertions and statistics gathering. It also
+ * defaults the A and J runtime options to off. These settings are appropriate
+ * for production systems.
+ */
+#ifndef USE_STATS_MEMORY
+# define MALLOC_PRODUCTION 1
+#endif
+
+#ifndef MALLOC_PRODUCTION
+ /*
+ * MALLOC_DEBUG enables assertions and other sanity checks, and disables
+ * inline functions.
+ */
+# define MALLOC_DEBUG 1
+
+ /* MALLOC_STATS enables statistics calculation. */
+# define MALLOC_STATS 1
+
+ /* Memory filling (junk/zero). */
+# define MALLOC_FILL 1
+
+ /* Allocation tracing. */
+# define MALLOC_UTRACE 1
+
+ /* Support optional abort() on OOM. */
+# define MALLOC_XMALLOC 1
+
+ /* Support SYSV semantics. */
+# define MALLOC_SYSV 1
+#endif
+
+/*
+ * MALLOC_LAZY_FREE enables the use of a per-thread vector of slots that free()
+ * can atomically stuff object pointers into. This can reduce arena lock
+ * contention.
+ */
+/* #define MALLOC_LAZY_FREE 1 */
+
+/*
+ * MALLOC_BALANCE enables monitoring of arena lock contention and dynamically
+ * re-balances arena load if exponentially averaged contention exceeds a
+ * certain threshold.
+ */
+/* #define MALLOC_BALANCE 1 */
+
+/*
+ * MALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
+ * segment (DSS). In an ideal world, this functionality would be completely
+ * unnecessary, but we are burdened by history and the lack of resource limits
+ * for anonymous mapped memory.
+ */
+#if (!defined(DARWIN) && !defined(WIN32))
+# define MALLOC_DSS
+#endif
+
+#ifdef LINUX
+# define _GNU_SOURCE /* For mremap(2). */
+# define issetugid() 0
+# if 0 /* Enable in order to test decommit code on Linux. */
+# define MALLOC_DECOMMIT 1
+/*
+ * The decommit code for Unix doesn't bother to make sure deallocated DSS
+ * chunks are writable.
+ */
+# undef MALLOC_DSS
+# endif
+#endif
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef WIN32
+# include <cruntime.h>
+# include <internal.h>
+# include <windows.h>
+# include <io.h>
+# include "jemtree.h"
+
+# pragma warning( disable: 4267 4996 4146 )
+
+# define bool BOOL
+# define false FALSE
+# define true TRUE
+# define inline __inline
+# define SIZE_T_MAX SIZE_MAX
+# define STDERR_FILENO 2
+# define PATH_MAX MAX_PATH
+# define vsnprintf _vsnprintf
+# define assert(f) /* we can't assert in the CRT */
+
+static unsigned long tlsIndex = 0xffffffff;
+
+# define __thread
+# define _pthread_self() __threadid()
+# define issetugid() 0
+
+/* use MSVC intrinsics */
+# pragma intrinsic(_BitScanForward)
+static __forceinline int
+ffs(int x)
+{
+ unsigned long i;
+
+ if (_BitScanForward(&i, x) != 0)
+ return (i + 1);
+
+ return (0);
+}
+
+typedef unsigned char uint8_t;
+typedef unsigned uint32_t;
+typedef unsigned long long uint64_t;
+typedef unsigned long long uintmax_t;
+
+# define MALLOC_DECOMMIT
+#endif /* end of WIN32 */
+
+#ifdef HAVE_PTHREADS
+# include <pthread.h>
+#endif
+
+#ifndef WIN32
+# include <sys/cdefs.h>
+# ifndef __DECONST
+# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
+# endif
+# include <sys/mman.h>
+# ifndef MADV_FREE
+# define MADV_FREE MADV_DONTNEED
+# endif
+# include <sys/param.h>
+# include <sys/time.h>
+# include <sys/types.h>
+# include <sys/sysctl.h>
+# include "jemtree.h"
+# include <sys/uio.h>
+
+# include <errno.h>
+# include <limits.h>
+# ifndef SIZE_T_MAX
+# define SIZE_T_MAX SIZE_MAX
+# endif
+# if defined(DARWIN) || defined(LINUX)
+# define _pthread_self pthread_self
+# define _pthread_mutex_init pthread_mutex_init
+# define _pthread_mutex_trylock pthread_mutex_trylock
+# define _pthread_mutex_lock pthread_mutex_lock
+# define _pthread_mutex_unlock pthread_mutex_unlock
+# endif
+# include <sched.h>
+# include <stdarg.h>
+# include <stdbool.h>
+# include <stdio.h>
+# include <stdint.h>
+# include <stdlib.h>
+# include <string.h>
+# ifndef DARWIN
+# include <strings.h>
+# endif
+# include <unistd.h>
+
+# ifdef DARWIN
+# include <libkern/OSAtomic.h>
+# include <mach/mach_error.h>
+# include <mach/mach_init.h>
+# include <mach/vm_map.h>
+# include <malloc/malloc.h>
+# endif /* end of DARWIN */
+#endif /* end of WIN32 */
+
+#ifdef DARWIN
+static const bool g_isthreaded = true;
+#endif
+
+#define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
+
+#ifdef MALLOC_DEBUG
+# ifdef NDEBUG
+# undef NDEBUG
+# endif
+#else
+# ifndef NDEBUG
+# define NDEBUG
+# endif
+#endif
+#ifndef WIN32
+# include <assert.h>
+#endif
+
+#ifdef MALLOC_DEBUG
+/* Disable inlining to make debugging easier. */
+# ifdef inline
+# undef inline
+# endif
+
+# define inline
+#endif /* end of MALLOC_DEBUG */
+
+/* Size of stack-allocated buffer passed to strerror_r(). */
+#define STRERROR_BUF 64
+
+/* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */
+#define QUANTUM_2POW_MIN 4
+#ifdef MOZ_MEMORY_SIZEOF_PTR_2POW
+# define SIZEOF_PTR_2POW MOZ_MEMORY_SIZEOF_PTR_2POW
+#else
+# define SIZEOF_PTR_2POW 2
+#endif
+// As __isthreaded is already defined on FreeBSD with a different value that does
+// the same thing, rename our version to be unique. Although jemalloc is the default
+// allocator on FreeBSD, we still want to use our own version, as it has additional
+// Gnash specific tweaks.
+#ifndef DARWIN
+static const bool g_isthreaded = true;
+#else
+# define NO_TLS
+#endif
+#if 0
+#ifdef __i386__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 2
+# define CPU_SPINWAIT __asm__ volatile("pause")
+#endif
+#ifdef __ia64__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 3
+#endif
+#ifdef __alpha__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 3
+# define NO_TLS
+#endif
+#ifdef __sparc64__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 3
+# define NO_TLS
+#endif
+#ifdef __amd64__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 3
+# define CPU_SPINWAIT __asm__ volatile("pause")
+#endif
+#ifdef __arm__
+# define QUANTUM_2POW_MIN 3
+# define SIZEOF_PTR_2POW 2
+# define NO_TLS
+#endif
+#ifdef __powerpc__
+# define QUANTUM_2POW_MIN 4
+# define SIZEOF_PTR_2POW 2
+#endif
+#endif
+
+#define SIZEOF_PTR (1U << SIZEOF_PTR_2POW)
+
+/* sizeof(int) == (1U << SIZEOF_INT_2POW). */
+#ifndef SIZEOF_INT_2POW
+# define SIZEOF_INT_2POW 2
+#endif
+
+/* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
+#if (!defined(PIC) && !defined(NO_TLS))
+# define NO_TLS
+#endif
+
+#ifdef NO_TLS
+ /* MALLOC_BALANCE requires TLS. */
+# ifdef MALLOC_BALANCE
+# undef MALLOC_BALANCE
+# endif
+ /* MALLOC_LAZY_FREE requires TLS. */
+# ifdef MALLOC_LAZY_FREE
+# undef MALLOC_LAZY_FREE
+# endif
+#endif
+
+/*
+ * Size and alignment of memory chunks that are allocated by the OS's virtual
+ * memory system.
+ */
+#define CHUNK_2POW_DEFAULT 20
+
+/* Maximum number of dirty pages per arena. */
+#define DIRTY_MAX_DEFAULT (1U << 9)
+
+/*
+ * Maximum size of L1 cache line. This is used to avoid cache line aliasing,
+ * so over-estimates are okay (up to a point), but under-estimates will
+ * negatively affect performance.
+ */
+#define CACHELINE_2POW 6
+#define CACHELINE ((size_t)(1U << CACHELINE_2POW))
+
+/* Smallest size class to support. */
+#define TINY_MIN_2POW 1
+
+/*
+ * Maximum size class that is a multiple of the quantum, but not (necessarily)
+ * a power of 2. Above this size, allocations are rounded up to the nearest
+ * power of 2.
+ */
+#define SMALL_MAX_2POW_DEFAULT 9
+#define SMALL_MAX_DEFAULT (1U << SMALL_MAX_2POW_DEFAULT)
+
+/*
+ * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized
+ * as small as possible such that this setting is still honored, without
+ * violating other constraints. The goal is to make runs as small as possible
+ * without exceeding a per run external fragmentation threshold.
+ *
+ * We use binary fixed point math for overhead computations, where the binary
+ * point is implicitly RUN_BFP bits to the left.
+ *
+ * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
+ * honored for some/all object sizes, since there is one bit of header overhead
+ * per object (plus a constant). This constraint is relaxed (ignored) for runs
+ * that are so small that the per-region overhead is greater than:
+ *
+ * (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP))
+ */
+#define RUN_BFP 12
+/* \/ Implicit binary fixed point. */
+#define RUN_MAX_OVRHD 0x0000003dU
+#define RUN_MAX_OVRHD_RELAX 0x00001800U
+
+/*
+ * Put a cap on small object run size. This overrides RUN_MAX_OVRHD. Note
+ * that small runs must be small enough that page offsets can fit within the
+ * CHUNK_MAP_POS_MASK bits.
+ */
+#define RUN_MAX_SMALL_2POW 15
+#define RUN_MAX_SMALL (1U << RUN_MAX_SMALL_2POW)
+
+#ifdef MALLOC_LAZY_FREE
+ /* Default size of each arena's lazy free cache. */
+# define LAZY_FREE_2POW_DEFAULT 8
+ /*
+ * Number of pseudo-random probes to conduct before considering the cache to
+ * be overly full. It takes on average n probes to detect fullness of
+ * (n-1)/n. However, we are effectively doing multiple non-independent
+ * trials (each deallocation is a trial), so the actual average threshold
+ * for clearing the cache is somewhat lower.
+ */
+# define LAZY_FREE_NPROBES 5
+#endif
+
+/*
+ * Hyper-threaded CPUs may need a special instruction inside spin loops in
+ * order to yield to another virtual CPU. If no such instruction is defined
+ * above, make CPU_SPINWAIT a no-op.
+ */
+#ifndef CPU_SPINWAIT
+# define CPU_SPINWAIT
+#endif
+
+/*
+ * Adaptive spinning must eventually switch to blocking, in order to avoid the
+ * potential for priority inversion deadlock. Backing off past a certain point
+ * can actually waste time.
+ */
+#define SPIN_LIMIT_2POW 11
+
+/*
+ * Conversion from spinning to blocking is expensive; we use (1U <<
+ * BLOCK_COST_2POW) to estimate how many more times costly blocking is than
+ * worst-case spinning.
+ */
+#define BLOCK_COST_2POW 4
+
+#ifdef MALLOC_BALANCE
+ /*
+ * We use an exponential moving average to track recent lock contention,
+ * where the size of the history window is N, and alpha=2/(N+1).
+ *
+ * Due to integer math rounding, very small values here can cause
+ * substantial degradation in accuracy, thus making the moving average decay
+ * faster than it would with precise calculation.
+ */
+# define BALANCE_ALPHA_INV_2POW 9
+
+ /*
+ * Threshold value for the exponential moving contention average at which to
+ * re-assign a thread.
+ */
+# define BALANCE_THRESHOLD_DEFAULT (1U << (SPIN_LIMIT_2POW-4))
+#endif
+
+/******************************************************************************/
+
+/*
+ * Mutexes based on spinlocks. We can't use normal pthread spinlocks in all
+ * places, because they require malloc()ed memory, which causes bootstrapping
+ * issues in some cases.
+ */
+#if defined(WIN32)
+#define malloc_mutex_t CRITICAL_SECTION
+#define malloc_spinlock_t CRITICAL_SECTION
+#elif defined(DARWIN)
+typedef struct {
+ OSSpinLock lock;
+} malloc_mutex_t;
+typedef struct {
+ OSSpinLock lock;
+} malloc_spinlock_t;
+#elif defined(USE_JEMALLOC)
+typedef pthread_mutex_t malloc_mutex_t;
+typedef pthread_mutex_t malloc_spinlock_t;
+#else
+/* XXX these should #ifdef these for freebsd (and linux?) only */
+typedef struct {
+ spinlock_t lock;
+} malloc_mutex_t;
+typedef malloc_spinlock_t malloc_mutex_t;
+#endif
+
+/* Set to true once the allocator has been initialized. */
+static bool malloc_initialized = false;
+
+#ifdef WIN32
+/* No init lock for Windows. */
+#elif defined(DARWIN)
+static malloc_mutex_t init_lock = {OS_SPINLOCK_INIT};
+#elif defined(LINUX)
+static malloc_mutex_t init_lock = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
+#elif defined(USE_JEMALLOC)
+static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
+#else
+static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
+#endif
+
+/******************************************************************************/
+/*
+ * Statistics data structures.
+ */
+
+#ifdef MALLOC_STATS
+
+/* Borrowed from malloc.h, as this is Linux specific. This has been
+ * added to jemalloc so the existing memory profiling in Gnash will
+ * continue to work. Most of these fields aren't used by the Gnash
+ * memory profiling, but we leave them here for a semblance of
+ * portability. The only fields Gnash uses are arena, uordblks. and
+ * fordblks.
+ *
+ * This gets more interesting, as jemalloc maintains multiple
+ * arenas, one for each CPU in a multiprocessor system. We cheat
+ * by just accumulating the stats for all arenas, since our primary
+ * purpose is to track memory leaks.
+ */
+struct mallinfo {
+ int arena; /* non-mmapped space allocated from system */
+ int ordblks; /* number of free chunks UNUSED */
+ int smblks; /* number of fastbin blocks UNUSED */
+ int hblks; /* number of mmapped regions UNUSED */
+ int hblkhd; /* space in mmapped regions UNUSED */
+ int usmblks; /* maximum total allocated space UNUSED */
+ int fsmblks; /* space available in freed fastbin blocks UNUSED */
+ int uordblks; /* total allocated space */
+ int fordblks; /* total free space */
+ int keepcost; /* top-most, releasable space UNUSED */
+};
+
+typedef struct malloc_bin_stats_s malloc_bin_stats_t;
+struct malloc_bin_stats_s {
+ /*
+ * Number of allocation requests that corresponded to the size of this
+ * bin.
+ */
+ uint64_t nrequests;
+
+ /* Total number of runs created for this bin's size class. */
+ uint64_t nruns;
+
+ /*
+ * Total number of runs reused by extracting them from the runs tree for
+ * this bin's size class.
+ */
+ uint64_t reruns;
+
+ /* High-water mark for this bin. */
+ unsigned long highruns;
+
+ /* Current number of runs in this bin. */
+ unsigned long curruns;
+};
+
+typedef struct arena_stats_s arena_stats_t;
+struct arena_stats_s {
+ /* Number of bytes currently mapped. */
+ size_t mapped;
+
+ /*
+ * Total number of purge sweeps, total number of madvise calls made,
+ * and total pages purged in order to keep dirty unused memory under
+ * control.
+ */
+ uint64_t npurge;
+ uint64_t nmadvise;
+ uint64_t purged;
+#ifdef MALLOC_DECOMMIT
+ /*
+ * Total number of decommit/commit operations, and total number of
+ * pages decommitted.
+ */
+ uint64_t ndecommit;
+ uint64_t ncommit;
+ uint64_t decommitted;
+#endif
+
+ /* Per-size-category statistics. */
+ size_t allocated_small;
+ uint64_t nmalloc_small;
+ uint64_t ndalloc_small;
+
+ size_t allocated_large;
+ uint64_t nmalloc_large;
+ uint64_t ndalloc_large;
+
+#ifdef MALLOC_BALANCE
+ /* Number of times this arena reassigned a thread due to contention. */
+ uint64_t nbalance;
+#endif
+};
+
+typedef struct chunk_stats_s chunk_stats_t;
+struct chunk_stats_s {
+ /* Number of chunks that were allocated. */
+ uint64_t nchunks;
+
+ /* High-water mark for number of chunks allocated. */
+ unsigned long highchunks;
+
+ /*
+ * Current number of chunks allocated. This value isn't maintained for
+ * any other purpose, so keep track of it in order to be able to set
+ * highchunks.
+ */
+ unsigned long curchunks;
+};
+
+#endif /* #ifdef MALLOC_STATS */
+
+/******************************************************************************/
+/*
+ * Extent data structures.
+ */
+
+/* Tree of extents. */
+typedef struct extent_node_s extent_node_t;
+struct extent_node_s {
+ /* Linkage for the size/address-ordered tree. */
+ RB_ENTRY(extent_node_s) link_szad;
+
+ /* Linkage for the address-ordered tree. */
+ RB_ENTRY(extent_node_s) link_ad;
+
+ /* Pointer to the extent that this tree node is responsible for. */
+ void *addr;
+
+ /* Total region size. */
+ size_t size;
+};
+typedef struct extent_tree_szad_s extent_tree_szad_t;
+RB_HEAD(extent_tree_szad_s, extent_node_s);
+typedef struct extent_tree_ad_s extent_tree_ad_t;
+RB_HEAD(extent_tree_ad_s, extent_node_s);
+
+/******************************************************************************/
+/*
+ * Arena data structures.
+ */
+
+typedef struct arena_s arena_t;
+typedef struct arena_bin_s arena_bin_t;
+
+/*
+ * Each map element contains several flags, plus page position for runs that
+ * service small allocations.
+ */
+typedef uint8_t arena_chunk_map_t;
+#define CHUNK_MAP_UNTOUCHED 0x80U
+#define CHUNK_MAP_DIRTY 0x40U
+#define CHUNK_MAP_LARGE 0x20U
+#ifdef MALLOC_DECOMMIT
+#define CHUNK_MAP_DECOMMITTED 0x10U
+#define CHUNK_MAP_POS_MASK 0x0fU
+#else
+#define CHUNK_MAP_POS_MASK 0x1fU
+#endif
+
+/* Arena chunk header. */
+typedef struct arena_chunk_s arena_chunk_t;
+struct arena_chunk_s {
+ /* Arena that owns the chunk. */
+ arena_t *arena;
+
+ /* Linkage for the arena's chunk tree. */
+ RB_ENTRY(arena_chunk_s) link;
+
+ /*
+ * Number of pages in use. This is maintained in order to make
+ * detection of empty chunks fast.
+ */
+ size_t pages_used;
+
+ /* Number of dirty pages. */
+ size_t ndirty;
+
+ /*
+ * Tree of extent nodes that are embedded in the arena chunk header
+ * page(s). These nodes are used by arena_chunk_node_alloc().
+ */
+ extent_tree_ad_t nodes;
+ extent_node_t *nodes_past;
+
+ /*
+ * Map of pages within chunk that keeps track of free/large/small. For
+ * free runs, only the map entries for the first and last pages are
+ * kept up to date, so that free runs can be quickly coalesced.
+ */
+ arena_chunk_map_t map[1]; /* Dynamically sized. */
+};
+typedef struct arena_chunk_tree_s arena_chunk_tree_t;
+RB_HEAD(arena_chunk_tree_s, arena_chunk_s);
+
+typedef struct arena_run_s arena_run_t;
+struct arena_run_s {
+ /* Linkage for run trees. */
+ RB_ENTRY(arena_run_s) link;
+
+#ifdef MALLOC_DEBUG
+ uint32_t magic;
+# define ARENA_RUN_MAGIC 0x384adf93
+#endif
+
+ /* Bin this run is associated with. */
+ arena_bin_t *bin;
+
+ /* Index of first element that might have a free region. */
+ unsigned regs_minelm;
+
+ /* Number of free regions in run. */
+ unsigned nfree;
+
+ /* Bitmask of in-use regions (0: in use, 1: free). */
+ unsigned regs_mask[1]; /* Dynamically sized. */
+};
+typedef struct arena_run_tree_s arena_run_tree_t;
+RB_HEAD(arena_run_tree_s, arena_run_s);
+
+struct arena_bin_s {
+ /*
+ * Current run being used to service allocations of this bin's size
+ * class.
+ */
+ arena_run_t *runcur;
+
+ /*
+ * Tree of non-full runs. This tree is used when looking for an
+ * existing run when runcur is no longer usable. We choose the
+ * non-full run that is lowest in memory; this policy tends to keep
+ * objects packed well, and it can also help reduce the number of
+ * almost-empty chunks.
+ */
+ arena_run_tree_t runs;
+
+ /* Size of regions in a run for this bin's size class. */
+ size_t reg_size;
+
+ /* Total size of a run for this bin's size class. */
+ size_t run_size;
+
+ /* Total number of regions in a run for this bin's size class. */
+ uint32_t nregs;
+
+ /* Number of elements in a run's regs_mask for this bin's size class. */
+ uint32_t regs_mask_nelms;
+
+ /* Offset of first region in a run for this bin's size class. */
+ uint32_t reg0_offset;
+
+#ifdef MALLOC_STATS
+ /* Bin statistics. */
+ malloc_bin_stats_t stats;
+#endif
+};
+
+struct arena_s {
+#ifdef MALLOC_DEBUG
+ uint32_t magic;
+# define ARENA_MAGIC 0x947d3d24
+#endif
+
+ /* All operations on this arena require that lock be locked. */
+ malloc_spinlock_t lock;
+
+#ifdef MALLOC_STATS
+ arena_stats_t stats;
+#endif
+
+ /*
+ * Tree of chunks this arena manages.
+ */
+ arena_chunk_tree_t chunks;
+
+ /*
+ * In order to avoid rapid chunk allocation/deallocation when an arena
+ * oscillates right on the cusp of needing a new chunk, cache the most
+ * recently freed chunk. The spare is left in the arena's chunk tree
+ * until it is deleted.
+ *
+ * There is one spare chunk per arena, rather than one spare total, in
+ * order to avoid interactions between multiple threads that could make
+ * a single spare inadequate.
+ */
+ arena_chunk_t *spare;
+
+ /*
+ * Current count of pages within unused runs that are potentially
+ * dirty, and for which madvise(... MADV_FREE) has not been called. By
+ * tracking this, we can institute a limit on how much dirty unused
+ * memory is mapped for each arena.
+ */
+ size_t ndirty;
+
+ /*
+ * Trees of this arena's available runs. Two trees are maintained
+ * using one set of nodes, since one is needed for first-best-fit run
+ * allocation, and the other is needed for coalescing.
+ */
+ extent_tree_szad_t runs_avail_szad;
+ extent_tree_ad_t runs_avail_ad;
+
+ /* Tree of this arena's allocated (in-use) runs. */
+ extent_tree_ad_t runs_alloced_ad;
+
+#ifdef MALLOC_BALANCE
+ /*
+ * The arena load balancing machinery needs to keep track of how much
+ * lock contention there is. This value is exponentially averaged.
+ */
+ uint32_t contention;
+#endif
+
+#ifdef MALLOC_LAZY_FREE
+ /*
+ * Deallocation of small objects can be lazy, in which case free_cache
+ * stores pointers to those objects that have not yet been deallocated.
+ * In order to avoid lock contention, slots are chosen randomly. Empty
+ * slots contain NULL.
+ */
+ void **free_cache;
+#endif
+
+ /*
+ * bins is used to store rings of free regions of the following sizes,
+ * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
+ *
+ * bins[i] | size |
+ * --------+------+
+ * 0 | 2 |
+ * 1 | 4 |
+ * 2 | 8 |
+ * --------+------+
+ * 3 | 16 |
+ * 4 | 32 |
+ * 5 | 48 |
+ * 6 | 64 |
+ * : :
+ * : :
+ * 33 | 496 |
+ * 34 | 512 |
+ * --------+------+
+ * 35 | 1024 |
+ * 36 | 2048 |
+ * --------+------+
+ */
+ arena_bin_t bins[1]; /* Dynamically sized. */
+};
+
+/******************************************************************************/
+/*
+ * Data.
+ */
+
+/* Number of CPUs. */
+static unsigned ncpus;
+
+/* VM page size. */
+static size_t pagesize;
+static size_t pagesize_mask;
+static size_t pagesize_2pow;
+
+/* Various bin-related settings. */
+static size_t bin_maxclass; /* Max size class for bins. */
+static unsigned ntbins; /* Number of (2^n)-spaced tiny bins. */
+static unsigned nqbins; /* Number of quantum-spaced bins. */
+static unsigned nsbins; /* Number of (2^n)-spaced sub-page bins. */
+static size_t small_min;
+static size_t small_max;
+
+/* Various quantum-related settings. */
+static size_t quantum;
+static size_t quantum_mask; /* (quantum - 1). */
+
+/* Various chunk-related settings. */
+static size_t chunksize;
+static size_t chunksize_mask; /* (chunksize - 1). */
+static size_t chunk_npages;
+static size_t arena_chunk_header_npages;
+static size_t arena_maxclass; /* Max size class for arenas. */
+
+/********/
+/*
+ * Chunks.
+ */
+
+/* Protects chunk-related data structures. */
+static malloc_mutex_t huge_mtx;
+
+/* Tree of chunks that are stand-alone huge allocations. */
+static extent_tree_ad_t huge;
+
+#ifdef MALLOC_DSS
+/*
+ * Protects sbrk() calls. This avoids malloc races among threads, though it
+ * does not protect against races with threads that call sbrk() directly.
+ */
+static malloc_mutex_t dss_mtx;
+/* Base address of the DSS. */
+static void *dss_base;
+/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
+static void *dss_prev;
+/* Current upper limit on DSS addresses. */
+static void *dss_max;
+
+/*
+ * Trees of chunks that were previously allocated (trees differ only in node
+ * ordering). These are used when allocating chunks, in an attempt to re-use
+ * address space. Depending on function, different tree orderings are needed,
+ * which is why there are two trees with the same contents.
+ */
+static extent_tree_szad_t dss_chunks_szad;
+static extent_tree_ad_t dss_chunks_ad;
+#endif
+
+#ifdef MALLOC_STATS
+/* Huge allocation statistics. */
+static uint64_t huge_nmalloc;
+static uint64_t huge_ndalloc;
+static size_t huge_allocated;
+#endif
+
+/****************************/
+/*
+ * base (internal allocation).
+ */
+
+/*
+ * Current pages that are being used for internal memory allocations. These
+ * pages are carved up in cacheline-size quanta, so that there is no chance of
+ * false cache line sharing.
+ */
+static void *base_pages;
+static void *base_next_addr;
+static void *base_past_addr; /* Addr immediately past base_pages. */
+static extent_node_t *base_nodes;
+static malloc_mutex_t base_mtx;
+#ifdef MALLOC_STATS
+static size_t base_mapped;
+#endif
+
+/********/
+/*
+ * Arenas.
+ */
+
+/*
+ * Arenas that are used to service external requests. Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ */
+static arena_t **arenas;
+static unsigned narenas;
+#ifndef NO_TLS
+# ifdef MALLOC_BALANCE
+static unsigned narenas_2pow;
+# else
+static unsigned next_arena;
+# endif
+#endif
+static malloc_spinlock_t arenas_lock; /* Protects arenas initialization. */
+
+#ifndef NO_TLS
+/*
+ * Map of pthread_self() --> arenas[???], used for selecting an arena to use
+ * for allocations.
+ */
+#ifdef HAVE_LOCAL_THREAD_STORAGE
+static __thread arena_t *arenas_map;
+#endif
+#endif
+
+#ifdef MALLOC_STATS
+/* Chunk statistics. */
+static chunk_stats_t stats_chunks;
+#endif
+
+/*******************************/
+/*
+ * Runtime configuration options.
+ */
+const char *_malloc_options
+#ifdef WIN32
+= "A10n2F"
+#elif (defined(DARWIN))
+= "AP10n"
+#elif (defined(LINUX))
+= "A10n2F"
+#endif
+;
+
+#ifndef MALLOC_PRODUCTION
+static bool opt_abort = true;
+#ifdef MALLOC_FILL
+static bool opt_junk = true;
+#endif
+#else
+static bool opt_abort = false;
+#ifdef MALLOC_FILL
+static bool opt_junk = false;
+#endif
+#endif
+#ifdef MALLOC_DSS
+static bool opt_dss = true;
+static bool opt_mmap = true;
+#endif
+static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
+#ifdef MALLOC_LAZY_FREE
+static int opt_lazy_free_2pow = LAZY_FREE_2POW_DEFAULT;
+#endif
+#ifdef MALLOC_BALANCE
+static uint64_t opt_balance_threshold = BALANCE_THRESHOLD_DEFAULT;
+#endif
+/*
+ * this toggles the printing of statistics when the program exists.
+ */
+static bool opt_print_stats = true;
+static size_t opt_quantum_2pow = QUANTUM_2POW_MIN;
+static size_t opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
+static size_t opt_chunk_2pow = CHUNK_2POW_DEFAULT;
+#ifdef MALLOC_UTRACE
+static bool opt_utrace = false;
+#endif
+#ifdef MALLOC_SYSV
+static bool opt_sysv = false;
+#endif
+#ifdef MALLOC_XMALLOC
+static bool opt_xmalloc = false;
+#endif
+#ifdef MALLOC_FILL
+static bool opt_zero = false;
+#endif
+static int opt_narenas_lshift = 0;
+
+#ifdef MALLOC_UTRACE
+typedef struct {
+ void *p;
+ size_t s;
+ void *r;
+} malloc_utrace_t;
+
+#define UTRACE(a, b, c) \
+ if (opt_utrace) { \
+ malloc_utrace_t ut; \
+ ut.p = (a); \
+ ut.s = (b); \
+ ut.r = (c); \
+ utrace(&ut, sizeof(ut)); \
+ }
+#else
+#define UTRACE(a, b, c)
+#endif
+
+/******************************************************************************/
+/*
+ * Begin function prototypes for non-inline static functions.
+ */
+
+static bool malloc_mutex_init(malloc_mutex_t *mutex);
+static bool malloc_spin_init(malloc_spinlock_t *lock);
+static void wrtmessage(const char *p1, const char *p2, const char *p3,
+ const char *p4);
+#ifdef MALLOC_STATS
+#ifdef DARWIN
+/* Avoid namespace collision with OS X's malloc APIs. */
+#define malloc_printf xmalloc_printf
+#endif
+static void malloc_printf(const char *format, ...);
+#endif
+static char *umax2s(uintmax_t x, char *s);
+#ifdef MALLOC_DSS
+static bool base_pages_alloc_dss(size_t minsize);
+#endif
+static bool base_pages_alloc_mmap(size_t minsize);
+static bool base_pages_alloc(size_t minsize);
+static void *base_alloc(size_t size);
+static void *base_calloc(size_t number, size_t size);
+static extent_node_t *base_node_alloc(void);
+static void base_node_dealloc(extent_node_t *node);
+#ifdef MALLOC_STATS
+static void stats_print(arena_t *arena);
+#endif
+static void *pages_map(void *addr, size_t size);
+static void pages_unmap(void *addr, size_t size);
+#ifdef MALLOC_DSS
+static void *chunk_alloc_dss(size_t size);
+static void *chunk_recycle_dss(size_t size, bool zero);
+#endif
+static void *chunk_alloc_mmap(size_t size);
+static void *chunk_alloc(size_t size, bool zero);
+#ifdef MALLOC_DSS
+static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);
+static bool chunk_dealloc_dss(void *chunk, size_t size);
+#endif
+static void chunk_dealloc_mmap(void *chunk, size_t size);
+static void chunk_dealloc(void *chunk, size_t size);
+#ifndef NO_TLS
+static arena_t *choose_arena_hard(void);
+#endif
+static extent_node_t *arena_chunk_node_alloc(arena_chunk_t *chunk);
+static void arena_chunk_node_dealloc(arena_chunk_t *chunk,
+ extent_node_t *node);
+static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
+ bool small, bool zero);
+static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
+static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
+static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool small,
+ bool zero);
+static void arena_purge(arena_t *arena);
+static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
+static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
+ extent_node_t *nodeB, arena_run_t *run, size_t oldsize, size_t newsize);
+static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
+ extent_node_t *nodeA, arena_run_t *run, size_t oldsize, size_t newsize,
+ bool dirty);
+static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
+static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
+static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
+#ifdef MALLOC_BALANCE
+static void arena_lock_balance_hard(arena_t *arena);
+#endif
+static void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
+static void *arena_palloc(arena_t *arena, size_t alignment, size_t size,
+ size_t alloc_size);
+static size_t arena_salloc(const void *ptr);
+#ifdef MALLOC_LAZY_FREE
+static void arena_dalloc_lazy_hard(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t pageind, arena_chunk_map_t *mapelm);
+#endif
+static void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr);
+static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t size, size_t oldsize);
+static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t size, size_t oldsize);
+static bool arena_ralloc_large(void *ptr, size_t size, size_t oldsize);
+static void *arena_ralloc(void *ptr, size_t size, size_t oldsize);
+static bool arena_new(arena_t *arena);
+static arena_t *arenas_extend(unsigned ind);
+static void *huge_malloc(size_t size, bool zero);
+static void *huge_palloc(size_t alignment, size_t size);
+static void *huge_ralloc(void *ptr, size_t size, size_t oldsize);
+static void huge_dalloc(void *ptr);
+static void malloc_print_stats(void);
+#ifndef WIN32
+static
+#endif
+bool malloc_init_hard(void);
+
+/*
+ * End function prototypes.
+ */
+/******************************************************************************/
+/*
+ * Begin mutex. We can't use normal pthread mutexes in all places, because
+ * they require malloc()ed memory, which causes bootstrapping issues in some
+ * cases.
+ */
+
+static bool
+malloc_mutex_init(malloc_mutex_t *mutex)
+{
+#if defined(WIN32)
+ if (g_isthreaded)
+ if (! __crtInitCritSecAndSpinCount(mutex, _CRT_SPINCOUNT))
+ return (true);
+#elif defined(DARWIN)
+ mutex->lock = OS_SPINLOCK_INIT;
+#elif defined(LINUX)
+ pthread_mutexattr_t attr;
+ if (pthread_mutexattr_init(&attr) != 0)
+ return (true);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
+ if (pthread_mutex_init(mutex, &attr) != 0) {
+ pthread_mutexattr_destroy(&attr);
+ return (true);
+ }
+ pthread_mutexattr_destroy(&attr);
+#elif defined(USE_JEMALLOC)
+ if (pthread_mutex_init(mutex, NULL) != 0)
+ return (true);
+#else
+ static const spinlock_t lock = _SPINLOCK_INITIALIZER;
+
+ mutex->lock = lock;
+#endif
+ return (false);
+}
+
+static inline void
+malloc_mutex_lock(malloc_mutex_t *mutex)
+{
+
+#if defined(WIN32)
+ EnterCriticalSection(mutex);
+#elif defined(DARWIN)
+ OSSpinLockLock(&mutex->lock);
+#elif defined(USE_JEMALLOC)
+ pthread_mutex_lock(mutex);
+#else
+ if (g_isthreaded)
+ _SPINLOCK(&mutex->lock);
+#endif
+}
+
+static inline void
+malloc_mutex_unlock(malloc_mutex_t *mutex)
+{
+
+#if defined(WIN32)
+ LeaveCriticalSection(mutex);
+#elif defined(DARWIN)
+ OSSpinLockUnlock(&mutex->lock);
+#elif defined(USE_JEMALLOC)
+ pthread_mutex_unlock(mutex);
+#else
+ if (g_isthreaded)
+ _SPINUNLOCK(&mutex->lock);
+#endif
+}
+
+static bool
+malloc_spin_init(malloc_spinlock_t *lock)
+{
+#if defined(WIN32)
+ if (g_isthreaded)
+ if (! __crtInitCritSecAndSpinCount(lock, _CRT_SPINCOUNT))
+ return (true);
+#elif defined(DARWIN)
+ lock->lock = OS_SPINLOCK_INIT;
+#elif defined(LINUX)
+ pthread_mutexattr_t attr;
+ if (pthread_mutexattr_init(&attr) != 0)
+ return (true);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
+ if (pthread_mutex_init(lock, &attr) != 0) {
+ pthread_mutexattr_destroy(&attr);
+ return (true);
+ }
+ pthread_mutexattr_destroy(&attr);
+#elif defined(USE_JEMALLOC)
+ if (pthread_mutex_init(lock, NULL) != 0)
+ return (true);
+#else
+ lock->lock = _SPINLOCK_INITIALIZER;
+#endif
+ return (false);
+}
+
+static inline void
+malloc_spin_lock(malloc_spinlock_t *lock)
+{
+
+#if defined(WIN32)
+ EnterCriticalSection(lock);
+#elif defined(DARWIN)
+ OSSpinLockLock(&lock->lock);
+#elif defined(USE_JEMALLOC)
+ pthread_mutex_lock(lock);
+#else
+ if (g_isthreaded)
+ _SPINLOCK(&lock->lock);
+#endif
+}
+
+static inline void
+malloc_spin_unlock(malloc_spinlock_t *lock)
+{
+#if defined(WIN32)
+ LeaveCriticalSection(lock);
+#elif defined(DARWIN)
+ OSSpinLockUnlock(&lock->lock);
+#elif defined(USE_JEMALLOC)
+ pthread_mutex_unlock(lock);
+#else
+ if (g_isthreaded)
+ _SPINUNLOCK(&lock->lock);
+#endif
+}
+
+/*
+ * End mutex.
+ */
+/******************************************************************************/
+/*
+ * Begin spin lock. Spin locks here are actually adaptive mutexes that block
+ * after a period of spinning, because unbounded spinning would allow for
+ * priority inversion.
+ */
+
+#ifndef DARWIN
+# define malloc_spin_init malloc_mutex_init
+# define malloc_spin_lock malloc_mutex_lock
+# define malloc_spin_unlock malloc_mutex_unlock
+#endif
+
+/*
+ * End spin lock.
+ */
+
+/******************************************************************************/
+/*
+ * Begin Utility functions/macros.
+ */
+
+/* Return the chunk address for allocation address a. */
+#define CHUNK_ADDR2BASE(a) \
+ ((void *)((uintptr_t)(a) & ~chunksize_mask))
+
+/* Return the chunk offset of address a. */
+#define CHUNK_ADDR2OFFSET(a) \
+ ((size_t)((uintptr_t)(a) & chunksize_mask))
+
+/* Return the smallest chunk multiple that is >= s. */
+#define CHUNK_CEILING(s) \
+ (((s) + chunksize_mask) & ~chunksize_mask)
+
+/* Return the smallest cacheline multiple that is >= s. */
+#define CACHELINE_CEILING(s) \
+ (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
+
+/* Return the smallest quantum multiple that is >= a. */
+#define QUANTUM_CEILING(a) \
+ (((a) + quantum_mask) & ~quantum_mask)
+
+/* Return the smallest pagesize multiple that is >= s. */
+#define PAGE_CEILING(s) \
+ (((s) + pagesize_mask) & ~pagesize_mask)
+
+/* Compute the smallest power of 2 that is >= x. */
+static inline size_t
+pow2_ceil(size_t x)
+{
+
+ x--;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+#if (SIZEOF_PTR == 8)
+ x |= x >> 32;
+#endif
+ x++;
+ return (x);
+}
+
+#if (defined(MALLOC_LAZY_FREE) || defined(MALLOC_BALANCE))
+/*
+ * Use a simple linear congruential pseudo-random number generator:
+ *
+ * prn(y) = (a*x + c) % m
+ *
+ * where the following constants ensure maximal period:
+ *
+ * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
+ * c == Odd number (relatively prime to 2^n).
+ * m == 2^32
+ *
+ * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
+ *
+ * This choice of m has the disadvantage that the quality of the bits is
+ * proportional to bit position. For example. the lowest bit has a cycle of 2,
+ * the next has a cycle of 4, etc. For this reason, we prefer to use the upper
+ * bits.
+ */
+# define PRN_DEFINE(suffix, var, a, c) \
+static inline void \
+sprn_##suffix(uint32_t seed) \
+{ \
+ var = seed; \
+} \
+ \
+static inline uint32_t \
+prn_##suffix(uint32_t lg_range) \
+{ \
+ uint32_t ret, x; \
+ \
+ assert(lg_range > 0); \
+ assert(lg_range <= 32); \
+ \
+ x = (var * (a)) + (c); \
+ var = x; \
+ ret = x >> (32 - lg_range); \
+ \
+ return (ret); \
+}
+# define SPRN(suffix, seed) sprn_##suffix(seed)
+# define PRN(suffix, lg_range) prn_##suffix(lg_range)
+#endif
+
+/*
+ * Define PRNGs, one for each purpose, in order to avoid auto-correlation
+ * problems.
+ */
+
+#ifdef MALLOC_LAZY_FREE
+/* Define the per-thread PRNG used for lazy deallocation. */
+static __thread uint32_t lazy_free_x;
+PRN_DEFINE(lazy_free, lazy_free_x, 12345, 12347)
+#endif
+
+#ifdef MALLOC_BALANCE
+/* Define the PRNG used for arena assignment. */
+static __thread uint32_t balance_x;
+PRN_DEFINE(balance, balance_x, 1297, 1301)
+#endif
+
+#ifdef MALLOC_UTRACE
+static int
+utrace(const void *addr, size_t len)
+{
+ malloc_utrace_t *ut = (malloc_utrace_t *)addr;
+
+ assert(len == sizeof(malloc_utrace_t));
+
+ if (ut->p == NULL && ut->s == 0 && ut->r == NULL)
+ malloc_printf("%d x USER malloc_init()\n", getpid());
+ else if (ut->p == NULL && ut->r != NULL) {
+ malloc_printf("%d x USER %p = malloc(%zu)\n", getpid(), ut->r,
+ ut->s);
+ } else if (ut->p != NULL && ut->r != NULL) {
+ malloc_printf("%d x USER %p = realloc(%p, %zu)\n", getpid(),
+ ut->r, ut->p, ut->s);
+ } else
+ malloc_printf("%d x USER free(%p)\n", getpid(), ut->p);
+
+ return (0);
+}
+#endif
+
+static inline const char *
+_getprogname(void)
+{
+
+ return ("<jemalloc>");
+}
+
+static void
+wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
+{
+#ifndef WIN32
+#define _write write
+#endif
+ _write(STDERR_FILENO, p1, (unsigned int) strlen(p1));
+ _write(STDERR_FILENO, p2, (unsigned int) strlen(p2));
+ _write(STDERR_FILENO, p3, (unsigned int) strlen(p3));
+ _write(STDERR_FILENO, p4, (unsigned int) strlen(p4));
+}
+
+#define _malloc_message malloc_message
+
+void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
+ const char *p4) = wrtmessage;
+
+#ifdef MALLOC_STATS
+/*
+ * Print to stderr in such a way as to (hopefully) avoid memory allocation.
+ */
+static void
+malloc_printf(const char *format, ...)
+{
+ char buf[4096];
+ va_list ap;
+
+ va_start(ap, format);
+ vsnprintf(buf, sizeof(buf), format, ap);
+ va_end(ap);
+ _malloc_message(buf, "", "", "");
+}
+#endif
+
+/*
+ * We don't want to depend on vsnprintf() for production builds, since that can
+ * cause unnecessary bloat for static binaries. umax2s() provides minimal
+ * integer printing functionality, so that malloc_printf() use can be limited to
+ * MALLOC_STATS code.
+ */
+#define UMAX2S_BUFSIZE 21
+static char *
+umax2s(uintmax_t x, char *s)
+{
+ unsigned i;
+
+ /* Make sure UMAX2S_BUFSIZE is large enough. */
+ assert(sizeof(uintmax_t) <= 8);
+
+ i = UMAX2S_BUFSIZE - 1;
+ s[i] = '\0';
+ do {
+ i--;
+ s[i] = "0123456789"[x % 10];
+ x /= 10;
+ } while (x > 0);
+
+ return (&s[i]);
+}
+
+/******************************************************************************/
+
+#ifdef MALLOC_DSS
+static bool
+base_pages_alloc_dss(size_t minsize)
+{
+
+ /*
+ * Do special DSS allocation here, since base allocations don't need to
+ * be chunk-aligned.
+ */
+ malloc_mutex_lock(&dss_mtx);
+ if (dss_prev != (void *)-1) {
+ intptr_t incr;
+ size_t csize = CHUNK_CEILING(minsize);
+
+ do {
+ /* Get the current end of the DSS. */
+ dss_max = sbrk(0);
+
+ /*
+ * Calculate how much padding is necessary to
+ * chunk-align the end of the DSS. Don't worry about
+ * dss_max not being chunk-aligned though.
+ */
+ incr = (intptr_t)chunksize
+ - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
+ assert(incr >= 0);
+ if ((size_t)incr < minsize)
+ incr += csize;
+
+ dss_prev = sbrk(incr);
+ if (dss_prev == dss_max) {
+ /* Success. */
+ dss_max = (void *)((intptr_t)dss_prev + incr);
+ base_pages = dss_prev;
+ base_next_addr = base_pages;
+ base_past_addr = dss_max;
+#ifdef MALLOC_STATS
+ base_mapped += incr;
+#endif
+ malloc_mutex_unlock(&dss_mtx);
+ return (false);
+ }
+ } while (dss_prev != (void *)-1);
+ }
+ malloc_mutex_unlock(&dss_mtx);
+
+ return (true);
+}
+#endif
+
+static bool
+base_pages_alloc_mmap(size_t minsize)
+{
+ size_t csize;
+
+ assert(minsize != 0);
+ csize = PAGE_CEILING(minsize);
+ base_pages = pages_map(NULL, csize);
+ if (base_pages == NULL)
+ return (true);
+ base_next_addr = base_pages;
+ base_past_addr = (void *)((uintptr_t)base_pages + csize);
+#ifdef MALLOC_STATS
+ base_mapped += csize;
+#endif
+
+ return (false);
+}
+
+static bool
+base_pages_alloc(size_t minsize)
+{
+
+#ifdef MALLOC_DSS
+ if (opt_dss) {
+ if (base_pages_alloc_dss(minsize) == false)
+ return (false);
+ }
+
+ if (opt_mmap && minsize != 0)
+#endif
+ {
+ if (base_pages_alloc_mmap(minsize) == false)
+ return (false);
+ }
+
+ return (true);
+}
+
+static void *
+base_alloc(size_t size)
+{
+ void *ret;
+ size_t csize;
+
+ /* Round size up to nearest multiple of the cacheline size. */
+ csize = CACHELINE_CEILING(size);
+
+ malloc_mutex_lock(&base_mtx);
+ /* Make sure there's enough space for the allocation. */
+ if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
+ if (base_pages_alloc(csize))
+ return (NULL);
+ }
+ /* Allocate. */
+ ret = base_next_addr;
+ base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
+ malloc_mutex_unlock(&base_mtx);
+
+ return (ret);
+}
+
+static void *
+base_calloc(size_t number, size_t size)
+{
+ void *ret;
+
+ ret = base_alloc(number * size);
+ memset(ret, 0, number * size);
+
+ return (ret);
+}
+
+static extent_node_t *
+base_node_alloc(void)
+{
+ extent_node_t *ret;
+
+ malloc_mutex_lock(&base_mtx);
+ if (base_nodes != NULL) {
+ ret = base_nodes;
+ base_nodes = *(extent_node_t **)ret;
+ malloc_mutex_unlock(&base_mtx);
+ } else {
+ malloc_mutex_unlock(&base_mtx);
+ ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
+ }
+
+ return (ret);
+}
+
+static void
+base_node_dealloc(extent_node_t *node)
+{
+
+ malloc_mutex_lock(&base_mtx);
+ *(extent_node_t **)node = base_nodes;
+ base_nodes = node;
+ malloc_mutex_unlock(&base_mtx);
+}
+
+/******************************************************************************/
+
+#ifdef MALLOC_STATS
+static void
+stats_print(arena_t *arena)
+{
+ unsigned i, gap_start;
+
+#ifdef WIN32
+ malloc_printf("dirty: %Iu page%s dirty, %I64u sweep%s,"
+ " %I64u madvise%s, %I64u page%s purged\n",
+ arena->ndirty, arena->ndirty == 1 ? "" : "s",
+ arena->stats.npurge, arena->stats.npurge == 1 ? "" : "s",
+ arena->stats.nmadvise, arena->stats.nmadvise == 1 ? "" : "s",
+ arena->stats.purged, arena->stats.purged == 1 ? "" : "s");
+# ifdef MALLOC_DECOMMIT
+ malloc_printf("decommit: %I64u decommit%s, %I64u commit%s,"
+ " %I64u page%s decommitted\n",
+ arena->stats.ndecommit, (arena->stats.ndecommit == 1) ? "" : "s",
+ arena->stats.ncommit, (arena->stats.ncommit == 1) ? "" : "s",
+ arena->stats.decommitted,
+ (arena->stats.decommitted == 1) ? "" : "s");
+# endif
+
+ malloc_printf(" allocated nmalloc ndalloc\n");
+ malloc_printf("small: %12Iu %12I64u %12I64u\n",
+ arena->stats.allocated_small, arena->stats.nmalloc_small,
+ arena->stats.ndalloc_small);
+ malloc_printf("large: %12Iu %12I64u %12I64u\n",
+ arena->stats.allocated_large, arena->stats.nmalloc_large,
+ arena->stats.ndalloc_large);
+ malloc_printf("total: %12Iu %12I64u %12I64u\n",
+ arena->stats.allocated_small + arena->stats.allocated_large,
+ arena->stats.nmalloc_small + arena->stats.nmalloc_large,
+ arena->stats.ndalloc_small + arena->stats.ndalloc_large);
+ malloc_printf("mapped: %12Iu\n", arena->stats.mapped);
+#else
+ malloc_printf("dirty: %zu page%s dirty, %llu sweep%s,"
+ " %llu madvise%s, %llu page%s purged\n",
+ arena->ndirty, arena->ndirty == 1 ? "" : "s",
+ arena->stats.npurge, arena->stats.npurge == 1 ? "" : "s",
+ arena->stats.nmadvise, arena->stats.nmadvise == 1 ? "" : "s",
+ arena->stats.purged, arena->stats.purged == 1 ? "" : "s");
+# ifdef MALLOC_DECOMMIT
+ malloc_printf("decommit: %llu decommit%s, %llu commit%s,"
+ " %llu page%s decommitted\n",
+ arena->stats.ndecommit, (arena->stats.ndecommit == 1) ? "" : "s",
+ arena->stats.ncommit, (arena->stats.ncommit == 1) ? "" : "s",
+ arena->stats.decommitted,
+ (arena->stats.decommitted == 1) ? "" : "s");
+# endif
+
+ malloc_printf(" allocated nmalloc ndalloc\n");
+ malloc_printf("small: %12zu %12llu %12llu\n",
+ arena->stats.allocated_small, arena->stats.nmalloc_small,
+ arena->stats.ndalloc_small);
+ malloc_printf("large: %12zu %12llu %12llu\n",
+ arena->stats.allocated_large, arena->stats.nmalloc_large,
+ arena->stats.ndalloc_large);
+ malloc_printf("total: %12zu %12llu %12llu\n",
+ arena->stats.allocated_small + arena->stats.allocated_large,
+ arena->stats.nmalloc_small + arena->stats.nmalloc_large,
+ arena->stats.ndalloc_small + arena->stats.ndalloc_large);
+ malloc_printf("mapped: %12zu\n", arena->stats.mapped);
+#endif
+ malloc_printf("bins: bin size regs pgs requests newruns"
+ " reruns maxruns curruns\n");
+ for (i = 0, gap_start = UINT_MAX; i < ntbins + nqbins + nsbins; i++) {
+ if (arena->bins[i].stats.nrequests == 0) {
+ if (gap_start == UINT_MAX)
+ gap_start = i;
+ } else {
+ if (gap_start != UINT_MAX) {
+ if (i > gap_start + 1) {
+ /* Gap of more than one size class. */
+ malloc_printf("[%u..%u]\n",
+ gap_start, i - 1);
+ } else {
+ /* Gap of one size class. */
+ malloc_printf("[%u]\n", gap_start);
+ }
+ gap_start = UINT_MAX;
+ }
+ malloc_printf(
+#if defined(WIN32)
+ "%13u %1s %4u %4u %3u %9I64u %9I64u"
+ " %9I64u %7u %7u\n",
+#else
+ "%13u %1s %4u %4u %3u %9llu %9llu"
+ " %9llu %7lu %7lu\n",
+#endif
+ i,
+ i < ntbins ? "T" : i < ntbins + nqbins ? "Q" : "S",
+ arena->bins[i].reg_size,
+ arena->bins[i].nregs,
+ arena->bins[i].run_size >> pagesize_2pow,
+ arena->bins[i].stats.nrequests,
+ arena->bins[i].stats.nruns,
+ arena->bins[i].stats.reruns,
+ arena->bins[i].stats.highruns,
+ arena->bins[i].stats.curruns);
+ }
+ }
+ if (gap_start != UINT_MAX) {
+ if (i > gap_start + 1) {
+ /* Gap of more than one size class. */
+ malloc_printf("[%u..%u]\n", gap_start, i - 1);
+ } else {
+ /* Gap of one size class. */
+ malloc_printf("[%u]\n", gap_start);
+ }
+ }
+}
+#endif
+
+/*
+ * End Utility functions/macros.
+ */
+/******************************************************************************/
+/*
+ * Begin extent tree code.
+ */
+
+static inline int
+extent_szad_comp(extent_node_t *a, extent_node_t *b)
+{
+ int ret;
+ size_t a_size = a->size;
+ size_t b_size = b->size;
+
+ ret = (a_size > b_size) - (a_size < b_size);
+ if (ret == 0) {
+ uintptr_t a_addr = (uintptr_t)a->addr;
+ uintptr_t b_addr = (uintptr_t)b->addr;
+
+ ret = (a_addr > b_addr) - (a_addr < b_addr);
+ }
+
+ return (ret);
+}
+
+/* Generate red-black tree code for size/address-ordered extents. */
+RB_GENERATE_STATIC(extent_tree_szad_s, extent_node_s, link_szad,
+ extent_szad_comp)
+
+static inline int
+extent_ad_comp(extent_node_t *a, extent_node_t *b)
+{
+ uintptr_t a_addr = (uintptr_t)a->addr;
+ uintptr_t b_addr = (uintptr_t)b->addr;
+
+ return ((a_addr > b_addr) - (a_addr < b_addr));
+}
+
+/* Generate red-black tree code for address-ordered extents. */
+RB_GENERATE_STATIC(extent_tree_ad_s, extent_node_s, link_ad, extent_ad_comp)
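Both comparators rely on the (a > b) - (a < b) idiom, which produces -1, 0, or 1 without the wrap-around risk of subtracting the keys directly. A tiny spot check (the helper name is made up for the sketch):

#include <assert.h>
#include <stdint.h>

/* Mirrors the comparison idiom used by extent_ad_comp(). */
static int
addr_comp(uintptr_t a, uintptr_t b)
{
    return ((a > b) - (a < b));
}

int
main(void)
{
    assert(addr_comp(0x1000, 0x2000) == -1);
    assert(addr_comp(0x2000, 0x2000) == 0);
    assert(addr_comp(UINTPTR_MAX, 0) == 1); /* a - b would wrap around */
    return (0);
}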
+
+
+/*
+ * End extent tree code.
+ */
+/******************************************************************************/
+/*
+ * Begin chunk management functions.
+ */
+
+#ifdef WIN32
+static void *
+pages_map(void *addr, size_t size)
+{
+ void *ret;
+
+ ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
+ PAGE_READWRITE);
+
+ return (ret);
+}
+
+static void
+pages_unmap(void *addr, size_t size)
+{
+
+ if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in VirtualFree()\n", "", "");
+ if (opt_abort)
+ abort();
+ }
+}
+#elif (defined(DARWIN))
+static void *
+pages_map(void *addr, size_t size)
+{
+ void *ret;
+ kern_return_t err;
+ int flags;
+
+ if (addr != NULL) {
+ ret = addr;
+ flags = 0;
+ } else
+ flags = VM_FLAGS_ANYWHERE;
+
+ err = vm_allocate((vm_map_t)mach_task_self(), (vm_address_t *)&ret,
+ (vm_size_t)size, flags);
+ if (err != KERN_SUCCESS)
+ ret = NULL;
+
+ assert(ret == NULL || (addr == NULL && ret != addr)
+ || (addr != NULL && ret == addr));
+ return (ret);
+}
+
+static void
+pages_unmap(void *addr, size_t size)
+{
+ kern_return_t err;
+
+ err = vm_deallocate((vm_map_t)mach_task_self(), (vm_address_t)addr,
+ (vm_size_t)size);
+ if (err != KERN_SUCCESS) {
+ malloc_message(_getprogname(),
+ ": (malloc) Error in vm_deallocate(): ",
+ mach_error_string(err), "\n");
+ if (opt_abort)
+ abort();
+ }
+}
+
+#define VM_COPY_MIN (pagesize << 5)
+static inline void
+pages_copy(void *dest, const void *src, size_t n)
+{
+
+ assert((void *)((uintptr_t)dest & ~pagesize_mask) == dest);
+ assert(n >= VM_COPY_MIN);
+ assert((void *)((uintptr_t)src & ~pagesize_mask) == src);
+
+ vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
+ (vm_address_t)dest);
+}
+#else /* DARWIN */
+static void *
+pages_map(void *addr, size_t size)
+{
+ void *ret;
+
+ /*
+ * We don't use MAP_FIXED here, because it can cause the *replacement*
+ * of existing mappings, and we only want to create new mappings.
+ */
+ ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
+ -1, 0);
+ assert(ret != NULL);
+
+ if (ret == MAP_FAILED)
+ ret = NULL;
+ else if (addr != NULL && ret != addr) {
+ /*
+ * We succeeded in mapping memory, but not in the right place.
+ */
+ if (munmap(ret, size) == -1) {
+ char buf[STRERROR_BUF];
+
+ strerror_r(errno, buf, sizeof(buf));
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in munmap(): ", buf, "\n");
+ if (opt_abort)
+ abort();
+ }
+ ret = NULL;
+ }
+
+ assert(ret == NULL || (addr == NULL && ret != addr)
+ || (addr != NULL && ret == addr));
+ return (ret);
+}
+
+static void
+pages_unmap(void *addr, size_t size)
+{
+
+ if (munmap(addr, size) == -1) {
+ char buf[STRERROR_BUF];
+
+ strerror_r(errno, buf, sizeof(buf));
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in munmap(): ", buf, "\n");
+ if (opt_abort)
+ abort();
+ }
+}
+#endif
+
+#ifdef MALLOC_DECOMMIT
+static inline void
+pages_decommit(void *addr, size_t size)
+{
+
+#ifdef WIN32
+ VirtualFree(addr, size, MEM_DECOMMIT);
+#else
+ if (mmap(addr, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1,
+ 0) == MAP_FAILED)
+ abort();
+#endif
+}
+
+static inline void
+pages_commit(void *addr, size_t size)
+{
+
+# ifdef WIN32
+ VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE);
+# else
+ if (mmap(addr, size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE |
+ MAP_ANON, -1, 0) == MAP_FAILED)
+ abort();
+# endif
+}
+#endif
+
+#ifdef MALLOC_DSS
+static void *
+chunk_alloc_dss(size_t size)
+{
+
+ malloc_mutex_lock(&dss_mtx);
+ if (dss_prev != (void *)-1) {
+ intptr_t incr;
+
+ /*
+ * The loop is necessary to recover from races with other
+ * threads that are using the DSS for something other than
+ * malloc.
+ */
+ do {
+ void *ret;
+
+ /* Get the current end of the DSS. */
+ dss_max = sbrk(0);
+
+ /*
+ * Calculate how much padding is necessary to
+ * chunk-align the end of the DSS.
+ */
+ incr = (intptr_t)size
+ - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
+ if (incr == (intptr_t)size)
+ ret = dss_max;
+ else {
+ ret = (void *)((intptr_t)dss_max + incr);
+ incr += size;
+ }
+
+ dss_prev = sbrk(incr);
+ if (dss_prev == dss_max) {
+ /* Success. */
+ dss_max = (void *)((intptr_t)dss_prev + incr);
+ malloc_mutex_unlock(&dss_mtx);
+ return (ret);
+ }
+ } while (dss_prev != (void *)-1);
+ }
+ malloc_mutex_unlock(&dss_mtx);
+
+ return (NULL);
+}
+
+static void *
+chunk_recycle_dss(size_t size, bool zero)
+{
+ extent_node_t *node, key;
+
+ key.addr = NULL;
+ key.size = size;
+ malloc_mutex_lock(&dss_mtx);
+ node = RB_NFIND(extent_tree_szad_s, &dss_chunks_szad, &key);
+ if (node != NULL) {
+ void *ret = node->addr;
+
+ /* Remove node from the tree. */
+ RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, node);
+ if (node->size == size) {
+ RB_REMOVE(extent_tree_ad_s, &dss_chunks_ad, node);
+ base_node_dealloc(node);
+ } else {
+ /*
+ * Insert the remainder of node's address range as a
+ * smaller chunk. Its position within dss_chunks_ad
+ * does not change.
+ */
+ assert(node->size > size);
+ node->addr = (void *)((uintptr_t)node->addr + size);
+ node->size -= size;
+ RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
+ }
+ malloc_mutex_unlock(&dss_mtx);
+
+ if (zero)
+ memset(ret, 0, size);
+ return (ret);
+ }
+ malloc_mutex_unlock(&dss_mtx);
+
+ return (NULL);
+}
+#endif
+
+#ifdef WIN32
+static inline void *
+chunk_alloc_mmap(size_t size)
+{
+ void *ret;
+ size_t offset;
+
+ /*
+ * Windows requires that there be a 1:1 mapping between VM
+ * allocation/deallocation operations. Therefore, take care here to
+ * acquire the final result via one mapping operation. This means
+ * unmapping any preliminary result that is not correctly aligned.
+ */
+
+ ret = pages_map(NULL, size);
+ if (ret == NULL)
+ return (NULL);
+
+ offset = CHUNK_ADDR2OFFSET(ret);
+ if (offset != 0) {
+ /* Deallocate, then try to allocate at (ret + size - offset). */
+ pages_unmap(ret, size);
+ ret = pages_map((void *)((uintptr_t)ret + size - offset), size);
+ while (ret == NULL) {
+ /*
+ * Over-allocate in order to map a memory region that
+ * is definitely large enough.
+ */
+ ret = pages_map(NULL, size + chunksize);
+ if (ret == NULL)
+ return (NULL);
+ /*
+ * Deallocate, then allocate the correct size, within
+ * the over-sized mapping.
+ */
+ offset = CHUNK_ADDR2OFFSET(ret);
+ pages_unmap(ret, size + chunksize);
+ if (offset == 0)
+ ret = pages_map(ret, size);
+ else {
+ ret = pages_map((void *)((uintptr_t)ret +
+ chunksize - offset), size);
+ }
+ /*
+ * Failure here indicates a race with another thread, so
+ * try again.
+ */
+ }
+ }
+
+ return (ret);
+}
+#else
+static inline void *
+chunk_alloc_mmap(size_t size)
+{
+ void *ret;
+ size_t offset;
+
+ /*
+ * Ideally, there would be a way to specify alignment to mmap() (like
+ * NetBSD has), but in the absence of such a feature, we have to work
+ * hard to efficiently create aligned mappings. The reliable, but
+ * expensive method is to create a mapping that is over-sized, then
+ * trim the excess. However, that always results in at least one call
+ * to pages_unmap().
+ *
+ * A more optimistic approach is to try mapping precisely the right
+ * amount, then try to append another mapping if alignment is off. In
+ * practice, this works out well as long as the application is not
+ * interleaving mappings via direct mmap() calls. If we do run into a
+ * situation where there is an interleaved mapping and we are unable to
+ * extend an unaligned mapping, our best option is to momentarily
+ * revert to the reliable-but-expensive method. This will tend to
+ * leave a gap in the memory map that is too small to cause later
+ * problems for the optimistic method.
+ */
+
+ ret = pages_map(NULL, size);
+ if (ret == NULL)
+ return (NULL);
+
+ offset = CHUNK_ADDR2OFFSET(ret);
+ if (offset != 0) {
+ /* Try to extend chunk boundary. */
+ if (pages_map((void *)((uintptr_t)ret + size),
+ chunksize - offset) == NULL) {
+ /*
+ * Extension failed. Clean up, then revert to the
+ * reliable-but-expensive method.
+ */
+ pages_unmap(ret, size);
+
+ /* Beware size_t wrap-around. */
+ if (size + chunksize <= size)
+ return (NULL);
+
+ ret = pages_map(NULL, size + chunksize);
+ if (ret == NULL)
+ return (NULL);
+
+ /* Clean up unneeded leading/trailing space. */
+ offset = CHUNK_ADDR2OFFSET(ret);
+ if (offset != 0) {
+ /* Leading space. */
+ pages_unmap(ret, chunksize - offset);
+
+ ret = (void *)((uintptr_t)ret +
+ (chunksize - offset));
+
+ /* Trailing space. */
+ pages_unmap((void *)((uintptr_t)ret + size),
+ offset);
+ } else {
+ /* Trailing space only. */
+ pages_unmap((void *)((uintptr_t)ret + size),
+ chunksize);
+ }
+ } else {
+ /* Clean up unneeded leading space. */
+ pages_unmap(ret, chunksize - offset);
+ ret = (void *)((uintptr_t)ret + (chunksize - offset));
+ }
+ }
+
+ return (ret);
+}
+#endif
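The alignment juggling in both chunk_alloc_mmap() variants comes down to treating the low bits of an address as its offset within a chunk, since chunksize is a power of two. A standalone sketch of that arithmetic, assuming 1 MiB chunks (the real size comes from the chunk-size option):

#include <assert.h>
#include <stdint.h>

#define CHUNK_2POW 20                     /* assumed: 1 MiB chunks */
#define CHUNKSIZE ((uintptr_t)1 << CHUNK_2POW)
#define CHUNKSIZE_MASK (CHUNKSIZE - 1)

/* Mirrors CHUNK_ADDR2OFFSET(): offset of an address within its chunk. */
#define ADDR2OFFSET(a) ((uintptr_t)(a) & CHUNKSIZE_MASK)

int
main(void)
{
    uintptr_t ret = 0x40123000;           /* hypothetical mmap() result */
    uintptr_t offset = ADDR2OFFSET(ret);

    if (offset != 0) {
        /*
         * Trimming (chunksize - offset) leading bytes, as the "clean up
         * unneeded leading space" path does, lands the region on the
         * next chunk boundary.
         */
        ret += CHUNKSIZE - offset;
        assert(ADDR2OFFSET(ret) == 0);
    }
    return (0);
}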
+
+static void *
+chunk_alloc(size_t size, bool zero)
+{
+ void *ret;
+
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+
+#ifdef MALLOC_DSS
+ if (opt_dss) {
+ ret = chunk_recycle_dss(size, zero);
+ if (ret != NULL) {
+ goto RETURN;
+ }
+
+ ret = chunk_alloc_dss(size);
+ if (ret != NULL)
+ goto RETURN;
+ }
+
+ if (opt_mmap)
+#endif
+ {
+ ret = chunk_alloc_mmap(size);
+ if (ret != NULL)
+ goto RETURN;
+ }
+
+ /* All strategies for allocation failed. */
+ ret = NULL;
+RETURN:
+#ifdef MALLOC_STATS
+ if (ret != NULL) {
+ stats_chunks.nchunks += (size / chunksize);
+ stats_chunks.curchunks += (size / chunksize);
+ }
+ if (stats_chunks.curchunks > stats_chunks.highchunks)
+ stats_chunks.highchunks = stats_chunks.curchunks;
+#endif
+
+ assert(CHUNK_ADDR2BASE(ret) == ret);
+ return (ret);
+}
+
+#ifdef MALLOC_DSS
+static extent_node_t *
+chunk_dealloc_dss_record(void *chunk, size_t size)
+{
+ extent_node_t *node, *prev, key;
+
+ key.addr = (void *)((uintptr_t)chunk + size);
+ node = RB_NFIND(extent_tree_ad_s, &dss_chunks_ad, &key);
+ /* Try to coalesce forward. */
+ if (node != NULL && node->addr == key.addr) {
+ /*
+ * Coalesce chunk with the following address range. This does
+ * not change the position within dss_chunks_ad, so only
+ * remove/insert from/into dss_chunks_szad.
+ */
+ RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, node);
+ node->addr = chunk;
+ node->size += size;
+ RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
+ } else {
+ /*
+ * Coalescing forward failed, so insert a new node. Drop
+ * dss_mtx during node allocation, since it is possible that a
+ * new base chunk will be allocated.
+ */
+ malloc_mutex_unlock(&dss_mtx);
+ node = base_node_alloc();
+ malloc_mutex_lock(&dss_mtx);
+ if (node == NULL)
+ return (NULL);
+ node->addr = chunk;
+ node->size = size;
+ RB_INSERT(extent_tree_ad_s, &dss_chunks_ad, node);
+ RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
+ }
+
+ /* Try to coalesce backward. */
+ prev = RB_PREV(extent_tree_ad_s, &dss_chunks_ad, node);
+ if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
+ chunk) {
+ /*
+ * Coalesce chunk with the previous address range. This does
+ * not change the position within dss_chunks_ad, so only
+ * remove/insert node from/into dss_chunks_szad.
+ */
+ RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, prev);
+ RB_REMOVE(extent_tree_ad_s, &dss_chunks_ad, prev);
+
+ RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, node);
+ node->addr = prev->addr;
+ node->size += prev->size;
+ RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
+
+ base_node_dealloc(prev);
+ }
+
+ return (node);
+}
+
+static bool
+chunk_dealloc_dss(void *chunk, size_t size)
+{
+
+ malloc_mutex_lock(&dss_mtx);
+ if ((uintptr_t)chunk >= (uintptr_t)dss_base
+ && (uintptr_t)chunk < (uintptr_t)dss_max) {
+ extent_node_t *node;
+
+ /* Try to coalesce with other unused chunks. */
+ node = chunk_dealloc_dss_record(chunk, size);
+ if (node != NULL) {
+ chunk = node->addr;
+ size = node->size;
+ }
+
+ /* Get the current end of the DSS. */
+ dss_max = sbrk(0);
+
+ /*
+ * Try to shrink the DSS if this chunk is at the end of the
+ * DSS. The sbrk() call here is subject to a race condition
+ * with threads that use brk(2) or sbrk(2) directly, but the
+ * alternative would be to leak memory for the sake of poorly
+ * designed multi-threaded programs.
+ */
+ if ((void *)((uintptr_t)chunk + size) == dss_max
+ && (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
+ /* Success. */
+ dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size);
+
+ if (node != NULL) {
+ RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad,
+ node);
+ RB_REMOVE(extent_tree_ad_s, &dss_chunks_ad,
+ node);
+ base_node_dealloc(node);
+ }
+ malloc_mutex_unlock(&dss_mtx);
+ } else {
+ malloc_mutex_unlock(&dss_mtx);
+#ifdef WIN32
+ VirtualAlloc(chunk, size, MEM_RESET, PAGE_READWRITE);
+#elif (defined(DARWIN))
+ mmap(chunk, size, PROT_READ | PROT_WRITE, MAP_PRIVATE
+ | MAP_ANON | MAP_FIXED, -1, 0);
+#else
+ madvise(chunk, size, MADV_FREE);
+#endif
+ }
+
+ return (false);
+ }
+ malloc_mutex_unlock(&dss_mtx);
+
+ return (true);
+}
+#endif
+
+static void
+chunk_dealloc_mmap(void *chunk, size_t size)
+{
+
+ pages_unmap(chunk, size);
+}
+
+static void
+chunk_dealloc(void *chunk, size_t size)
+{
+
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+
+#ifdef MALLOC_STATS
+ stats_chunks.curchunks -= (size / chunksize);
+#endif
+
+#ifdef MALLOC_DSS
+ if (opt_dss) {
+ if (chunk_dealloc_dss(chunk, size) == false)
+ return;
+ }
+
+ if (opt_mmap)
+#endif
+ chunk_dealloc_mmap(chunk, size);
+}
+
+/*
+ * End chunk management functions.
+ */
+/******************************************************************************/
+/*
+ * Begin arena.
+ */
+
+/*
+ * Choose an arena based on a per-thread value (fast-path code, calls slow-path
+ * code if necessary).
+ */
+static inline arena_t *
+choose_arena(void)
+{
+ arena_t *ret;
+
+ /*
+ * We can only use TLS if this is a PIC library, since for the static
+ * library version, libc's malloc is used by TLS allocation, which
+ * introduces a bootstrapping issue.
+ */
+#ifndef NO_TLS
+ if (g_isthreaded == false) {
+ /* Avoid the overhead of TLS for single-threaded operation. */
+ return (arenas[0]);
+ }
+
+# ifdef WIN32
+ ret = TlsGetValue(tlsIndex);
+# else
+ ret = arenas_map;
+# endif
+
+ if (ret == NULL) {
+ ret = choose_arena_hard();
+ assert(ret != NULL);
+ }
+#else
+ if (g_isthreaded && narenas > 1) {
+ unsigned long ind;
+
+ /*
+ * Hash _pthread_self() to one of the arenas. There is a prime
+ * number of arenas, so this has a reasonable chance of
+ * working. Even so, the hashing can be easily thwarted by
+ * inconvenient _pthread_self() values. Without specific
+ * knowledge of how _pthread_self() calculates values, we can't
+ * easily do much better than this.
+ */
+ ind = (unsigned long) _pthread_self() % narenas;
+
+ /*
+ * Optimistically assume that arenas[ind] has been initialized.
+ * At worst, we find out that some other thread has already
+ * done so, after acquiring the lock in preparation. Note that
+ * this lazy locking also has the effect of lazily forcing
+ * cache coherency; without the lock acquisition, there's no
+ * guarantee that modification of arenas[ind] by another thread
+ * would be seen on this CPU for an arbitrary amount of time.
+ *
+ * In general, this approach to modifying a synchronized value
+ * isn't a good idea, but in this case we only ever modify the
+ * value once, so things work out well.
+ */
+ ret = arenas[ind];
+ if (ret == NULL) {
+ /*
+ * Avoid races with another thread that may have already
+ * initialized arenas[ind].
+ */
+ malloc_spin_lock(&arenas_lock);
+ if (arenas[ind] == NULL)
+ ret = arenas_extend((unsigned)ind);
+ else
+ ret = arenas[ind];
+ malloc_spin_unlock(&arenas_lock);
+ }
+ } else
+ ret = arenas[0];
+#endif
+
+ assert(ret != NULL);
+ return (ret);
+}
+
+#ifndef NO_TLS
+/*
+ * Choose an arena based on a per-thread value (slow-path code only, called
+ * only by choose_arena()).
+ */
+static arena_t *
+choose_arena_hard(void)
+{
+ arena_t *ret;
+
+ assert(g_isthreaded);
+
+#ifdef MALLOC_LAZY_FREE
+ /*
+ * Seed the PRNG used for lazy deallocation. Since seeding only occurs
+ * on the first allocation by a thread, it is possible for a thread to
+ * deallocate before seeding. This is not a critical issue though,
+ * since it is extremely unusual for an application to use threads
+ * that deallocate but *never* allocate, and because even if seeding
+ * never occurs for multiple threads, they will tend to drift apart
+ * unless some aspect of the application forces deallocation
+ * synchronization.
+ */
+ SPRN(lazy_free, (uint32_t)(uintptr_t)(_pthread_self()));
+#endif
+
+#ifdef MALLOC_BALANCE
+ /*
+ * Seed the PRNG used for arena load balancing. We can get away with
+ * using the same seed here as for the lazy_free PRNG without
+ * introducing autocorrelation because the PRNG parameters are
+ * distinct.
+ */
+ SPRN(balance, (uint32_t)(uintptr_t)(_pthread_self()));
+#endif
+
+ if (narenas > 1) {
+#ifdef MALLOC_BALANCE
+ unsigned ind;
+
+ ind = PRN(balance, narenas_2pow);
+ if ((ret = arenas[ind]) == NULL) {
+ malloc_spin_lock(&arenas_lock);
+ if ((ret = arenas[ind]) == NULL)
+ ret = arenas_extend(ind);
+ malloc_spin_unlock(&arenas_lock);
+ }
+#else
+ malloc_spin_lock(&arenas_lock);
+ if ((ret = arenas[next_arena]) == NULL)
+ ret = arenas_extend(next_arena);
+ next_arena = (next_arena + 1) % narenas;
+ malloc_spin_unlock(&arenas_lock);
+#endif
+ } else
+ ret = arenas[0];
+
+#ifdef WIN32
+ TlsSetValue(tlsIndex, ret);
+#else
+ arenas_map = ret;
+#endif
+
+ return (ret);
+}
+#endif
+
+static inline int
+arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
+{
+ uintptr_t a_chunk = (uintptr_t)a;
+ uintptr_t b_chunk = (uintptr_t)b;
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ return ((a_chunk > b_chunk) - (a_chunk < b_chunk));
+}
+
+/* Generate red-black tree code for arena chunks. */
+RB_GENERATE_STATIC(arena_chunk_tree_s, arena_chunk_s, link, arena_chunk_comp)
+
+static inline int
+arena_run_comp(arena_run_t *a, arena_run_t *b)
+{
+ uintptr_t a_run = (uintptr_t)a;
+ uintptr_t b_run = (uintptr_t)b;
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ return ((a_run > b_run) - (a_run < b_run));
+}
+
+/* Generate red-black tree code for arena runs. */
+RB_GENERATE_STATIC(arena_run_tree_s, arena_run_s, link, arena_run_comp)
+
+static extent_node_t *
+arena_chunk_node_alloc(arena_chunk_t *chunk)
+{
+ extent_node_t *ret;
+
+ ret = RB_MIN(extent_tree_ad_s, &chunk->nodes);
+ if (ret != NULL)
+ RB_REMOVE(extent_tree_ad_s, &chunk->nodes, ret);
+ else {
+ ret = chunk->nodes_past;
+ chunk->nodes_past = (extent_node_t *)
+ ((uintptr_t)chunk->nodes_past + sizeof(extent_node_t));
+ assert((uintptr_t)ret + sizeof(extent_node_t) <=
+ (uintptr_t)chunk + (arena_chunk_header_npages <<
+ pagesize_2pow));
+ }
+
+ return (ret);
+}
+
+static void
+arena_chunk_node_dealloc(arena_chunk_t *chunk, extent_node_t *node)
+{
+
+ node->addr = (void *)node;
+ RB_INSERT(extent_tree_ad_s, &chunk->nodes, node);
+}
+
+static inline void *
+arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
+{
+ void *ret;
+ unsigned i, mask, bit, regind;
+
+ assert(run->magic == ARENA_RUN_MAGIC);
+ assert(run->regs_minelm < bin->regs_mask_nelms);
+
+ /*
+ * Move the first check outside the loop, so that run->regs_minelm can
+ * be updated unconditionally, without the possibility of updating it
+ * multiple times.
+ */
+ i = run->regs_minelm;
+ mask = run->regs_mask[i];
+ if (mask != 0) {
+ /* Usable allocation found. */
+ bit = ffs((int)mask) - 1;
+
+ regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
+ assert(regind < bin->nregs);
+ ret = (void *)(((uintptr_t)run) + bin->reg0_offset
+ + (bin->reg_size * regind));
+
+ /* Clear bit. */
+ mask ^= (1U << bit);
+ run->regs_mask[i] = mask;
+
+ return (ret);
+ }
+
+ for (i++; i < bin->regs_mask_nelms; i++) {
+ mask = run->regs_mask[i];
+ if (mask != 0) {
+ /* Usable allocation found. */
+ bit = ffs((int)mask) - 1;
+
+ regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
+ assert(regind < bin->nregs);
+ ret = (void *)(((uintptr_t)run) + bin->reg0_offset
+ + (bin->reg_size * regind));
+
+ /* Clear bit. */
+ mask ^= (1U << bit);
+ run->regs_mask[i] = mask;
+
+ /*
+ * Make a note that nothing before this element
+ * contains a free region.
+ */
+ run->regs_minelm = i; /* Low payoff: + (mask == 0); */
+
+ return (ret);
+ }
+ }
+ /* Not reached. */
+ assert(0);
+ return (NULL);
+}
+
+static inline void
+arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
+{
+ /*
+ * To divide by a number D that is not a power of two we multiply
+ * by (2^21 / D) and then right shift by 21 positions.
+ *
+ * X / D
+ *
+ * becomes
+ *
+ * (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT
+ */
+#define SIZE_INV_SHIFT 21
+#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1)
+ static const unsigned size_invs[] = {
+ SIZE_INV(3),
+ SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
+ SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
+ SIZE_INV(12),SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
+ SIZE_INV(16),SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
+ SIZE_INV(20),SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
+ SIZE_INV(24),SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
+ SIZE_INV(28),SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
+#if (QUANTUM_2POW_MIN < 4)
+ ,
+ SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35),
+ SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39),
+ SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43),
+ SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47),
+ SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51),
+ SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55),
+ SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59),
+ SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63)
+#endif
+ };
+ unsigned diff, regind, elm, bit;
+
+ assert(run->magic == ARENA_RUN_MAGIC);
+ assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3
+ >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
+
+ /*
+ * Avoid doing division with a variable divisor if possible. Using
+ * actual division here can reduce allocator throughput by over 20%!
+ */
+ diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset);
+ if ((size & (size - 1)) == 0) {
+ /*
+ * log2_table allows fast division of a power of two in the
+ * [1..128] range.
+ *
+ * (x / divisor) becomes (x >> log2_table[divisor - 1]).
+ */
+ static const unsigned char log2_table[] = {
+ 0, 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
+ };
+
+ if (size <= 128)
+ regind = (diff >> log2_table[size - 1]);
+ else if (size <= 32768)
+ regind = diff >> (8 + log2_table[(size >> 8) - 1]);
+ else {
+ /*
+ * The run size is too large for us to use the lookup
+ * table. Use real division.
+ */
+ regind = diff / size;
+ }
+ } else if (size <= ((sizeof(size_invs) / sizeof(unsigned))
+ << QUANTUM_2POW_MIN) + 2) {
+ regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
+ regind >>= SIZE_INV_SHIFT;
+ } else {
+ /*
+ * size_invs isn't large enough to handle this size class, so
+ * calculate regind using actual division. This only happens
+ * if the user increases small_max via the 'S' runtime
+ * configuration option.
+ */
+ regind = diff / size;
+ };
+ assert(diff == regind * size);
+ assert(regind < bin->nregs);
+
+ elm = regind >> (SIZEOF_INT_2POW + 3);
+ if (elm < run->regs_minelm)
+ run->regs_minelm = elm;
+ bit = regind - (elm << (SIZEOF_INT_2POW + 3));
+ assert((run->regs_mask[elm] & (1U << bit)) == 0);
+ run->regs_mask[elm] |= (1U << bit);
+#undef SIZE_INV
+#undef SIZE_INV_SHIFT
+}
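arena_run_reg_dalloc() dodges a hardware divide by multiplying with a precomputed reciprocal: for the diffs that can actually occur, X / D equals (X * ((2^21 / D) + 1)) >> 21. A standalone check of that identity, assuming QUANTUM_2POW_MIN is 4 as it is on most platforms:

#include <assert.h>

#define QUANTUM_2POW_MIN 4                /* assumed platform quantum */
#define SIZE_INV_SHIFT 21
#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / ((s) << QUANTUM_2POW_MIN)) + 1)

int
main(void)
{
    unsigned size = 3 << QUANTUM_2POW_MIN; /* 48-byte regions, D = 48 */
    unsigned diff;

    /* Within a run, diff is always a multiple of the region size. */
    for (diff = 0; diff < 32768; diff += size) {
        unsigned regind = (SIZE_INV(3) * diff) >> SIZE_INV_SHIFT;

        assert(regind == diff / size);
    }
    return (0);
}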
+
+static void
+arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool small,
+ bool zero)
+{
+ arena_chunk_t *chunk;
+ size_t run_ind, total_pages, need_pages, rem_pages, i;
+ extent_node_t *nodeA, *nodeB, key;
+
+ /* Insert a node into runs_alloced_ad for the first part of the run. */
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ nodeA = arena_chunk_node_alloc(chunk);
+ nodeA->addr = run;
+ nodeA->size = size;
+ RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeA);
+
+ key.addr = run;
+ nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
+ assert(nodeB != NULL);
+
+ run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
+ >> pagesize_2pow);
+ total_pages = nodeB->size >> pagesize_2pow;
+ need_pages = (size >> pagesize_2pow);
+ assert(need_pages > 0);
+ assert(need_pages <= total_pages);
+ assert(need_pages <= CHUNK_MAP_POS_MASK || small == false);
+ rem_pages = total_pages - need_pages;
+
+ for (i = 0; i < need_pages; i++) {
+#ifdef MALLOC_DECOMMIT
+ /*
+ * Commit decommitted pages if necessary. If a decommitted
+ * page is encountered, commit all needed adjacent decommitted
+ * pages in one operation, in order to reduce system call
+ * overhead.
+ */
+ if (chunk->map[run_ind + i] & CHUNK_MAP_DECOMMITTED) {
+ size_t j;
+
+ /*
+ * Advance i+j to just past the index of the last page
+ * to commit. Clear CHUNK_MAP_DECOMMITTED along the
+ * way.
+ */
+ for (j = 0; i + j < need_pages && (chunk->map[run_ind +
+ i + j] & CHUNK_MAP_DECOMMITTED); j++) {
+ chunk->map[run_ind + i + j] ^=
+ CHUNK_MAP_DECOMMITTED;
+ }
+
+ pages_commit((void *)((uintptr_t)chunk + ((run_ind + i)
+ << pagesize_2pow)), (j << pagesize_2pow));
+# ifdef MALLOC_STATS
+ arena->stats.ncommit++;
+# endif
+ }
+#endif
+
+ /* Zero if necessary. */
+ if (zero) {
+ if ((chunk->map[run_ind + i] & CHUNK_MAP_UNTOUCHED)
+ == 0) {
+ memset((void *)((uintptr_t)chunk + ((run_ind
+ + i) << pagesize_2pow)), 0, pagesize);
+ /* CHUNK_MAP_UNTOUCHED is cleared below. */
+ }
+ }
+
+ /* Update dirty page accounting. */
+ if (chunk->map[run_ind + i] & CHUNK_MAP_DIRTY) {
+ chunk->ndirty--;
+ arena->ndirty--;
+ }
+
+ /* Initialize the chunk map. */
+ if (small)
+ chunk->map[run_ind + i] = (uint8_t)i;
+ else
+ chunk->map[run_ind + i] = CHUNK_MAP_LARGE;
+ }
+
+ /* Keep track of trailing unused pages for later use. */
+ RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+ if (rem_pages > 0) {
+ /*
+ * Update nodeB in runs_avail_*. Its position within
+ * runs_avail_ad does not change.
+ */
+ nodeB->addr = (void *)((uintptr_t)nodeB->addr + size);
+ nodeB->size -= size;
+ RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+ } else {
+ /* Remove nodeB from runs_avail_*. */
+ RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
+ arena_chunk_node_dealloc(chunk, nodeB);
+ }
+
+ chunk->pages_used += need_pages;
+}
+
+static arena_chunk_t *
+arena_chunk_alloc(arena_t *arena)
+{
+ arena_chunk_t *chunk;
+ extent_node_t *node;
+
+ if (arena->spare != NULL) {
+ chunk = arena->spare;
+ arena->spare = NULL;
+ } else {
+ chunk = (arena_chunk_t *)chunk_alloc(chunksize, true);
+ if (chunk == NULL)
+ return (NULL);
+#ifdef MALLOC_STATS
+ arena->stats.mapped += chunksize;
+#endif
+
+ chunk->arena = arena;
+
+ RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
+
+ /*
+ * Claim that no pages are in use, since the header is merely
+ * overhead.
+ */
+ chunk->pages_used = 0;
+ chunk->ndirty = 0;
+
+ /*
+ * Initialize the map to contain one maximal free untouched
+ * run.
+ */
+ memset(chunk->map, (CHUNK_MAP_LARGE | CHUNK_MAP_POS_MASK),
+ arena_chunk_header_npages);
+ memset(&chunk->map[arena_chunk_header_npages],
+ (CHUNK_MAP_UNTOUCHED
+#ifdef MALLOC_DECOMMIT
+ | CHUNK_MAP_DECOMMITTED
+#endif
+ ), (chunk_npages -
+ arena_chunk_header_npages));
+
+ /* Initialize the tree of unused extent nodes. */
+ RB_INIT(&chunk->nodes);
+ chunk->nodes_past = (extent_node_t *)QUANTUM_CEILING(
+ (uintptr_t)&chunk->map[chunk_npages]);
+
+#ifdef MALLOC_DECOMMIT
+ /*
+ * Start out decommitted, in order to force a closer
+ * correspondence between dirty pages and committed untouched
+ * pages.
+ */
+ pages_decommit((void *)((uintptr_t)chunk +
+ (arena_chunk_header_npages << pagesize_2pow)),
+ ((chunk_npages - arena_chunk_header_npages) <<
+ pagesize_2pow));
+# ifdef MALLOC_STATS
+ arena->stats.ndecommit++;
+ arena->stats.decommitted += (chunk_npages -
+ arena_chunk_header_npages);
+# endif
+#endif
+ }
+
+ /* Insert the run into the runs_avail_* red-black trees. */
+ node = arena_chunk_node_alloc(chunk);
+ node->addr = (void *)((uintptr_t)chunk + (arena_chunk_header_npages <<
+ pagesize_2pow));
+ node->size = chunksize - (arena_chunk_header_npages << pagesize_2pow);
+ RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, node);
+ RB_INSERT(extent_tree_ad_s, &arena->runs_avail_ad, node);
+
+ return (chunk);
+}
+
+static void
+arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
+{
+ extent_node_t *node, key;
+
+ if (arena->spare != NULL) {
+ RB_REMOVE(arena_chunk_tree_s, &chunk->arena->chunks,
+ arena->spare);
+ arena->ndirty -= arena->spare->ndirty;
+ chunk_dealloc((void *)arena->spare, chunksize);
+#ifdef MALLOC_STATS
+ arena->stats.mapped -= chunksize;
+#endif
+ }
+
+ /*
+ * Remove run from the runs trees, regardless of whether this chunk
+ * will be cached, so that the arena does not use it. Dirty page
+ * flushing only uses the chunks tree, so leaving this chunk in that
+ * tree is sufficient for that purpose.
+ */
+ key.addr = (void *)((uintptr_t)chunk + (arena_chunk_header_npages <<
+ pagesize_2pow));
+ node = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
+ assert(node != NULL);
+ RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, node);
+ RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, node);
+ arena_chunk_node_dealloc(chunk, node);
+
+ arena->spare = chunk;
+}
+
+static arena_run_t *
+arena_run_alloc(arena_t *arena, size_t size, bool small, bool zero)
+{
+ arena_chunk_t *chunk;
+ arena_run_t *run;
+ extent_node_t *node, key;
+
+ assert(size <= (chunksize - (arena_chunk_header_npages <<
+ pagesize_2pow)));
+ assert((size & pagesize_mask) == 0);
+
+ /* Search the arena's chunks for the lowest best fit. */
+ key.addr = NULL;
+ key.size = size;
+ node = RB_NFIND(extent_tree_szad_s, &arena->runs_avail_szad, &key);
+ if (node != NULL) {
+ run = (arena_run_t *)node->addr;
+ arena_run_split(arena, run, size, small, zero);
+ return (run);
+ }
+
+ /*
+ * No usable runs. Create a new chunk from which to allocate the run.
+ */
+ chunk = arena_chunk_alloc(arena);
+ if (chunk == NULL)
+ return (NULL);
+ run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
+ pagesize_2pow));
+ /* Update page map. */
+ arena_run_split(arena, run, size, small, zero);
+ return (run);
+}
+
+static void
+arena_purge(arena_t *arena)
+{
+ arena_chunk_t *chunk;
+#ifdef MALLOC_DEBUG
+ size_t ndirty;
+
+ ndirty = 0;
+ RB_FOREACH(chunk, arena_chunk_tree_s, &arena->chunks) {
+ ndirty += chunk->ndirty;
+ }
+ assert(ndirty == arena->ndirty);
+#endif
+ assert(arena->ndirty > opt_dirty_max);
+
+#ifdef MALLOC_STATS
+ arena->stats.npurge++;
+#endif
+
+ /*
+ * Iterate downward through chunks until enough dirty memory has been
+ * purged.
+ */
+ RB_FOREACH_REVERSE(chunk, arena_chunk_tree_s, &arena->chunks) {
+ if (chunk->ndirty > 0) {
+ size_t i;
+
+ for (i = chunk_npages - 1; i >=
+ arena_chunk_header_npages; i--) {
+ if (chunk->map[i] & CHUNK_MAP_DIRTY) {
+ size_t npages;
+
+ chunk->map[i] = (CHUNK_MAP_LARGE |
+#ifdef MALLOC_DECOMMIT
+ CHUNK_MAP_DECOMMITTED |
+#endif
+ CHUNK_MAP_POS_MASK);
+ chunk->ndirty--;
+ arena->ndirty--;
+ /* Find adjacent dirty run(s). */
+ for (npages = 1; i >
+ arena_chunk_header_npages &&
+ (chunk->map[i - 1] &
+ CHUNK_MAP_DIRTY); npages++) {
+ i--;
+ chunk->map[i] = (CHUNK_MAP_LARGE
+#ifdef MALLOC_DECOMMIT
+ | CHUNK_MAP_DECOMMITTED
+#endif
+ | CHUNK_MAP_POS_MASK);
+ chunk->ndirty--;
+ arena->ndirty--;
+ }
+
+#ifdef MALLOC_DECOMMIT
+ pages_decommit((void *)((uintptr_t)
+ chunk + (i << pagesize_2pow)),
+ (npages << pagesize_2pow));
+# ifdef MALLOC_STATS
+ arena->stats.ndecommit++;
+ arena->stats.decommitted += npages;
+# endif
+#else
+ madvise((void *)((uintptr_t)chunk + (i
+ << pagesize_2pow)), pagesize *
+ npages, MADV_FREE);
+#endif
+#ifdef MALLOC_STATS
+ arena->stats.nmadvise++;
+ arena->stats.purged += npages;
+#endif
+ }
+ }
+ }
+ }
+}
+
+static void
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
+{
+ arena_chunk_t *chunk;
+ extent_node_t *nodeA, *nodeB, *nodeC, key;
+ size_t size, run_ind, run_pages;
+
+ /* Remove run from runs_alloced_ad. */
+ key.addr = run;
+ nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+ assert(nodeB != NULL);
+ RB_REMOVE(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
+ size = nodeB->size;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
+ >> pagesize_2pow);
+ assert(run_ind >= arena_chunk_header_npages);
+ assert(run_ind < (chunksize >> pagesize_2pow));
+ run_pages = (size >> pagesize_2pow);
+
+ /* Subtract pages from count of pages used in chunk. */
+ chunk->pages_used -= run_pages;
+
+ if (dirty) {
+ size_t i;
+
+ for (i = 0; i < run_pages; i++) {
+ assert((chunk->map[run_ind + i] & CHUNK_MAP_DIRTY) ==
+ 0);
+ chunk->map[run_ind + i] |= CHUNK_MAP_DIRTY;
+ chunk->ndirty++;
+ arena->ndirty++;
+ }
+ }
+#ifdef MALLOC_DEBUG
+ /* Set map elements to a bogus value in order to aid error detection. */
+ {
+ size_t i;
+
+ for (i = 0; i < run_pages; i++) {
+ chunk->map[run_ind + i] |= (CHUNK_MAP_LARGE |
+ CHUNK_MAP_POS_MASK);
+ }
+ }
+#endif
+
+ /* Try to coalesce forward. */
+ key.addr = (void *)((uintptr_t)run + size);
+ nodeC = RB_NFIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
+ if (nodeC != NULL && nodeC->addr == key.addr) {
+ /*
+ * Coalesce forward. This does not change the position within
+ * runs_avail_ad, so only remove/insert from/into
+ * runs_avail_szad.
+ */
+ RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeC);
+ nodeC->addr = (void *)run;
+ nodeC->size += size;
+ RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeC);
+ arena_chunk_node_dealloc(chunk, nodeB);
+ nodeB = nodeC;
+ } else {
+ /*
+ * Coalescing forward failed, so insert nodeB into runs_avail_*.
+ */
+ RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+ RB_INSERT(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
+ }
+
+ /* Try to coalesce backward. */
+ nodeA = RB_PREV(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
+ if (nodeA != NULL && (void *)((uintptr_t)nodeA->addr + nodeA->size) ==
+ (void *)run) {
+ /*
+ * Coalesce with previous run. This does not change nodeB's
+ * position within runs_avail_ad, so only remove/insert
+ * from/into runs_avail_szad.
+ */
+ RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeA);
+ RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, nodeA);
+
+ RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+ nodeB->addr = nodeA->addr;
+ nodeB->size += nodeA->size;
+ RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+
+ arena_chunk_node_dealloc(chunk, nodeA);
+ }
+
+ /* Deallocate chunk if it is now completely unused. */
+ if (chunk->pages_used == 0)
+ arena_chunk_dealloc(arena, chunk);
+
+ /* Enforce opt_dirty_max. */
+ if (arena->ndirty > opt_dirty_max)
+ arena_purge(arena);
+}
+
+static void
+arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, extent_node_t *nodeB,
+ arena_run_t *run, size_t oldsize, size_t newsize)
+{
+ extent_node_t *nodeA;
+
+ assert(nodeB->addr == run);
+ assert(nodeB->size == oldsize);
+ assert(oldsize > newsize);
+
+ /*
+ * Update the run's node in runs_alloced_ad. Its position does not
+ * change.
+ */
+ nodeB->addr = (void *)((uintptr_t)run + (oldsize - newsize));
+ nodeB->size = newsize;
+
+ /*
+ * Insert a node into runs_alloced_ad so that arena_run_dalloc() can
+ * treat the leading run as separately allocated.
+ */
+ nodeA = arena_chunk_node_alloc(chunk);
+ nodeA->addr = (void *)run;
+ nodeA->size = oldsize - newsize;
+ RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeA);
+
+ arena_run_dalloc(arena, (arena_run_t *)run, false);
+}
+
+static void
+arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, extent_node_t *nodeA,
+ arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
+{
+ extent_node_t *nodeB;
+
+ assert(nodeA->addr == run);
+ assert(nodeA->size == oldsize);
+ assert(oldsize > newsize);
+
+ /*
+ * Update the run's node in runs_alloced_ad. Its position does not
+ * change.
+ */
+ nodeA->size = newsize;
+
+ /*
+ * Insert a node into runs_alloced_ad so that arena_run_dalloc() can
+ * treat the trailing run as separately allocated.
+ */
+ nodeB = arena_chunk_node_alloc(chunk);
+ nodeB->addr = (void *)((uintptr_t)run + newsize);
+ nodeB->size = oldsize - newsize;
+ RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
+
+ arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
+ dirty);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
+{
+ arena_run_t *run;
+ unsigned i, remainder;
+
+ /* Look for a usable run. */
+ if ((run = RB_MIN(arena_run_tree_s, &bin->runs)) != NULL) {
+ /* run is guaranteed to have available space. */
+ RB_REMOVE(arena_run_tree_s, &bin->runs, run);
+#ifdef MALLOC_STATS
+ bin->stats.reruns++;
+#endif
+ return (run);
+ }
+ /* No existing runs have any space available. */
+
+ /* Allocate a new run. */
+ run = arena_run_alloc(arena, bin->run_size, true, false);
+ if (run == NULL)
+ return (NULL);
+
+ /* Initialize run internals. */
+ run->bin = bin;
+
+ for (i = 0; i < bin->regs_mask_nelms; i++)
+ run->regs_mask[i] = UINT_MAX;
+ remainder = bin->nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
+ if (remainder != 0) {
+ /* The last element has spare bits that need to be unset. */
+ run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
+ - remainder));
+ }
+
+ run->regs_minelm = 0;
+
+ run->nfree = bin->nregs;
+#ifdef MALLOC_DEBUG
+ run->magic = ARENA_RUN_MAGIC;
+#endif
+
+#ifdef MALLOC_STATS
+ bin->stats.nruns++;
+ bin->stats.curruns++;
+ if (bin->stats.curruns > bin->stats.highruns)
+ bin->stats.highruns = bin->stats.curruns;
+#endif
+ return (run);
+}
+
+/* bin->runcur must have space available before this function is called. */
+static inline void *
+arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
+{
+ void *ret;
+
+ assert(run->magic == ARENA_RUN_MAGIC);
+ assert(run->nfree > 0);
+
+ ret = arena_run_reg_alloc(run, bin);
+ assert(ret != NULL);
+ run->nfree--;
+
+ return (ret);
+}
+
+/* Re-fill bin->runcur, then call arena_bin_malloc_easy(). */
+static void *
+arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
+{
+
+ bin->runcur = arena_bin_nonfull_run_get(arena, bin);
+ if (bin->runcur == NULL)
+ return (NULL);
+ assert(bin->runcur->magic == ARENA_RUN_MAGIC);
+ assert(bin->runcur->nfree > 0);
+
+ return (arena_bin_malloc_easy(arena, bin, bin->runcur));
+}
+
+/*
+ * Calculate bin->run_size such that it meets the following constraints:
+ *
+ * *) bin->run_size >= min_run_size
+ * *) bin->run_size <= arena_maxclass
+ * *) bin->run_size <= RUN_MAX_SMALL
+ * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
+ *
+ * bin->nregs, bin->regs_mask_nelms, and bin->reg0_offset are
+ * also calculated here, since these settings are all interdependent.
+ */
+static size_t
+arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
+{
+ size_t try_run_size, good_run_size;
+ unsigned good_nregs, good_mask_nelms, good_reg0_offset;
+ unsigned try_nregs, try_mask_nelms, try_reg0_offset;
+
+ assert(min_run_size >= pagesize);
+ assert(min_run_size <= arena_maxclass);
+ assert(min_run_size <= RUN_MAX_SMALL);
+
+ /*
+ * Calculate known-valid settings before entering the run_size
+ * expansion loop, so that the first part of the loop always copies
+ * valid settings.
+ *
+ * The do..while loop iteratively reduces the number of regions until
+ * the run header and the regions no longer overlap. A closed formula
+ * would be quite messy, since there is an interdependency between the
+ * header's mask length and the number of regions.
+ */
+ try_run_size = min_run_size;
+ try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin->reg_size)
+ + 1; /* Counter-act try_nregs-- in loop. */
+ do {
+ try_nregs--;
+ try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
+ ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
+ try_reg0_offset = try_run_size - (try_nregs * bin->reg_size);
+ } while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1))
+ > try_reg0_offset);
+
+ /* run_size expansion loop. */
+ do {
+ /*
+ * Copy valid settings before trying more aggressive settings.
+ */
+ good_run_size = try_run_size;
+ good_nregs = try_nregs;
+ good_mask_nelms = try_mask_nelms;
+ good_reg0_offset = try_reg0_offset;
+
+ /* Try more aggressive settings. */
+ try_run_size += pagesize;
+ try_nregs = ((try_run_size - sizeof(arena_run_t)) /
+ bin->reg_size) + 1; /* Counter-act try_nregs-- in loop. */
+ do {
+ try_nregs--;
+ try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
+ ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ?
+ 1 : 0);
+ try_reg0_offset = try_run_size - (try_nregs *
+ bin->reg_size);
+ } while (sizeof(arena_run_t) + (sizeof(unsigned) *
+ (try_mask_nelms - 1)) > try_reg0_offset);
+ } while (try_run_size <= arena_maxclass && try_run_size <= RUN_MAX_SMALL
+ && RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX
+ && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size);
+
+ assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
+ <= good_reg0_offset);
+ assert((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
+
+ /* Copy final settings. */
+ bin->run_size = good_run_size;
+ bin->nregs = good_nregs;
+ bin->regs_mask_nelms = good_mask_nelms;
+ bin->reg0_offset = good_reg0_offset;
+
+ return (good_run_size);
+}
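The mask-element arithmetic above packs one region bit per bit of an unsigned int, so a run with nregs regions needs ceil(nregs / 32) mask words when ints are 32 bits wide, i.e. SIZEOF_INT_2POW == 2, which the following spot check assumes:

#include <assert.h>

#define SIZEOF_INT_2POW 2                 /* assumed 32-bit unsigned int */

int
main(void)
{
    unsigned try_nregs = 100;
    unsigned try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
        ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);

    /* 100 regions -> 3 full 32-bit mask words plus one partial word. */
    assert(try_mask_nelms == 4);
    return (0);
}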
+
+#ifdef MALLOC_BALANCE
+static inline void
+arena_lock_balance(arena_t *arena)
+{
+ unsigned contention;
+
+ contention = malloc_spin_lock(&arena->lock);
+ if (narenas > 1) {
+ /*
+ * Calculate the exponentially averaged contention for this
+ * arena. Due to integer math always rounding down, this value
+ * decays somewhat faster than normal.
+ */
+ arena->contention = (((uint64_t)arena->contention
+ * (uint64_t)((1U << BALANCE_ALPHA_INV_2POW)-1))
+ + (uint64_t)contention) >> BALANCE_ALPHA_INV_2POW;
+ if (arena->contention >= opt_balance_threshold)
+ arena_lock_balance_hard(arena);
+ }
+}
+
+static void
+arena_lock_balance_hard(arena_t *arena)
+{
+ uint32_t ind;
+
+ arena->contention = 0;
+#ifdef MALLOC_STATS
+ arena->stats.nbalance++;
+#endif
+ ind = PRN(balance, narenas_2pow);
+ if (arenas[ind] != NULL) {
+#ifdef WIN32
+ TlsSetValue(tlsIndex, arenas[ind]);
+#else
+ arenas_map = arenas[ind];
+#endif
+ } else {
+ malloc_spin_lock(&arenas_lock);
+ if (arenas[ind] != NULL) {
+#ifdef WIN32
+ TlsSetValue(tlsIndex, arenas[ind]);
+#else
+ arenas_map = arenas[ind];
+#endif
+ } else {
+#ifdef WIN32
+ TlsSetValue(tlsIndex, arenas_extend(ind));
+#else
+ arenas_map = arenas_extend(ind);
+#endif
+ }
+ malloc_spin_unlock(&arenas_lock);
+ }
+}
+#endif
+
+static inline void *
+arena_malloc_small(arena_t *arena, size_t size, bool zero)
+{
+ void *ret;
+ arena_bin_t *bin;
+ arena_run_t *run;
+
+ if (size < small_min) {
+ /* Tiny. */
+ size = pow2_ceil(size);
+ bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
+ 1)))];
+#if (!defined(NDEBUG) || defined(MALLOC_STATS))
+ /*
+ * Bin calculation is always correct, but we may need
+ * to fix size for the purposes of assertions and/or
+ * stats accuracy.
+ */
+ if (size < (1U << TINY_MIN_2POW))
+ size = (1U << TINY_MIN_2POW);
+#endif
+ } else if (size <= small_max) {
+ /* Quantum-spaced. */
+ size = QUANTUM_CEILING(size);
+ bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
+ - 1];
+ } else {
+ /* Sub-page. */
+ size = pow2_ceil(size);
+ bin = &arena->bins[ntbins + nqbins
+ + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
+ }
+ assert(size == bin->reg_size);
+
+#ifdef MALLOC_BALANCE
+ arena_lock_balance(arena);
+#else
+ malloc_spin_lock(&arena->lock);
+#endif
+ if ((run = bin->runcur) != NULL && run->nfree > 0)
+ ret = arena_bin_malloc_easy(arena, bin, run);
+ else
+ ret = arena_bin_malloc_hard(arena, bin);
+
+ if (ret == NULL) {
+ malloc_spin_unlock(&arena->lock);
+ return (NULL);
+ }
+
+#ifdef MALLOC_STATS
+ bin->stats.nrequests++;
+ arena->stats.nmalloc_small++;
+ arena->stats.allocated_small += size;
+#endif
+ malloc_spin_unlock(&arena->lock);
+
+ if (zero == false) {
+#ifdef MALLOC_FILL
+ if (opt_junk)
+ memset(ret, 0xa5, size);
+ else if (opt_zero)
+ memset(ret, 0, size);
+#endif
+ } else
+ memset(ret, 0, size);
+
+ return (ret);
+}
+
+static void *
+arena_malloc_large(arena_t *arena, size_t size, bool zero)
+{
+ void *ret;
+
+ /* Large allocation. */
+ size = PAGE_CEILING(size);
+#ifdef MALLOC_BALANCE
+ arena_lock_balance(arena);
+#else
+ malloc_spin_lock(&arena->lock);
+#endif
+ ret = (void *)arena_run_alloc(arena, size, false, zero);
+ if (ret == NULL) {
+ malloc_spin_unlock(&arena->lock);
+ return (NULL);
+ }
+#ifdef MALLOC_STATS
+ arena->stats.nmalloc_large++;
+ arena->stats.allocated_large += size;
+#endif
+ malloc_spin_unlock(&arena->lock);
+
+ if (zero == false) {
+#ifdef MALLOC_FILL
+ if (opt_junk)
+ memset(ret, 0xa5, size);
+ else if (opt_zero)
+ memset(ret, 0, size);
+#endif
+ }
+
+ return (ret);
+}
+
+static inline void *
+arena_malloc(arena_t *arena, size_t size, bool zero)
+{
+
+ assert(arena != NULL);
+ assert(arena->magic == ARENA_MAGIC);
+ assert(size != 0);
+ assert(QUANTUM_CEILING(size) <= arena_maxclass);
+
+/* #ifdef USE_STATS_MEMORY */
+/* arena->mi.uordblks += size; */
+/* #endif */
+ if (size <= bin_maxclass) {
+ return (arena_malloc_small(arena, size, zero));
+ } else
+ return (arena_malloc_large(arena, size, zero));
+}
+
+static inline void *
+imalloc(size_t size)
+{
+
+ assert(size != 0);
+ if (size <= arena_maxclass)
+ return (arena_malloc(choose_arena(), size, false));
+ else
+ return (huge_malloc(size, false));
+}
+
+static inline void *
+icalloc(size_t size)
+{
+ if (size <= arena_maxclass)
+ return (arena_malloc(choose_arena(), size, true));
+ else
+ return (huge_malloc(size, true));
+}
+
+/* Only handles large allocations that require more than page alignment. */
+static void *
+arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
+{
+ void *ret;
+ size_t offset;
+ arena_chunk_t *chunk;
+ extent_node_t *node, key;
+
+ assert((size & pagesize_mask) == 0);
+ assert((alignment & pagesize_mask) == 0);
+
+#ifdef MALLOC_BALANCE
+ arena_lock_balance(arena);
+#else
+ malloc_spin_lock(&arena->lock);
+#endif
+ ret = (void *)arena_run_alloc(arena, alloc_size, false, false);
+ if (ret == NULL) {
+ malloc_spin_unlock(&arena->lock);
+ return (NULL);
+ }
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
+
+ offset = (uintptr_t)ret & (alignment - 1);
+ assert((offset & pagesize_mask) == 0);
+ assert(offset < alloc_size);
+ if (offset == 0) {
+ /*
+ * Update the run's node in runs_alloced_ad. Its position
+ * does not change.
+ */
+ key.addr = ret;
+ node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+ assert(node != NULL);
+
+ arena_run_trim_tail(arena, chunk, node, ret, alloc_size, size,
+ false);
+ } else {
+ size_t leadsize, trailsize;
+
+ /*
+ * Update the run's node in runs_alloced_ad. Its position
+ * does not change.
+ */
+ key.addr = ret;
+ node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+ assert(node != NULL);
+
+ leadsize = alignment - offset;
+ if (leadsize > 0) {
+ arena_run_trim_head(arena, chunk, node, ret, alloc_size,
+ alloc_size - leadsize);
+ ret = (void *)((uintptr_t)ret + leadsize);
+ }
+
+ trailsize = alloc_size - leadsize - size;
+ if (trailsize != 0) {
+ /* Trim trailing space. */
+ assert(trailsize < alloc_size);
+ arena_run_trim_tail(arena, chunk, node, ret, size +
+ trailsize, size, false);
+ }
+ }
+
+#ifdef MALLOC_STATS
+ arena->stats.nmalloc_large++;
+ arena->stats.allocated_large += size;
+#endif
+ malloc_spin_unlock(&arena->lock);
+
+#ifdef MALLOC_FILL
+ if (opt_junk)
+ memset(ret, 0xa5, size);
+ else if (opt_zero)
+ memset(ret, 0, size);
+#endif
+ return (ret);
+}
+
+static inline void *
+ipalloc(size_t alignment, size_t size)
+{
+ void *ret;
+ size_t ceil_size;
+
+ /*
+ * Round size up to the nearest multiple of alignment.
+ *
+ * This done, we can take advantage of the fact that for each small
+ * size class, every object is aligned at the smallest power of two
+ * that is non-zero in the base two representation of the size. For
+ * example:
+ *
+ * Size | Base 2 | Minimum alignment
+ * -----+----------+------------------
+ * 96 | 1100000 | 32
+ * 144 | 10100000 | 32
+ * 192 | 11000000 | 64
+ *
+ * Depending on runtime settings, it is possible that arena_malloc()
+ * will further round up to a power of two, but that never causes
+ * correctness issues.
+ */
+ ceil_size = (size + (alignment - 1)) & (-alignment);
+ /*
+ * (ceil_size < size) protects against the combination of maximal
+ * alignment and size greater than maximal alignment.
+ */
+ if (ceil_size < size) {
+ /* size_t overflow. */
+ return (NULL);
+ }
+
+ if (ceil_size <= pagesize || (alignment <= pagesize
+ && ceil_size <= arena_maxclass))
+ ret = arena_malloc(choose_arena(), ceil_size, false);
+ else {
+ size_t run_size;
+
+ /*
+ * We can't achieve sub-page alignment, so round up alignment
+ * permanently; it makes later calculations simpler.
+ */
+ alignment = PAGE_CEILING(alignment);
+ ceil_size = PAGE_CEILING(size);
+ /*
+ * (ceil_size < size) protects against very large sizes within
+ * pagesize of SIZE_T_MAX.
+ *
+ * (ceil_size + alignment < ceil_size) protects against the
+ * combination of maximal alignment and ceil_size large enough
+ * to cause overflow. This is similar to the first overflow
+ * check above, but it needs to be repeated due to the new
+ * ceil_size value, which may now be *equal* to maximal
+ * alignment, whereas before we only detected overflow if the
+ * original size was *greater* than maximal alignment.
+ */
+ if (ceil_size < size || ceil_size + alignment < ceil_size) {
+ /* size_t overflow. */
+ return (NULL);
+ }
+
+ /*
+ * Calculate the size of the over-size run that arena_palloc()
+ * would need to allocate in order to guarantee the alignment.
+ */
+ if (ceil_size >= alignment)
+ run_size = ceil_size + alignment - pagesize;
+ else {
+ /*
+ * It is possible that (alignment << 1) will cause
+ * overflow, but it doesn't matter because we also
+ * subtract pagesize, which in the case of overflow
+ * leaves us with a very large run_size. That causes
+ * the first conditional below to fail, which means
+ * that the bogus run_size value never gets used for
+ * anything important.
+ */
+ run_size = (alignment << 1) - pagesize;
+ }
+
+ if (run_size <= arena_maxclass) {
+ ret = arena_palloc(choose_arena(), alignment, ceil_size,
+ run_size);
+ } else if (alignment <= chunksize)
+ ret = huge_malloc(ceil_size, false);
+ else
+ ret = huge_palloc(alignment, ceil_size);
+ }
+
+ assert(((uintptr_t)ret & (alignment - 1)) == 0);
+ return (ret);
+}
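The first step of ipalloc() rounds the request up to a multiple of the (power-of-two) alignment with (size + (alignment - 1)) & -alignment, and relies on (ceil_size < size) to catch overflow. A few spot checks of that expression (the helper name is invented for the sketch):

#include <assert.h>
#include <stddef.h>

/* Round size up to the next multiple of a power-of-two alignment. */
static size_t
ceil_to_alignment(size_t size, size_t alignment)
{
    return ((size + (alignment - 1)) & -alignment);
}

int
main(void)
{
    assert(ceil_to_alignment(100, 32) == 128);
    assert(ceil_to_alignment(128, 32) == 128);  /* already a multiple */
    assert(ceil_to_alignment(1, 4096) == 4096); /* sub-page request */
    /* Overflow wraps around, which ipalloc() detects via ceil_size < size. */
    assert(ceil_to_alignment((size_t)-1, 4096) < (size_t)-1);
    return (0);
}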
+
+/* Return the size of the allocation pointed to by ptr. */
+static size_t
+arena_salloc(const void *ptr)
+{
+ size_t ret;
+ arena_chunk_t *chunk;
+ arena_chunk_map_t mapelm;
+ size_t pageind;
+
+ assert(ptr != NULL);
+ assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
+ mapelm = chunk->map[pageind];
+ if ((mapelm & CHUNK_MAP_LARGE) == 0) {
+ arena_run_t *run;
+
+ /* Small allocation size is in the run header. */
+ pageind -= (mapelm & CHUNK_MAP_POS_MASK);
+ run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
+ pagesize_2pow));
+ assert(run->magic == ARENA_RUN_MAGIC);
+ ret = run->bin->reg_size;
+ } else {
+ arena_t *arena = chunk->arena;
+ extent_node_t *node, key;
+
+ /* Large allocation size is in the extent tree. */
+ assert((mapelm & CHUNK_MAP_POS_MASK) == 0);
+ arena = chunk->arena;
+ malloc_spin_lock(&arena->lock);
+ key.addr = (void *)ptr;
+ node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+ assert(node != NULL);
+ ret = node->size;
+ malloc_spin_unlock(&arena->lock);
+ }
+
+ return (ret);
+}
+
+static inline size_t
+isalloc(const void *ptr)
+{
+ size_t ret;
+ arena_chunk_t *chunk;
+
+ assert(ptr != NULL);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (chunk != ptr) {
+ /* Region. */
+ assert(chunk->arena->magic == ARENA_MAGIC);
+
+ ret = arena_salloc(ptr);
+ } else {
+ extent_node_t *node, key;
+
+ /* Chunk (huge allocation). */
+
+ malloc_mutex_lock(&huge_mtx);
+
+ /* Extract from tree of huge allocations. */
+ key.addr = __DECONST(void *, ptr);
+ node = RB_FIND(extent_tree_ad_s, &huge, &key);
+ assert(node != NULL);
+
+ ret = node->size;
+
+ malloc_mutex_unlock(&huge_mtx);
+ }
+
+ return (ret);
+}
+
+static inline void
+arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t pageind, arena_chunk_map_t mapelm)
+{
+ arena_run_t *run;
+ arena_bin_t *bin;
+ size_t size;
+
+ pageind -= (mapelm & CHUNK_MAP_POS_MASK);
+
+ run = (arena_run_t *)((uintptr_t)chunk + (pageind << pagesize_2pow));
+ assert(run->magic == ARENA_RUN_MAGIC);
+ bin = run->bin;
+ size = bin->reg_size;
+
+/* #ifdef USE_STATS_MEMORY */
+/* arena->mi.fordblks += size; */
+/* #endif */
+#ifdef MALLOC_FILL
+ if (opt_junk)
+ memset(ptr, 0x5a, size);
+#endif
+
+ arena_run_reg_dalloc(run, bin, ptr, size);
+ run->nfree++;
+
+ if (run->nfree == bin->nregs) {
+ /* Deallocate run. */
+ if (run == bin->runcur)
+ bin->runcur = NULL;
+ else if (bin->nregs != 1) {
+ /*
+ * This block's conditional is necessary because if the
+ * run only contains one region, then it never gets
+ * inserted into the non-full runs tree.
+ */
+ RB_REMOVE(arena_run_tree_s, &bin->runs, run);
+ }
+#ifdef MALLOC_DEBUG
+ run->magic = 0;
+#endif
+ arena_run_dalloc(arena, run, true);
+#ifdef MALLOC_STATS
+ bin->stats.curruns--;
+#endif
+ } else if (run->nfree == 1 && run != bin->runcur) {
+ /*
+ * Make sure that bin->runcur always refers to the lowest
+ * non-full run, if one exists.
+ */
+ if (bin->runcur == NULL)
+ bin->runcur = run;
+ else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
+ /* Switch runcur. */
+ if (bin->runcur->nfree > 0) {
+ /* Insert runcur. */
+ RB_INSERT(arena_run_tree_s, &bin->runs,
+ bin->runcur);
+ }
+ bin->runcur = run;
+ } else
+ RB_INSERT(arena_run_tree_s, &bin->runs, run);
+ }
+#ifdef MALLOC_STATS
+ arena->stats.allocated_small -= size;
+ arena->stats.ndalloc_small++;
+#endif
+}
+
+#ifdef MALLOC_LAZY_FREE
+static inline void
+arena_dalloc_lazy(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t pageind, arena_chunk_map_t *mapelm)
+{
+ void **free_cache = arena->free_cache;
+ unsigned i, slot;
+
+ if (g_isthreaded == false || opt_lazy_free_2pow < 0) {
+ malloc_spin_lock(&arena->lock);
+ arena_dalloc_small(arena, chunk, ptr, pageind, *mapelm);
+ malloc_spin_unlock(&arena->lock);
+ return;
+ }
+
+ for (i = 0; i < LAZY_FREE_NPROBES; i++) {
+ slot = PRN(lazy_free, opt_lazy_free_2pow);
+ if (atomic_cmpset_ptr((uintptr_t *)&free_cache[slot],
+ (uintptr_t)NULL, (uintptr_t)ptr)) {
+ return;
+ }
+ }
+
+ arena_dalloc_lazy_hard(arena, chunk, ptr, pageind, mapelm);
+}
+
+static void
+arena_dalloc_lazy_hard(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t pageind, arena_chunk_map_t *mapelm)
+{
+ void **free_cache = arena->free_cache;
+ unsigned i, slot;
+
+ malloc_spin_lock(&arena->lock);
+ arena_dalloc_small(arena, chunk, ptr, pageind, *mapelm);
+
+ /*
+ * Check whether another thread already cleared the cache. It is
+ * possible that another thread cleared the cache *and* this slot was
+ * already refilled, which could result in a mostly fruitless cache
+ * sweep, but such a sequence of events causes no correctness issues.
+ */
+ if ((ptr = (void *)atomic_readandclear_ptr(
+ (uintptr_t *)&free_cache[slot]))
+ != NULL) {
+ unsigned lazy_free_mask;
+
+ /*
+ * Clear the cache, since we failed to find a slot. It is
+ * possible that other threads will continue to insert objects
+ * into the cache while this one sweeps, but that is okay,
+ * since on average the cache is still swept with the same
+ * frequency.
+ */
+
+ /* Handle pointer at current slot. */
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >>
+ pagesize_2pow);
+ mapelm = &chunk->map[pageind];
+ arena_dalloc_small(arena, chunk, ptr, pageind, *mapelm);
+
+ /* Sweep remainder of slots. */
+ lazy_free_mask = (1U << opt_lazy_free_2pow) - 1;
+ for (i = (slot + 1) & lazy_free_mask;
+ i != slot;
+ i = (i + 1) & lazy_free_mask) {
+ ptr = (void *)atomic_readandclear_ptr(
+ (uintptr_t *)&free_cache[i]);
+ if (ptr != NULL) {
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ pageind = (((uintptr_t)ptr - (uintptr_t)chunk)
+ >> pagesize_2pow);
+ mapelm = &chunk->map[pageind];
+ arena_dalloc_small(arena, chunk, ptr, pageind,
+ *mapelm);
+ }
+ }
+ }
+
+ malloc_spin_unlock(&arena->lock);
+}
+#endif
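
The lazy-free path above amounts to: probe a few random slots of a per-arena pointer cache with compare-and-swap, and fall back to taking the arena lock (and sweeping the cache) only when every probe finds an occupied slot. A minimal sketch of the lock-free part, using C11 atomics and a fixed 16-slot cache in place of the real atomic_cmpset_ptr()/PRN() machinery (assumptions, not the patch's code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    #define NSLOTS  16
    #define NPROBES 2

    static _Atomic(void *) free_cache[NSLOTS];

    /* Returns true if ptr was parked in the cache without taking any lock. */
    static bool
    lazy_free_try(void *ptr)
    {
        for (unsigned i = 0; i < NPROBES; i++) {
            unsigned slot = (unsigned)rand() % NSLOTS;  /* stand-in for PRN() */
            void *expected = NULL;

            if (atomic_compare_exchange_strong(&free_cache[slot],
                &expected, ptr))
                return true;   /* a later sweep will actually free it */
        }
        return false;          /* caller must lock the arena and free now */
    }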
+
+static void
+arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+{
+ /* Large allocation. */
+ malloc_spin_lock(&arena->lock);
+
+#ifdef MALLOC_FILL
+#ifndef MALLOC_STATS
+ if (opt_junk)
+#endif
+#endif
+ {
+ extent_node_t *node, key;
+ size_t size;
+
+ key.addr = ptr;
+ node = RB_FIND(extent_tree_ad_s,
+ &arena->runs_alloced_ad, &key);
+ assert(node != NULL);
+ size = node->size;
+#ifdef MALLOC_FILL
+#ifdef MALLOC_STATS
+ if (opt_junk)
+#endif
+ memset(ptr, 0x5a, size);
+#endif
+/* #ifdef USE_STATS_MEMORY */
+/* arena->mi.fordblks += size; */
+/* #endif */
+#ifdef MALLOC_STATS
+ arena->stats.allocated_large -= size;
+#endif
+ }
+#ifdef MALLOC_STATS
+ arena->stats.ndalloc_large++;
+#endif
+
+ arena_run_dalloc(arena, (arena_run_t *)ptr, true);
+ malloc_spin_unlock(&arena->lock);
+}
+
+static inline void
+arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+{
+ size_t pageind;
+ arena_chunk_map_t *mapelm;
+
+ assert(arena != NULL);
+ assert(arena->magic == ARENA_MAGIC);
+ assert(chunk->arena == arena);
+ assert(ptr != NULL);
+ assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+ pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
+ mapelm = &chunk->map[pageind];
+ if ((*mapelm & CHUNK_MAP_LARGE) == 0) {
+ /* Small allocation. */
+#ifdef MALLOC_LAZY_FREE
+ arena_dalloc_lazy(arena, chunk, ptr, pageind, mapelm);
+#else
+ malloc_spin_lock(&arena->lock);
+ arena_dalloc_small(arena, chunk, ptr, pageind, *mapelm);
+ malloc_spin_unlock(&arena->lock);
+#endif
+ } else {
+ assert((*mapelm & CHUNK_MAP_POS_MASK) == 0);
+ arena_dalloc_large(arena, chunk, ptr);
+ }
+}
+
+static inline void
+idalloc(void *ptr)
+{
+ arena_chunk_t *chunk;
+
+ assert(ptr != NULL);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (chunk != ptr)
+ arena_dalloc(chunk->arena, chunk, ptr);
+ else
+ huge_dalloc(ptr);
+}
+
+static void
+arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t size, size_t oldsize)
+{
+ extent_node_t *node, key;
+
+ assert(size < oldsize);
+
+ /*
+ * Shrink the run, and make trailing pages available for other
+ * allocations.
+ */
+ key.addr = (void *)((uintptr_t)ptr);
+#ifdef MALLOC_BALANCE
+ arena_lock_balance(arena);
+#else
+ malloc_spin_lock(&arena->lock);
+#endif
+ node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+ assert(node != NULL);
+ arena_run_trim_tail(arena, chunk, node, (arena_run_t *)ptr, oldsize,
+ size, true);
+#ifdef MALLOC_STATS
+ arena->stats.allocated_large -= oldsize - size;
+#endif
+ malloc_spin_unlock(&arena->lock);
+}
+
+static bool
+arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t size, size_t oldsize)
+{
+ extent_node_t *nodeC, key;
+
+ /* Try to extend the run. */
+ assert(size > oldsize);
+ key.addr = (void *)((uintptr_t)ptr + oldsize);
+#ifdef MALLOC_BALANCE
+ arena_lock_balance(arena);
+#else
+ malloc_spin_lock(&arena->lock);
+#endif
+ nodeC = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
+ if (nodeC != NULL && oldsize + nodeC->size >= size) {
+ extent_node_t *nodeA, *nodeB;
+
+ /*
+ * The next run is available and sufficiently large. Split the
+ * following run, then merge the first part with the existing
+ * allocation. This results in a bit more tree manipulation
+ * than absolutely necessary, but it substantially simplifies
+ * the code.
+ */
+ arena_run_split(arena, (arena_run_t *)nodeC->addr, size -
+ oldsize, false, false);
+
+ key.addr = ptr;
+ nodeA = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad,
+ &key);
+ assert(nodeA != NULL);
+
+ key.addr = (void *)((uintptr_t)ptr + oldsize);
+ nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad,
+ &key);
+ assert(nodeB != NULL);
+
+ nodeA->size += nodeB->size;
+
+ RB_REMOVE(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
+ arena_chunk_node_dealloc(chunk, nodeB);
+
+#ifdef MALLOC_STATS
+ arena->stats.allocated_large += size - oldsize;
+#endif
+ malloc_spin_unlock(&arena->lock);
+ return (false);
+ }
+ malloc_spin_unlock(&arena->lock);
+
+ return (true);
+}
+
+/*
+ * Try to resize a large allocation, in order to avoid copying. This will
+ * always fail if growing an object and the following run is already in use.
+ */
+static bool
+arena_ralloc_large(void *ptr, size_t size, size_t oldsize)
+{
+ size_t psize;
+
+ psize = PAGE_CEILING(size);
+ if (psize == oldsize) {
+ /* Same size class. */
+#ifdef MALLOC_FILL
+ if (opt_junk && size < oldsize) {
+ memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
+ size);
+ }
+#endif
+ return (false);
+ } else {
+ arena_chunk_t *chunk;
+ arena_t *arena;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ arena = chunk->arena;
+ assert(arena->magic == ARENA_MAGIC);
+
+ if (psize < oldsize) {
+#ifdef MALLOC_FILL
+ /* Fill before shrinking in order to avoid a race. */
+ if (opt_junk) {
+ memset((void *)((uintptr_t)ptr + size), 0x5a,
+ oldsize - size);
+ }
+#endif
+ arena_ralloc_large_shrink(arena, chunk, ptr, psize,
+ oldsize);
+ return (false);
+ } else {
+ bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
+ psize, oldsize);
+#ifdef MALLOC_FILL
+ if (ret == false && opt_zero) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ size - oldsize);
+ }
+#endif
+ return (ret);
+ }
+ }
+}
+
+static void *
+arena_ralloc(void *ptr, size_t size, size_t oldsize)
+{
+ void *ret;
+ size_t copysize;
+
+ /* Try to avoid moving the allocation. */
+ if (size < small_min) {
+ if (oldsize < small_min &&
+ ffs((int)(pow2_ceil(size) >> (TINY_MIN_2POW + 1)))
+ == ffs((int)(pow2_ceil(oldsize) >> (TINY_MIN_2POW + 1))))
+ goto IN_PLACE; /* Same size class. */
+ } else if (size <= small_max) {
+ if (oldsize >= small_min && oldsize <= small_max &&
+ (QUANTUM_CEILING(size) >> opt_quantum_2pow)
+ == (QUANTUM_CEILING(oldsize) >> opt_quantum_2pow))
+ goto IN_PLACE; /* Same size class. */
+ } else if (size <= bin_maxclass) {
+ if (oldsize > small_max && oldsize <= bin_maxclass &&
+ pow2_ceil(size) == pow2_ceil(oldsize))
+ goto IN_PLACE; /* Same size class. */
+ } else if (oldsize > bin_maxclass && oldsize <= arena_maxclass) {
+ assert(size > bin_maxclass);
+ if (arena_ralloc_large(ptr, size, oldsize) == false)
+ return (ptr);
+ }
+
+ /*
+ * If we get here, then size and oldsize are different enough that we
+ * need to move the object. In that case, fall back to allocating new
+ * space and copying.
+ */
+ ret = arena_malloc(choose_arena(), size, false);
+ if (ret == NULL)
+ return (NULL);
+
+ /* Junk/zero-filling were already done by arena_malloc(). */
+ copysize = (size < oldsize) ? size : oldsize;
+#ifdef VM_COPY_MIN
+ if (copysize >= VM_COPY_MIN)
+ pages_copy(ret, ptr, copysize);
+ else
+#endif
+ memcpy(ret, ptr, copysize);
+ idalloc(ptr);
+ return (ret);
+IN_PLACE:
+#ifdef MALLOC_FILL
+ if (opt_junk && size < oldsize)
+ memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size);
+ else if (opt_zero && size > oldsize)
+ memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
+#endif
+ return (ptr);
+}
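
arena_ralloc() only moves an allocation when the requested size lands in a different size class; for quantum-spaced sizes that test reduces to comparing the quantum-rounded sizes. A small sketch of the same idea (assuming a 16-byte quantum, i.e. opt_quantum_2pow == 4; the EXAMPLE_* names are made up):

    #include <stddef.h>

    #define EXAMPLE_QUANTUM_2POW    4
    #define EXAMPLE_QUANTUM         ((size_t)1 << EXAMPLE_QUANTUM_2POW)
    #define EXAMPLE_QUANTUM_CEILING(s) \
        (((s) + EXAMPLE_QUANTUM - 1) & ~(EXAMPLE_QUANTUM - 1))

    static int
    same_quantum_class(size_t size, size_t oldsize)
    {
        return (EXAMPLE_QUANTUM_CEILING(size) >> EXAMPLE_QUANTUM_2POW) ==
            (EXAMPLE_QUANTUM_CEILING(oldsize) >> EXAMPLE_QUANTUM_2POW);
    }

    /*
     * same_quantum_class(33, 40) is non-zero: both round up to 48 bytes, so a
     * realloc() from 40 down to 33 bytes can return the original pointer.
     */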
+
+static inline void *
+iralloc(void *ptr, size_t size)
+{
+ size_t oldsize;
+
+ assert(ptr != NULL);
+ assert(size != 0);
+
+ oldsize = isalloc(ptr);
+
+ if (size <= arena_maxclass)
+ return (arena_ralloc(ptr, size, oldsize));
+ else
+ return (huge_ralloc(ptr, size, oldsize));
+}
+
+static bool
+arena_new(arena_t *arena)
+{
+ unsigned i;
+ arena_bin_t *bin;
+ size_t pow2_size, prev_run_size;
+
+ if (malloc_spin_init(&arena->lock))
+ return (true);
+
+#ifdef MALLOC_STATS
+ memset(&arena->stats, 0, sizeof(arena_stats_t));
+#endif
+
+ /* Initialize chunks. */
+ RB_INIT(&arena->chunks);
+ arena->spare = NULL;
+
+ arena->ndirty = 0;
+
+ RB_INIT(&arena->runs_avail_szad);
+ RB_INIT(&arena->runs_avail_ad);
+ RB_INIT(&arena->runs_alloced_ad);
+
+#ifdef MALLOC_BALANCE
+ arena->contention = 0;
+#endif
+#ifdef MALLOC_LAZY_FREE
+ if (opt_lazy_free_2pow >= 0) {
+ arena->free_cache = (void **) base_calloc(1, sizeof(void *)
+ * (1U << opt_lazy_free_2pow));
+ if (arena->free_cache == NULL)
+ return (true);
+ } else
+ arena->free_cache = NULL;
+#endif
+
+ /* Initialize bins. */
+ prev_run_size = pagesize;
+
+ /* (2^n)-spaced tiny bins. */
+ for (i = 0; i < ntbins; i++) {
+ bin = &arena->bins[i];
+ bin->runcur = NULL;
+ RB_INIT(&bin->runs);
+
+ bin->reg_size = (1U << (TINY_MIN_2POW + i));
+
+ prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+
+#ifdef MALLOC_STATS
+ memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+#endif
+ }
+
+ /* Quantum-spaced bins. */
+ for (; i < ntbins + nqbins; i++) {
+ bin = &arena->bins[i];
+ bin->runcur = NULL;
+ RB_INIT(&bin->runs);
+
+ bin->reg_size = quantum * (i - ntbins + 1);
+
+ pow2_size = pow2_ceil(quantum * (i - ntbins + 1));
+ prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+
+#ifdef MALLOC_STATS
+ memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+#endif
+ }
+
+ /* (2^n)-spaced sub-page bins. */
+ for (; i < ntbins + nqbins + nsbins; i++) {
+ bin = &arena->bins[i];
+ bin->runcur = NULL;
+ RB_INIT(&bin->runs);
+
+ bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
+
+ prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+
+#ifdef MALLOC_STATS
+ memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+#endif
+ }
+
+#ifdef MALLOC_DEBUG
+ arena->magic = ARENA_MAGIC;
+#endif
+
+ return (false);
+}
+
+/* Create a new arena and insert it into the arenas array at index ind. */
+static arena_t *
+arenas_extend(unsigned ind)
+{
+ arena_t *ret;
+
+ /* Allocate enough space for trailing bins. */
+ ret = (arena_t *)base_alloc(sizeof(arena_t)
+ + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
+ if (ret != NULL && arena_new(ret) == false) {
+ arenas[ind] = ret;
+ return (ret);
+ }
+ /* Only reached if there is an OOM error. */
+
+ /*
+ * OOM here is quite inconvenient to propagate, since dealing with it
+ * would require a check for failure in the fast path. Instead, punt
+ * by using arenas[0]. In practice, this is an extremely unlikely
+ * failure.
+ */
+ _malloc_message(_getprogname(),
+ ": (malloc) Error initializing arena\n", "", "");
+ if (opt_abort)
+ abort();
+
+ return (arenas[0]);
+}
+
+/*
+ * End arena.
+ */
+/******************************************************************************/
+/*
+ * Begin general internal functions.
+ */
+
+static void *
+huge_malloc(size_t size, bool zero)
+{
+ void *ret;
+ size_t csize;
+ extent_node_t *node;
+
+ /* Allocate one or more contiguous chunks for this request. */
+
+ csize = CHUNK_CEILING(size);
+ if (csize == 0) {
+ /* size is large enough to cause size_t wrap-around. */
+ return (NULL);
+ }
+
+ /* Allocate an extent node with which to track the chunk. */
+ node = base_node_alloc();
+ if (node == NULL)
+ return (NULL);
+
+ ret = chunk_alloc(csize, zero);
+ if (ret == NULL) {
+ base_node_dealloc(node);
+ return (NULL);
+ }
+
+ /* Insert node into huge. */
+ node->addr = ret;
+ node->size = csize;
+
+ malloc_mutex_lock(&huge_mtx);
+ RB_INSERT(extent_tree_ad_s, &huge, node);
+#ifdef MALLOC_STATS
+ huge_nmalloc++;
+ huge_allocated += csize;
+#endif
+ malloc_mutex_unlock(&huge_mtx);
+
+#ifdef MALLOC_FILL
+ if (zero == false) {
+ if (opt_junk)
+ memset(ret, 0xa5, csize);
+ else if (opt_zero)
+ memset(ret, 0, csize);
+ }
+#endif
+
+ return (ret);
+}
+
+/* Only handles large allocations that require more than chunk alignment. */
+static void *
+huge_palloc(size_t alignment, size_t size)
+{
+ void *ret;
+ size_t alloc_size, chunk_size, offset;
+ extent_node_t *node;
+
+ /*
+ * This allocation requires alignment that is even larger than chunk
+ * alignment. This means that huge_malloc() isn't good enough.
+ *
+ * Allocate almost twice as many chunks as are demanded by the size or
+ * alignment, in order to assure the alignment can be achieved, then
+ * unmap leading and trailing chunks.
+ */
+ assert(alignment >= chunksize);
+
+ chunk_size = CHUNK_CEILING(size);
+
+ if (size >= alignment)
+ alloc_size = chunk_size + alignment - chunksize;
+ else
+ alloc_size = (alignment << 1) - chunksize;
+
+ /* Allocate an extent node with which to track the chunk. */
+ node = base_node_alloc();
+ if (node == NULL)
+ return (NULL);
+
+ ret = chunk_alloc(alloc_size, false);
+ if (ret == NULL) {
+ base_node_dealloc(node);
+ return (NULL);
+ }
+
+ offset = (uintptr_t)ret & (alignment - 1);
+ assert((offset & chunksize_mask) == 0);
+ assert(offset < alloc_size);
+ if (offset == 0) {
+ /* Trim trailing space. */
+ chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
+ - chunk_size);
+ } else {
+ size_t trailsize;
+
+ /* Trim leading space. */
+ chunk_dealloc(ret, alignment - offset);
+
+ ret = (void *)((uintptr_t)ret + (alignment - offset));
+
+ trailsize = alloc_size - (alignment - offset) - chunk_size;
+ if (trailsize != 0) {
+ /* Trim trailing space. */
+ assert(trailsize < alloc_size);
+ chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
+ trailsize);
+ }
+ }
+
+ /* Insert node into huge. */
+ node->addr = ret;
+ node->size = chunk_size;
+
+ malloc_mutex_lock(&huge_mtx);
+ RB_INSERT(extent_tree_ad_s, &huge, node);
+#ifdef MALLOC_STATS
+ huge_nmalloc++;
+ huge_allocated += chunk_size;
+#endif
+ malloc_mutex_unlock(&huge_mtx);
+
+#ifdef MALLOC_FILL
+ if (opt_junk)
+ memset(ret, 0xa5, chunk_size);
+ else if (opt_zero)
+ memset(ret, 0, chunk_size);
+#endif
+
+ return (ret);
+}
+
+static void *
+huge_ralloc(void *ptr, size_t size, size_t oldsize)
+{
+ void *ret;
+ size_t copysize;
+
+ /* Avoid moving the allocation if the size class would not change. */
+ if (oldsize > arena_maxclass &&
+ CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
+#ifdef MALLOC_FILL
+ if (opt_junk && size < oldsize) {
+ memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize
+ - size);
+ } else if (opt_zero && size > oldsize) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0, size
+ - oldsize);
+ }
+#endif
+ return (ptr);
+ }
+
+ /*
+ * If we get here, then size and oldsize are different enough that we
+ * need to use a different size class. In that case, fall back to
+ * allocating new space and copying.
+ */
+ ret = huge_malloc(size, false);
+ if (ret == NULL)
+ return (NULL);
+
+ copysize = (size < oldsize) ? size : oldsize;
+#ifdef VM_COPY_MIN
+ if (copysize >= VM_COPY_MIN)
+ pages_copy(ret, ptr, copysize);
+ else
+#endif
+ memcpy(ret, ptr, copysize);
+ idalloc(ptr);
+ return (ret);
+}
+
+static void
+huge_dalloc(void *ptr)
+{
+ extent_node_t *node, key;
+
+ malloc_mutex_lock(&huge_mtx);
+
+ /* Extract from tree of huge allocations. */
+ key.addr = ptr;
+ node = RB_FIND(extent_tree_ad_s, &huge, &key);
+ assert(node != NULL);
+ assert(node->addr == ptr);
+ RB_REMOVE(extent_tree_ad_s, &huge, node);
+
+#ifdef MALLOC_STATS
+ huge_ndalloc++;
+ huge_allocated -= node->size;
+#endif
+
+ malloc_mutex_unlock(&huge_mtx);
+
+ /* Unmap chunk. */
+#ifdef MALLOC_DSS
+#ifdef MALLOC_FILL
+ if (opt_dss && opt_junk)
+ memset(node->addr, 0x5a, node->size);
+#endif
+#endif
+ chunk_dealloc(node->addr, node->size);
+
+ base_node_dealloc(node);
+}
+
+#ifdef BSD
+static inline unsigned
+malloc_ncpus(void)
+{
+ unsigned ret;
+ int mib[2];
+ size_t len;
+
+ mib[0] = CTL_HW;
+ mib[1] = HW_NCPU;
+ len = sizeof(ret);
+ if (sysctl(mib, 2, &ret, &len, (void *) 0, 0) == -1) {
+ /* Error. */
+ return (1);
+ }
+
+ return (ret);
+}
+#elif (defined(LINUX))
+#include <fcntl.h>
+
+static inline unsigned
+malloc_ncpus(void)
+{
+ unsigned ret;
+ int fd, nread, column;
+ char buf[1];
+ static const char matchstr[] = "processor\t:";
+
+ /*
+ * sysconf(3) would be the preferred method for determining the number
+ * of CPUs, but it uses malloc internally, which causes untenable
+ * recursion during malloc initialization.
+ */
+ fd = open("/proc/cpuinfo", O_RDONLY);
+ if (fd == -1)
+ return (1); /* Error. */
+ /*
+ * Count the number of occurrences of matchstr at the beginnings of
+ * lines. This treats hyperthreaded CPUs as multiple processors.
+ */
+ column = 0;
+ ret = 0;
+ while (true) {
+ nread = read(fd, &buf, sizeof(buf));
+ if (nread <= 0)
+ break; /* EOF or error. */
+
+ if (buf[0] == '\n')
+ column = 0;
+ else if (column != -1) {
+ if (buf[0] == matchstr[column]) {
+ column++;
+ if (column == sizeof(matchstr) - 1) {
+ column = -1;
+ ret++;
+ }
+ } else
+ column = -1;
+ }
+ }
+ if (ret == 0)
+ ret = 1; /* Something went wrong in the parser. */
+ close(fd);
+
+ return (ret);
+}
+#elif (defined(DARWIN))
+#include <mach/mach_init.h>
+#include <mach/mach_host.h>
+
+static inline unsigned
+malloc_ncpus(void)
+{
+ kern_return_t error;
+ natural_t n;
+ processor_info_array_t pinfo;
+ mach_msg_type_number_t pinfocnt;
+
+ error = host_processor_info(mach_host_self(), PROCESSOR_BASIC_INFO,
+ &n, &pinfo, &pinfocnt);
+ if (error != KERN_SUCCESS)
+ return (1); /* Error. */
+ else
+ return (n);
+}
+#elif (defined(HAVE_KSTAT))
+#include <kstat.h>
+
+static inline unsigned
+malloc_ncpus(void)
+{
+ unsigned ret;
+ kstat_ctl_t *ctl;
+ kstat_t *kstat;
+ kstat_named_t *named;
+ unsigned i;
+
+ if ((ctl = kstat_open()) == NULL)
+ return (1); /* Error. */
+
+ if ((kstat = kstat_lookup(ctl, "unix", -1, "system_misc")) == NULL)
+ return (1); /* Error. */
+
+ if (kstat_read(ctl, kstat, NULL) == -1)
+ return (1); /* Error. */
+
+ named = KSTAT_NAMED_PTR(kstat);
+
+ for (i = 0; i < kstat->ks_ndata; i++) {
+ if (strcmp(named[i].name, "ncpus") == 0) {
+ /* Figure out which one of these to actually use. */
+ switch(named[i].data_type) {
+ case KSTAT_DATA_INT32:
+ ret = named[i].value.i32;
+ break;
+ case KSTAT_DATA_UINT32:
+ ret = named[i].value.ui32;
+ break;
+ case KSTAT_DATA_INT64:
+ ret = named[i].value.i64;
+ break;
+ case KSTAT_DATA_UINT64:
+ ret = named[i].value.ui64;
+ break;
+ default:
+ return (1); /* Error. */
+ }
+ }
+ }
+
+ kstat_close(ctl); /* Don't bother checking for an error. */
+
+ return (ret);
+}
+#else
+static inline unsigned
+malloc_ncpus(void)
+{
+
+ /*
+ * We lack a way to determine the number of CPUs on this platform, so
+ * assume 1 CPU.
+ */
+ return (1);
+}
+#endif
+
+static void
+malloc_print_stats(void)
+{
+
+ if (opt_print_stats) {
+ char s[UMAX2S_BUFSIZE];
+ _malloc_message("___ Begin malloc statistics ___\n", "", "",
+ "");
+ _malloc_message("Assertions ",
+#ifdef NDEBUG
+ "disabled",
+#else
+ "enabled",
+#endif
+ "\n", "");
+ _malloc_message("Boolean MALLOC_OPTIONS: ",
+ opt_abort ? "A" : "a", "", "");
+#ifdef MALLOC_DSS
+ _malloc_message(opt_dss ? "D" : "d", "", "", "");
+#endif
+#ifdef MALLOC_FILL
+ _malloc_message(opt_junk ? "J" : "j", "", "", "");
+#endif
+#ifdef MALLOC_DSS
+ _malloc_message(opt_mmap ? "M" : "m", "", "", "");
+#endif
+ _malloc_message("P", "", "", "");
+#ifdef MALLOC_UTRACE
+ _malloc_message(opt_utrace ? "U" : "u", "", "", "");
+#endif
+#ifdef MALLOC_SYSV
+ _malloc_message(opt_sysv ? "V" : "v", "", "", "");
+#endif
+#ifdef MALLOC_XMALLOC
+ _malloc_message(opt_xmalloc ? "X" : "x", "", "", "");
+#endif
+#ifdef MALLOC_FILL
+ _malloc_message(opt_zero ? "Z" : "z", "", "", "");
+#endif
+ _malloc_message("\n", "", "", "");
+
+ _malloc_message("CPUs: ", umax2s(ncpus, s), "\n", "");
+ _malloc_message("Max arenas: ", umax2s(narenas, s), "\n", "");
+#ifdef MALLOC_LAZY_FREE
+ if (opt_lazy_free_2pow >= 0) {
+ _malloc_message("Lazy free slots: ",
+ umax2s(1U << opt_lazy_free_2pow, s), "\n", "");
+ } else
+ _malloc_message("Lazy free slots: 0\n", "", "", "");
+#endif
+#ifdef MALLOC_BALANCE
+ _malloc_message("Arena balance threshold: ",
+ umax2s(opt_balance_threshold, s), "\n", "");
+#endif
+ _malloc_message("Pointer size: ", umax2s(sizeof(void *), s),
+ "\n", "");
+ _malloc_message("Quantum size: ", umax2s(quantum, s), "\n", "");
+ _malloc_message("Max small size: ", umax2s(small_max, s), "\n",
+ "");
+ _malloc_message("Max dirty pages per arena: ",
+ umax2s(opt_dirty_max, s), "\n", "");
+
+ _malloc_message("Chunk size: ", umax2s(chunksize, s), "", "");
+ _malloc_message(" (2^", umax2s(opt_chunk_2pow, s), ")\n", "");
+
+#ifdef MALLOC_STATS
+ {
+ size_t allocated, mapped;
+#ifdef MALLOC_BALANCE
+ uint64_t nbalance = 0;
+#endif
+ unsigned i;
+ arena_t *arena;
+
+ /* Calculate and print allocated/mapped stats. */
+
+ /* arenas. */
+ for (i = 0, allocated = 0; i < narenas; i++) {
+ if (arenas[i] != NULL) {
+ malloc_spin_lock(&arenas[i]->lock);
+ allocated +=
+ arenas[i]->stats.allocated_small;
+ allocated +=
+ arenas[i]->stats.allocated_large;
+#ifdef MALLOC_BALANCE
+ nbalance += arenas[i]->stats.nbalance;
+#endif
+ malloc_spin_unlock(&arenas[i]->lock);
+ }
+ }
+
+ /* huge/base. */
+ malloc_mutex_lock(&huge_mtx);
+ allocated += huge_allocated;
+ mapped = stats_chunks.curchunks * chunksize;
+ malloc_mutex_unlock(&huge_mtx);
+
+ malloc_mutex_lock(&base_mtx);
+ mapped += base_mapped;
+ malloc_mutex_unlock(&base_mtx);
+
+#ifdef WIN32
+ malloc_printf("Allocated: %lu, mapped: %lu\n",
+ allocated, mapped);
+#else
+ malloc_printf("Allocated: %zu, mapped: %zu\n",
+ allocated, mapped);
+#endif
+
+#ifdef MALLOC_BALANCE
+ malloc_printf("Arena balance reassignments: %llu\n",
+ nbalance);
+#endif
+
+ /* Print chunk stats. */
+ {
+ chunk_stats_t chunks_stats;
+
+ malloc_mutex_lock(&huge_mtx);
+ chunks_stats = stats_chunks;
+ malloc_mutex_unlock(&huge_mtx);
+
+ malloc_printf("chunks: nchunks "
+ "highchunks curchunks\n");
+ malloc_printf(" %13llu%13lu%13lu\n",
+ chunks_stats.nchunks,
+ chunks_stats.highchunks,
+ chunks_stats.curchunks);
+ }
+
+ /* Print huge allocation stats. */
+ malloc_printf(
+ "huge: nmalloc ndalloc allocated\n");
+#ifdef WIN32
+ malloc_printf(" %12llu %12llu %12lu\n",
+ huge_nmalloc, huge_ndalloc, huge_allocated);
+#else
+ malloc_printf(" %12llu %12llu %12zu\n",
+ huge_nmalloc, huge_ndalloc, huge_allocated);
+#endif
+ /* Print stats for each arena. */
+ for (i = 0; i < narenas; i++) {
+ arena = arenas[i];
+ if (arena != NULL) {
+ malloc_printf(
+ "\narenas[%u]:\n", i);
+ malloc_spin_lock(&arena->lock);
+ stats_print(arena);
+ malloc_spin_unlock(&arena->lock);
+ }
+ }
+ }
+#endif /* #ifdef MALLOC_STATS */
+ _malloc_message("--- End malloc statistics ---\n", "", "", "");
+ }
+}
+
+/*
+ * FreeBSD's pthreads implementation calls malloc(3), so the malloc
+ * implementation has to take pains to avoid infinite recursion during
+ * initialization.
+ */
+#if (defined(WIN32) || defined(DARWIN))
+#define malloc_init() false
+#else
+static inline bool
+malloc_init(void)
+{
+ if (malloc_initialized == false)
+ return (malloc_init_hard());
+
+ return (false);
+}
+#endif
+
+#ifndef WIN32
+static
+#endif
+bool
+malloc_init_hard(void)
+{
+ unsigned i;
+ char buf[PATH_MAX + 1];
+ const char *opts;
+ long result;
+#ifndef WIN32
+ int linklen;
+#endif
+
+#ifndef WIN32
+ malloc_mutex_lock(&init_lock);
+#endif
+
+ if (malloc_initialized) {
+ /*
+ * Another thread initialized the allocator before this one
+ * acquired init_lock.
+ */
+#ifndef WIN32
+ malloc_mutex_unlock(&init_lock);
+#endif
+ return (false);
+ }
+
+#ifdef WIN32
+ /* get a thread local storage index */
+ tlsIndex = TlsAlloc();
+#endif
+
+ /* Get page size and number of CPUs */
+#ifdef WIN32
+ {
+ SYSTEM_INFO info;
+
+ GetSystemInfo(&info);
+ result = info.dwPageSize;
+
+ pagesize = (unsigned) result;
+
+ ncpus = info.dwNumberOfProcessors;
+ }
+#else
+ ncpus = malloc_ncpus();
+
+ result = sysconf(_SC_PAGESIZE);
+ assert(result != -1);
+
+ pagesize = (unsigned) result;
+#endif
+
+ /*
+ * We assume that pagesize is a power of 2 when calculating
+ * pagesize_mask and pagesize_2pow.
+ */
+ assert(((result - 1) & result) == 0);
+ pagesize_mask = result - 1;
+ pagesize_2pow = ffs((int)result) - 1;
+
+#ifdef MALLOC_LAZY_FREE
+ if (ncpus == 1)
+ opt_lazy_free_2pow = -1;
+#endif
+
+ for (i = 0; i < 3; i++) {
+ unsigned j;
+
+ /* Get runtime configuration. */
+ switch (i) {
+ case 0:
+#ifndef WIN32
+ if ((linklen = readlink("/etc/malloc.conf", buf,
+ sizeof(buf) - 1)) != -1) {
+ /*
+ * Use the contents of the "/etc/malloc.conf"
+ * symbolic link's name.
+ */
+ buf[linklen] = '\0';
+ opts = buf;
+ } else
+#endif
+ {
+ /* No configuration specified. */
+ buf[0] = '\0';
+ opts = buf;
+ }
+ break;
+ case 1:
+/* if (issetugid() == 0 && (opts = */
+/* getenv("MALLOC_OPTIONS")) != NULL) { */
+/* /\* */
+/* * Do nothing; opts is already initialized to */
+/* * the value of the MALLOC_OPTIONS environment */
+/* * variable. */
+/* *\/ */
+/* } else { */
+ /* No configuration specified. */
+ buf[0] = '\0';
+ opts = buf;
+/* } */
+ break;
+ case 2:
+ if (_malloc_options != NULL) {
+ /*
+ * Use options that were compiled into the
+ * program.
+ */
+ opts = _malloc_options;
+ } else {
+ /* No configuration specified. */
+ buf[0] = '\0';
+ opts = buf;
+ }
+ break;
+ default:
+ /* NOTREACHED */
+ buf[0] = '\0';
+ opts = buf;
+ assert(false);
+ }
+
+ for (j = 0; opts[j] != '\0'; j++) {
+ unsigned k, nreps;
+ bool nseen;
+
+ /* Parse repetition count, if any. */
+ for (nreps = 0, nseen = false;; j++, nseen = true) {
+ switch (opts[j]) {
+ case '0': case '1': case '2': case '3':
+ case '4': case '5': case '6': case '7':
+ case '8': case '9':
+ nreps *= 10;
+ nreps += opts[j] - '0';
+ break;
+ default:
+ goto MALLOC_OUT;
+ }
+ }
+MALLOC_OUT:
+ if (nseen == false)
+ nreps = 1;
+
+ for (k = 0; k < nreps; k++) {
+ switch (opts[j]) {
+ case 'a':
+ opt_abort = false;
+ break;
+ case 'A':
+ opt_abort = true;
+ break;
+ case 'b':
+#ifdef MALLOC_BALANCE
+ opt_balance_threshold >>= 1;
+#endif
+ break;
+ case 'B':
+#ifdef MALLOC_BALANCE
+ if (opt_balance_threshold == 0)
+ opt_balance_threshold = 1;
+ else if ((opt_balance_threshold << 1)
+ > opt_balance_threshold)
+ opt_balance_threshold <<= 1;
+#endif
+ break;
+ case 'd':
+#ifdef MALLOC_DSS
+ opt_dss = false;
+#endif
+ break;
+ case 'D':
+#ifdef MALLOC_DSS
+ opt_dss = true;
+#endif
+ break;
+ case 'f':
+ opt_dirty_max >>= 1;
+ break;
+ case 'F':
+ if (opt_dirty_max == 0)
+ opt_dirty_max = 1;
+ else if ((opt_dirty_max << 1) != 0)
+ opt_dirty_max <<= 1;
+ break;
+#ifdef MALLOC_FILL
+ case 'j':
+ opt_junk = false;
+ break;
+ case 'J':
+ opt_junk = true;
+ break;
+#endif
+ case 'k':
+ /*
+ * Chunks always require at least one
+ * header page, so chunks can never be
+ * smaller than two pages.
+ */
+ if (opt_chunk_2pow > pagesize_2pow + 1)
+ opt_chunk_2pow--;
+ break;
+ case 'K':
+ if (opt_chunk_2pow + 1 <
+ (sizeof(size_t) << 3))
+ opt_chunk_2pow++;
+ break;
+ case 'l':
+#ifdef MALLOC_LAZY_FREE
+ if (opt_lazy_free_2pow >= 0)
+ opt_lazy_free_2pow--;
+#endif
+ break;
+ case 'L':
+#ifdef MALLOC_LAZY_FREE
+ if (ncpus > 1)
+ opt_lazy_free_2pow++;
+#endif
+ break;
+ case 'm':
+#ifdef MALLOC_DSS
+ opt_mmap = false;
+#endif
+ break;
+ case 'M':
+#ifdef MALLOC_DSS
+ opt_mmap = true;
+#endif
+ break;
+ case 'n':
+ opt_narenas_lshift--;
+ break;
+ case 'N':
+ opt_narenas_lshift++;
+ break;
+ case 'p':
+ opt_print_stats = false;
+ break;
+ case 'P':
+ opt_print_stats = true;
+ break;
+ case 'q':
+ if (opt_quantum_2pow > QUANTUM_2POW_MIN)
+ opt_quantum_2pow--;
+ break;
+ case 'Q':
+ if (opt_quantum_2pow < pagesize_2pow -
+ 1)
+ opt_quantum_2pow++;
+ break;
+ case 's':
+ if (opt_small_max_2pow >
+ QUANTUM_2POW_MIN)
+ opt_small_max_2pow--;
+ break;
+ case 'S':
+ if (opt_small_max_2pow < pagesize_2pow
+ - 1)
+ opt_small_max_2pow++;
+ break;
+#ifdef MALLOC_UTRACE
+ case 'u':
+ opt_utrace = false;
+ break;
+ case 'U':
+ opt_utrace = true;
+ break;
+#endif
+#ifdef MALLOC_SYSV
+ case 'v':
+ opt_sysv = false;
+ break;
+ case 'V':
+ opt_sysv = true;
+ break;
+#endif
+#ifdef MALLOC_XMALLOC
+ case 'x':
+ opt_xmalloc = false;
+ break;
+ case 'X':
+ opt_xmalloc = true;
+ break;
+#endif
+#ifdef MALLOC_FILL
+ case 'z':
+ opt_zero = false;
+ break;
+ case 'Z':
+ opt_zero = true;
+ break;
+#endif
+ default: {
+ char cbuf[2];
+
+ cbuf[0] = opts[j];
+ cbuf[1] = '\0';
+ _malloc_message(_getprogname(),
+ ": (malloc) Unsupported character "
+ "in malloc options: '", cbuf,
+ "'\n");
+ }
+ }
+ }
+ }
+ }
+
+#ifdef MALLOC_DSS
+ /* Make sure that there is some method for acquiring memory. */
+ if (opt_dss == false && opt_mmap == false)
+ opt_mmap = true;
+#endif
+
+ /* Take care to call atexit() only once. */
+ if (opt_print_stats) {
+#ifndef WIN32
+ /* Print statistics at exit. */
+ atexit(malloc_print_stats);
+#endif
+ }
+
+ /* Set variables according to the value of opt_small_max_2pow. */
+ if (opt_small_max_2pow < opt_quantum_2pow)
+ opt_small_max_2pow = opt_quantum_2pow;
+ small_max = (1U << opt_small_max_2pow);
+
+ /* Set bin-related variables. */
+ bin_maxclass = (pagesize >> 1);
+ assert(opt_quantum_2pow >= TINY_MIN_2POW);
+ ntbins = opt_quantum_2pow - TINY_MIN_2POW;
+ assert(ntbins <= opt_quantum_2pow);
+ nqbins = (small_max >> opt_quantum_2pow);
+ nsbins = pagesize_2pow - opt_small_max_2pow - 1;
+
+ /* Set variables according to the value of opt_quantum_2pow. */
+ quantum = (1U << opt_quantum_2pow);
+ quantum_mask = quantum - 1;
+ if (ntbins > 0)
+ small_min = (quantum >> 1) + 1;
+ else
+ small_min = 1;
+ assert(small_min <= quantum);
+
+ /* Set variables according to the value of opt_chunk_2pow. */
+ chunksize = (1LU << opt_chunk_2pow);
+ chunksize_mask = chunksize - 1;
+ chunk_npages = (chunksize >> pagesize_2pow);
+ {
+ size_t header_size;
+
+ /*
+ * Compute the header size such that it is large
+ * enough to contain the page map and enough nodes for the
+ * worst case: one node per non-header page plus one extra for
+ * situations where we briefly have one more node allocated
+ * than we will need.
+ */
+ header_size = sizeof(arena_chunk_t) +
+ (sizeof(arena_chunk_map_t) * (chunk_npages - 1)) +
+ (sizeof(extent_node_t) * chunk_npages);
+ arena_chunk_header_npages = (header_size >> pagesize_2pow) +
+ ((header_size & pagesize_mask) != 0);
+ }
+ arena_maxclass = chunksize - (arena_chunk_header_npages <<
+ pagesize_2pow);
+#ifdef MALLOC_LAZY_FREE
+ /*
+ * Make sure that allocating the free_cache does not exceed the limits
+ * of what base_alloc() can handle.
+ */
+ while ((sizeof(void *) << opt_lazy_free_2pow) > chunksize)
+ opt_lazy_free_2pow--;
+#endif
+
+ UTRACE(0, 0, 0);
+
+#ifdef MALLOC_STATS
+ memset(&stats_chunks, 0, sizeof(chunk_stats_t));
+#endif
+
+ /* Various sanity checks that regard configuration. */
+ assert(quantum >= sizeof(void *));
+ assert(quantum <= pagesize);
+ assert(chunksize >= pagesize);
+ assert(quantum * 4 <= chunksize);
+
+ /* Initialize chunks data. */
+ malloc_mutex_init(&huge_mtx);
+ RB_INIT(&huge);
+#ifdef MALLOC_DSS
+ malloc_mutex_init(&dss_mtx);
+ dss_base = sbrk(0);
+ dss_prev = dss_base;
+ dss_max = dss_base;
+ RB_INIT(&dss_chunks_szad);
+ RB_INIT(&dss_chunks_ad);
+#endif
+#ifdef MALLOC_STATS
+ huge_nmalloc = 0;
+ huge_ndalloc = 0;
+ huge_allocated = 0;
+#endif
+
+ /* Initialize base allocation data structures. */
+#ifdef MALLOC_STATS
+ base_mapped = 0;
+#endif
+#ifdef MALLOC_DSS
+ /*
+ * Allocate a base chunk here, since it doesn't actually have to be
+ * chunk-aligned. Doing this before allocating any other chunks allows
+ * the use of space that would otherwise be wasted.
+ */
+ if (opt_dss)
+ base_pages_alloc(0);
+#endif
+ base_nodes = NULL;
+ malloc_mutex_init(&base_mtx);
+
+ if (ncpus > 1) {
+ /*
+ * For SMP systems, create four times as many arenas as there
+ * are CPUs by default.
+ */
+ opt_narenas_lshift += 2;
+ }
+
+ /* Determine how many arenas to use. */
+ narenas = ncpus;
+ if (opt_narenas_lshift > 0) {
+ if ((narenas << opt_narenas_lshift) > narenas)
+ narenas <<= opt_narenas_lshift;
+ /*
+ * Make sure not to exceed the limits of what base_alloc() can
+ * handle.
+ */
+ if (narenas * sizeof(arena_t *) > chunksize)
+ narenas = chunksize / sizeof(arena_t *);
+ } else if (opt_narenas_lshift < 0) {
+ if ((narenas >> -opt_narenas_lshift) < narenas)
+ narenas >>= -opt_narenas_lshift;
+ /* Make sure there is at least one arena. */
+ if (narenas == 0)
+ narenas = 1;
+ }
+#ifdef MALLOC_BALANCE
+ assert(narenas != 0);
+ for (narenas_2pow = 0;
+ (narenas >> (narenas_2pow + 1)) != 0;
+ narenas_2pow++);
+#endif
+
+#ifdef NO_TLS
+ if (narenas > 1) {
+ static const unsigned primes[] = {1, 3, 5, 7, 11, 13, 17, 19,
+ 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83,
+ 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
+ 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
+ 223, 227, 229, 233, 239, 241, 251, 257, 263};
+ unsigned nprimes, parenas;
+
+ /*
+ * Pick a prime number of hash arenas that is more than narenas
+ * so that direct hashing of pthread_self() pointers tends to
+ * spread allocations evenly among the arenas.
+ */
+ assert((narenas & 1) == 0); /* narenas must be even. */
+ nprimes = (sizeof(primes) >> SIZEOF_INT_2POW);
+ parenas = primes[nprimes - 1]; /* In case not enough primes. */
+ for (i = 1; i < nprimes; i++) {
+ if (primes[i] > narenas) {
+ parenas = primes[i];
+ break;
+ }
+ }
+ narenas = parenas;
+ }
+#endif
+
+#ifndef NO_TLS
+# ifndef MALLOC_BALANCE
+ next_arena = 0;
+# endif
+#endif
+
+ /* Allocate and initialize arenas. */
+ arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
+ if (arenas == NULL) {
+#ifndef WIN32
+ malloc_mutex_unlock(&init_lock);
+#endif
+ return (true);
+ }
+ /*
+ * Zero the array. In practice, this should always be pre-zeroed,
+ * since it was just mmap()ed, but let's be sure.
+ */
+ memset(arenas, 0, sizeof(arena_t *) * narenas);
+
+ /*
+ * Initialize one arena here. The rest are lazily created in
+ * choose_arena_hard().
+ */
+ arenas_extend(0);
+ if (arenas[0] == NULL) {
+#ifndef WIN32
+ malloc_mutex_unlock(&init_lock);
+#endif
+ return (true);
+ }
+#ifndef NO_TLS
+ /*
+ * Assign the initial arena to the initial thread, in order to avoid
+ * spurious creation of an extra arena if the application switches to
+ * threaded mode.
+ */
+#ifdef WIN32
+ TlsSetValue(tlsIndex, arenas[0]);
+#else
+ arenas_map = arenas[0];
+#endif
+#endif
+
+ /*
+ * Seed here for the initial thread, since choose_arena_hard() is only
+ * called for other threads. The seed values don't really matter.
+ */
+#ifdef MALLOC_LAZY_FREE
+ SPRN(lazy_free, 42);
+#endif
+#ifdef MALLOC_BALANCE
+ SPRN(balance, 42);
+#endif
+
+ malloc_spin_init(&arenas_lock);
+
+ malloc_initialized = true;
+#ifndef WIN32
+ malloc_mutex_unlock(&init_lock);
+#endif
+ return (false);
+}
+
+/* XXX Why not just expose malloc_print_stats()? */
+#ifdef WIN32
+void
+malloc_shutdown()
+{
+
+ malloc_print_stats();
+}
+#endif
+
+/*
+ * End general internal functions.
+ */
+/******************************************************************************/
+/*
+ * Begin malloc(3)-compatible functions.
+ */
+
+DSOEXPORT
+#ifdef DARWIN
+inline void *
+moz_malloc(size_t size)
+#else
+void *
+malloc(size_t size)
+#endif
+{
+ void *ret;
+
+ if (malloc_init()) {
+ ret = NULL;
+ goto RETURN;
+ }
+
+ if (size == 0) {
+#ifdef MALLOC_SYSV
+ if (opt_sysv == false)
+#endif
+ size = 1;
+#ifdef MALLOC_SYSV
+ else {
+ ret = NULL;
+ goto RETURN;
+ }
+#endif
+ }
+
+ ret = imalloc(size);
+
+RETURN:
+ if (ret == NULL) {
+#ifdef MALLOC_XMALLOC
+ if (opt_xmalloc) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in malloc(): out of memory\n", "",
+ "");
+ abort();
+ }
+#endif
+ errno = ENOMEM;
+ }
+
+ UTRACE(0, size, ret);
+ return (ret);
+}
+
+DSOEXPORT
+#ifdef DARWIN
+inline int
+moz_posix_memalign(void **memptr, size_t alignment, size_t size)
+#else
+int
+posix_memalign(void **memptr, size_t alignment, size_t size)
+#endif
+{
+ int ret;
+ void *result;
+
+ if (malloc_init())
+ result = NULL;
+ else {
+ /* Make sure that alignment is a large enough power of 2. */
+ if (((alignment - 1) & alignment) != 0
+ || alignment < sizeof(void *)) {
+#ifdef MALLOC_XMALLOC
+ if (opt_xmalloc) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in posix_memalign(): "
+ "invalid alignment\n", "", "");
+ abort();
+ }
+#endif
+ result = NULL;
+ ret = EINVAL;
+ goto RETURN;
+ }
+
+ result = ipalloc(alignment, size);
+ }
+
+ if (result == NULL) {
+#ifdef MALLOC_XMALLOC
+ if (opt_xmalloc) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in posix_memalign(): out of memory\n",
+ "", "");
+ abort();
+ }
+#endif
+ ret = ENOMEM;
+ goto RETURN;
+ }
+
+ *memptr = result;
+ ret = 0;
+
+RETURN:
+ UTRACE(0, size, result);
+ return (ret);
+}
+
+DSOEXPORT
+#ifdef DARWIN
+inline void *
+moz_memalign(size_t alignment, size_t size)
+#else
+void *
+memalign(size_t alignment, size_t size)
+#endif
+{
+ void *ret;
+
+#ifdef DARWIN
+ if (moz_posix_memalign(&ret, alignment, size) != 0)
+#else
+ if (posix_memalign(&ret, alignment, size) != 0)
+#endif
+ return (NULL);
+
+ return ret;
+}
+
+DSOEXPORT
+#ifdef DARWIN
+inline void *
+moz_valloc(size_t size)
+#else
+void *
+valloc(size_t size)
+#endif
+{
+#ifdef DARWIN
+ return (moz_memalign(pagesize, size));
+#else
+ return (memalign(pagesize, size));
+#endif
+}
+
+DSOEXPORT
+#ifdef DARWIN
+inline void *
+moz_calloc(size_t num, size_t size)
+#else
+void *
+calloc(size_t num, size_t size)
+#endif
+{
+ void *ret;
+ size_t num_size;
+
+ if (malloc_init()) {
+ num_size = 0;
+ ret = NULL;
+ goto RETURN;
+ }
+
+ num_size = num * size;
+ if (num_size == 0) {
+#ifdef MALLOC_SYSV
+ if ((opt_sysv == false) && ((num == 0) || (size == 0)))
+#endif
+ num_size = 1;
+#ifdef MALLOC_SYSV
+ else {
+ ret = NULL;
+ goto RETURN;
+ }
+#endif
+ /*
+ * Try to avoid division here. We know that it isn't possible to
+ * overflow during multiplication if neither operand uses any of the
+ * most significant half of the bits in a size_t.
+ */
+ } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
+ && (num_size / size != num)) {
+ /* size_t overflow. */
+ ret = NULL;
+ goto RETURN;
+ }
+
+ ret = icalloc(num_size);
+
+RETURN:
+ if (ret == NULL) {
+#ifdef MALLOC_XMALLOC
+ if (opt_xmalloc) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in calloc(): out of memory\n", "",
+ "");
+ abort();
+ }
+#endif
+ errno = ENOMEM;
+ }
+
+ UTRACE(0, num_size, ret);
+ return (ret);
+}
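
The overflow test in calloc() avoids the division in the common case: if neither operand touches the upper half of a size_t, the product cannot wrap. A stand-alone sketch of the same test (an assumption-laden example, using the standard SIZE_MAX rather than the BSD SIZE_T_MAX used in the patch):

    #include <stdint.h>
    #include <stddef.h>

    static int
    mul_overflows(size_t num, size_t size)
    {
        const size_t high_bits = SIZE_MAX << (sizeof(size_t) << 2);

        if (((num | size) & high_bits) == 0)
            return 0;   /* both operands are small: cannot overflow */
        if (size == 0)
            return 0;   /* product is zero */
        return (num * size) / size != num;
    }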
+
+DSOEXPORT
+#ifdef DARWIN
+inline void *
+moz_realloc(void *ptr, size_t size)
+#else
+void *
+realloc(void *ptr, size_t size)
+#endif
+{
+ void *ret;
+
+ if (size == 0) {
+#ifdef MALLOC_SYSV
+ if (opt_sysv == false)
+#endif
+ size = 1;
+#ifdef MALLOC_SYSV
+ else {
+ if (ptr != NULL)
+ idalloc(ptr);
+ ret = NULL;
+ goto RETURN;
+ }
+#endif
+ }
+
+ if (ptr != NULL) {
+ assert(malloc_initialized);
+
+ ret = iralloc(ptr, size);
+
+ if (ret == NULL) {
+#ifdef MALLOC_XMALLOC
+ if (opt_xmalloc) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in realloc(): out of "
+ "memory\n", "", "");
+ abort();
+ }
+#endif
+ errno = ENOMEM;
+ }
+ } else {
+ if (malloc_init())
+ ret = NULL;
+ else
+ ret = imalloc(size);
+
+ if (ret == NULL) {
+#ifdef MALLOC_XMALLOC
+ if (opt_xmalloc) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in realloc(): out of "
+ "memory\n", "", "");
+ abort();
+ }
+#endif
+ errno = ENOMEM;
+ }
+ }
+
+#ifdef MALLOC_SYSV
+RETURN:
+#endif
+ UTRACE(ptr, size, ret);
+ return (ret);
+}
+
+DSOEXPORT
+#ifdef DARWIN
+inline void
+moz_free(void *ptr)
+#else
+void
+free(void *ptr)
+#endif
+{
+
+ UTRACE(ptr, 0, 0);
+ if (ptr != NULL) {
+ assert(malloc_initialized);
+
+ idalloc(ptr);
+ }
+}
+
+/* /\* */
+/* * This is a work in progress, which doesn't even get used. */
+/* *\/ */
+/* #ifdef USE_STATS_MEMORY */
+/* #ifndef DARWIN */
+/* DSOEXPORT */
+/* struct mallinfo */
+/* mallinfo() */
+/* { */
+/* struct mallinfo mi; */
+/* size_t allocated = 0; */
+/* size_t mapped = 0; */
+/* arena_t *arena; */
+/* size_t i; */
+
+/* /\* Calculate and print allocated/mapped stats. *\/ */
+
+/* /\* arenas. *\/ */
+/* for (i = 0; i < narenas; i++) { */
+/* if (arenas[i] != NULL) { */
+/* malloc_spin_lock(&arenas[i]->lock); */
+/* mi.uordblks += */
+/* arenas[i]->stats.allocated_small; */
+/* mi.uordblks += */
+/* arenas[i]->stats.allocated_large; */
+/* malloc_spin_unlock(&arenas[i]->lock); */
+/* } */
+/* } */
+
+/* #if 0 */
+/* avail = chunksize(top(ar_ptr)); */
+/* navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0; */
+
+/* // FIXME: add mutex */
+/* mi->arena = ar_ptr->size; */
+/* mi->ordblks = navail; */
+/* mi->smblks = mi->usmblks = mi->fsmblks = 0; /\* clear unused fields *\/ */
+/* mi->uordblks = ar_ptr->size - avail; */
+/* mi->fordblks = avail; */
+/* mi->hblks = n_mmaps; */
+/* mi->hblkhd = mmapped_mem; */
+/* mi->keepcost = chunksize(top(ar_ptr)); */
+/* #endif */
+/* return mi; */
+/* } */
+/* # endif */
+/* #endif */
+
+/*
+ * End malloc(3)-compatible functions.
+ */
+/******************************************************************************/
+/*
+ * Begin non-standard functions.
+ */
+
+DSOEXPORT
+#ifdef DARWIN
+inline size_t
+moz_malloc_usable_size(const void *ptr)
+#else
+size_t
+malloc_usable_size(const void *ptr)
+#endif
+{
+
+ assert(ptr != NULL);
+
+ return (isalloc(ptr));
+}
+
+#ifdef WIN32
+void*
+_recalloc(void *ptr, size_t count, size_t size)
+{
+ size_t oldsize = (ptr != NULL) ? isalloc(ptr) : 0;
+ size_t newsize = count * size;
+
+ /*
+ * In order for all trailing bytes to be zeroed, the caller needs to
+ * use calloc(), followed by recalloc(). However, the current calloc()
+ * implementation only zeros the bytes requested, so if recalloc() is
+ * to work 100% correctly, calloc() will need to change to zero
+ * trailing bytes.
+ */
+
+ ptr = realloc(ptr, newsize);
+ if (ptr != NULL && oldsize < newsize) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0, newsize -
+ oldsize);
+ }
+
+ return ptr;
+}
+
+/*
+ * This impl of _expand doesn't ever actually expand or shrink blocks: it
+ * simply replies that you may continue using a shrunk block.
+ */
+void*
+_expand(void *ptr, size_t newsize)
+{
+ if (isalloc(ptr) >= newsize)
+ return ptr;
+
+ return NULL;
+}
+
+size_t
+_msize(const void *ptr)
+{
+
+ return malloc_usable_size(ptr);
+}
+#endif
+
+
+/*
+ * End non-standard functions.
+ */
+/******************************************************************************/
+/*
+ * Begin library-private functions, used by threading libraries for protection
+ * of malloc during fork(). These functions are only called if the program is
+ * running in threaded mode, so there is no need to check whether the program
+ * is threaded here.
+ */
+
+void
+_malloc_prefork(void)
+{
+ unsigned i;
+
+ /* Acquire all mutexes in a safe order. */
+
+ malloc_spin_lock(&arenas_lock);
+ for (i = 0; i < narenas; i++) {
+ if (arenas[i] != NULL)
+ malloc_spin_lock(&arenas[i]->lock);
+ }
+ malloc_spin_unlock(&arenas_lock);
+
+ malloc_mutex_lock(&base_mtx);
+
+ malloc_mutex_lock(&huge_mtx);
+
+#ifdef MALLOC_DSS
+ malloc_mutex_lock(&dss_mtx);
+#endif
+}
+
+void
+_malloc_postfork(void)
+{
+ unsigned i;
+
+ /* Release all mutexes, now that fork() has completed. */
+
+#ifdef MALLOC_DSS
+ malloc_mutex_unlock(&dss_mtx);
+#endif
+
+ malloc_mutex_unlock(&huge_mtx);
+
+ malloc_mutex_unlock(&base_mtx);
+
+ malloc_spin_lock(&arenas_lock);
+ for (i = 0; i < narenas; i++) {
+ if (arenas[i] != NULL)
+ malloc_spin_unlock(&arenas[i]->lock);
+ }
+ malloc_spin_unlock(&arenas_lock);
+}
+
+/*
+ * End library-private functions.
+ */
+/******************************************************************************/
+
+
+#ifdef DARWIN
+static malloc_zone_t zone;
+static struct malloc_introspection_t zone_introspect;
+
+static size_t
+zone_size(malloc_zone_t *zone, void *ptr)
+{
+ size_t ret = 0;
+ arena_chunk_t *chunk;
+
+ /*
+ * There appear to be places within Darwin (such as setenv(3)) that
+ * cause calls to this function with pointers that *no* zone owns. If
+ * we knew that all pointers were owned by *some* zone, we could split
+ * our zone into two parts, and use one as the default allocator and
+ * the other as the default deallocator/reallocator. Since that will
+ * not work in practice, we must check all pointers to assure that they
+ * reside within a mapped chunk before determining size.
+ */
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (chunk != ptr) {
+ arena_t *arena;
+ unsigned i;
+ arena_t *arenas_snapshot[narenas];
+
+ /*
+ * Make a copy of the arenas vector while holding arenas_lock in
+ * order to assure that all elements are up to date in this
+ * processor's cache. Do this outside the following loop in
+ * order to reduce lock acquisitions.
+ */
+ malloc_spin_lock(&arenas_lock);
+ memcpy(&arenas_snapshot, arenas, sizeof(arena_t *) * narenas);
+ malloc_spin_unlock(&arenas_lock);
+
+ /* Region. */
+ for (i = 0; i < narenas; i++) {
+ arena = arenas_snapshot[i];
+
+ if (arena != NULL) {
+ bool own;
+
+ /* Make sure ptr is within a chunk. */
+ malloc_spin_lock(&arena->lock);
+ if (RB_FIND(arena_chunk_tree_s, &arena->chunks,
+ chunk) == chunk)
+ own = true;
+ else
+ own = false;
+ malloc_spin_unlock(&arena->lock);
+
+ if (own) {
+ ret = arena_salloc(ptr);
+ goto RETURN;
+ }
+ }
+ }
+ } else {
+ extent_node_t *node;
+ extent_node_t key;
+
+ /* Chunk. */
+ key.addr = (void *)chunk;
+ malloc_mutex_lock(&huge_mtx);
+ node = RB_FIND(extent_tree_ad_s, &huge, &key);
+ if (node != NULL)
+ ret = node->size;
+ else
+ ret = 0;
+ malloc_mutex_unlock(&huge_mtx);
+ }
+
+RETURN:
+ return (ret);
+}
+
+static void *
+zone_malloc(malloc_zone_t *zone, size_t size)
+{
+
+ return (moz_malloc(size));
+}
+
+static void *
+zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
+{
+
+ return (moz_calloc(num, size));
+}
+
+static void *
+zone_valloc(malloc_zone_t *zone, size_t size)
+{
+ void *ret = NULL; /* Assignment avoids useless compiler warning. */
+
+ moz_posix_memalign(&ret, pagesize, size);
+
+ return (ret);
+}
+
+static void
+zone_free(malloc_zone_t *zone, void *ptr)
+{
+
+ moz_free(ptr);
+}
+
+static void *
+zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
+{
+
+ return (moz_realloc(ptr, size));
+}
+
+static void *
+zone_destroy(malloc_zone_t *zone)
+{
+
+ /* This function should never be called. */
+ assert(false);
+ return (NULL);
+}
+
+static size_t
+zone_good_size(malloc_zone_t *zone, size_t size)
+{
+ size_t ret;
+ void *p;
+
+ /*
+ * Actually create an object of the appropriate size, then find out
+ * how large it could have been without moving up to the next size
+ * class.
+ */
+ p = moz_malloc(size);
+ if (p != NULL) {
+ ret = isalloc(p);
+ moz_free(p);
+ } else
+ ret = size;
+
+ return (ret);
+}
+
+static void
+zone_force_lock(malloc_zone_t *zone)
+{
+
+ _malloc_prefork();
+}
+
+static void
+zone_force_unlock(malloc_zone_t *zone)
+{
+
+ _malloc_postfork();
+}
+
+static malloc_zone_t *
+create_zone(void)
+{
+
+ assert(malloc_initialized);
+
+ zone.size = (void *)zone_size;
+ zone.malloc = (void *)zone_malloc;
+ zone.calloc = (void *)zone_calloc;
+ zone.valloc = (void *)zone_valloc;
+ zone.free = (void *)zone_free;
+ zone.realloc = (void *)zone_realloc;
+ zone.destroy = (void *)zone_destroy;
+ zone.zone_name = "jemalloc_zone";
+ zone.batch_malloc = NULL;
+ zone.batch_free = NULL;
+ zone.introspect = &zone_introspect;
+
+ zone_introspect.enumerator = NULL;
+ zone_introspect.good_size = (void *)zone_good_size;
+ zone_introspect.check = NULL;
+ zone_introspect.print = NULL;
+ zone_introspect.log = NULL;
+ zone_introspect.force_lock = (void *)zone_force_lock;
+ zone_introspect.force_unlock = (void *)zone_force_unlock;
+ zone_introspect.statistics = NULL;
+
+ return (&zone);
+}
+
+__attribute__((constructor))
+void
+jemalloc_darwin_init(void)
+{
+ extern unsigned malloc_num_zones;
+ extern malloc_zone_t **malloc_zones;
+
+ if (malloc_init_hard())
+ abort();
+
+ /*
+ * The following code is *not* thread-safe, so it's critical that
+ * initialization be manually triggered.
+ */
+
+ /* Register the custom zones. */
+ malloc_zone_register(create_zone());
+ assert(malloc_zones[malloc_num_zones - 1] == &zone);
+
+ /*
+ * Shift malloc_zones around so that zone is first, which makes it the
+ * default zone.
+ */
+ assert(malloc_num_zones > 1);
+ memmove(&malloc_zones[1], &malloc_zones[0],
+ sizeof(malloc_zone_t *) * (malloc_num_zones - 1));
+ malloc_zones[0] = &zone;
+}
+#endif
Index: libbase/jemtree.h
===================================================================
RCS file: libbase/jemtree.h
diff -N libbase/jemtree.h
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ libbase/jemtree.h 29 Apr 2008 16:50:50 -0000 1.1
@@ -0,0 +1,743 @@
+/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */
+/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
+/* $FreeBSD: src/sys/sys/tree.h,v 1.7 2007/12/28 07:03:26 jasone Exp $ */
+
+/*-
+ * Copyright 2002 Niels Provos <address@hidden>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SYS_TREE_H_
+#define _SYS_TREE_H_
+
+/*
+ * This file defines data structures for different types of trees:
+ * splay trees and red-black trees.
+ *
+ * A splay tree is a self-organizing data structure. Every operation
+ * on the tree causes a splay to happen. The splay moves the requested
+ * node to the root of the tree and partly rebalances it.
+ *
+ * This has the benefit that request locality causes faster lookups as
+ * the requested nodes move to the top of the tree. On the other hand,
+ * every lookup causes memory writes.
+ *
+ * The Balance Theorem bounds the total access time for m operations
+ * and n inserts on an initially empty tree as O((m + n)lg n). The
+ * amortized cost for a sequence of m accesses to a splay tree is O(lg n);
+ *
+ * A red-black tree is a binary search tree with the node color as an
+ * extra attribute. It fulfills a set of conditions:
+ * - every search path from the root to a leaf consists of the
+ * same number of black nodes,
+ * - each red node (except for the root) has a black parent,
+ * - each leaf node is black.
+ *
+ * Every operation on a red-black tree is bounded as O(lg n).
+ * The maximum height of a red-black tree is 2lg (n+1).
+ */
+
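
As a rough illustration of how the splay-tree macros declared below are typically wired together (a sketch only; the node structure and comparison function names are invented for the example and are not part of this header):

    struct node {
        int key;
        SPLAY_ENTRY(node) link;      /* embedded left/right pointers */
    };

    static int
    node_cmp(struct node *a, struct node *b)
    {
        return (a->key < b->key) ? -1 : (a->key > b->key);
    }

    SPLAY_HEAD(node_tree, node);
    SPLAY_PROTOTYPE(node_tree, node, link, node_cmp)
    SPLAY_GENERATE(node_tree, node, link, node_cmp)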
+#define SPLAY_HEAD(name, type) \
+struct name { \
+ struct type *sph_root; /* root of the tree */ \
+}
+
+#define SPLAY_INITIALIZER(root) \
+ { NULL }
+
+#define SPLAY_INIT(root) do { \
+ (root)->sph_root = NULL; \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ENTRY(type) \
+struct { \
+ struct type *spe_left; /* left element */ \
+ struct type *spe_right; /* right element */ \
+}
+
+#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
+#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
+#define SPLAY_ROOT(head) (head)->sph_root
+#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
+
+/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
+#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_LINKLEFT(head, tmp, field) do { \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_LINKRIGHT(head, tmp, field) do { \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
+ SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
+ SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
+} while (/*CONSTCOND*/ 0)
+
+/* Generates prototypes and inline functions */
+
+#define SPLAY_PROTOTYPE(name, type, field, cmp) \
+void name##_SPLAY(struct name *, struct type *); \
+void name##_SPLAY_MINMAX(struct name *, int); \
+struct type *name##_SPLAY_INSERT(struct name *, struct type *); \
+struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \
+ \
+/* Finds the node with the same key as elm */ \
+static __inline struct type * \
+name##_SPLAY_FIND(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) \
+ return(NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) \
+ return (head->sph_root); \
+ return (NULL); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_NEXT(struct name *head, struct type *elm) \
+{ \
+ name##_SPLAY(head, elm); \
+ if (SPLAY_RIGHT(elm, field) != NULL) { \
+ elm = SPLAY_RIGHT(elm, field); \
+ while (SPLAY_LEFT(elm, field) != NULL) { \
+ elm = SPLAY_LEFT(elm, field); \
+ } \
+ } else \
+ elm = NULL; \
+ return (elm); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_MIN_MAX(struct name *head, int val) \
+{ \
+ name##_SPLAY_MINMAX(head, val); \
+ return (SPLAY_ROOT(head)); \
+}
+
+/* Main splay operation.
+ * Moves node close to the key of elm to top
+ */
+#define SPLAY_GENERATE(name, type, field, cmp) \
+struct type * \
+name##_SPLAY_INSERT(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) { \
+ SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
+ } else { \
+ int __comp; \
+ name##_SPLAY(head, elm); \
+ __comp = (cmp)(elm, (head)->sph_root); \
+ if(__comp < 0) { \
+		SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\
+ SPLAY_RIGHT(elm, field) = (head)->sph_root; \
+ SPLAY_LEFT((head)->sph_root, field) = NULL; \
+ } else if (__comp > 0) { \
+		SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT(elm, field) = (head)->sph_root; \
+ SPLAY_RIGHT((head)->sph_root, field) = NULL; \
+ } else \
+ return ((head)->sph_root); \
+ } \
+ (head)->sph_root = (elm); \
+ return (NULL); \
+} \
+ \
+struct type * \
+name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *__tmp; \
+ if (SPLAY_EMPTY(head)) \
+ return (NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) { \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
+			(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\
+ } else { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
+ name##_SPLAY(head, elm); \
+ SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
+ } \
+ return (elm); \
+ } \
+ return (NULL); \
+} \
+ \
+void \
+name##_SPLAY(struct name *head, struct type *elm) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+ int __comp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+				if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) > 0){ \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+				if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+} \
+ \
+/* Splay with either the minimum or the maximum element		\
+ * Used to find minimum or maximum element in tree. \
+ */ \
+void name##_SPLAY_MINMAX(struct name *head, int __comp) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while (1) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+				if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp > 0) { \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+				if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+}
+
+#define SPLAY_NEGINF -1
+#define SPLAY_INF 1
+
+#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
+#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
+#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
+#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
+#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
+#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_INF))
+
+#define SPLAY_FOREACH(x, name, head) \
+ for ((x) = SPLAY_MIN(name, head); \
+ (x) != NULL; \
+ (x) = SPLAY_NEXT(name, head, x))
+
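For reference, a minimal sketch of how the SPLAY_* macros above are typically
instantiated. The identifiers below (node, entry, node_cmp, example) are
illustrative only, not part of this patch, and the sketch assumes the
jemtree.h header added by this commit has been included:

    #include <stdio.h>
    #include "jemtree.h"		/* header added by this commit */

    struct node {
            SPLAY_ENTRY(node) entry;	/* intrusive left/right links */
            int key;
    };

    /* three-way comparison on the key */
    static int
    node_cmp(struct node *a, struct node *b)
    {
            return (a->key < b->key ? -1 : a->key > b->key);
    }

    SPLAY_HEAD(node_tree, node) tree_head = SPLAY_INITIALIZER(&tree_head);
    SPLAY_PROTOTYPE(node_tree, node, entry, node_cmp)
    SPLAY_GENERATE(node_tree, node, entry, node_cmp)

    void
    example(struct node *n)
    {
            struct node query, *match, *it;

            SPLAY_INSERT(node_tree, &tree_head, n);	/* NULL, or an existing duplicate */
            query.key = n->key;
            match = SPLAY_FIND(node_tree, &tree_head, &query);
            if (match != NULL)
                    printf("found %d\n", match->key);
            SPLAY_FOREACH(it, node_tree, &tree_head)	/* in-order traversal */
                    printf("%d\n", it->key);
    }

Note that every lookup splays the found node to the root, so even SPLAY_FIND
mutates the tree; that is the memory-write behaviour the header comment warns
about.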
+/* Macros that define a red-black tree */
+#define RB_HEAD(name, type) \
+struct name { \
+ struct type *rbh_root; /* root of the tree */ \
+}
+
+#define RB_INITIALIZER(root) \
+ { NULL }
+
+#define RB_INIT(root) do { \
+ (root)->rbh_root = NULL; \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_BLACK 0
+#define RB_RED 1
+#define RB_ENTRY(type) \
+struct { \
+ struct type *rbe_left; /* left element */ \
+ struct type *rbe_right; /* right element */ \
+ struct type *rbe_parent; /* parent element */ \
+ int rbe_color; /* node color */ \
+}
+
+#define RB_LEFT(elm, field) (elm)->field.rbe_left
+#define RB_RIGHT(elm, field) (elm)->field.rbe_right
+#define RB_PARENT(elm, field) (elm)->field.rbe_parent
+#define RB_COLOR(elm, field) (elm)->field.rbe_color
+#define RB_ROOT(head) (head)->rbh_root
+#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
+
+#define RB_SET(elm, parent, field) do {				\
+ RB_PARENT(elm, field) = parent; \
+ RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
+ RB_COLOR(elm, field) = RB_RED; \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_SET_BLACKRED(black, red, field) do {			\
+ RB_COLOR(black, field) = RB_BLACK; \
+ RB_COLOR(red, field) = RB_RED; \
+} while (/*CONSTCOND*/ 0)
+
+#ifndef RB_AUGMENT
+#define RB_AUGMENT(x) do {} while (0)
+#endif
+
+#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
+ (tmp) = RB_RIGHT(elm, field); \
+ if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \
+ RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_LEFT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
+ (tmp) = RB_LEFT(elm, field); \
+ if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \
+ RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_RIGHT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (/*CONSTCOND*/ 0)
+
+/* Generates prototypes and inline functions */
+#define RB_PROTOTYPE(name, type, field, cmp)				\
+	RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
+#define RB_PROTOTYPE_STATIC(name, type, field, cmp)			\
+	RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static)
+#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
+attr void name##_RB_INSERT_COLOR(struct name *, struct type *);	\
+attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
+attr struct type *name##_RB_REMOVE(struct name *, struct type *); \
+attr struct type *name##_RB_INSERT(struct name *, struct type *); \
+attr struct type *name##_RB_FIND(struct name *, struct type *);	\
+attr struct type *name##_RB_NFIND(struct name *, struct type *); \
+attr struct type *name##_RB_NEXT(struct type *); \
+attr struct type *name##_RB_PREV(struct type *); \
+attr struct type *name##_RB_MINMAX(struct name *, int);		\
+ \
+
+/* Main rb operation.
+ * Moves node close to the key of elm to top
+ */
+#define RB_GENERATE(name, type, field, cmp)				\
+ RB_GENERATE_INTERNAL(name, type, field, cmp,)
+#define RB_GENERATE_STATIC(name, type, field, cmp)			\
+ RB_GENERATE_INTERNAL(name, type, field, cmp, static)
+#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \
+attr void \
+name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
+{ \
+ struct type *parent, *gparent, *tmp; \
+ while ((parent = RB_PARENT(elm, field)) != NULL && \
+ RB_COLOR(parent, field) == RB_RED) { \
+ gparent = RB_PARENT(parent, field); \
+ if (parent == RB_LEFT(gparent, field)) { \
+ tmp = RB_RIGHT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_RIGHT(parent, field) == elm) { \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_RIGHT(head, gparent, tmp, field); \
+ } else { \
+ tmp = RB_LEFT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_LEFT(parent, field) == elm) { \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_LEFT(head, gparent, tmp, field); \
+ } \
+ } \
+ RB_COLOR(head->rbh_root, field) = RB_BLACK; \
+} \
+ \
+attr void \
+name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \
+{ \
+ struct type *tmp; \
+ while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
+ elm != RB_ROOT(head)) { \
+ if (RB_LEFT(parent, field) == elm) { \
+ tmp = RB_RIGHT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+		    RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+		    RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_RIGHT(tmp, field) == NULL || \
+				    RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\
+ struct type *oleft; \
+ if ((oleft = RB_LEFT(tmp, field)) \
+ != NULL) \
+						RB_COLOR(oleft, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+					RB_ROTATE_RIGHT(head, tmp, oleft, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_RIGHT(tmp, field)) \
+					RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } else { \
+ tmp = RB_LEFT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+		    RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+		    RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_LEFT(tmp, field) == NULL || \
+				    RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\
+ struct type *oright; \
+ if ((oright = RB_RIGHT(tmp, field)) \
+ != NULL) \
+						RB_COLOR(oright, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+					RB_ROTATE_LEFT(head, tmp, oright, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_LEFT(tmp, field)) \
+					RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } \
+ } \
+ if (elm) \
+ RB_COLOR(elm, field) = RB_BLACK; \
+} \
+ \
+attr struct type * \
+name##_RB_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *child, *parent, *old = elm; \
+ int color; \
+ if (RB_LEFT(elm, field) == NULL) \
+ child = RB_RIGHT(elm, field); \
+ else if (RB_RIGHT(elm, field) == NULL) \
+ child = RB_LEFT(elm, field); \
+ else { \
+ struct type *left; \
+ elm = RB_RIGHT(elm, field); \
+ while ((left = RB_LEFT(elm, field)) != NULL) \
+ elm = left; \
+ child = RB_RIGHT(elm, field); \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+ if (RB_PARENT(elm, field) == old) \
+ parent = elm; \
+ (elm)->field = (old)->field; \
+ if (RB_PARENT(old, field)) { \
+ if (RB_LEFT(RB_PARENT(old, field), field) == old)\
+ RB_LEFT(RB_PARENT(old, field), field) = elm;\
+ else \
+ RB_RIGHT(RB_PARENT(old, field), field) = elm;\
+ RB_AUGMENT(RB_PARENT(old, field)); \
+ } else \
+ RB_ROOT(head) = elm; \
+ RB_PARENT(RB_LEFT(old, field), field) = elm; \
+ if (RB_RIGHT(old, field)) \
+ RB_PARENT(RB_RIGHT(old, field), field) = elm; \
+ if (parent) { \
+ left = parent; \
+ do { \
+ RB_AUGMENT(left); \
+ } while ((left = RB_PARENT(left, field)) != NULL); \
+ } \
+ goto color; \
+ } \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+color: \
+ if (color == RB_BLACK) \
+ name##_RB_REMOVE_COLOR(head, parent, child); \
+ return (old); \
+} \
+ \
+/* Inserts a node into the RB tree */ \
+attr struct type * \
+name##_RB_INSERT(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp; \
+ struct type *parent = NULL; \
+ int comp = 0; \
+ tmp = RB_ROOT(head); \
+ while (tmp) { \
+ parent = tmp; \
+ comp = (cmp)(elm, parent); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ RB_SET(elm, parent, field); \
+ if (parent != NULL) { \
+ if (comp < 0) \
+ RB_LEFT(parent, field) = elm; \
+ else \
+ RB_RIGHT(parent, field) = elm; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = elm; \
+ name##_RB_INSERT_COLOR(head, elm); \
+ return (NULL); \
+} \
+ \
+/* Finds the node with the same key as elm */ \
+attr struct type * \
+name##_RB_FIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (NULL); \
+} \
+ \
+/* Finds the first node greater than or equal to the search key */ \
+attr struct type * \
+name##_RB_NFIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *res = NULL; \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) { \
+ res = tmp; \
+ tmp = RB_LEFT(tmp, field); \
+ } \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (res); \
+} \
+ \
+/* ARGSUSED */ \
+attr struct type * \
+name##_RB_NEXT(struct type *elm) \
+{ \
+ if (RB_RIGHT(elm, field)) { \
+ elm = RB_RIGHT(elm, field); \
+ while (RB_LEFT(elm, field)) \
+ elm = RB_LEFT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+} \
+ \
+/* ARGSUSED */ \
+attr struct type * \
+name##_RB_PREV(struct type *elm) \
+{ \
+ if (RB_LEFT(elm, field)) { \
+ elm = RB_LEFT(elm, field); \
+ while (RB_RIGHT(elm, field)) \
+ elm = RB_RIGHT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field)))\
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+} \
+ \
+attr struct type * \
+name##_RB_MINMAX(struct name *head, int val) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *parent = NULL; \
+ while (tmp) { \
+ parent = tmp; \
+ if (val < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else \
+ tmp = RB_RIGHT(tmp, field); \
+ } \
+ return (parent); \
+}
+
+#define RB_NEGINF -1
+#define RB_INF 1
+
+#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
+#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
+#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
+#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
+#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
+#define RB_PREV(name, x, y) name##_RB_PREV(y)
+#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
+#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
+
+#define RB_FOREACH(x, name, head) \
+ for ((x) = RB_MIN(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_NEXT(x))
+
+#define RB_FOREACH_REVERSE(x, name, head) \
+ for ((x) = RB_MAX(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_PREV(x))
+
+#endif /* _SYS_TREE_H_ */
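For completeness, a similar sketch for the RB_* half of the API. As above,
the identifiers (rbnode, rb_tree, rbnode_cmp, rb_example) are made up for
illustration and assume jemtree.h is included:

    #include <stdio.h>
    #include "jemtree.h"		/* header added by this commit */

    struct rbnode {
            RB_ENTRY(rbnode) entry;	/* left/right/parent links plus colour */
            int key;
    };

    static int
    rbnode_cmp(struct rbnode *a, struct rbnode *b)
    {
            return (a->key < b->key ? -1 : a->key > b->key);
    }

    RB_HEAD(rb_tree, rbnode) rb_root = RB_INITIALIZER(&rb_root);
    RB_PROTOTYPE(rb_tree, rbnode, entry, rbnode_cmp)
    RB_GENERATE(rb_tree, rbnode, entry, rbnode_cmp)

    void
    rb_example(struct rbnode *n)
    {
            struct rbnode query, *match, *it;

            RB_INSERT(rb_tree, &rb_root, n);	/* NULL, or an existing duplicate */
            query.key = n->key;
            match = RB_FIND(rb_tree, &rb_root, &query);
            if (match != NULL)
                    printf("found %d\n", match->key);
            RB_FOREACH(it, rb_tree, &rb_root)	/* ascending key order */
                    printf("%d\n", it->key);
    }

Unlike the splay variant, RB_FIND does not restructure the tree, which is why
jemalloc can use the red-black flavour for read-mostly metadata while keeping
every operation within the O(lg n) bound stated in the header comment.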