/webview/native/Source/WTF/wtf/FastMalloc.cpp
C++ | 4699 lines | 3334 code | 645 blank | 720 comment | 542 complexity | 327fc0ee0e4f35cde8c4dcd7c8510a7d MD5 | raw file
Possible License(s): BSD-3-Clause, LGPL-2.1, GPL-2.0, LGPL-2.0
Large files are truncated, but you can click here to view the full file
- // Copyright (c) 2005, 2007, Google Inc.
- // All rights reserved.
- // Copyright (C) 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
- //
- // Redistribution and use in source and binary forms, with or without
- // modification, are permitted provided that the following conditions are
- // met:
- //
- // * Redistributions of source code must retain the above copyright
- // notice, this list of conditions and the following disclaimer.
- // * Redistributions in binary form must reproduce the above
- // copyright notice, this list of conditions and the following disclaimer
- // in the documentation and/or other materials provided with the
- // distribution.
- // * Neither the name of Google Inc. nor the names of its
- // contributors may be used to endorse or promote products derived from
- // this software without specific prior written permission.
- //
- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- // ---
- // Author: Sanjay Ghemawat <opensource@google.com>
- //
- // A malloc that uses a per-thread cache to satisfy small malloc requests.
- // (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
- //
- // See doc/tcmalloc.html for a high-level
- // description of how this malloc works.
- //
- // SYNCHRONIZATION
- // 1. The thread-specific lists are accessed without acquiring any locks.
- // This is safe because each such list is only accessed by one thread.
- // 2. We have a lock per central free-list, and hold it while manipulating
- // the central free list for a particular size.
- // 3. The central page allocator is protected by "pageheap_lock".
- // 4. The pagemap (which maps from page-number to descriptor),
- // can be read without holding any locks, and written while holding
- // the "pageheap_lock".
- // 5. To improve performance, a subset of the information one can get
- // from the pagemap is cached in a data structure, pagemap_cache_,
- // that atomically reads and writes its entries. This cache can be
- // read and written without locking.
- //
- // This multi-threaded access to the pagemap is safe for fairly
- // subtle reasons. We basically assume that when an object X is
- // allocated by thread A and deallocated by thread B, there must
- // have been appropriate synchronization in the handoff of object
- // X from thread A to thread B. The same logic applies to pagemap_cache_.
- //
- // THE PAGEID-TO-SIZECLASS CACHE
- // Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache
- // returns 0 for a particular PageID then that means "no information," not that
- // the sizeclass is 0. The cache may have stale information for pages that do
- // not hold the beginning of any free()'able object. Staleness is eliminated
- // in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
- // do_memalign() for all other relevant pages.
- //
- // TODO: Bias reclamation to larger addresses
- // TODO: implement mallinfo/mallopt
- // TODO: Better testing
- //
- // 9/28/2003 (new page-level allocator replaces ptmalloc2):
- // * malloc/free of small objects goes from ~300 ns to ~50 ns.
- // * allocation of a reasonably complicated struct
- // goes from about 1100 ns to about 300 ns.
- #include "config.h"
- #include "FastMalloc.h"
- #include "Assertions.h"
- #include <limits>
- #if OS(WINDOWS)
- #include <windows.h>
- #else
- #include <pthread.h>
- #endif
- #include <wtf/StdLibExtras.h>
- #include <string.h>
- #ifndef NO_TCMALLOC_SAMPLES
- #ifdef WTF_CHANGES
- #define NO_TCMALLOC_SAMPLES
- #endif
- #endif
- #if !(defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC) && defined(NDEBUG)
- #define FORCE_SYSTEM_MALLOC 0
- #else
- #define FORCE_SYSTEM_MALLOC 1
- #endif
- // Use a background thread to periodically scavenge memory to release back to the system
- #if PLATFORM(IOS)
- #define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 0
- #else
- #define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1
- #endif
- #ifndef NDEBUG
- namespace WTF {
- #if OS(WINDOWS)
- // TLS_OUT_OF_INDEXES is not defined on WinCE.
- #ifndef TLS_OUT_OF_INDEXES
- #define TLS_OUT_OF_INDEXES 0xffffffff
- #endif
- static DWORD isForibiddenTlsIndex = TLS_OUT_OF_INDEXES;
- static const LPVOID kTlsAllowValue = reinterpret_cast<LPVOID>(0); // Must be zero.
- static const LPVOID kTlsForbiddenValue = reinterpret_cast<LPVOID>(1);
- #if !ASSERT_DISABLED
- static bool isForbidden()
- {
- // By default, fastMalloc is allowed so we don't allocate the
- // tls index unless we're asked to make it forbidden. If TlsSetValue
- // has not been called on a thread, the value returned by TlsGetValue is 0.
- return (isForibiddenTlsIndex != TLS_OUT_OF_INDEXES) && (TlsGetValue(isForibiddenTlsIndex) == kTlsForbiddenValue);
- }
- #endif
- void fastMallocForbid()
- {
- if (isForibiddenTlsIndex == TLS_OUT_OF_INDEXES)
- isForibiddenTlsIndex = TlsAlloc(); // a little racey, but close enough for debug only
- TlsSetValue(isForibiddenTlsIndex, kTlsForbiddenValue);
- }
- void fastMallocAllow()
- {
- if (isForibiddenTlsIndex == TLS_OUT_OF_INDEXES)
- return;
- TlsSetValue(isForibiddenTlsIndex, kTlsAllowValue);
- }
- #else // !OS(WINDOWS)
- static pthread_key_t isForbiddenKey;
- static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
- static void initializeIsForbiddenKey()
- {
- pthread_key_create(&isForbiddenKey, 0);
- }
- #if !ASSERT_DISABLED
- static bool isForbidden()
- {
- pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
- return !!pthread_getspecific(isForbiddenKey);
- }
- #endif
- void fastMallocForbid()
- {
- pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
- pthread_setspecific(isForbiddenKey, &isForbiddenKey);
- }
- void fastMallocAllow()
- {
- pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
- pthread_setspecific(isForbiddenKey, 0);
- }
- #endif // OS(WINDOWS)
- } // namespace WTF
- #endif // NDEBUG
- namespace WTF {
namespace Internal {

#if !ENABLE(WTF_MALLOC_VALIDATION)
// When validation is disabled the header does not declare this symbol, so
// declare it here before defining it below.
WTF_EXPORT_PRIVATE void fastMallocMatchFailed(void*);
#else
COMPILE_ASSERT(((sizeof(ValidationHeader) % sizeof(AllocAlignmentInteger)) == 0), ValidationHeader_must_produce_correct_alignment);
#endif

// Called when an allocate/free pairing mismatch is detected; crashes
// deliberately so heap corruption is caught at the point of detection.
NO_RETURN_DUE_TO_CRASH void fastMallocMatchFailed(void*)
{
    CRASH();
}

} // namespace Internal
- void* fastZeroedMalloc(size_t n)
- {
- void* result = fastMalloc(n);
- memset(result, 0, n);
- return result;
- }
- char* fastStrDup(const char* src)
- {
- size_t len = strlen(src) + 1;
- char* dup = static_cast<char*>(fastMalloc(len));
- memcpy(dup, src, len);
- return dup;
- }
- TryMallocReturnValue tryFastZeroedMalloc(size_t n)
- {
- void* result;
- if (!tryFastMalloc(n).getValue(result))
- return 0;
- memset(result, 0, n);
- return result;
- }
- } // namespace WTF
- #if FORCE_SYSTEM_MALLOC
- #if OS(DARWIN)
- #include <malloc/malloc.h>
- #elif OS(WINDOWS)
- #include <malloc.h>
- #endif
- namespace WTF {
// System-malloc-backed tryFastMalloc: returns 0 on failure instead of
// crashing. With WTF_MALLOC_VALIDATION the payload is bracketed by a
// ValidationHeader and a suffix word used to detect overruns and
// mismatched allocate/free pairs.
TryMallocReturnValue tryFastMalloc(size_t n)
{
    ASSERT(!isForbidden());

#if ENABLE(WTF_MALLOC_VALIDATION)
    if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= n) // If overflow would occur...
        return 0;

    void* result = malloc(n + Internal::ValidationBufferSize);
    if (!result)
        return 0;
    // Fill in the guard header, then hand the caller the address just past it.
    Internal::ValidationHeader* header = static_cast<Internal::ValidationHeader*>(result);
    header->m_size = n;
    header->m_type = Internal::AllocTypeMalloc;
    header->m_prefix = static_cast<unsigned>(Internal::ValidationPrefix);
    result = header + 1;
    *Internal::fastMallocValidationSuffix(result) = Internal::ValidationSuffix;
    fastMallocValidate(result);
    return result;
#else
    return malloc(n);
#endif
}
- void* fastMalloc(size_t n)
- {
- ASSERT(!isForbidden());
- #if ENABLE(WTF_MALLOC_VALIDATION)
- TryMallocReturnValue returnValue = tryFastMalloc(n);
- void* result;
- if (!returnValue.getValue(result))
- CRASH();
- #else
- void* result = malloc(n);
- #endif
- if (!result)
- CRASH();
- return result;
- }
// System-malloc-backed tryFastCalloc: zero-filled allocation of
// n_elements * element_size bytes; returns 0 on failure or when the
// multiplication would overflow size_t.
TryMallocReturnValue tryFastCalloc(size_t n_elements, size_t element_size)
{
    ASSERT(!isForbidden());

#if ENABLE(WTF_MALLOC_VALIDATION)
    // Overflow requires at least two elements and a non-zero element size;
    // the division check then detects wrap-around.
    size_t totalBytes = n_elements * element_size;
    if (n_elements > 1 && element_size && (totalBytes / element_size) != n_elements)
        return 0;

    TryMallocReturnValue returnValue = tryFastMalloc(totalBytes);
    void* result;
    if (!returnValue.getValue(result))
        return 0;
    memset(result, 0, totalBytes);
    fastMallocValidate(result);
    return result;
#else
    return calloc(n_elements, element_size);
#endif
}
- void* fastCalloc(size_t n_elements, size_t element_size)
- {
- ASSERT(!isForbidden());
- #if ENABLE(WTF_MALLOC_VALIDATION)
- TryMallocReturnValue returnValue = tryFastCalloc(n_elements, element_size);
- void* result;
- if (!returnValue.getValue(result))
- CRASH();
- #else
- void* result = calloc(n_elements, element_size);
- #endif
- if (!result)
- CRASH();
- return result;
- }
// Releases a block obtained from fastMalloc/fastCalloc/fastRealloc.
// With validation enabled, verifies the allocate/free pairing first and
// scribbles 0xCC over the payload so use-after-free reads are obvious.
void fastFree(void* p)
{
    ASSERT(!isForbidden());

#if ENABLE(WTF_MALLOC_VALIDATION)
    if (!p)
        return;

    fastMallocMatchValidateFree(p, Internal::AllocTypeMalloc);
    Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(p);
    memset(p, 0xCC, header->m_size);
    // The system allocation actually starts at the header, not at p.
    free(header);
#else
    free(p);
#endif
}
// System-malloc-backed tryFastRealloc: returns 0 on failure, in which case
// the original block is left intact (standard realloc semantics).
TryMallocReturnValue tryFastRealloc(void* p, size_t n)
{
    ASSERT(!isForbidden());

#if ENABLE(WTF_MALLOC_VALIDATION)
    if (p) {
        if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= n) // If overflow would occur...
            return 0;
        fastMallocValidate(p);
        // Realloc the underlying allocation, which starts at the header.
        Internal::ValidationHeader* result = static_cast<Internal::ValidationHeader*>(realloc(Internal::fastMallocValidationHeader(p), n + Internal::ValidationBufferSize));
        if (!result)
            return 0;
        result->m_size = n;
        result = result + 1;
        *fastMallocValidationSuffix(result) = Internal::ValidationSuffix;
        fastMallocValidate(result);
        return result;
    } else {
        // NOTE(review): delegates to fastMalloc, which crashes instead of
        // returning 0 on failure -- stricter than "try" semantics suggest.
        return fastMalloc(n);
    }
#else
    return realloc(p, n);
#endif
}
- void* fastRealloc(void* p, size_t n)
- {
- ASSERT(!isForbidden());
- #if ENABLE(WTF_MALLOC_VALIDATION)
- TryMallocReturnValue returnValue = tryFastRealloc(p, n);
- void* result;
- if (!returnValue.getValue(result))
- CRASH();
- #else
- void* result = realloc(p, n);
- #endif
- if (!result)
- CRASH();
- return result;
- }
// With the system allocator in use there is no cached free memory to hand
// back, so this is a no-op.
void releaseFastMallocFreeMemory() { }

// No statistics are tracked for the system allocator; every field is zero.
FastMallocStatistics fastMallocStatistics()
{
    FastMallocStatistics statistics = { 0, 0, 0 };
    return statistics;
}
// Best-effort query of an allocation's usable size. On platforms with no
// malloc introspection API it returns 1 so callers still get a non-zero
// lower bound.
size_t fastMallocSize(const void* p)
{
#if ENABLE(WTF_MALLOC_VALIDATION)
    return Internal::fastMallocValidationHeader(const_cast<void*>(p))->m_size;
#elif OS(DARWIN)
    return malloc_size(p);
#elif OS(WINDOWS)
    return _msize(const_cast<void*>(p));
#else
    return 1;
#endif
}
- } // namespace WTF
- #if OS(DARWIN)
- // This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled.
- // It will never be used in this case, so it's type and value are less interesting than its presence.
- extern "C" WTF_EXPORT_PRIVATE const int jscore_fastmalloc_introspection = 0;
- #endif
- #else // FORCE_SYSTEM_MALLOC
- #include "AlwaysInline.h"
- #include "TCPackedCache.h"
- #include "TCPageMap.h"
- #include "TCSpinLock.h"
- #include "TCSystemAlloc.h"
- #include <algorithm>
- #include <pthread.h>
- #include <stdarg.h>
- #include <stddef.h>
- #include <stdint.h>
- #include <stdio.h>
- #if HAVE(ERRNO_H)
- #include <errno.h>
- #endif
- #if OS(UNIX)
- #include <unistd.h>
- #endif
- #if OS(WINDOWS)
- #ifndef WIN32_LEAN_AND_MEAN
- #define WIN32_LEAN_AND_MEAN
- #endif
- #include <windows.h>
- #endif
- #ifdef WTF_CHANGES
- #if OS(DARWIN)
- #include "MallocZoneSupport.h"
- #include <wtf/HashSet.h>
- #include <wtf/Vector.h>
- #endif
- #if HAVE(HEADER_DETECTION_H)
- #include "HeaderDetection.h"
- #endif
- #if HAVE(DISPATCH_H)
- #include <dispatch/dispatch.h>
- #endif
- #if HAVE(PTHREAD_MACHDEP_H)
- #include <System/pthread_machdep.h>
- #if defined(__PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0)
- #define WTF_USE_PTHREAD_GETSPECIFIC_DIRECT 1
- #endif
- #endif
- #ifndef PRIuS
- #define PRIuS "zu"
- #endif
- // Calling pthread_getspecific through a global function pointer is faster than a normal
- // call to the function on Mac OS X, and it's used in performance-critical code. So we
- // use a function pointer. But that's not necessarily faster on other platforms, and we had
- // problems with this technique on Windows, so we'll do this only on Mac OS X.
- #if OS(DARWIN)
- #if !USE(PTHREAD_GETSPECIFIC_DIRECT)
- static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
- #define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
- #else
- #define pthread_getspecific(key) _pthread_getspecific_direct(key)
- #define pthread_setspecific(key, val) _pthread_setspecific_direct(key, (val))
- #endif
- #endif
- #define DEFINE_VARIABLE(type, name, value, meaning) \
- namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
- type FLAGS_##name(value); \
- char FLAGS_no##name; \
- } \
- using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
-
- #define DEFINE_int64(name, value, meaning) \
- DEFINE_VARIABLE(int64_t, name, value, meaning)
-
- #define DEFINE_double(name, value, meaning) \
- DEFINE_VARIABLE(double, name, value, meaning)
- namespace WTF {
- #define malloc fastMalloc
- #define calloc fastCalloc
- #define free fastFree
- #define realloc fastRealloc
- #define MESSAGE LOG_ERROR
- #define CHECK_CONDITION ASSERT
- #if OS(DARWIN)
- struct Span;
- class TCMalloc_Central_FreeListPadded;
- class TCMalloc_PageHeap;
- class TCMalloc_ThreadCache;
- template <typename T> class PageHeapAllocator;
// Hooks FastMalloc's state into the Darwin malloc_zone_t machinery so that
// memory-inspection tools can see this allocator's regions. Most zone
// callbacks are stubs; the real work is in the methods defined elsewhere
// in this file (init, enumerate, size, zoneMalloc, ...).
class FastMallocZone {
public:
    static void init();

    static kern_return_t enumerate(task_t, void*, unsigned typeMmask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t);
    static size_t goodSize(malloc_zone_t*, size_t size) { return size; }
    static boolean_t check(malloc_zone_t*) { return true; }
    static void print(malloc_zone_t*, boolean_t) { }
    static void log(malloc_zone_t*, void*) { }
    static void forceLock(malloc_zone_t*) { }
    static void forceUnlock(malloc_zone_t*) { }
    // Statistics are not tracked per-zone; report all zeros.
    static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); }

private:
    FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*, PageHeapAllocator<Span>*, PageHeapAllocator<TCMalloc_ThreadCache>*);

    static size_t size(malloc_zone_t*, const void*);
    static void* zoneMalloc(malloc_zone_t*, size_t);
    static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
    static void zoneFree(malloc_zone_t*, void*);
    static void* zoneRealloc(malloc_zone_t*, void*, size_t);
    static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; }
    static void zoneDestroy(malloc_zone_t*) { }

    // m_zone must stay first so a malloc_zone_t* can be cast back to
    // FastMallocZone* -- NOTE(review): inferred from layout; confirm against
    // the zone callback implementations elsewhere in this file.
    malloc_zone_t m_zone;
    TCMalloc_PageHeap* m_pageHeap;
    TCMalloc_ThreadCache** m_threadHeaps;
    TCMalloc_Central_FreeListPadded* m_centralCaches;
    PageHeapAllocator<Span>* m_spanAllocator;
    PageHeapAllocator<TCMalloc_ThreadCache>* m_pageHeapAllocator;
};
- #endif
- #endif
- #ifndef WTF_CHANGES
- // This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
- // you're porting to a system where you really can't get a stacktrace.
- #ifdef NO_TCMALLOC_SAMPLES
- // We use #define so code compiles even if you #include stacktrace.h somehow.
- # define GetStackTrace(stack, depth, skip) (0)
- #else
- # include <google/stacktrace.h>
- #endif
- #endif
- // Even if we have support for thread-local storage in the compiler
- // and linker, the OS may not support it. We need to check that at
- // runtime. Right now, we have to keep a manual set of "bad" OSes.
#if defined(HAVE_TLS)
// Cached result of the runtime TLS probe below; defaults to "no TLS".
static bool kernel_supports_tls = false; // be conservative
static inline bool KernelSupportsTLS() {
  return kernel_supports_tls;
}
# if !HAVE_DECL_UNAME // if too old for uname, probably too old for TLS
static void CheckIfKernelSupportsTLS() {
  kernel_supports_tls = false;
}
# else
# include <sys/utsname.h> // DECL_UNAME checked for <sys/utsname.h> too
// Probes the running kernel via uname() and records whether it is new
// enough to support TLS. On Linux, TLS first shipped in kernel 2.6.0.
static void CheckIfKernelSupportsTLS() {
  struct utsname buf;
  if (uname(&buf) != 0) { // should be impossible
    MESSAGE("uname failed assuming no TLS support (errno=%d)\n", errno);
    kernel_supports_tls = false;
  } else if (strcasecmp(buf.sysname, "linux") == 0) {
    // The linux case: the first kernel to support TLS was 2.6.0
    if (buf.release[0] < '2' && buf.release[1] == '.') // 0.x or 1.x
      kernel_supports_tls = false;
    else if (buf.release[0] == '2' && buf.release[1] == '.' &&
             buf.release[2] >= '0' && buf.release[2] < '6' &&
             buf.release[3] == '.') // 2.0 - 2.5
      kernel_supports_tls = false;
    else
      kernel_supports_tls = true;
  } else { // some other kernel, we'll be optimistic
    kernel_supports_tls = true;
  }
  // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
}
# endif // HAVE_DECL_UNAME
#endif // HAVE_TLS
- // __THROW is defined in glibc systems. It means, counter-intuitively,
- // "This function will never throw an exception." It's an optional
- // optimization tool, but we may need to use it to match glibc prototypes.
- #ifndef __THROW // I guess we're not on a glibc system
- # define __THROW // __THROW is just an optimization, so ok to make it ""
- #endif
- //-------------------------------------------------------------------
- // Configuration
- //-------------------------------------------------------------------
- // Not all possible combinations of the following parameters make
- // sense. In particular, if kMaxSize increases, you may have to
- // increase kNumClasses as well.
// Core geometry: 4KB pages, small objects up to 8 pages (32KB), 8-byte
// minimum alignment, and 68 size classes (validated by InitSizeClasses()).
static const size_t kPageShift = 12;
static const size_t kPageSize = 1 << kPageShift;
static const size_t kMaxSize = 8u * kPageSize;
static const size_t kAlignShift = 3;
static const size_t kAlignment = 1 << kAlignShift;
static const size_t kNumClasses = 68;

// Allocates a big block of memory for the pagemap once we reach more than
// 128MB
static const size_t kPageMapBigAllocationThreshold = 128 << 20;

// Minimum number of pages to fetch from system at a time. Must be
// significantly bigger than kPageSize to amortize system-call
// overhead, and also to reduce external fragmentation. Also, we
// should keep this value big because various incarnations of Linux
// have small limits on the number of mmap() regions per
// address-space.
static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);

// Number of objects to move between a per-thread list and a central
// list in one shot. We want this to be not too small so we can
// amortize the lock overhead for accessing the central list. Making
// it too big may temporarily cause unnecessary memory wastage in the
// per-thread free list until the scavenger cleans up the list.
// (Filled in per size class by InitSizeClasses() via NumMoveSize().)
static int num_objects_to_move[kNumClasses];

// Maximum length we allow a per-thread free-list to have before we
// move objects from it into the corresponding central free-list. We
// want this big to avoid locking the central free-list too often. It
// should not hurt to make this list somewhat big because the
// scavenging code will shrink it down when its contents are not in use.
static const int kMaxFreeListLength = 256;

// Lower and upper bounds on the per-thread cache sizes
static const size_t kMinThreadCacheSize = kMaxSize * 2;
#if PLATFORM(IOS)
static const size_t kMaxThreadCacheSize = 512 * 1024;
#else
static const size_t kMaxThreadCacheSize = 2 << 20;
#endif

// Default bound on the total amount of thread caches
static const size_t kDefaultOverallThreadCacheSize = 16 << 20;

// For all span-lengths < kMaxPages we keep an exact-size list.
// REQUIRED: kMaxPages >= kMinSystemAlloc;
static const size_t kMaxPages = kMinSystemAlloc;

/* The smallest prime > 2^n */
static int primes_list[] = {
    // Small values might cause high rates of sampling
    // and hence commented out.
    // 2, 5, 11, 17, 37, 67, 131, 257,
    // 521, 1031, 2053, 4099, 8209, 16411,
    32771, 65537, 131101, 262147, 524309, 1048583,
    2097169, 4194319, 8388617, 16777259, 33554467 };

// Twice the approximate gap between sampling actions.
// I.e., we take one sample approximately once every
// tcmalloc_sample_parameter/2
// bytes of allocation, i.e., ~ once every 128KB.
// Must be a prime number.
#ifdef NO_TCMALLOC_SAMPLES
DEFINE_int64(tcmalloc_sample_parameter, 0,
             "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
static size_t sample_period = 0;
#else
DEFINE_int64(tcmalloc_sample_parameter, 262147,
             "Twice the approximate gap between sampling actions."
             " Must be a prime number. Otherwise will be rounded up to a "
             " larger prime number");
static size_t sample_period = 262147;
#endif

// Protects sample_period above
static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;

// Parameters for controlling how fast memory is returned to the OS.
DEFINE_double(tcmalloc_release_rate, 1,
              "Rate at which we release unused memory to the system. "
              "Zero means we never release memory back to the system. "
              "Increase this flag to return memory faster; decrease it "
              "to return memory slower. Reasonable rates are in the "
              "range [0,10]");
- //-------------------------------------------------------------------
- // Mapping from size to size_class and vice versa
- //-------------------------------------------------------------------
- // Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
- // array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
- // So for these larger sizes we have an array indexed by ceil(size/128).
- //
- // We flatten both logical arrays into one physical array and use
- // arithmetic to compute an appropriate index. The constants used by
- // ClassIndex() were selected to make the flattening work.
- //
- // Examples:
- // Size Expression Index
- // -------------------------------------------------------
- // 0 (0 + 7) / 8 0
- // 1 (1 + 7) / 8 1
- // ...
- // 1024 (1024 + 7) / 8 128
- // 1025 (1025 + 127 + (120<<7)) / 128 129
- // ...
- // 32768 (32768 + 127 + (120<<7)) / 128 376
// Sizes <= kMaxSmallSize are bucketed by ceil(size/8); larger sizes by
// ceil(size/128), offset so both ranges share one flat class_array.
static const size_t kMaxSmallSize = 1024;
static const int shift_amount[2] = { 3, 7 }; // For divides by 8 or 128
static const int add_amount[2] = { 7, 127 + (120 << 7) };
static unsigned char class_array[377];

// Compute index of the class_array[] entry for a given size.
static inline int ClassIndex(size_t s) {
  const int bucket = (s > kMaxSmallSize) ? 1 : 0;
  return static_cast<int>((s + add_amount[bucket]) >> shift_amount[bucket]);
}
// Mapping from size class to max size storable in that class
// (both tables are populated by InitSizeClasses()).
static size_t class_to_size[kNumClasses];

// Mapping from size class to number of pages to allocate at a time
static size_t class_to_pages[kNumClasses];

// TransferCache is used to cache transfers of num_objects_to_move[size_class]
// back and forth between thread caches and the central cache for a given size
// class.
struct TCEntry {
  void *head; // Head of chain of objects.
  void *tail; // Tail of chain of objects.
};

// A central cache freelist can have anywhere from 0 to kNumTransferEntries
// slots to put link list chains into. To keep memory usage bounded the total
// number of TCEntries across size classes is fixed. Currently each size
// class is initially given one TCEntry which also means that the maximum any
// one class can have is kNumClasses.
static const int kNumTransferEntries = kNumClasses;
- // Note: the following only works for "n"s that fit in 32-bits, but
- // that is fine since we only use it for small sizes.
- static inline int LgFloor(size_t n) {
- int log = 0;
- for (int i = 4; i >= 0; --i) {
- int shift = (1 << i);
- size_t x = n >> shift;
- if (x != 0) {
- n = x;
- log += shift;
- }
- }
- ASSERT(n == 1);
- return log;
- }
// Minimal singly-linked-list helpers that store the "next" pointer directly
// in the first word of each element (the elements themselves are the nodes).

// Reads the next-pointer embedded in element.
static inline void *SLL_Next(void *element) {
  return *(static_cast<void**>(element));
}

// Writes the next-pointer embedded in element.
static inline void SLL_SetNext(void *element, void *next) {
  *(static_cast<void**>(element)) = next;
}

// Pushes element onto the front of *list.
static inline void SLL_Push(void **list, void *element) {
  SLL_SetNext(element, *list);
  *list = element;
}

// Pops and returns the front element of *list (list must be non-empty).
static inline void *SLL_Pop(void **list) {
  void *front = *list;
  *list = SLL_Next(front);
  return front;
}

// Detaches the first N elements of *head into the chain [*start, *end].
// *head is advanced past the removed range, and *end's next-pointer is
// cleared so the detached chain is properly terminated.
static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
  if (N == 0) {
    *start = NULL;
    *end = NULL;
    return;
  }

  // Walk to the Nth element.
  void *last = *head;
  int hops = N - 1;
  while (hops-- > 0)
    last = SLL_Next(last);

  *start = *head;
  *end = last;
  *head = SLL_Next(last);
  // Unlink range from list.
  SLL_SetNext(last, NULL);
}

// Prepends the chain [start, end] to *head; no-op for an empty chain.
static inline void SLL_PushRange(void **head, void *start, void *end) {
  if (!start) return;
  SLL_SetNext(end, *head);
  *head = start;
}

// Counts the elements reachable from head (0 for an empty list).
static inline size_t SLL_Size(void *head) {
  size_t count = 0;
  for (void *node = head; node; node = SLL_Next(node))
    ++count;
  return count;
}
// Setup helper functions.

// Maps a request size to its size class via the precomputed class_array
// (valid only after InitSizeClasses() has run).
static ALWAYS_INLINE size_t SizeClass(size_t size) {
  return class_array[ClassIndex(size)];
}

// Get the byte-size for a specified class
static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
  return class_to_size[cl];
}
- static int NumMoveSize(size_t size) {
- if (size == 0) return 0;
- // Use approx 64k transfers between thread and central caches.
- int num = static_cast<int>(64.0 * 1024.0 / size);
- if (num < 2) num = 2;
- // Clamp well below kMaxFreeListLength to avoid ping pong between central
- // and thread caches.
- if (num > static_cast<int>(0.8 * kMaxFreeListLength))
- num = static_cast<int>(0.8 * kMaxFreeListLength);
- // Also, avoid bringing in too many objects into small object free
- // lists. There are lots of such lists, and if we allow each one to
- // fetch too many at a time, we end up having to scavenge too often
- // (especially when there are lots of threads and each thread gets a
- // small allowance for its thread cache).
- //
- // TODO: Make thread cache free list sizes dynamic so that we do not
- // have to equally divide a fixed resource amongst lots of threads.
- if (num > 32) num = 32;
- return num;
- }
- // Initialize the mapping arrays
// Populates class_to_size[], class_to_pages[], class_array[] and
// num_objects_to_move[], then sanity-checks the tables. CRASHes on any
// inconsistency, since the allocator cannot run with bad tables.
static void InitSizeClasses() {
  // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
  if (ClassIndex(0) < 0) {
    MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
    CRASH();
  }
  if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) {
    MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
    CRASH();
  }

  // Compute the size classes we want to use
  size_t sc = 1;   // Next size class to assign
  unsigned char alignshift = kAlignShift;
  int last_lg = -1;
  for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
    int lg = LgFloor(size);
    if (lg > last_lg) {
      // Increase alignment every so often.
      //
      // Since we double the alignment every time size doubles and
      // size >= 128, this means that space wasted due to alignment is
      // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256
      // bytes, so the space wasted as a percentage starts falling for
      // sizes > 2K.
      if ((lg >= 7) && (alignshift < 8)) {
        alignshift++;
      }
      last_lg = lg;
    }

    // Allocate enough pages so leftover is less than 1/8 of total.
    // This bounds wasted space to at most 12.5%.
    size_t psize = kPageSize;
    while ((psize % size) > (psize >> 3)) {
      psize += kPageSize;
    }
    const size_t my_pages = psize >> kPageShift;

    if (sc > 1 && my_pages == class_to_pages[sc-1]) {
      // See if we can merge this into the previous class without
      // increasing the fragmentation of the previous class.
      const size_t my_objects = (my_pages << kPageShift) / size;
      const size_t prev_objects = (class_to_pages[sc-1] << kPageShift)
                                  / class_to_size[sc-1];
      if (my_objects == prev_objects) {
        // Adjust last class to include this size
        class_to_size[sc-1] = size;
        continue;
      }
    }

    // Add new class
    class_to_pages[sc] = my_pages;
    class_to_size[sc] = size;
    sc++;
  }
  // The loop above must produce exactly kNumClasses classes (class 0 is
  // reserved to mean "not a small-object size").
  if (sc != kNumClasses) {
    MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n",
            sc, int(kNumClasses));
    CRASH();
  }

  // Initialize the mapping arrays
  int next_size = 0;
  for (unsigned char c = 1; c < kNumClasses; c++) {
    const size_t max_size_in_class = class_to_size[c];
    for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) {
      class_array[ClassIndex(s)] = c;
    }
    next_size = static_cast<int>(max_size_in_class + kAlignment);
  }

  // Double-check sizes just to be safe
  for (size_t size = 0; size <= kMaxSize; size++) {
    const size_t sc = SizeClass(size);
    if (sc == 0) {
      MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
      CRASH();
    }
    if (sc > 1 && size <= class_to_size[sc-1]) {
      MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS
              "\n", sc, size);
      CRASH();
    }
    if (sc >= kNumClasses) {
      MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
      CRASH();
    }
    const size_t s = class_to_size[sc];
    if (size > s) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
      CRASH();
    }
    if (s == 0) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
      CRASH();
    }
  }

  // Initialize the num_objects_to_move array.
  for (size_t cl = 1; cl < kNumClasses; ++cl) {
    num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
  }

#ifndef WTF_CHANGES
  if (false) {
    // Dump class sizes and maximum external wastage per size class
    for (size_t cl = 1; cl < kNumClasses; ++cl) {
      const int alloc_size = class_to_pages[cl] << kPageShift;
      const int alloc_objs = alloc_size / class_to_size[cl];
      const int min_used = (class_to_size[cl-1] + 1) * alloc_objs;
      const int max_waste = alloc_size - min_used;
      MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
              int(cl),
              int(class_to_size[cl-1] + 1),
              int(class_to_size[cl]),
              int(class_to_pages[cl] << kPageShift),
              max_waste * 100.0 / alloc_size
              );
    }
  }
#endif
}
- // -------------------------------------------------------------------------
- // Simple allocator for objects of a specified type. External locking
- // is required before accessing one of these objects.
- // -------------------------------------------------------------------------
- // Metadata allocator -- keeps stats about how many bytes allocated
- static uint64_t metadata_system_bytes = 0;
- static void* MetaDataAlloc(size_t bytes) {
- void* result = TCMalloc_SystemAlloc(bytes, 0);
- if (result != NULL) {
- metadata_system_bytes += bytes;
- }
- return result;
- }
// Simple bump-pointer allocator for fixed-size metadata objects of type T.
// Memory is obtained from MetaDataAlloc() in kAllocIncrement chunks and is
// never returned to the system; freed objects go on an intrusive free list.
// External locking is required before accessing one of these objects.
template <class T>
class PageHeapAllocator {
 private:
  // How much to allocate from system at a time
  static const size_t kAllocIncrement = 32 << 10;

  // Aligned size of T
  static const size_t kAlignedSize
    = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);

  // Free area from which to carve new objects
  char* free_area_;
  size_t free_avail_;

  // Linked list of all regions allocated by this allocator.  The first
  // kAlignedSize bytes of each region are reserved to hold the link to
  // the next region.
  void* allocated_regions_;

  // Free list of already carved objects
  void* free_list_;

  // Number of allocated but unfreed objects
  int inuse_;

 public:
  void Init() {
    ASSERT(kAlignedSize <= kAllocIncrement);
    inuse_ = 0;
    allocated_regions_ = 0;
    free_area_ = NULL;
    free_avail_ = 0;
    free_list_ = NULL;
  }

  // Return a T-sized chunk.  Never returns NULL: if MetaDataAlloc()
  // fails while refilling, we CRASH().
  T* New() {
    // Consult free list
    void* result;
    if (free_list_ != NULL) {
      result = free_list_;
      free_list_ = *(reinterpret_cast<void**>(result));
    } else {
      if (free_avail_ < kAlignedSize) {
        // Need more room.  Note: any unused tail of the previous region
        // (less than kAlignedSize bytes) is abandoned here.
        char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
        if (!new_allocation)
          CRASH();

        // Thread the new region onto allocated_regions_, using the
        // region's first aligned slot as the link.
        *reinterpret_cast_ptr<void**>(new_allocation) = allocated_regions_;
        allocated_regions_ = new_allocation;
        free_area_ = new_allocation + kAlignedSize;
        free_avail_ = kAllocIncrement - kAlignedSize;
      }
      result = free_area_;
      free_area_ += kAlignedSize;
      free_avail_ -= kAlignedSize;
    }
    inuse_++;
    return reinterpret_cast<T*>(result);
  }

  // Push the object back on the intrusive free list.  The object's first
  // pointer-sized slot is reused as the list link; memory is never
  // returned to the system.
  void Delete(T* p) {
    *(reinterpret_cast<void**>(p)) = free_list_;
    free_list_ = p;
    inuse_--;
  }

  int inuse() const { return inuse_; }

#if defined(WTF_CHANGES) && OS(DARWIN)
  // Walk allocated_regions_ via "reader" and report each kAllocIncrement
  // region to the recorder (presumably for Darwin malloc-zone
  // introspection -- see FastMallocZone elsewhere in this file).
  template <class Recorder>
  void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
  {
      for (void* adminAllocation = allocated_regions_; adminAllocation; adminAllocation = reader.nextEntryInLinkedList(reinterpret_cast<void**>(adminAllocation)))
          recorder.recordRegion(reinterpret_cast<vm_address_t>(adminAllocation), kAllocIncrement);
  }
#endif
};
- // -------------------------------------------------------------------------
- // Span - a contiguous run of pages
- // -------------------------------------------------------------------------
// Type that can hold a page number
typedef uintptr_t PageID;

// Type that can hold the length of a run of pages
typedef uintptr_t Length;

// Largest page count representable once shifted back into a byte count.
static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
- // Convert byte size into pages. This won't overflow, but may return
- // an unreasonably large value if bytes is huge enough.
- static inline Length pages(size_t bytes) {
- return (bytes >> kPageShift) +
- ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
- }
- // Convert a user size into the number of bytes that will actually be
- // allocated
- static size_t AllocationSize(size_t bytes) {
- if (bytes > kMaxSize) {
- // Large object: we allocate an integral number of pages
- ASSERT(bytes <= (kMaxValidPages << kPageShift));
- return pages(bytes) << kPageShift;
- } else {
- // Small object: find the size class to which it belongs
- return ByteSizeForClass(SizeClass(bytes));
- }
- }
// Information kept for a span (a contiguous run of pages).
// NOTE: the bitfield layout below is deliberate -- flags, sizeclass and
// refcount are packed together; do not reorder.
struct Span {
  PageID start;               // Starting page number
  Length length;              // Number of pages in span
  Span* next;                 // Used when in link list
  Span* prev;                 // Used when in link list
  void* objects;              // Linked list of free objects
  unsigned int free : 1;      // Is the span free
#ifndef NO_TCMALLOC_SAMPLES
  unsigned int sample : 1;    // Sampled object?
#endif
  unsigned int sizeclass : 8; // Size-class for small objects (or 0)
  unsigned int refcount : 11; // Number of non-free objects
  bool decommitted : 1;       // Backing pages handed back to the system
                              // (see ASSERT_SPAN_COMMITTED / "returned" lists)
#undef SPAN_HISTORY           // Force-disable the per-span debug log below
#ifdef SPAN_HISTORY
  // For debugging, we can keep a log of events per span
  int nexthistory;
  char history[64];
  int value[64];
#endif
};
// Assert that a span's memory has not been decommitted.
#define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted)

#ifdef SPAN_HISTORY
// Append one (op, value) record to the span's circular debug log.
void Event(Span* span, char op, int v = 0) {
  span->history[span->nexthistory] = op;
  span->value[span->nexthistory] = v;
  span->nexthistory++;
  // Wrap around: history is char[64], so sizeof == element count.
  if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
}
#else
// Span history disabled: compile Event() calls away entirely.
#define Event(s,o,v) ((void) 0)
#endif
- // Allocator/deallocator for spans
- static PageHeapAllocator<Span> span_allocator;
- static Span* NewSpan(PageID p, Length len) {
- Span* result = span_allocator.New();
- memset(result, 0, sizeof(*result));
- result->start = p;
- result->length = len;
- #ifdef SPAN_HISTORY
- result->nexthistory = 0;
- #endif
- return result;
- }
// Return a span descriptor to span_allocator's free list.
static inline void DeleteSpan(Span* span) {
#ifndef NDEBUG
  // In debug mode, trash the contents of deleted Spans so stale uses fail fast
  memset(span, 0x3f, sizeof(*span));
#endif
  span_allocator.Delete(span);
}
- // -------------------------------------------------------------------------
- // Doubly linked list of spans.
- // -------------------------------------------------------------------------
- static inline void DLL_Init(Span* list) {
- list->next = list;
- list->prev = list;
- }
- static inline void DLL_Remove(Span* span) {
- span->prev->next = span->next;
- span->next->prev = span->prev;
- span->prev = NULL;
- span->next = NULL;
- }
// A circular list is empty exactly when the sentinel points to itself.
static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list) {
  return list->next == list;
}
- static int DLL_Length(const Span* list) {
- int result = 0;
- for (Span* s = list->next; s != list; s = s->next) {
- result++;
- }
- return result;
- }
#if 0 /* Not needed at the moment -- causes compiler warnings if not used */
// Debug helper: print every span on "list", prefixed by "label".
static void DLL_Print(const char* label, const Span* list) {
  MESSAGE("%-10s %p:", label, list);
  for (const Span* s = list->next; s != list; s = s->next) {
    MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
  }
  MESSAGE("\n");
}
#endif
- static inline void DLL_Prepend(Span* list, Span* span) {
- ASSERT(span->next == NULL);
- ASSERT(span->prev == NULL);
- span->next = list->next;
- span->prev = list;
- list->next->prev = span;
- list->next = span;
- }
- // -------------------------------------------------------------------------
- // Stack traces kept for sampled allocations
- // The following state is protected by pageheap_lock_.
- // -------------------------------------------------------------------------
// size/depth are made the same size as a pointer so that some generic
// code below can conveniently cast them back and forth to void*.
static const int kMaxStackDepth = 31;

// One record per sampled allocation: the requested size plus the
// captured call-stack PCs.
struct StackTrace {
  uintptr_t size;          // Size of object
  uintptr_t depth;         // Number of PC values stored in array below
  void* stack[kMaxStackDepth];
};
static PageHeapAllocator<StackTrace> stacktrace_allocator;
// Presumably the sentinel/head span for sampled allocations -- confirm
// against the NO_TCMALLOC_SAMPLES code paths elsewhere in this file.
static Span sampled_objects;
- // -------------------------------------------------------------------------
- // Map from page-id to per-page data
- // -------------------------------------------------------------------------
// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
// because sometimes the sizeclass is all the information we need.

// Selector class -- general selector uses 3-level map.  BITS is the
// number of significant bits in a pointer on this platform.
template <int BITS> class MapSelector {
 public:
  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
  typedef PackedCache<BITS, uint64_t> CacheType;
};

#if defined(WTF_CHANGES)
#if CPU(X86_64)
// On all known X86-64 platforms, the upper 16 bits are always unused and therefore
// can be excluded from the PageMap key.
// See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
static const size_t kBitsUnusedOn64Bit = 16;
#else
static const size_t kBitsUnusedOn64Bit = 0;
#endif

// A three-level map for 64-bit machines, keyed on the usable address bits only
template <> class MapSelector<64> {
 public:
  typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
  typedef PackedCache<64, uint64_t> CacheType;
};
#endif

// A two-level map for 32-bit machines
template <> class MapSelector<32> {
 public:
  typedef TCMalloc_PageMap2<32 - kPageShift> Type;
  typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
};
- // -------------------------------------------------------------------------
- // Page-level allocator
- // * Eager coalescing
- //
// Heap for page-level allocation.  We allow allocating and freeing
// contiguous runs of pages (each such run is called a "span").
- // -------------------------------------------------------------------------
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
// The page heap maintains a free list for spans that are no longer in use by
// the central cache or any thread caches. We use a background thread to
// periodically scan the free list and release a percentage of it back to the OS.
// If free_committed_pages_ exceeds kMinimumFreeCommittedPageCount, the
// background thread:
// - wakes up
// - pauses for kScavengeDelayInSeconds
// - returns to the OS a percentage of the memory that remained unused during
//   that pause (kScavengePercentage * min_free_committed_pages_since_last_scavenge_)
// The goal of this strategy is to reduce memory pressure in a timely fashion
// while avoiding thrashing the OS allocator.

// Time delay before the page heap scavenger will consider returning pages to
// the OS.
static const int kScavengeDelayInSeconds = 2;

// Approximate percentage of free committed pages to return to the OS in one
// scavenge.
static const float kScavengePercentage = .5f;

// Number of span lists to keep spans in when memory is returned.
static const int kMinSpanListsWithSpans = 32;

// Number of free committed pages that we want to keep around.  The minimum
// number of pages used when there is 1 span in each of the first
// kMinSpanListsWithSpans spanlists, i.e. the triangular number
// 1 + 2 + ... + kMinSpanListsWithSpans = 32 * 33 / 2 = 528 pages.
static const size_t kMinimumFreeCommittedPageCount = kMinSpanListsWithSpans * ((1.0f+kMinSpanListsWithSpans) / 2.0f);
#endif
// Lock guarding the page heap (asserted held by the scavenger accessors below).
static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;

// Page-level heap: hands out and takes back contiguous runs of pages
// ("spans"), eagerly coalescing freed neighbors and optionally scavenging
// unused committed pages back to the OS.
class TCMalloc_PageHeap {
 public:
  void init();

  // Allocate a run of "n" pages.  Returns zero if out of memory.
  Span* New(Length n);

  // Delete the span "[p, p+n-1]".
  // REQUIRES: span was returned by earlier call to New() and
  //           has not yet been deleted.
  void Delete(Span* span);

  // Mark an allocated span as being used for small objects of the
  // specified size-class.
  // REQUIRES: span was returned by an earlier call to New()
  //           and has not yet been deleted.
  void RegisterSizeClass(Span* span, size_t sc);

  // Split an allocated span into two spans: one of length "n" pages
  // followed by another span of length "span->length - n" pages.
  // Modifies "*span" to point to the first span of length "n" pages.
  // Returns a pointer to the second span.
  //
  // REQUIRES: "0 < n < span->length"
  // REQUIRES: !span->free
  // REQUIRES: span->sizeclass == 0
  Span* Split(Span* span, Length n);

  // Return the descriptor for the specified page.
  inline Span* GetDescriptor(PageID p) const {
    return reinterpret_cast<Span*>(pagemap_.get(p));
  }

#ifdef WTF_CHANGES
  // Like GetDescriptor(), but first ensures the pagemap node for "p"
  // exists so the lookup cannot touch unmapped metadata.
  inline Span* GetDescriptorEnsureSafe(PageID p)
  {
      pagemap_.Ensure(p, 1);
      return GetDescriptor(p);
  }

  size_t ReturnedBytes() const;
#endif

  // Dump state to stderr
#ifndef WTF_CHANGES
  void Dump(TCMalloc_Printer* out);
#endif

  // Return number of bytes allocated from system
  inline uint64_t SystemBytes() const { return system_bytes_; }

  // Return number of free bytes in heap
  uint64_t FreeBytes() const {
    return (static_cast<uint64_t>(free_pages_) << kPageShift);
  }

  bool Check();
  size_t CheckList(Span* list, Length min_pages, Length max_pages, bool decommitted);

  // Release all pages on the free list for reuse by the OS:
  void ReleaseFreePages();
  void ReleaseFreeList(Span*, Span*);

  // Return 0 if we have no information, or else the correct sizeclass for p.
  // Reads and writes to pagemap_cache_ do not require locking.
  // The entries are 64 bits on 64-bit hardware and 16 bits on
  // 32-bit hardware, and we don't mind raciness as long as each read of
  // an entry yields a valid entry, not a partially updated entry.
  size_t GetSizeClassIfCached(PageID p) const {
    return pagemap_cache_.GetOrDefault(p, 0);
  }
  void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }

 private:
  // Pick the appropriate map and cache types based on pointer size
  typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
  typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
  PageMap pagemap_;
  mutable PageMapCache pagemap_cache_;

  // We segregate spans of a given size into two circular linked
  // lists: one for normal spans, and one for spans whose memory
  // has been returned to the system.
  struct SpanList {
    Span normal;
    Span returned;
  };

  // List of free spans of length >= kMaxPages
  SpanList large_;

  // Array mapping from span length to a doubly linked list of free spans
  SpanList free_[kMaxPages];

  // Number of pages kept in free lists
  uintptr_t free_pages_;

  // Bytes allocated from system
  uint64_t system_bytes_;

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  // Number of pages kept in free lists that are still committed.
  Length free_committed_pages_;

  // Minimum number of free committed pages since last scavenge. (Can be 0 if
  // we've committed new pages since the last scavenge.)
  Length min_free_committed_pages_since_last_scavenge_;
#endif

  bool GrowHeap(Length n);

  // REQUIRES   span->length >= n
  // Remove span from its free list, and move any leftover part of
  // span into appropriate free lists.  Also update "span" to have
  // length exactly "n" and mark it as non-free so it can be returned
  // to the client.
  //
  // "released" is true iff "span" was found on a "returned" list.
  void Carve(Span* span, Length n, bool released);

  // Record "span" in the pagemap under its first and last page so
  // neighbor lookups from either end find it.
  void RecordSpan(Span* span) {
    pagemap_.set(span->start, span);
    if (span->length > 1) {
      pagemap_.set(span->start + span->length - 1, span);
    }
  }

  // Allocate a large span of length == n.  If successful, returns a
  // span of exactly the specified length.  Else, returns NULL.
  Span* AllocLarge(Length n);

#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  // Incrementally release some memory to the system.
  // IncrementalScavenge(n) is called whenever n pages are freed.
  void IncrementalScavenge(Length n);
#endif

  // Number of pages to deallocate before doing more scavenging
  int64_t scavenge_counter_;

  // Index of last free list we scavenged
  size_t scavenge_index_;

#if defined(WTF_CHANGES) && OS(DARWIN)
  friend class FastMallocZone;
#endif

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  void initializeScavenger();
  ALWAYS_INLINE void signalScavenger();
  void scavenge();
  ALWAYS_INLINE bool shouldScavenge() const;

#if HAVE(DISPATCH_H) || OS(WINDOWS)
  void periodicScavenge();
  ALWAYS_INLINE bool isScavengerSuspended();
  ALWAYS_INLINE void scheduleScavenger();
  ALWAYS_INLINE void rescheduleScavenger();
  ALWAYS_INLINE void suspendScavenger();
#endif

  // Scavenger backing: libdispatch timer on Darwin, waitable timer on
  // Windows, otherwise a dedicated pthread with mutex/condvar.
#if HAVE(DISPATCH_H)
  dispatch_queue_t m_scavengeQueue;
  dispatch_source_t m_scavengeTimer;
  bool m_scavengingSuspended;
#elif OS(WINDOWS)
  static void CALLBACK scavengerTimerFired(void*, BOOLEAN);
  HANDLE m_scavengeQueueTimer;
#else
  static NO_RETURN_WITH_VALUE void* runScavengerThread(void*);
  NO_RETURN void scavengerThread();

  // Keeps track of whether the background thread is actively scavenging memory every kScavengeDelayInSeconds, or
  // it's blocked waiting for more pages to be deleted.
  bool m_scavengeThreadActive;

  pthread_mutex_t m_scavengeMutex;
  pthread_cond_t m_scavengeCondition;
#endif

#endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
};
- void TCMalloc_PageHeap::init()
- {
- pagemap_.init(MetaDataAlloc);
- pagemap_cache_ = PageMapCache(0);
- free_pages_ = 0;
- system_bytes_ = 0;
- #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- free_committed_pages_ = 0;
- min_free_committed_pages_since_last_scavenge_ = 0;
- #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- scavenge_counter_ = 0;
- // Start scavenging at kMaxPages list
- scavenge_index_ = kMaxPages-1;
- COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
- DLL_Init(&large_.normal);
- DLL_Init(&large_.returned);
- for (size_t i = 0; i < kMaxPages; i++) {
- DLL_Init(&free_[i].normal);
- DLL_Init(&free_[i].returned);
- }
- #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- initializeScavenger();
- #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- }
- #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- #if HAVE(DISPATCH_H)
// Create the libdispatch timer that drives periodicScavenge().  The timer
// source is created suspended; scavenging only starts once something calls
// dispatch_resume() on it (presumably scheduleScavenger() -- confirm below).
void TCMalloc_PageHeap::initializeScavenger()
{
    // NOTE(review): the queue label spells "Savenger" (sic).  Fixing it would
    // change a runtime-visible string that tools may match on, so it is
    // deliberately left unchanged here.
    m_scavengeQueue = dispatch_queue_create("com.apple.JavaScriptCore.FastMallocSavenger", NULL);
    m_scavengeTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, m_scavengeQueue);
    // First fire one delay from now, then every kScavengeDelayInSeconds,
    // with 1ms of leeway for the kernel to coalesce timers.
    dispatch_time_t startTime = dispatch_time(DISPATCH_TIME_NOW, kScavengeDelayInSeconds * NSEC_PER_SEC);
    dispatch_source_set_timer(m_scavengeTimer, startTime, kScavengeDelayInSeconds * NSEC_PER_SEC, 1000 * NSEC_PER_USEC);
    dispatch_source_set_event_handler(m_scavengeTimer, ^{ periodicScavenge(); });
    m_scavengingSuspended = true;
}
// Whether the scavenger timer is currently suspended.
// REQUIRES: pageheap_lock held.
ALWAYS_INLINE bool TCMalloc_PageHeap::isScavengerSuspended()
{
    ASSERT(pageheap_lock.IsHeld());
    return m_scavengingSuspended;
}
- ALWAYS_INLINE void TCMalloc_PageHeap::scheduleScavenger()
- {
- ASSERT(pageheap_lock.IsHeld());
- m_scavengingSuspended = false;
- dispatch_resume(m_scav…
Large files files are truncated, but you can click here to view the full file