
Clean up a few files in TCMalloc. Increase the maximum local block cache size.
1 parent 5865e10, commit 89b0544f8faea433013bd61c8117524a2e7ce71e, @dmbryson committed Apr 11, 2012
37 libs/tcmalloc-1.4/TCMalloc-1.4.xcodeproj/project.pbxproj
@@ -138,43 +138,43 @@
70FB4E9D1386EBC100D8F6F0 /* atomicops-internals-x86.h */,
70FB4E9E1386EBC100D8F6F0 /* atomicops.h */,
70FB4E9F1386EBC100D8F6F0 /* basictypes.h */,
- 70FB4EA01386EBC100D8F6F0 /* central_freelist.cc */,
70FB4EA11386EBC100D8F6F0 /* central_freelist.h */,
+ 70FB4EA01386EBC100D8F6F0 /* central_freelist.cc */,
70FB4EA21386EBC100D8F6F0 /* commandlineflags.h */,
- 70FB4EA31386EBC100D8F6F0 /* common.cc */,
70FB4EA41386EBC100D8F6F0 /* common.h */,
+ 70FB4EA31386EBC100D8F6F0 /* common.cc */,
70FB4EA51386EBC100D8F6F0 /* cycleclock.h */,
- 70FB4EA61386EBC100D8F6F0 /* internal_logging.cc */,
70FB4EA71386EBC100D8F6F0 /* internal_logging.h */,
+ 70FB4EA61386EBC100D8F6F0 /* internal_logging.cc */,
70FB4EA81386EBC100D8F6F0 /* linked_list.h */,
70FB4EA91386EBC100D8F6F0 /* linux_syscall_support.h */,
- 70FB4EAA1386EBC100D8F6F0 /* malloc_extension.cc */,
70FB4EAB1386EBC100D8F6F0 /* malloc_extension.h */,
+ 70FB4EAA1386EBC100D8F6F0 /* malloc_extension.cc */,
70FB4EAC1386EBC100D8F6F0 /* packed-cache-inl.h */,
- 70FB4EAD1386EBC100D8F6F0 /* page_heap.cc */,
70FB4EAE1386EBC100D8F6F0 /* page_heap.h */,
+ 70FB4EAD1386EBC100D8F6F0 /* page_heap.cc */,
70FB4EAF1386EBC100D8F6F0 /* page_heap_allocator.h */,
70FB4EB01386EBC100D8F6F0 /* pagemap.h */,
- 70FB4EB11386EBC100D8F6F0 /* span.cc */,
70FB4EB21386EBC100D8F6F0 /* span.h */,
- 70FB4EB31386EBC100D8F6F0 /* spinlock.cc */,
+ 70FB4EB11386EBC100D8F6F0 /* span.cc */,
70FB4EB41386EBC100D8F6F0 /* spinlock.h */,
+ 70FB4EB31386EBC100D8F6F0 /* spinlock.cc */,
70FB4EB51386EBC100D8F6F0 /* spinlock_linux-inl.h */,
70FB4EB61386EBC100D8F6F0 /* spinlock_posix-inl.h */,
70FB4EB71386EBC100D8F6F0 /* spinlock_win32-inl.h */,
- 70FB4EB81386EBC100D8F6F0 /* static_vars.cc */,
70FB4EB91386EBC100D8F6F0 /* static_vars.h */,
- 70FB4EBA1386EBC100D8F6F0 /* sysinfo.cc */,
+ 70FB4EB81386EBC100D8F6F0 /* static_vars.cc */,
70FB4EBB1386EBC100D8F6F0 /* sysinfo.h */,
- 70FB4EBC1386EBC100D8F6F0 /* system-alloc.cc */,
+ 70FB4EBA1386EBC100D8F6F0 /* sysinfo.cc */,
70FB4EBD1386EBC100D8F6F0 /* system-alloc.h */,
+ 70FB4EBC1386EBC100D8F6F0 /* system-alloc.cc */,
70FB4EBE1386EBC100D8F6F0 /* tcmalloc-platform.h */,
- 70FB4EBF1386EBC100D8F6F0 /* tcmalloc.cc */,
70FB4EC01386EBC100D8F6F0 /* tcmalloc.h */,
+ 70FB4EBF1386EBC100D8F6F0 /* tcmalloc.cc */,
70FB4EC11386EBC100D8F6F0 /* tcmalloc_guard.h */,
70FB4EC21386EBC100D8F6F0 /* thread_annotations.h */,
- 70FB4EC31386EBC100D8F6F0 /* thread_cache.cc */,
70FB4EC41386EBC100D8F6F0 /* thread_cache.h */,
+ 70FB4EC31386EBC100D8F6F0 /* thread_cache.cc */,
);
path = src;
sourceTree = "<group>";
@@ -291,7 +291,7 @@
70FB4E931386EB5100D8F6F0 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
- ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";
+ ARCHS = "$(NATIVE_ARCH_ACTUAL)";
COPY_PHASE_STRIP = NO;
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
GCC_C_LANGUAGE_STANDARD = gnu99;
@@ -302,9 +302,9 @@
GCC_VERSION = com.apple.compilers.llvm.clang.1_0;
GCC_WARN_ABOUT_RETURN_TYPE = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
- MACOSX_DEPLOYMENT_TARGET = 10.6;
+ MACOSX_DEPLOYMENT_TARGET = 10.7;
ONLY_ACTIVE_ARCH = YES;
- SDKROOT = "";
+ SDKROOT = macosx10.7;
};
name = Debug;
};
@@ -314,19 +314,16 @@
ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
GCC_C_LANGUAGE_STANDARD = gnu99;
- GCC_DYNAMIC_NO_PIC = YES;
GCC_ENABLE_CPP_EXCEPTIONS = NO;
- GCC_ENABLE_SSE3_EXTENSIONS = YES;
- GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS = YES;
GCC_OPTIMIZATION_LEVEL = 3;
GCC_PREPROCESSOR_DEFINITIONS = NDEBUG;
GCC_STRICT_ALIASING = YES;
GCC_UNROLL_LOOPS = YES;
GCC_VERSION = com.apple.compilers.llvm.clang.1_0;
GCC_WARN_ABOUT_RETURN_TYPE = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
- MACOSX_DEPLOYMENT_TARGET = 10.6;
- SDKROOT = "";
+ MACOSX_DEPLOYMENT_TARGET = 10.7;
+ SDKROOT = macosx10.7;
};
name = Release;
};
250 libs/tcmalloc-1.4/src/central_freelist.h
@@ -62,134 +62,132 @@
namespace tcmalloc {
-// Data kept per size-class in central cache.
-class CentralFreeList {
- public:
- void Init(size_t cl);
-
- // These methods all do internal locking.
-
- // Insert the specified range into the central freelist. N is the number of
- // elements in the range. RemoveRange() is the opposite operation.
- void InsertRange(void *start, void *end, int N);
-
- // Returns the actual number of fetched elements and sets *start and *end.
- int RemoveRange(void **start, void **end, int N);
-
- // Returns the number of free objects in cache.
- int length() {
- SpinLockHolder h(&lock_);
- return counter_;
- }
-
- // Returns the number of free objects in the transfer cache.
- int tc_length();
-
- private:
- // TransferCache is used to cache transfers of
- // sizemap.num_objects_to_move(size_class) back and forth between
- // thread caches and the central cache for a given size class.
- struct TCEntry {
- void *head; // Head of chain of objects.
- void *tail; // Tail of chain of objects.
+ // Data kept per size-class in central cache.
+ class CentralFreeList
+ {
+ public:
+ void Init(size_t cl);
+
+ // These methods all do internal locking.
+
+ // Insert the specified range into the central freelist. N is the number of
+ // elements in the range. RemoveRange() is the opposite operation.
+ void InsertRange(void *start, void *end, int N);
+
+ // Returns the actual number of fetched elements and sets *start and *end.
+ int RemoveRange(void **start, void **end, int N);
+
+ // Returns the number of free objects in cache.
+ int length() {
+ SpinLockHolder h(&lock_);
+ return counter_;
+ }
+
+ // Returns the number of free objects in the transfer cache.
+ int tc_length();
+
+ private:
+ // TransferCache is used to cache transfers of
+ // sizemap.num_objects_to_move(size_class) back and forth between
+ // thread caches and the central cache for a given size class.
+ struct TCEntry {
+ void *head; // Head of chain of objects.
+ void *tail; // Tail of chain of objects.
+ };
+
+ // A central cache freelist can have anywhere from 0 to kNumTransferEntries
+ // slots to put linked-list chains into. To keep memory usage bounded the total
+ // number of TCEntries across size classes is fixed. Currently each size
+ // class is initially given one TCEntry which also means that the maximum any
+ // one class can have is kNumClasses.
+ static const int kNumTransferEntries = kNumClasses;
+
+ // REQUIRES: lock_ is held
+ // Remove object from cache and return.
+ // Return NULL if no free entries in cache.
+ void* FetchFromSpans() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ // REQUIRES: lock_ is held
+ // Remove object from cache and return. Fetches
+ // from pageheap if cache is empty. Only returns
+ // NULL on allocation failure.
+ void* FetchFromSpansSafe() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ // REQUIRES: lock_ is held
+ // Release a linked list of objects to spans.
+ // May temporarily release lock_.
+ void ReleaseListToSpans(void *start) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ // REQUIRES: lock_ is held
+ // Release an object to spans.
+ // May temporarily release lock_.
+ void ReleaseToSpans(void* object) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ // REQUIRES: lock_ is held
+ // Populate cache by fetching from the page heap.
+ // May temporarily release lock_.
+ void Populate() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ // REQUIRES: lock_ is held.
+ // Tries to make room for a TCEntry. If the cache is full it will try to
+ // expand it at the cost of some other cache size. Return false if there is
+ // no space.
+ bool MakeCacheSpace() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ // REQUIRES: lock_ for locked_size_class is held.
+ // Picks a "random" size class to steal TCEntry slot from. In reality it
+ // just iterates over the sizeclasses but does so without taking a lock.
+ // Returns true on success.
+ // May temporarily lock a "random" size class.
+ static bool EvictRandomSizeClass(int locked_size_class, bool force);
+
+ // REQUIRES: lock_ is *not* held.
+ // Tries to shrink the cache. If force is true it will release objects to
+ // spans if it allows it to shrink the cache. Return false if it failed to
+ // shrink the cache. Decrements cache_size_ on success.
+ // May temporarily take lock_. If it takes lock_, the locked_size_class
+ // lock is released to keep the thread from holding two size class locks
+ // concurrently which could lead to a deadlock.
+ bool ShrinkCache(int locked_size_class, bool force) LOCKS_EXCLUDED(lock_);
+
+ // This lock protects all the data members. cached_entries and cache_size_
+ // may be looked at without holding the lock.
+ SpinLock lock_;
+
+ // We keep linked lists of empty and non-empty spans.
+ size_t size_class_; // My size class
+ Span empty_; // Dummy header for list of empty spans
+ Span nonempty_; // Dummy header for list of non-empty spans
+ size_t counter_; // Number of free objects in cache entry
+
+ // Here we reserve space for TCEntry cache slots. Since one size class can
+ // end up getting all the TCEntries quota in the system we just preallocate
+ // sufficient number of entries here.
+ TCEntry tc_slots_[kNumTransferEntries];
+
+ // Number of currently used cached entries in tc_slots_. This variable is
+ // updated under a lock but can be read without one.
+ int32_t used_slots_;
+ // The current number of slots for this size class. This is an
+ // adaptive value that is increased if there is lots of traffic
+ // on a given size class.
+ int32_t cache_size_;
};
- // A central cache freelist can have anywhere from 0 to kNumTransferEntries
- // slots to put link list chains into. To keep memory usage bounded the total
- // number of TCEntries across size classes is fixed. Currently each size
- // class is initially given one TCEntry which also means that the maximum any
- // one class can have is kNumClasses.
- static const int kNumTransferEntries = kNumClasses;
-
- // REQUIRES: lock_ is held
- // Remove object from cache and return.
- // Return NULL if no free entries in cache.
- void* FetchFromSpans() EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
- // REQUIRES: lock_ is held
- // Remove object from cache and return. Fetches
- // from pageheap if cache is empty. Only returns
- // NULL on allocation failure.
- void* FetchFromSpansSafe() EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
- // REQUIRES: lock_ is held
- // Release a linked list of objects to spans.
- // May temporarily release lock_.
- void ReleaseListToSpans(void *start) EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
- // REQUIRES: lock_ is held
- // Release an object to spans.
- // May temporarily release lock_.
- void ReleaseToSpans(void* object) EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
- // REQUIRES: lock_ is held
- // Populate cache by fetching from the page heap.
- // May temporarily release lock_.
- void Populate() EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
- // REQUIRES: lock is held.
- // Tries to make room for a TCEntry. If the cache is full it will try to
- // expand it at the cost of some other cache size. Return false if there is
- // no space.
- bool MakeCacheSpace() EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
- // REQUIRES: lock_ for locked_size_class is held.
- // Picks a "random" size class to steal TCEntry slot from. In reality it
- // just iterates over the sizeclasses but does so without taking a lock.
- // Returns true on success.
- // May temporarily lock a "random" size class.
- static bool EvictRandomSizeClass(int locked_size_class, bool force);
-
- // REQUIRES: lock_ is *not* held.
- // Tries to shrink the Cache. If force is true it will relase objects to
- // spans if it allows it to shrink the cache. Return false if it failed to
- // shrink the cache. Decrements cache_size_ on succeess.
- // May temporarily take lock_. If it takes lock_, the locked_size_class
- // lock is released to keep the thread from holding two size class locks
- // concurrently which could lead to a deadlock.
- bool ShrinkCache(int locked_size_class, bool force) LOCKS_EXCLUDED(lock_);
-
- // This lock protects all the data members. cached_entries and cache_size_
- // may be looked at without holding the lock.
- SpinLock lock_;
-
- // We keep linked lists of empty and non-empty spans.
- size_t size_class_; // My size class
- Span empty_; // Dummy header for list of empty spans
- Span nonempty_; // Dummy header for list of non-empty spans
- size_t counter_; // Number of free objects in cache entry
-
- // Here we reserve space for TCEntry cache slots. Since one size class can
- // end up getting all the TCEntries quota in the system we just preallocate
- // sufficient number of entries here.
- TCEntry tc_slots_[kNumTransferEntries];
-
- // Number of currently used cached entries in tc_slots_. This variable is
- // updated under a lock but can be read without one.
- int32_t used_slots_;
- // The current number of slots for this size class. This is an
- // adaptive value that is increased if there is lots of traffic
- // on a given size class.
- int32_t cache_size_;
-};
-
-// Pads each CentralCache object to multiple of 64 bytes. Since some
-// compilers (such as MSVC) don't like it when the padding is 0, I use
-// template specialization to remove the padding entirely when
-// sizeof(CentralFreeList) is a multiple of 64.
-template<int kFreeListSizeMod64>
-class CentralFreeListPaddedTo : public CentralFreeList {
- private:
- char pad_[64 - kFreeListSizeMod64];
-};
-
-template<>
-class CentralFreeListPaddedTo<0> : public CentralFreeList {
-};
-
-class CentralFreeListPadded : public CentralFreeListPaddedTo<
- sizeof(CentralFreeList) % 64> {
-};
+ // Pads each CentralCache object to a multiple of 64 bytes. Since some
+ // compilers (such as MSVC) don't like it when the padding is 0, I use
+ // template specialization to remove the padding entirely when
+ // sizeof(CentralFreeList) is a multiple of 64.
+ template<int kFreeListSizeMod64>
+ class CentralFreeListPaddedTo : public CentralFreeList
+ {
+ private:
+ char pad_[64 - kFreeListSizeMod64];
+ };
+
+ template<> class CentralFreeListPaddedTo<0> : public CentralFreeList { };
+
+ class CentralFreeListPadded : public CentralFreeListPaddedTo<sizeof(CentralFreeList) % 64> { };
} // namespace tcmalloc
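
The padding trick at the end of this file deserves a quick illustration. Below is a minimal, self-contained sketch of the same mechanics; Example and its 70-byte size are hypothetical stand-ins for CentralFreeList. The char pad rounds the object size up to the next multiple of 64 (a cache line), and the <0> specialization removes the pad entirely when the size is already a multiple of 64, since a zero-length array would be rejected by some compilers (MSVC, per the comment above).

struct Example { char data[70]; };  // hypothetical stand-in for CentralFreeList

template <int kSizeMod64>
class PaddedTo : public Example {
 private:
  char pad_[64 - kSizeMod64];  // rounds sizeof up to the next 64-byte multiple
};

template <>
class PaddedTo<0> : public Example { };  // already a multiple of 64: no pad

typedef PaddedTo<sizeof(Example) % 64> Padded;

// Compile-time check in the pre-C++11 style of TCMalloc's COMPILE_ASSERT:
// the array size is -1 (ill-formed) if the padding math is wrong.
typedef char padded_size_check[(sizeof(Padded) % 64 == 0) ? 1 : -1];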
208 libs/tcmalloc-1.4/src/common.h
@@ -89,120 +89,110 @@ static const size_t kNumClasses = 61;
// want this big to avoid locking the central free-list too often. It
// should not hurt to make this list somewhat big because the
// scavenging code will shrink it down when its contents are not in use.
-static const int kMaxDynamicFreeListLength = 8192;
+static const int kMaxDynamicFreeListLength = 65536; // was 8192
static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
namespace tcmalloc {
-// Convert byte size into pages. This won't overflow, but may return
-// an unreasonably large value if bytes is huge enough.
-inline Length pages(size_t bytes) {
- return (bytes >> kPageShift) +
- ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
-}
-
-// Size-class information + mapping
-class SizeMap {
- private:
- // Number of objects to move between a per-thread list and a central
- // list in one shot. We want this to be not too small so we can
- // amortize the lock overhead for accessing the central list. Making
- // it too big may temporarily cause unnecessary memory wastage in the
- // per-thread free list until the scavenger cleans up the list.
- int num_objects_to_move_[kNumClasses];
-
- //-------------------------------------------------------------------
- // Mapping from size to size_class and vice versa
- //-------------------------------------------------------------------
-
- // Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
- // array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
- // So for these larger sizes we have an array indexed by ceil(size/128).
- //
- // We flatten both logical arrays into one physical array and use
- // arithmetic to compute an appropriate index. The constants used by
- // ClassIndex() were selected to make the flattening work.
- //
- // Examples:
- // Size Expression Index
- // -------------------------------------------------------
- // 0 (0 + 7) / 8 0
- // 1 (1 + 7) / 8 1
- // ...
- // 1024 (1024 + 7) / 8 128
- // 1025 (1025 + 127 + (120<<7)) / 128 129
- // ...
- // 32768 (32768 + 127 + (120<<7)) / 128 376
- static const int kMaxSmallSize = 1024;
- unsigned char class_array_[377];
-
- // Compute index of the class_array[] entry for a given size
- static inline int ClassIndex(int s) {
- ASSERT(s <= static_cast<int>(kMaxSize));
- const bool big = (s > kMaxSmallSize);
- const int add_amount = big ? (127 + (120<<7)) : 7;
- const int shift_amount = big ? 7 : 3;
- return (s + add_amount) >> shift_amount;
- }
-
- int NumMoveSize(size_t size);
-
- // Mapping from size class to max size storable in that class
- size_t class_to_size_[kNumClasses];
-
- // Mapping from size class to number of pages to allocate at a time
- size_t class_to_pages_[kNumClasses];
-
- public:
- // Constructor should do nothing since we rely on explicit Init()
- // call, which may or may not be called before the constructor runs.
- SizeMap() { }
-
- // Initialize the mapping arrays
- void Init();
-
- inline unsigned int SizeClass(int size) {
- return class_array_[ClassIndex(size)];
- }
-
- // Get the byte-size for a specified class
- inline size_t ByteSizeForClass(size_t cl) {
- return class_to_size_[cl];
- }
-
- // Mapping from size class to max size storable in that class
- inline size_t class_to_size(size_t cl) {
- return class_to_size_[cl];
- }
-
- // Mapping from size class to number of pages to allocate at a time
- inline size_t class_to_pages(size_t cl) {
- return class_to_pages_[cl];
- }
-
- // Number of objects to move between a per-thread list and a central
- // list in one shot. We want this to be not too small so we can
- // amortize the lock overhead for accessing the central list. Making
- // it too big may temporarily cause unnecessary memory wastage in the
- // per-thread free list until the scavenger cleans up the list.
- inline int num_objects_to_move(size_t cl) {
- return num_objects_to_move_[cl];
- }
-
- // Dump contents of the computed size map
- void Dump(TCMalloc_Printer* out);
-};
-
-// Allocates "bytes" worth of memory and returns it. Increments
-// metadata_system_bytes appropriately. May return NULL if allocation
-// fails. Requires pageheap_lock is held.
-void* MetaDataAlloc(size_t bytes);
+ // Convert byte size into pages. This won't overflow, but may return
+ // an unreasonably large value if bytes is huge enough.
+ inline Length pages(size_t bytes) { return (bytes >> kPageShift) + ((bytes & (kPageSize - 1)) > 0 ? 1 : 0); }
+
+ // Size-class information + mapping
+ class SizeMap
+ {
+ private:
+ // Number of objects to move between a per-thread list and a central
+ // list in one shot. We want this to be not too small so we can
+ // amortize the lock overhead for accessing the central list. Making
+ // it too big may temporarily cause unnecessary memory wastage in the
+ // per-thread free list until the scavenger cleans up the list.
+ int num_objects_to_move_[kNumClasses];
+
+ //-------------------------------------------------------------------
+ // Mapping from size to size_class and vice versa
+ //-------------------------------------------------------------------
+
+ // Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
+ // array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
+ // So for these larger sizes we have an array indexed by ceil(size/128).
+ //
+ // We flatten both logical arrays into one physical array and use
+ // arithmetic to compute an appropriate index. The constants used by
+ // ClassIndex() were selected to make the flattening work.
+ //
+ // Examples:
+ // Size Expression Index
+ // -------------------------------------------------------
+ // 0 (0 + 7) / 8 0
+ // 1 (1 + 7) / 8 1
+ // ...
+ // 1024 (1024 + 7) / 8 128
+ // 1025 (1025 + 127 + (120<<7)) / 128 129
+ // ...
+ // 32768 (32768 + 127 + (120<<7)) / 128 376
+ static const int kMaxSmallSize = 1024;
+ unsigned char class_array_[377];
+
+ // Compute index of the class_array[] entry for a given size
+ static inline int ClassIndex(int s) {
+ ASSERT(s <= static_cast<int>(kMaxSize));
+ const bool big = (s > kMaxSmallSize);
+ const int add_amount = big ? (127 + (120<<7)) : 7;
+ const int shift_amount = big ? 7 : 3;
+ return (s + add_amount) >> shift_amount;
+ }
+
+ int NumMoveSize(size_t size);
+
+ // Mapping from size class to max size storable in that class
+ size_t class_to_size_[kNumClasses];
+
+ // Mapping from size class to number of pages to allocate at a time
+ size_t class_to_pages_[kNumClasses];
+
+ public:
+ // Constructor should do nothing since we rely on explicit Init()
+ // call, which may or may not be called before the constructor runs.
+ SizeMap() { }
+
+ // Initialize the mapping arrays
+ void Init();
+
+ inline unsigned int SizeClass(int size) {
+ return class_array_[ClassIndex(size)];
+ }
+
+ // Get the byte-size for a specified class
+ inline size_t ByteSizeForClass(size_t cl) { return class_to_size_[cl]; }
+
+ // Mapping from size class to max size storable in that class
+ inline size_t class_to_size(size_t cl) { return class_to_size_[cl]; }
+
+ // Mapping from size class to number of pages to allocate at a time
+ inline size_t class_to_pages(size_t cl) { return class_to_pages_[cl]; }
+
+ // Number of objects to move between a per-thread list and a central
+ // list in one shot. We want this to be not too small so we can
+ // amortize the lock overhead for accessing the central list. Making
+ // it too big may temporarily cause unnecessary memory wastage in the
+ // per-thread free list until the scavenger cleans up the list.
+ inline int num_objects_to_move(size_t cl) { return num_objects_to_move_[cl]; }
+
+ // Dump contents of the computed size map
+ void Dump(TCMalloc_Printer* out);
+ };
+
+ // Allocates "bytes" worth of memory and returns it. Increments
+ // metadata_system_bytes appropriately. May return NULL if allocation
+ // fails. Requires pageheap_lock is held.
+ void* MetaDataAlloc(size_t bytes);
+
+ // Returns the total number of bytes allocated from the system.
+ // Requires pageheap_lock is held.
+ uint64_t metadata_system_bytes();
-// Returns the total number of bytes allocated from the system.
-// Requires pageheap_lock is held.
-uint64_t metadata_system_bytes();
-
-} // namespace tcmalloc
+} // namespace tcmalloc
#endif // TCMALLOC_COMMON_H_
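
The flattened class_array_ index arithmetic documented above can be sanity-checked in isolation. This sketch copies the ClassIndex() math with kMaxSmallSize = 1024 hard-coded and asserts the rows from the comment table; it is illustrative only, not part of the commit.

#include <cassert>

// Standalone copy of SizeMap::ClassIndex() with kMaxSmallSize = 1024.
static int ClassIndex(int s) {
  const bool big = (s > 1024);
  const int add_amount = big ? (127 + (120 << 7)) : 7;
  const int shift_amount = big ? 7 : 3;
  return (s + add_amount) >> shift_amount;
}

int main() {
  assert(ClassIndex(0) == 0);        // (0 + 7) / 8
  assert(ClassIndex(1) == 1);        // (1 + 7) / 8
  assert(ClassIndex(1024) == 128);   // last "small" size, 8-byte granularity
  assert(ClassIndex(1025) == 129);   // first "large" size, 128-byte granularity
  assert(ClassIndex(32768) == 376);  // kMaxSize lands on the last class_array_[377] entry
  return 0;
}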
12 libs/tcmalloc-1.4/src/packed-cache-inl.h
@@ -138,8 +138,7 @@
// A safe way of doing "(1 << n) - 1" -- without worrying about overflow
// Note this will all be resolved to a constant expression at compile-time
#define N_ONES_(IntType, N) \
- ( (N) == 0 ? 0 : ((static_cast<IntType>(1) << ((N)-1))-1 + \
- (static_cast<IntType>(1) << ((N)-1))) )
+ ( (N) == 0 ? 0 : ((static_cast<IntType>(1) << ((N)-1))-1 + (static_cast<IntType>(1) << ((N)-1))) )
// The types K and V provide upper bounds on the number of valid keys
// and values, but we explicitly require the keys to be less than
@@ -159,8 +158,7 @@ class PackedCache {
COMPILE_ASSERT(static_cast<int>(kKeybits) <= sizeof(K) * 8, key_size);
COMPILE_ASSERT(static_cast<int>(kValuebits) <= sizeof(V) * 8, value_size);
COMPILE_ASSERT(kHashbits <= kKeybits, hash_function);
- COMPILE_ASSERT(kKeybits - kHashbits + kValuebits <= kTbits,
- entry_size_must_be_big_enough);
+ COMPILE_ASSERT(kKeybits - kHashbits + kValuebits <= kTbits, entry_size_must_be_big_enough);
Clear(initial_value);
}
@@ -176,10 +174,8 @@ class PackedCache {
}
V GetOrDefault(K key, V default_value) const {
- // As with other code in this class, we touch array_ as few times
- // as we can. Assuming entries are read atomically (e.g., their
- // type is uintptr_t on most hardware) then certain races are
- // harmless.
+ // As with other code in this class, we touch array_ as few times as we can. Assuming entries are read atomically
+ // (e.g., their type is uintptr_t on most hardware) then certain races are harmless.
ASSERT(key == (key & kKeyMask));
T entry = array_[Hash(key)];
return KeyMatch(entry, key) ? EntryToValue(entry) : default_value;
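
For context, N_ONES_ exists because the naive (1 << N) - 1 is undefined behavior when N equals the bit width of the type; splitting the shift into two (N-1)-bit halves never shifts by more than width - 1. A sketch of the same identity as a function (the name NOnes is illustrative):

#include <cassert>
#include <stdint.h>

// (1 << (n-1)) - 1 + (1 << (n-1)) equals (1 << n) - 1, without ever
// shifting by the full width of IntType.
template <typename IntType>
IntType NOnes(int n) {
  return n == 0 ? 0
                : (static_cast<IntType>(1) << (n - 1)) - 1 +
                      (static_cast<IntType>(1) << (n - 1));
}

int main() {
  assert(NOnes<uint32_t>(8) == 0xFFu);
  assert(NOnes<uint32_t>(32) == 0xFFFFFFFFu);  // naive (1u << 32) - 1 is UB
  return 0;
}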
3 libs/tcmalloc-1.4/src/tcmalloc.cc
@@ -633,7 +633,8 @@ static inline ThreadCache* GetCacheIfPresent() {
// This lets you call back to a given function pointer if ptr is invalid.
// It is used primarily by windows code which wants a specialized callback.
-inline void do_free_with_callback(void* ptr, void (*invalid_free_fn)(void*)) {
+inline void do_free_with_callback(void* ptr, void (*invalid_free_fn)(void*))
+{
if (ptr == NULL) return;
ASSERT(Static::pageheap() != NULL); // Should not call free() before malloc()
const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
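
For context on the PageID line above: TCMalloc locates the metadata for a freed pointer by shifting its address right by kPageShift and using the result as a page number. A minimal sketch, assuming kPageShift = 13 (8 KiB pages) purely for illustration; the real constant is defined in common.h:

#include <stdint.h>

typedef uintptr_t PageID;
static const int kPageShift = 13;  // assumed value for illustration; see common.h

// Maps a heap pointer to the page it lives on, as in do_free_with_callback.
inline PageID PageIdOf(const void* ptr) {
  return reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
}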
17 libs/tcmalloc-1.4/src/thread_cache.cc
@@ -158,8 +158,7 @@ void* ThreadCache::FetchFromCentralCache(size_t cl, size_t byte_size) {
const int num_to_move = min<int>(list->max_length(), batch_size);
void *start, *end;
- int fetch_count = Static::central_cache()[cl].RemoveRange(
- &start, &end, num_to_move);
+ int fetch_count = Static::central_cache()[cl].RemoveRange(&start, &end, num_to_move);
ASSERT((start == NULL) == (fetch_count == 0));
if (--fetch_count >= 0) {
@@ -173,14 +172,12 @@ void* ThreadCache::FetchFromCentralCache(size_t cl, size_t byte_size) {
if (static_cast<int>(list->max_length()) < batch_size) {
list->set_max_length(list->max_length() + 1);
} else {
- // Don't let the list get too long. In 32 bit builds, the length
- // is represented by a 16 bit int, so we need to watch out for
- // integer overflow.
- int new_length = min<int>(list->max_length() + batch_size,
- kMaxDynamicFreeListLength);
- // The list's max_length must always be a multiple of batch_size,
- // and kMaxDynamicFreeListLength is not necessarily a multiple
- // of batch_size.
+ // Don't let the list get too long. In 32 bit builds, the length is represented by a 16 bit int, so we need to watch out
+ // for integer overflow.
+ int new_length = min<int>(list->max_length() + batch_size, kMaxDynamicFreeListLength);
+
+ // The list's max_length must always be a multiple of batch_size, and kMaxDynamicFreeListLength is not necessarily a
+ // multiple of batch_size.
new_length -= new_length % batch_size;
ASSERT(new_length % batch_size == 0);
list->set_max_length(new_length);
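
The rule above interacts with the kMaxDynamicFreeListLength increase in common.h: a list's max_length ramps up by one element per fetch until it reaches batch_size, then grows a whole batch at a time, capped at the new 65536 limit and rounded down to a batch multiple. A sketch of the rule (NextMaxLength is an illustrative name, not the commit's API):

#include <algorithm>
#include <cassert>

static int NextMaxLength(int max_length, int batch_size, int cap) {
  if (max_length < batch_size)
    return max_length + 1;                      // slow start: grow by one
  int new_length = std::min(max_length + batch_size, cap);
  new_length -= new_length % batch_size;        // keep a multiple of batch_size
  return new_length;
}

int main() {
  assert(NextMaxLength(5, 32, 65536) == 6);          // below batch_size
  assert(NextMaxLength(64, 32, 65536) == 96);        // add a full batch
  assert(NextMaxLength(65536, 42, 65536) == 65520);  // capped, then rounded down
  return 0;
}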
