https://bugs.webkit.org/show_bug.cgi?id=129370
https://trac.webkit.org/changeset?old=164727&old_path=webkit%2Ftrunk%2FSource%2FWTF%2Fwtf%2FFastMalloc.cpp&new=164742&new_path=webkit%2Ftrunk%2FSource%2FWTF%2Fwtf%2FFastMalloc.cpp
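
Summary: this change replaces the compile-time page-size constants with values computed at run time. kPageShift, kPageSize, kNumClasses, and kMaxValidPages are now set in InitSizeClasses() (on Darwin from vm_page_shift, hence the new <mach/mach_init.h> include), so one binary handles both 4 KB (shift 12, 68 size classes) and 16 KB (shift 14, 77 size classes) pages. Arrays formerly sized by kNumClasses are sized by the upper bound K_NUM_CLASSES_MAX, and hot paths assert that the constants have been initialized.

Below is a minimal, self-contained sketch of the run-time detection idea only. It uses the portable sysconf(_SC_PAGESIZE) rather than the mach-specific vm_page_shift the patch relies on, and all names are placeholders, not WebKit's.

// Sketch: derive the page shift at run time instead of baking it in at compile time.
#include <unistd.h>   // sysconf
#include <cstddef>
#include <cstdio>

static size_t kPageShift = 0;   // set once at start-up
static size_t kPageSize = 0;

static void initPageConstants()
{
    size_t pageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    size_t shift = 0;
    while ((static_cast<size_t>(1) << shift) < pageSize)
        ++shift;
    kPageShift = shift;                                // 12 for 4 KB pages, 14 for 16 KB pages
    kPageSize = static_cast<size_t>(1) << kPageShift;
}

int main()
{
    initPageConstants();
    std::printf("page shift = %zu, page size = %zu bytes\n", kPageShift, kPageSize);
    return 0;
}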

Index: trunk/Source/WTF/wtf/FastMalloc.cpp
===================================================================
--- Source/WTF/wtf/FastMalloc.cpp	(revision 164727)
+++ Source/WTF/wtf/FastMalloc.cpp	(revision 164742)
@@ -92,4 +92,5 @@
 
 #if OS(DARWIN)
+#include <mach/mach_init.h>
 #include <malloc/malloc.h>
 #endif
@@ -630,26 +631,20 @@
 //-------------------------------------------------------------------
 
+// Type that can hold the length of a run of pages
+typedef uintptr_t Length;
+
 // Not all possible combinations of the following parameters make
 // sense.  In particular, if kMaxSize increases, you may have to
 // increase kNumClasses as well.
-#if OS(DARWIN)
-#    define K_PAGE_SHIFT PAGE_SHIFT
-#    if (K_PAGE_SHIFT == 12)
-#        define K_NUM_CLASSES 68
-#    elif (K_PAGE_SHIFT == 14)
-#        define K_NUM_CLASSES 77
-#    else
-#        error "Unsupported PAGE_SHIFT amount"
-#    endif
-#else
-#    define K_PAGE_SHIFT 12
-#    define K_NUM_CLASSES 68
-#endif
-static const size_t kPageShift  = K_PAGE_SHIFT;
-static const size_t kPageSize   = 1 << kPageShift;
+#define K_PAGE_SHIFT_MIN 12
+#define K_PAGE_SHIFT_MAX 14
+#define K_NUM_CLASSES_MAX 77
+static size_t kPageShift  = 0;
+static size_t kNumClasses = 0;
+static size_t kPageSize   = 0;
+static Length kMaxValidPages = 0;
 static const size_t kMaxSize    = 32u * 1024;
 static const size_t kAlignShift = 3;
 static const size_t kAlignment  = 1 << kAlignShift;
-static const size_t kNumClasses = K_NUM_CLASSES;
 
 // Allocates a big block of memory for the pagemap once we reach more than
@@ -663,5 +658,5 @@
 // have small limits on the number of mmap() regions per
 // address-space.
-static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);
+static const size_t kMinSystemAlloc = 1 << (20 - K_PAGE_SHIFT_MAX);
 
 // Number of objects to move between a per-thread list and a central
@@ -670,5 +665,5 @@
 // it too big may temporarily cause unnecessary memory wastage in the
 // per-thread free list until the scavenger cleans up the list.
-static int num_objects_to_move[kNumClasses];
+static int num_objects_to_move[K_NUM_CLASSES_MAX];
 
 // Maximum length we allow a per-thread free-list to have before we
@@ -766,8 +761,8 @@
 
 // Mapping from size class to max size storable in that class
-static size_t class_to_size[kNumClasses];
+static size_t class_to_size[K_NUM_CLASSES_MAX];
 
 // Mapping from size class to number of pages to allocate at a time
-static size_t class_to_pages[kNumClasses];
+static size_t class_to_pages[K_NUM_CLASSES_MAX];
 
 // Hardened singly linked list.  We make this a class to allow compiler to
@@ -814,5 +809,6 @@
 // class is initially given one TCEntry which also means that the maximum any
 // one class can have is kNumClasses.
-static const int kNumTransferEntries = kNumClasses;
+#define K_NUM_TRANSFER_ENTRIES_MAX static_cast<int>(K_NUM_CLASSES_MAX)
+#define kNumTransferEntries static_cast<int>(kNumClasses)
 
 // Note: the following only works for "n"s that fit in 32-bits, but
@@ -918,4 +914,23 @@
 // Initialize the mapping arrays
 static void InitSizeClasses() {
+#if OS(DARWIN)
+  kPageShift = vm_page_shift;
+  switch (kPageShift) {
+  case 12:
+    kNumClasses = 68;
+    break;
+  case 14:
+    kNumClasses = 77;
+    break;
+  default:
+    CRASH();
+  };
+#else
+  kPageShift = 12;
+  kNumClasses = 68;
+#endif
+  kPageSize = 1 << kPageShift;
+  kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
+
   // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
   if (ClassIndex(0) < 0) {
@@ -1145,12 +1160,8 @@
 typedef uintptr_t PageID;
 
-// Type that can hold the length of a run of pages
-typedef uintptr_t Length;
-
-static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
-
 // Convert byte size into pages.  This won't overflow, but may return
 // an unreasonably large value if bytes is huge enough.
 static inline Length pages(size_t bytes) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   return (bytes >> kPageShift) +
       ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
@@ -1160,4 +1171,5 @@
 // allocated
 static size_t AllocationSize(size_t bytes) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   if (bytes > kMaxSize) {
     // Large object: we allocate an integral number of pages
@@ -1432,5 +1444,5 @@
   // end up getting all the TCEntries quota in the system we just preallocate
   // sufficient number of entries here.
-  TCEntry tc_slots_[kNumTransferEntries];
+  TCEntry tc_slots_[K_NUM_TRANSFER_ENTRIES_MAX];
 
   // Number of currently used cached entries in tc_slots_.  This variable is
@@ -1654,5 +1666,5 @@
 template <int BITS> class MapSelector {
  public:
-  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
+  typedef TCMalloc_PageMap3<BITS-K_PAGE_SHIFT_MIN> Type;
   typedef PackedCache<BITS, uint64_t> CacheType;
 };
@@ -1672,5 +1684,5 @@
 template <> class MapSelector<64> {
  public:
-  typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
+  typedef TCMalloc_PageMap3<64 - K_PAGE_SHIFT_MIN - kBitsUnusedOn64Bit> Type;
   typedef PackedCache<64, uint64_t> CacheType;
 };
@@ -1680,6 +1692,6 @@
 template <> class MapSelector<32> {
  public:
-  typedef TCMalloc_PageMap2<32 - kPageShift> Type;
-  typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
+  typedef TCMalloc_PageMap2<32 - K_PAGE_SHIFT_MIN> Type;
+  typedef PackedCache<32 - K_PAGE_SHIFT_MIN, uint16_t> CacheType;
 };
 
@@ -1778,4 +1790,5 @@
   // Return number of free bytes in heap
   uint64_t FreeBytes() const {
+    ASSERT(kPageShift && kNumClasses && kPageSize);
     return (static_cast<uint64_t>(free_pages_) << kPageShift);
   }
@@ -1913,4 +1926,6 @@
 void TCMalloc_PageHeap::init()
 {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
+
   pagemap_.init(MetaDataAlloc);
   pagemap_cache_ = PageMapCache(0);
@@ -1927,5 +1942,5 @@
   // Start scavenging at kMaxPages list
   scavenge_index_ = kMaxPages-1;
-  COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
+  ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits));
   DLL_Init(&large_.normal, entropy_);
   DLL_Init(&large_.returned, entropy_);
@@ -2068,4 +2083,5 @@
 void TCMalloc_PageHeap::scavenge()
 {
+    ASSERT(kPageShift && kNumClasses && kPageSize);
     size_t pagesToRelease = min_free_committed_pages_since_last_scavenge_ * kScavengePercentage;
     size_t targetPageCount = std::max<size_t>(kMinimumFreeCommittedPageCount, free_committed_pages_ - pagesToRelease);
@@ -2229,4 +2245,5 @@
 
 inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   ASSERT(n > 0);
   DLL_Remove(span, entropy_);
@@ -2265,4 +2282,5 @@
 static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other)
 {
+    ASSERT(kPageShift && kNumClasses && kPageSize);
     if (destination->decommitted && !other->decommitted) {
         TCMalloc_SystemRelease(reinterpret_cast<void*>(other->start << kPageShift),
@@ -2368,4 +2386,5 @@
 #if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
 void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   // Fast path; not yet time to release memory
   scavenge_counter_ -= n;
@@ -2429,4 +2448,5 @@
 #ifdef WTF_CHANGES
 size_t TCMalloc_PageHeap::ReturnedBytes() const {
+    ASSERT(kPageShift && kNumClasses && kPageSize);
     size_t result = 0;
     for (unsigned s = 0; s < kMaxPages; s++) {
@@ -2444,4 +2464,5 @@
 #ifndef WTF_CHANGES
 static double PagesToMB(uint64_t pages) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   return (pages << kPageShift) / 1048576.0;
 }
@@ -2510,4 +2531,5 @@
 
 bool TCMalloc_PageHeap::GrowHeap(Length n) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   ASSERT(kMaxPages >= kMinSystemAlloc);
   if (n > kMaxValidPages) return false;
@@ -2606,4 +2628,5 @@
 
 void TCMalloc_PageHeap::ReleaseFreeList(Span* list, Span* returned) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   // Walk backwards through list so that when we push these
   // spans on the "returned" list, we preserve the order.
@@ -2739,5 +2762,5 @@
   ThreadIdentifier tid_;                // Which thread owns it
   bool          in_setspecific_;           // Called pthread_setspecific?
-  FreeList      list_[kNumClasses];     // Array indexed by size-class
+  FreeList      list_[K_NUM_CLASSES_MAX];     // Array indexed by size-class
 
   // We sample allocations, biased by the size of the allocation
@@ -2795,4 +2818,5 @@
   void enumerateFreeObjects(Finder&amp; finder, const Reader&amp; reader)
   {
+      ASSERT(kPageShift && kNumClasses && kPageSize);
       for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++)
           list_[sizeClass].enumerateFreeObjects(finder, reader);
@@ -2807,5 +2831,5 @@
 // Central cache -- a collection of free-lists, one per size-class.
 // We have a separate lock per free-list to reduce contention.
-static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
+static TCMalloc_Central_FreeListPadded central_cache[K_NUM_CLASSES_MAX];
 
 // Page-level allocator
@@ -2963,4 +2987,5 @@
 
 void TCMalloc_Central_FreeList::Init(size_t cl, uintptr_t entropy) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   lock_.Init();
   size_class_ = cl;
@@ -2987,4 +3012,5 @@
 
 ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(HardenedSLL object) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   const PageID p = reinterpret_cast<uintptr_t>(object.value()) >> kPageShift;
   Span* span = pageheap->GetDescriptor(p);
@@ -3033,4 +3059,5 @@
 ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
     size_t locked_size_class, bool force) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   static int race_counter = 0;
   int t = race_counter++;  // Updated without a lock, but who cares.
@@ -3048,4 +3075,5 @@
 
 bool TCMalloc_Central_FreeList::MakeCacheSpace() {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   // Is there room in the cache?
   if (used_slots_ < cache_size_) return true;
@@ -3102,4 +3130,5 @@
 
 void TCMalloc_Central_FreeList::InsertRange(HardenedSLL start, HardenedSLL end, int N) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   SpinLockHolder h(&lock_);
   if (N == num_objects_to_move[size_class_] &&
@@ -3184,4 +3213,5 @@
 // Fetch memory from the system and add to the central cache freelist.
 ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   // Release central list lock while operating on pageheap
   lock_.Unlock();
@@ -3270,4 +3300,5 @@
 
 void TCMalloc_ThreadCache::Init(ThreadIdentifier tid, uintptr_t entropy) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   size_ = 0;
   next_ = NULL;
@@ -3292,4 +3323,5 @@
 
 void TCMalloc_ThreadCache::Cleanup() {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   // Put unused memory back into central cache
   for (size_t cl = 0; cl < kNumClasses; ++cl) {
@@ -3366,4 +3398,5 @@
 // Release idle memory to the central cache
 inline void TCMalloc_ThreadCache::Scavenge() {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   // If the low-water mark for the free list is L, it means we would
   // not have had to allocate anything from the central cache even if
@@ -3658,4 +3691,5 @@
 
 void TCMalloc_ThreadCache::Print() const {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   for (size_t cl = 0; cl < kNumClasses; ++cl) {
     MESSAGE("      %5" PRIuS " : %4d len; %4d lo\n",
@@ -3679,4 +3713,5 @@
 // Get stats into "r".  Also get per-size-class counts if class_count != NULL
 static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   r->central_bytes = 0;
   r->transfer_bytes = 0;
@@ -3716,4 +3751,5 @@
 // WRITE stats to "out"
 static void DumpStats(TCMalloc_Printer* out, int level) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   TCMallocStats stats;
   uint64_t class_count[kNumClasses];
@@ -4004,4 +4040,5 @@
 #if !ASSERT_DISABLED
 static inline bool CheckCachedSizeClass(void *ptr) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
   size_t cached_value = pageheap->GetSizeClassIfCached(p);
@@ -4018,4 +4055,5 @@
 
 static inline void* SpanToMallocResult(Span *span) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   ASSERT_SPAN_COMMITTED(span);
   pageheap->CacheSizeClass(span->start, 0);
@@ -4071,4 +4109,5 @@
   if (ptr == NULL) return;
   ASSERT(pageheap != NULL);  // Should not call free() before malloc()
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
   Span* span = pageheap->GetDescriptor(p);
@@ -4122,4 +4161,5 @@
   ASSERT(align > 0);
   if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();
+  ASSERT(kPageShift && kNumClasses && kPageSize);
 
   // Allocate at least one byte to avoid boundary conditions below
@@ -4441,4 +4481,7 @@
     new_size += Internal::ValidationBufferSize;
 #endif
+
+  ASSERT(pageheap != NULL);  // Should not call realloc() before malloc()
+  ASSERT(kPageShift && kNumClasses && kPageSize);
 
   // Get the size of the old entry
@@ -4660,4 +4703,5 @@
 FastMallocStatistics fastMallocStatistics()
 {
+    ASSERT(kPageShift && kNumClasses && kPageSize);
     FastMallocStatistics statistics;
 
@@ -4681,4 +4725,7 @@
 size_t fastMallocSize(const void* ptr)
 {
+  if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();
+  ASSERT(kPageShift && kNumClasses && kPageSize);
+
 #if ENABLE(WTF_MALLOC_VALIDATION)
     return Internal::fastMallocValidationHeader(const_cast<void*>(ptr))->m_size;
@@ -4792,4 +4839,5 @@
     int visit(void* ptr) const
     {
+        ASSERT(kPageShift && kNumClasses && kPageSize);
         if (!ptr)
             return 1;
@@ -4839,4 +4887,6 @@
     void recordPendingRegions()
     {
+        ASSERT(kPageShift && kNumClasses && kPageSize);
+
         bool recordRegionsContainingPointers = m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE;
         bool recordAllocations = m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE;
@@ -4887,4 +4937,5 @@
     int visit(void* ptr)
     {
+        ASSERT(kPageShift && kNumClasses && kPageSize);
         if (!ptr)
             return 1;
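
Note on the array-sizing pattern above: with kNumClasses now a run-time value, statically sized arrays (num_objects_to_move, class_to_size, class_to_pages, central_cache, the per-thread list_, tc_slots_) are declared with the compile-time upper bounds K_NUM_CLASSES_MAX / K_NUM_TRANSFER_ENTRIES_MAX, while loops iterate only up to the run-time count. A rough stand-alone sketch of that pattern, with placeholder names and values rather than WebKit's real size-class tables:

#include <cassert>
#include <cstddef>

static const size_t kNumClassesMax = 77;  // compile-time upper bound (assumed here)
static size_t kNumClasses = 0;            // chosen once at start-up

// Sized for the worst case; only the first kNumClasses entries are ever used.
static size_t class_to_size[kNumClassesMax];

static void initSizeClasses(bool sixteenKBPages)
{
    kNumClasses = sixteenKBPages ? 77 : 68;
    for (size_t cl = 0; cl < kNumClasses; ++cl)
        class_to_size[cl] = (cl + 1) * 8;  // placeholder sizes, not TCMalloc's tables
}

static size_t byteSizeForClass(size_t cl)
{
    assert(kNumClasses && "initSizeClasses() must run before any lookup");
    assert(cl < kNumClasses);
    return class_to_size[cl];
}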