@@ -2809,3 +2809,99 @@ swift::swift_getGenericWitnessTable(GenericWitnessTable *genericTable,
 }
 
 uint64_t swift::RelativeDirectPointerNullPtr = 0;
+
+/***************************************************************************/
+/*** Allocator implementation **********************************************/
+/***************************************************************************/
+
+namespace {
+  struct PoolRange {
+    static constexpr uintptr_t PageSize = 16 * 1024;
+    static constexpr uintptr_t MaxPoolAllocationSize = PageSize / 2;
+
+    /// The start of the allocation.
+    char *Begin;
+
+    /// The number of bytes remaining.
+    size_t Remaining;
+  };
+}
+
+// A statically-allocated pool.  It's zero-initialized, so this
+// doesn't cost us anything in binary size.
+LLVM_ALIGNAS(alignof(void*)) static char InitialAllocationPool[64*1024];
+static std::atomic<PoolRange>
+AllocationPool{PoolRange{InitialAllocationPool,
+                         sizeof(InitialAllocationPool)}};
+
+void *MetadataAllocator::Allocate(size_t size, size_t alignment) {
+  assert(alignment <= alignof(void*));
+  assert(size % alignof(void*) == 0);
+
+  // If the size is larger than the maximum, just use malloc.
+  if (size > PoolRange::MaxPoolAllocationSize)
+    return malloc(size);
+
+  // Allocate out of the pool.
+  PoolRange curState = AllocationPool.load(std::memory_order_relaxed);
+  while (true) {
+    char *allocation;
+    PoolRange newState;
+    bool allocatedNewPage;
+
+    // Try to allocate out of the current page.
+    if (size <= curState.Remaining) {
+      allocatedNewPage = false;
+      allocation = curState.Begin;
+      newState = PoolRange{curState.Begin + size, curState.Remaining - size};
+    } else {
+      allocatedNewPage = true;
+      allocation = new char[PoolRange::PageSize];
+      newState = PoolRange{allocation + size, PoolRange::PageSize - size};
+      __asan_poison_memory_region(allocation, PoolRange::PageSize);
+    }
+
+    // Swap in the new state.
+    if (std::atomic_compare_exchange_weak_explicit(&AllocationPool,
+                                                   &curState, newState,
+                                                   std::memory_order_relaxed,
+                                                   std::memory_order_relaxed)) {
+      // If that succeeded, we've successfully allocated.
+      __msan_allocated_memory(allocation, size);
+      __asan_unpoison_memory_region(allocation, size);
+      return allocation;
+    }
+
+    // If it failed, go back to a neutral state and try again.
+    if (allocatedNewPage) {
+      delete[] allocation;
+    }
+  }
+}
+
+void MetadataAllocator::Deallocate(const void *allocation, size_t size) {
+  __asan_poison_memory_region(allocation, size);
+
+  if (size > PoolRange::MaxPoolAllocationSize) {
+    free(const_cast<void*>(allocation));
+    return;
+  }
+
+  // Check whether the allocation pool is still in the state it was in
+  // immediately after the given allocation.
+  PoolRange curState = AllocationPool.load(std::memory_order_relaxed);
+  if (reinterpret_cast<const char *>(allocation) + size != curState.Begin) {
+    return;
+  }
+
+  // Try to swap back to the pre-allocation state.  If this fails,
+  // don't bother trying again; we'll just leak the allocation.
+  PoolRange newState = { reinterpret_cast<char *>(const_cast<void *>(allocation)),
+                         curState.Remaining + size };
+  (void)
+    std::atomic_compare_exchange_strong_explicit(&AllocationPool,
+                                                 &curState, newState,
+                                                 std::memory_order_relaxed,
+                                                 std::memory_order_relaxed);
+}
+
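For readers following the compare-exchange loop in `Allocate`, here is a minimal, self-contained sketch of the same lock-free bump-pointer pattern outside the runtime. It is not part of this patch: the names (`SimplePool`, `Range`, `kPageSize`, `allocate`) are hypothetical, and the sanitizer hooks, alignment checks, and `malloc` fallback for oversized requests are omitted for brevity.

```cpp
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdio>

namespace {

// Snapshot of the pool: where the next allocation starts and how
// many bytes are left on the current page.
struct Range {
  char *Begin;
  size_t Remaining;
};

constexpr size_t kPageSize = 16 * 1024;

class SimplePool {
  std::atomic<Range> State;

public:
  SimplePool() : State(Range{new char[kPageSize], kPageSize}) {}

  void *allocate(size_t size) {
    assert(size <= kPageSize);
    Range cur = State.load(std::memory_order_relaxed);
    while (true) {
      char *allocation;
      Range next;
      bool newPage;

      if (size <= cur.Remaining) {
        // Bump the pointer within the current page.
        newPage = false;
        allocation = cur.Begin;
        next = Range{cur.Begin + size, cur.Remaining - size};
      } else {
        // Current page is exhausted: speculatively start a fresh one
        // and carve this allocation out of its front.
        newPage = true;
        allocation = new char[kPageSize];
        next = Range{allocation + size, kPageSize - size};
      }

      // Publish the new state.  On failure, `cur` is reloaded with
      // whatever another thread installed, and we retry from there.
      if (State.compare_exchange_weak(cur, next,
                                      std::memory_order_relaxed,
                                      std::memory_order_relaxed))
        return allocation;

      // We lost the race; throw away the speculative page, if any.
      if (newPage)
        delete[] allocation;
    }
  }
};

} // namespace

int main() {
  SimplePool pool;
  void *a = pool.allocate(64);
  void *b = pool.allocate(64);
  // Consecutive small allocations come straight out of the same page.
  std::printf("a=%p b=%p (delta %td)\n", a, b,
              static_cast<char *>(b) - static_cast<char *>(a));
}
```

A weak compare-exchange is sufficient here because a spurious failure just costs one more trip around the loop; the patch's `Deallocate` additionally tries a single strong compare-exchange to roll the pool back when the freed block happens to be the most recent allocation, and simply leaks it otherwise.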