diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index ce37b1cfaedcc..7d50315b35dae 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -78,6 +78,8 @@ PRIMARY_REQUIRED(const s32, MaxReleaseToOsIntervalMs)
 //
 // PRIMARY_OPTIONAL(TYPE, NAME, DEFAULT)
 //
+PRIMARY_OPTIONAL(const bool, EnableCache, true)
+
 // The scale of a compact pointer. E.g., Ptr = Base + (CompactPtr << Scale).
 PRIMARY_OPTIONAL(const uptr, CompactPtrScale, SCUDO_MIN_ALIGNMENT_LOG)
diff --git a/compiler-rt/lib/scudo/standalone/local_cache.h b/compiler-rt/lib/scudo/standalone/local_cache.h
index 46d6affdc033b..b14a72cdaaa2f 100644
--- a/compiler-rt/lib/scudo/standalone/local_cache.h
+++ b/compiler-rt/lib/scudo/standalone/local_cache.h
@@ -161,11 +161,6 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
     }
   }
 
-  void destroyBatch(uptr ClassId, void *B) {
-    if (ClassId != BatchClassId)
-      deallocate(BatchClassId, B);
-  }
-
   NOINLINE bool refill(PerClass *C, uptr ClassId, u16 MaxRefill) {
     const u16 NumBlocksRefilled =
         Allocator->popBlocks(this, ClassId, C->Chunks, MaxRefill);
@@ -184,6 +179,148 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
   }
 };
 
+template <class SizeClassAllocator> struct NoCache {
+  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
+  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
+
+  void init(GlobalStats *S, SizeClassAllocator *A) {
+    Stats.init();
+    if (LIKELY(S))
+      S->link(&Stats);
+    Allocator = A;
+    initCache();
+  }
+
+  void destroy(GlobalStats *S) {
+    if (LIKELY(S))
+      S->unlink(&Stats);
+  }
+
+  void *allocate(uptr ClassId) {
+    CompactPtrT CompactPtr;
+    uptr NumBlocksPopped = Allocator->popBlocks(this, ClassId, &CompactPtr, 1U);
+    if (NumBlocksPopped == 0)
+      return nullptr;
+    DCHECK_EQ(NumBlocksPopped, 1U);
+    const PerClass *C = &PerClassArray[ClassId];
+    Stats.add(StatAllocated, C->ClassSize);
+    Stats.sub(StatFree, C->ClassSize);
+    return Allocator->decompactPtr(ClassId, CompactPtr);
+  }
+
+  bool deallocate(uptr ClassId, void *P) {
+    CHECK_LT(ClassId, NumClasses);
+
+    if (ClassId == BatchClassId)
+      return deallocateBatchClassBlock(P);
+
+    CompactPtrT CompactPtr =
+        Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
+    Allocator->pushBlocks(this, ClassId, &CompactPtr, 1U);
+    PerClass *C = &PerClassArray[ClassId];
+    Stats.sub(StatAllocated, C->ClassSize);
+    Stats.add(StatFree, C->ClassSize);
+
+    // The following adopts the same strategy of allocator draining as
+    // SizeClassAllocatorLocalCache so that they have the same hint for doing
+    // page release.
+    ++C->Count;
+    const bool SuggestDraining = C->Count == C->MaxCount;
+    if (SuggestDraining)
+      C->Count = 0;
+    return SuggestDraining;
+  }
+
+  void *getBatchClassBlock() {
+    PerClass *C = &PerClassArray[BatchClassId];
+    if (C->Count == 0) {
+      const u16 NumBlocksRefilled = Allocator->popBlocks(
+          this, BatchClassId, BatchClassStorage, C->MaxCount);
+      if (NumBlocksRefilled == 0)
+        reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
+      DCHECK_LE(NumBlocksRefilled, SizeClassMap::MaxNumCachedHint);
+      C->Count = NumBlocksRefilled;
+    }
+
+    const uptr ClassSize = C->ClassSize;
+    CompactPtrT CompactP = BatchClassStorage[--C->Count];
+    Stats.add(StatAllocated, ClassSize);
+    Stats.sub(StatFree, ClassSize);
+
+    return Allocator->decompactPtr(BatchClassId, CompactP);
+  }
+
+  LocalStats &getStats() { return Stats; }
+
+  void getStats(ScopedString *Str) { Str->append("    No block is cached.\n"); }
+
+  bool isEmpty() const {
+    const PerClass *C = &PerClassArray[BatchClassId];
+    return C->Count == 0;
+  }
+  void drain() {
+    PerClass *C = &PerClassArray[BatchClassId];
+    if (C->Count > 0) {
+      Allocator->pushBlocks(this, BatchClassId, BatchClassStorage, C->Count);
+      C->Count = 0;
+    }
+  }
+
+  static u16 getMaxCached(uptr Size) {
+    return Min(SizeClassMap::MaxNumCachedHint,
+               SizeClassMap::getMaxCachedHint(Size));
+  }
+
+private:
+  static const uptr NumClasses = SizeClassMap::NumClasses;
+  static const uptr BatchClassId = SizeClassMap::BatchClassId;
+  struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
+    u16 Count = 0;
+    u16 MaxCount;
+    // Note: ClassSize is zero for the transfer batch.
+    uptr ClassSize;
+  };
+  PerClass PerClassArray[NumClasses] = {};
+  // Popping BatchClass blocks requires taking a certain number of blocks at
+  // once. This restriction comes from how we manage the storing of BatchClass
+  // in the primary allocator. See more details in `popBlocksImpl` in the
+  // primary allocator.
+  CompactPtrT BatchClassStorage[SizeClassMap::MaxNumCachedHint];
+  LocalStats Stats;
+  SizeClassAllocator *Allocator = nullptr;
+
+  bool deallocateBatchClassBlock(void *P) {
+    PerClass *C = &PerClassArray[BatchClassId];
+    // Drain all the blocks.
+    if (C->Count == C->MaxCount) {
+      Allocator->pushBlocks(this, BatchClassId, BatchClassStorage, C->Count);
+      C->Count = 0;
+    }
+    BatchClassStorage[C->Count++] =
+        Allocator->compactPtr(BatchClassId, reinterpret_cast<uptr>(P));
+
+    // Currently, BatchClass doesn't support page releasing, so we always
+    // return false.
+    return false;
+  }
+
+  NOINLINE void initCache() {
+    for (uptr I = 0; I < NumClasses; I++) {
+      PerClass *P = &PerClassArray[I];
+      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
+      if (I != BatchClassId) {
+        P->ClassSize = Size;
+        P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
+      } else {
+        // ClassSize in this struct is only used for malloc/free stats, which
+        // should only track user allocations, not internal movements.
+        P->ClassSize = 0;
+        P->MaxCount = SizeClassMap::MaxNumCachedHint;
+      }
+    }
+  }
+};
+
 } // namespace scudo
 
 #endif // SCUDO_LOCAL_CACHE_H_
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 2b520ceb33148..f01293487271a 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -57,9 +57,11 @@ template <typename Config> class SizeClassAllocator64 {
                 "Group size shouldn't be greater than the region size");
   static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
   typedef SizeClassAllocator64<Config> ThisT;
-  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
   typedef TransferBatch<ThisT> TransferBatchT;
   typedef BatchGroup<ThisT> BatchGroupT;
+  using CacheT = typename Conditional<Config::getEnableCache(),
+                                      SizeClassAllocatorLocalCache<ThisT>,
+                                      NoCache<ThisT>>::type;
 
   // BachClass is used to store internal metadata so it needs to be at least as
   // large as the largest data structure.
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index ff98eb3397ee0..4b920ba090a03 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -210,6 +210,47 @@ struct TestConditionVariableConfig {
   };
   template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
 };
+
+struct TestNoCacheConfig {
+  static const bool MaySupportMemoryTagging = true;
+  template <class A>
+  using TSDRegistryT =
+      scudo::TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
+
+  struct Primary {
+    using SizeClassMap = scudo::AndroidSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+    static const scudo::uptr RegionSizeLog = 28U;
+    typedef scudo::u32 CompactPtrT;
+    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+    static const scudo::uptr GroupSizeLog = 20U;
+    static const bool EnableRandomOffset = true;
+    static const scudo::uptr MapSizeIncrement = 1UL << 18;
+#else
+    static const scudo::uptr RegionSizeLog = 18U;
+    static const scudo::uptr GroupSizeLog = 18U;
+    typedef scudo::uptr CompactPtrT;
+#endif
+    static const bool EnableCache = false;
+    static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
+    static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
+  };
+
+#if SCUDO_CAN_USE_PRIMARY64
+  template <typename Config>
+  using PrimaryT = scudo::SizeClassAllocator64<Config>;
+#else
+  template <typename Config>
+  using PrimaryT = scudo::SizeClassAllocator32<Config>;
+#endif
+
+  struct Secondary {
+    template <typename Config>
+    using CacheT = scudo::MapAllocatorNoCache<Config>;
+  };
+  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+};
+
 } // namespace scudo
 
 #if SCUDO_FUCHSIA
@@ -219,7 +260,8 @@ struct TestConditionVariableConfig {
 #define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
   SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig)                          \
   SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)                          \
-  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig)
+  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig)            \
+  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestNoCacheConfig)
 #endif
 
 #define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
diff --git a/compiler-rt/lib/scudo/standalone/type_traits.h b/compiler-rt/lib/scudo/standalone/type_traits.h
index 16ed5a048f82b..1c36a83ade02f 100644
--- a/compiler-rt/lib/scudo/standalone/type_traits.h
+++ b/compiler-rt/lib/scudo/standalone/type_traits.h
@@ -42,6 +42,14 @@ template <typename T> struct isPointer<T *> {
   static constexpr bool value = true;
 };
 
+template <bool Condition, typename L, typename R> struct Conditional {
+  using type = L;
+};
+
+template <typename L, typename R> struct Conditional<false, L, R> {
+  using type = R;
+};
+
 } // namespace scudo
 
 #endif // SCUDO_TYPE_TRAITS_H_
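
Note on the mechanism: `CacheT` in primary64.h is now picked at compile time by the new `Conditional` trait, keyed off the `EnableCache` option declared in allocator_config.def (surfaced by the config wrapper as a getter, shown here as `getEnableCache()`). For readers unfamiliar with the pattern, below is a minimal, self-contained sketch; `DemoConfig`, `ThreadLocalCache`, and `PassthroughCache` are hypothetical stand-ins for illustration, not names from this patch.

// Standalone sketch of the Conditional-based cache selection. Only the
// `Conditional` trait mirrors the patch; all other names are invented.
#include <cstdio>

template <bool Condition, typename L, typename R> struct Conditional {
  using type = L; // primary template: chosen when Condition is true
};
template <typename L, typename R> struct Conditional<false, L, R> {
  using type = R; // partial specialization: chosen when Condition is false
};

// Stand-in for SizeClassAllocatorLocalCache<ThisT>: keeps per-class caches.
struct ThreadLocalCache {
  static const char *name() { return "ThreadLocalCache"; }
};
// Stand-in for NoCache<ThisT>: pushes/pops single blocks straight through.
struct PassthroughCache {
  static const char *name() { return "PassthroughCache"; }
};

// Plays the role of a config that sets `static const bool EnableCache = false;`.
struct DemoConfig {
  static constexpr bool getEnableCache() { return false; }
};

template <typename Config>
using CacheT = typename Conditional<Config::getEnableCache(),
                                    ThreadLocalCache, PassthroughCache>::type;

int main() {
  // The alias resolves at compile time; with getEnableCache() == false this
  // prints "PassthroughCache".
  std::printf("%s\n", CacheT<DemoConfig>::name());
  return 0;
}

Because the choice lives in the type system, there is no runtime branch or indirection between the two cache implementations; each config instantiates exactly one of them.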