@@ -161,11 +161,6 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
     }
   }
 
-  void destroyBatch(uptr ClassId, void *B) {
-    if (ClassId != BatchClassId)
-      deallocate(BatchClassId, B);
-  }
-
   NOINLINE bool refill(PerClass *C, uptr ClassId, u16 MaxRefill) {
     const u16 NumBlocksRefilled =
         Allocator->popBlocks(this, ClassId, C->Chunks, MaxRefill);
@@ -184,6 +179,148 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
   }
 };
 
+template <class SizeClassAllocator> struct NoCache {
+  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
+  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
+
+  void init(GlobalStats *S, SizeClassAllocator *A) {
+    Stats.init();
+    if (LIKELY(S))
+      S->link(&Stats);
+    Allocator = A;
+    initCache();
+  }
+
+  void destroy(GlobalStats *S) {
+    if (LIKELY(S))
+      S->unlink(&Stats);
+  }
+
+  void *allocate(uptr ClassId) {
+    CompactPtrT CompactPtr;
+    uptr NumBlocksPopped = Allocator->popBlocks(this, ClassId, &CompactPtr, 1U);
+    if (NumBlocksPopped == 0)
+      return nullptr;
+    DCHECK_EQ(NumBlocksPopped, 1U);
+    const PerClass *C = &PerClassArray[ClassId];
+    Stats.add(StatAllocated, C->ClassSize);
+    Stats.sub(StatFree, C->ClassSize);
+    return Allocator->decompactPtr(ClassId, CompactPtr);
+  }
+
+  bool deallocate(uptr ClassId, void *P) {
+    CHECK_LT(ClassId, NumClasses);
+
+    if (ClassId == BatchClassId)
+      return deallocateBatchClassBlock(P);
+
+    CompactPtrT CompactPtr =
+        Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
+    Allocator->pushBlocks(this, ClassId, &CompactPtr, 1U);
+    PerClass *C = &PerClassArray[ClassId];
+    Stats.sub(StatAllocated, C->ClassSize);
+    Stats.add(StatFree, C->ClassSize);
+
+    // The following adopts the same strategy of allocator draining as
+    // SizeClassAllocatorLocalCache so that they have the same hint for doing
+    // page release.
+    ++C->Count;
+    const bool SuggestDraining = C->Count == C->MaxCount;
+    if (SuggestDraining)
+      C->Count = 0;
+    return SuggestDraining;
+  }
+
+  void *getBatchClassBlock() {
+    PerClass *C = &PerClassArray[BatchClassId];
+    if (C->Count == 0) {
+      const u16 NumBlocksRefilled = Allocator->popBlocks(
+          this, BatchClassId, BatchClassStorage, C->MaxCount);
+      if (NumBlocksRefilled == 0)
+        reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
+      DCHECK_LE(NumBlocksRefilled, SizeClassMap::MaxNumCachedHint);
+      C->Count = NumBlocksRefilled;
+    }
+
+    const uptr ClassSize = C->ClassSize;
+    CompactPtrT CompactP = BatchClassStorage[--C->Count];
+    Stats.add(StatAllocated, ClassSize);
+    Stats.sub(StatFree, ClassSize);
+
+    return Allocator->decompactPtr(BatchClassId, CompactP);
+  }
+
+  LocalStats &getStats() { return Stats; }
+
+  void getStats(ScopedString *Str) { Str->append("No block is cached.\n"); }
+
+  bool isEmpty() const {
+    const PerClass *C = &PerClassArray[BatchClassId];
+    return C->Count == 0;
+  }
+  void drain() {
+    PerClass *C = &PerClassArray[BatchClassId];
+    if (C->Count > 0) {
+      Allocator->pushBlocks(this, BatchClassId, BatchClassStorage, C->Count);
+      C->Count = 0;
+    }
+  }
+
+  static u16 getMaxCached(uptr Size) {
+    return Min(SizeClassMap::MaxNumCachedHint,
+               SizeClassMap::getMaxCachedHint(Size));
+  }
+
+private:
+  static const uptr NumClasses = SizeClassMap::NumClasses;
+  static const uptr BatchClassId = SizeClassMap::BatchClassId;
+  struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
+    u16 Count = 0;
+    u16 MaxCount;
+    // Note: ClassSize is zero for the transfer batch.
+    uptr ClassSize;
+  };
+  PerClass PerClassArray[NumClasses] = {};
+  // Popping BatchClass blocks requires taking a certain amount of blocks at
+  // once. This restriction comes from how we manage the storing of BatchClass
+  // in the primary allocator. See more details in `popBlocksImpl` in the
+  // primary allocator.
+  CompactPtrT BatchClassStorage[SizeClassMap::MaxNumCachedHint];
+  LocalStats Stats;
+  SizeClassAllocator *Allocator = nullptr;
+
+  bool deallocateBatchClassBlock(void *P) {
+    PerClass *C = &PerClassArray[BatchClassId];
+    // Drain all the blocks.
+    if (C->Count == C->MaxCount) {
+      Allocator->pushBlocks(this, BatchClassId, BatchClassStorage, C->Count);
+      C->Count = 0;
+    }
+    BatchClassStorage[C->Count++] =
+        Allocator->compactPtr(BatchClassId, reinterpret_cast<uptr>(P));
+
+    // Currently, BatchClass doesn't support page releasing, so we always
+    // return false.
+    return false;
+  }
+
+  NOINLINE void initCache() {
+    for (uptr I = 0; I < NumClasses; I++) {
+      PerClass *P = &PerClassArray[I];
+      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
+      if (I != BatchClassId) {
+        P->ClassSize = Size;
+        P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
+      } else {
+        // ClassSize in this struct is only used for malloc/free stats, which
+        // should only track user allocations, not internal movements.
+        P->ClassSize = 0;
+        P->MaxCount = SizeClassMap::MaxNumCachedHint;
+      }
+    }
+  }
+};
+
 } // namespace scudo
 
 #endif // SCUDO_LOCAL_CACHE_H_
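
For context on how the new struct slots in: the sketch below is illustrative only and not part of this patch. It shows a thread-local wrapper driving NoCache the same way SizeClassAllocatorLocalCache is driven today. Primary, ExampleTSD, and tryReleasePages are hypothetical names standing in for Scudo's actual TSD and combined-allocator wiring.

// Illustrative sketch only: assumes a concrete SizeClassAllocator type
// (e.g. SizeClassAllocator64<Config>) named Primary. ExampleTSD and
// tryReleasePages() are hypothetical, not Scudo's real TSD interface.
template <class Primary> struct ExampleTSD {
  scudo::NoCache<Primary> Cache;

  void init(scudo::GlobalStats *S, Primary *A) {
    // Links the local stats into the global list and sizes per-class state.
    Cache.init(S, A);
  }

  void *alloc(scudo::uptr ClassId) {
    // With NoCache, every user block comes straight from the primary; only
    // BatchClass blocks are staged locally (see BatchClassStorage above).
    return Cache.allocate(ClassId);
  }

  void dealloc(scudo::uptr ClassId, void *Ptr) {
    // deallocate() returns true on the same cadence as the caching layer's
    // drain, i.e. a hint that a page release may be worth attempting.
    if (Cache.deallocate(ClassId, Ptr))
      tryReleasePages(ClassId); // hypothetical release hook
  }

  void tryReleasePages(scudo::uptr ClassId);
};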