/*
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *      Hugo Santos, hugosantos@gmail.com
 */

#ifndef _SLAB_H_
#define _SLAB_H_

#include <stdint.h>
#include <stdlib.h>

#include <algorithm> // for swap()
#include <new>
#include <utility> // for pair<>

#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <util/OpenHashTable.h>

#include <OS.h>


#define smp_get_current_cpu() 0
#define smp_get_num_cpus() 1


// C interface
extern "C" {
	typedef void *object_cache_t;
	object_cache_t
	object_cache_create(const char *name, size_t object_size, size_t alignment,
		void (*_constructor)(void *, void *), void (*_destructor)(void *, void *),
		void *cookie);
	void *object_cache_alloc(object_cache_t cache);
	void *object_cache_alloc_etc(object_cache_t cache, uint32_t flags);
	void object_cache_free(object_cache_t cache, void *object);
	void object_cache_destroy(object_cache_t cache);
}

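// Hedged usage sketch for the C interface above; "struct my_object", the
// callback names and the cache name are hypothetical placeholders, not part
// of this API, and the flags argument is simply passed as 0.
//
//	static void my_ctor(void *cookie, void *object) { /* ... */ }
//	static void my_dtor(void *cookie, void *object) { /* ... */ }
//
//	object_cache_t cache = object_cache_create("my objects",
//		sizeof(struct my_object), 0, my_ctor, my_dtor, NULL);
//	if (cache != NULL) {
//		struct my_object *object
//			= (struct my_object *)object_cache_alloc_etc(cache, 0);
//		if (object != NULL)
//			object_cache_free(cache, object);
//		object_cache_destroy(cache);
//	}
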
// TODO: these values should be dynamically tuned per cache.
static const int kMinimumSlabItems = 32;

// base Slab implementation, opaque to the backend used.
class BaseCache {
public:
	typedef void (*Constructor)(void *cookie, void *object);
	typedef void (*Destructor)(void *cookie, void *object);

	BaseCache(const char *_name, size_t objectSize, size_t alignment,
		Constructor _constructor, Destructor _destructor, void *_cookie);

	struct ObjectLink {
		struct ObjectLink *next;
	};

	struct Slab : DoublyLinkedListLinkImpl<Slab> {
		void *pages;
		size_t count, size;
		ObjectLink *free;
	};

	typedef std::pair<Slab *, ObjectLink *> ObjectInfo;

	ObjectLink *AllocateObject();
	bool ReturnObject(const ObjectInfo &object);

	Slab *ConstructSlab(Slab *slab, void *pages, size_t byteCount,
		ObjectLink *(*getLink)(void *parent, void *object), void *parent);
	void DestructSlab(Slab *slab);

	const char *Name() const { return fName; }
	size_t ObjectSize() const { return fObjectSize; }

protected:
	typedef DoublyLinkedList<Slab> SlabList;

	char fName[32];
	size_t fObjectSize, fCacheColorCycle;
	SlabList fPartialSlabs, fFullSlabs;

	Constructor fConstructor;
	Destructor fDestructor;
	void *fCookie;
};


enum {
	CACHE_ALIGN_TO_PAGE_TOTAL	= 1 << 0,
};

struct MallocBackend {
	typedef void *AllocationID;

	static int PageSize() { return 4096; }
	static status_t AllocatePages(BaseCache *cache, AllocationID *id,
		void **pages, size_t byteCount, uint32_t flags);
	static void FreePages(BaseCache *cache, void *pages);
};


struct AreaBackend {
	typedef area_id AllocationID;

	static int PageSize() { return B_PAGE_SIZE; }
	static status_t AllocatePages(BaseCache *cache, area_id *id, void **pages,
		size_t byteCount, uint32_t flags);
	static void FreePages(BaseCache *cache, area_id id);
};


// Slab cache implementation; glues together the frontend, the backend and the
// slab strategy used.
template<typename Strategy>
class Cache : protected BaseCache {
public:
	Cache(const char *_name, size_t objectSize, size_t alignment,
		Constructor _constructor, Destructor _destructor, void *_cookie)
		: BaseCache(_name, Strategy::RequiredSpace(objectSize), alignment,
			_constructor, _destructor, _cookie), fStrategy(this) {}

	void *AllocateObject(uint32_t flags)
	{
		if (fPartialSlabs.IsEmpty()) {
			Slab *newSlab = fStrategy.NewSlab(flags);
			if (newSlab == NULL)
				return NULL;
			fPartialSlabs.Add(newSlab);
		}

		return fStrategy.Object(BaseCache::AllocateObject());
	}

	void ReturnObject(void *object)
	{
		ObjectInfo location = fStrategy.ObjectInformation(object);

		if (BaseCache::ReturnObject(location))
			fStrategy.ReturnSlab(location.first);
	}

private:
	Strategy fStrategy;
};
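// What Cache<> requires of its Strategy parameter, as used above and provided
// by the strategies below: a constructor taking the parent BaseCache *, a
// static RequiredSpace(size_t) reporting the per-object footprint,
// NewSlab()/ReturnSlab() managing slab lifetime, Object() translating an
// ObjectLink * into the caller-visible pointer, and ObjectInformation()
// mapping an object back to its slab and link.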


static inline const void *
LowerBoundary(void *object, size_t byteCount)
{
	const uint8_t *null = (uint8_t *)NULL;
	return null + ((((uint8_t *)object) - null) & ~(byteCount - 1));
}
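// Worked example: with a power-of-two byteCount of 0x1000, an object at
// address 0x12345abc is masked down to 0x12345000, the start of the naturally
// aligned block containing it. MergedLinkCacheStrategy below relies on this to
// find the slab header without storing a per-object back pointer.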


template<typename Backend>
class BaseCacheStrategy {
protected:
	typedef BaseCache::ObjectLink ObjectLink;

	BaseCacheStrategy(BaseCache *parent)
		: fParent(parent) {}

	size_t SlabSize(size_t tailSpace) const
	{
		size_t pageCount = (kMinimumSlabItems * fParent->ObjectSize()
			+ tailSpace) / Backend::PageSize();
		if (pageCount < 1)
			pageCount = 1;
		return pageCount * Backend::PageSize();
	}

	struct Slab : BaseCache::Slab {
		typename Backend::AllocationID id;
	};

	BaseCache::Slab *_ConstructSlab(Slab *slab, void *pages, size_t tailSpace,
		ObjectLink *(*getLink)(void *parent, void *object), void *parent)
	{
		return fParent->ConstructSlab(slab, pages, SlabSize(tailSpace)
			- tailSpace, getLink, parent);
	}

	void _DestructSlab(BaseCache::Slab *slab)
	{
		fParent->DestructSlab(slab);
		Backend::FreePages(fParent, ((Slab *)slab)->id);
	}

	BaseCache *fParent;
};
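// Worked example for SlabSize() above: with a 64-byte ObjectSize() and no tail
// space, kMinimumSlabItems * 64 = 2048 bytes rounds up to a single 4096-byte
// page; the integer division means larger objects get proportionally more
// pages per slab.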


// This slab strategy stores the ObjectLink at the end of each object and the
// Slab header at the end of the allocated pages. It uses aligned allocations
// to provide the object-to-slab mapping with zero extra storage, so there is
// only one word of overhead per object. This is optimized for small objects.
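//
// Resulting layout of one aligned slab (widths not to scale):
//
//	+-------------+------+-------------+------+-----+------+
//	| object data | Link | object data | Link | ... | Slab |
//	+-------------+------+-------------+------+-----+------+
//	^ page-aligned start of the run          header at the end
//
// ObjectSize() already includes sizeof(Link) (see RequiredSpace() below), so
// _Linkage() finds the link at object + ObjectSize() - sizeof(Link), and
// _SlabInPages() finds the Slab header in the last sizeof(Slab) bytes of the
// aligned page run.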
template<typename Backend>
class MergedLinkCacheStrategy : public BaseCacheStrategy<Backend> {
public:
	typedef typename BaseCacheStrategy<Backend>::Slab Slab;
	typedef BaseCache::ObjectLink Link;
	typedef BaseCache::ObjectInfo ObjectInfo;

	MergedLinkCacheStrategy(BaseCache *parent)
		: BaseCacheStrategy<Backend>(parent) {}

	static size_t RequiredSpace(size_t objectSize)
	{
		return objectSize + sizeof(Link);
	}

	void *Object(Link *link) const
	{
		return ((uint8_t *)link) - (fParent->ObjectSize() - sizeof(Link));
	}

	ObjectInfo ObjectInformation(void *object) const
	{
		Slab *slab = _SlabInPages(LowerBoundary(object, _SlabSize()));
		return ObjectInfo(slab, _Linkage(object));
	}

	BaseCache::Slab *NewSlab(uint32_t flags)
	{
		typename Backend::AllocationID id;
		void *pages;

		// In order to save a pointer per object (or a hash table mapping
		// objects to slabs), we require this set of pages to be aligned
		// on a (pageCount * PAGE_SIZE) boundary.
		if (Backend::AllocatePages(fParent, &id, &pages, _SlabSize(),
				CACHE_ALIGN_TO_PAGE_TOTAL | flags) < B_OK)
			return NULL;

		_SlabInPages(pages)->id = id;

		return BaseCacheStrategy<Backend>::_ConstructSlab(_SlabInPages(pages),
			pages, sizeof(Slab), _Linkage, this);
	}

	void ReturnSlab(BaseCache::Slab *slab)
	{
		BaseCacheStrategy<Backend>::_DestructSlab(slab);
	}

private:
	size_t _SlabSize() const
	{
		return BaseCacheStrategy<Backend>::SlabSize(sizeof(Slab));
	}

	Link *_Linkage(void *object) const
	{
		return (Link *)(((uint8_t *)object)
			+ (fParent->ObjectSize() - sizeof(Link)));
	}

	Slab *_SlabInPages(const void *pages) const
	{
		return (Slab *)(((uint8_t *)pages) + _SlabSize() - sizeof(Slab));
	}

	static Link *_Linkage(void *_this, void *object)
	{
		return ((MergedLinkCacheStrategy *)_this)->_Linkage(object);
	}
};


template<typename Type, typename Backend>
class TypedCache : public Cache<MergedLinkCacheStrategy<Backend> > {
public:
	typedef MergedLinkCacheStrategy<Backend> Strategy;
	typedef Cache<Strategy> BaseType;

	TypedCache(const char *name, size_t alignment)
		: BaseType(name, sizeof(Type), alignment, _ConstructObject,
			_DestructObject, this) {}
	virtual ~TypedCache() {}

	Type *Alloc(uint32_t flags) { return (Type *)BaseType::AllocateObject(flags); }
	void Free(Type *object) { BaseType::ReturnObject(object); }

private:
	static void _ConstructObject(void *cookie, void *object)
	{
		((TypedCache *)cookie)->ConstructObject((Type *)object);
	}

	static void _DestructObject(void *cookie, void *object)
	{
		((TypedCache *)cookie)->DestructObject((Type *)object);
	}

	virtual void ConstructObject(Type *object) {}
	virtual void DestructObject(Type *object) {}
};
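// Hedged usage sketch for TypedCache; "MyEntry" and "MyEntryCache" are
// hypothetical names used only to show the intended subclassing pattern:
//
//	struct MyEntry {
//		int value;
//	};
//
//	class MyEntryCache : public TypedCache<MyEntry, MallocBackend> {
//	public:
//		MyEntryCache() : TypedCache<MyEntry, MallocBackend>("my entries", 0) {}
//
//	private:
//		virtual void ConstructObject(MyEntry *entry) { entry->value = 0; }
//	};
//
//	MyEntryCache cache;
//	MyEntry *entry = cache.Alloc(0);
//	if (entry != NULL)
//		cache.Free(entry);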


static inline int
Fls(size_t value)
{
	for (int i = 31; i >= 0; i--) {
		if ((value >> i) & 1)
			return i + 1;
	}

	return -1;
}
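// Fls() returns the 1-based index of the highest set bit (of the low 32 bits):
// Fls(1) == 1, Fls(4096) == 13, Fls(0) == -1. BaseHashCacheStrategy below uses
// Fls(objectSize) - 1 as the shift applied to buffer addresses before hashing.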


struct BaseHashCacheStrategy {
	typedef BaseCache::ObjectLink ObjectLink;
	typedef BaseCache::ObjectInfo ObjectInfo;

	struct Link : ObjectLink, HashTableLink<Link> {
		BaseCache::Slab *slab;
		void *buffer;
	};

	struct HashTableDefinition {
		typedef BaseHashCacheStrategy	ParentType;
		typedef void *					KeyType;
		typedef Link					ValueType;

		HashTableDefinition(BaseHashCacheStrategy *_parent) : parent(_parent) {}

		size_t HashKey(void *key) const
		{
			return (((uint8_t *)key) - ((uint8_t *)0)) >> parent->fLowerBoundary;
		}

		size_t Hash(Link *value) const { return HashKey(value->buffer); }

		bool Compare(void *key, Link *value) const
		{
			return value->buffer == key;
		}

		HashTableLink<Link> *GetLink(Link *value) const { return value; }

		BaseHashCacheStrategy *parent;
	};

	// for g++ 2.95
	friend class HashTableDefinition;

	typedef OpenHashTable<HashTableDefinition> HashTable;

	BaseHashCacheStrategy(BaseCache *parent)
		: fHashTable(this), fLowerBoundary(Fls(parent->ObjectSize()) - 1) {}

	void *Object(ObjectLink *link) const
	{
		return ((Link *)link)->buffer;
	}

	ObjectInfo ObjectInformation(void *object) const
	{
		Link *link = _Linkage(object);
		return ObjectInfo(link->slab, link);
	}

protected:
	Link *_Linkage(void *object) const
	{
		return fHashTable.Lookup(object);
	}

	static ObjectLink *_Linkage(void *_this, void *object)
	{
		return ((BaseHashCacheStrategy *)_this)->_Linkage(object);
	}

	HashTable fHashTable;
	const size_t fLowerBoundary;
};
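// Worked example for the hash mapping above: with a 256-byte object size,
// fLowerBoundary is Fls(256) - 1 == 8, so HashKey() hashes buffer addresses
// shifted right by 8 bits. Lookup(object) then yields the Link recording both
// the buffer address and the owning slab, which is how ObjectInformation()
// recovers the slab without relying on aligned allocations.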


template<typename Backend>
struct HashCacheStrategy : BaseCacheStrategy<Backend>, BaseHashCacheStrategy {
	typedef typename BaseCacheStrategy<Backend>::Slab Slab;
	typedef HashCacheStrategy<Backend> Strategy;

	HashCacheStrategy(BaseCache *parent)
		: BaseCacheStrategy<Backend>(parent), BaseHashCacheStrategy(parent),
			fSlabCache("slab cache", 0), fLinkCache("link cache", 0) {}

	static size_t RequiredSpace(size_t objectSize)
	{
		return objectSize;
	}

	BaseCache::Slab *NewSlab(uint32_t flags)
	{
		size_t byteCount = _SlabSize();

		Slab *slab = fSlabCache.Alloc(flags);
		if (slab == NULL)
			return NULL;

		void *pages;
		if (Backend::AllocatePages(fParent, &slab->id, &pages, byteCount,
				flags) < B_OK) {
			fSlabCache.Free(slab);
			return NULL;
		}

		if (_PrepareSlab(fParent, slab, pages, byteCount, flags) < B_OK) {
			Backend::FreePages(fParent, slab->id);
			fSlabCache.Free(slab);
			return NULL;
		}

		// it's very important that we cast this to BaseHashCacheStrategy
		// so we get the proper instance offset through void *
		return BaseCacheStrategy<Backend>::_ConstructSlab(slab, pages, 0,
			_Linkage, (BaseHashCacheStrategy *)this);
	}

	void ReturnSlab(BaseCache::Slab *slab)
	{
		_ClearSlab(fParent, slab->pages, _SlabSize());
		BaseCacheStrategy<Backend>::_DestructSlab(slab);
		fSlabCache.Free((Slab *)slab);
	}

private:
	size_t _SlabSize() const
	{
		return BaseCacheStrategy<Backend>::SlabSize(0);
	}

	status_t _PrepareSlab(BaseCache *parent, Slab *slab, void *pages,
		size_t byteCount, uint32_t flags)
	{
		uint8_t *data = (uint8_t *)pages;
		for (uint8_t *it = data;
				it < (data + byteCount); it += parent->ObjectSize()) {
			Link *link = fLinkCache.Alloc(flags);

			if (link == NULL) {
				_ClearSlabRange(parent, data, it);
				return B_NO_MEMORY;
			}

			link->slab = slab;
			link->buffer = it;
			fHashTable.Insert(link);
		}

		return B_OK;
	}

	void _ClearSlab(BaseCache *parent, void *pages, size_t byteCount)
	{
		_ClearSlabRange(parent, (uint8_t *)pages,
			((uint8_t *)pages) + byteCount);
	}

	void _ClearSlabRange(BaseCache *parent, uint8_t *data, uint8_t *end)
	{
		for (uint8_t *it = data; it < end; it += parent->ObjectSize()) {
			Link *link = fHashTable.Lookup(it);
			fHashTable.Remove(link);
			fLinkCache.Free(link);
		}
	}

	TypedCache<Slab, Backend> fSlabCache;
	TypedCache<Link, Backend> fLinkCache;
};
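// Unlike MergedLinkCacheStrategy, this strategy keeps all slab and link
// bookkeeping outside the object buffers and does not rely on aligned
// allocations, at the cost of a hash lookup per free. Hedged instantiation
// sketch (the typedef name is hypothetical):
//
//	typedef Cache<HashCacheStrategy<MallocBackend> > LargeObjectCache;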


class BaseDepot {
public:
	struct Magazine {
		Magazine *next;
		uint16_t current_round, round_count;
		void *rounds[0];
	};

	struct CPUStore {
		benaphore lock;
		Magazine *loaded, *previous;
	};

protected:
	BaseDepot();
	virtual ~BaseDepot();

	status_t InitCheck() const;

	CPUStore *CPU() const { return &fStores[smp_get_current_cpu()]; }

	void *ObtainFromStore(CPUStore *store);
	bool ReturnToStore(CPUStore *store, void *object);
	void MakeEmpty();

	virtual void ReturnObject(void *object) = 0;

	bool _ExchangeWithFull(Magazine* &magazine);
	bool _ExchangeWithEmpty(Magazine* &magazine);
	void _EmptyMagazine(Magazine *magazine);

	Magazine *_AllocMagazine();
	void _FreeMagazine(Magazine *magazine);

	benaphore fLock;
	Magazine *fFull, *fEmpty;
	size_t fFullCount, fEmptyCount;
	CPUStore *fStores;
};
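// The zero-length rounds[] member makes Magazine a variable-size structure; a
// hedged sketch of how one would typically be allocated (the round count of 16
// is a hypothetical value, not one taken from this allocator):
//
//	uint16_t roundCount = 16;
//	Magazine *magazine = (Magazine *)malloc(sizeof(Magazine)
//		+ roundCount * sizeof(void *));
//	if (magazine != NULL) {
//		magazine->next = NULL;
//		magazine->current_round = 0;
//		magazine->round_count = roundCount;
//	}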


template<typename CacheType>
class LocalCache : public CacheType, protected BaseDepot {
public:
	typedef typename CacheType::Constructor Constructor;
	typedef typename CacheType::Destructor Destructor;

	LocalCache(const char *name, size_t objectSize, size_t alignment,
		Constructor _constructor, Destructor _destructor, void *_cookie)
		: CacheType(name, objectSize, alignment, _constructor, _destructor,
			_cookie) {}

	~LocalCache() { Destroy(); }

	void *Alloc(uint32_t flags)
	{
		void *object = BaseDepot::ObtainFromStore(CPU());
		if (object == NULL)
			object = CacheType::AllocateObject(flags);
		return object;
	}

	void Free(void *object)
	{
		if (!BaseDepot::ReturnToStore(CPU(), object))
			CacheType::ReturnObject(object);
	}

	void Destroy() { BaseDepot::MakeEmpty(); }

private:
	void ReturnObject(void *object)
	{
		CacheType::ReturnObject(object);
	}
};
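// Hedged usage sketch for LocalCache; the typedef and cache name are
// hypothetical, and the no-op callbacks exist only to keep the example
// self-contained:
//
//	static void no_op(void *cookie, void *object) {}
//
//	typedef LocalCache<Cache<MergedLinkCacheStrategy<MallocBackend> > >
//		ObjectCache;
//
//	ObjectCache cache("buffers", 256, 0, no_op, no_op, NULL);
//	void *buffer = cache.Alloc(0);
//	if (buffer != NULL)
//		cache.Free(buffer);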

#endif