xref: /haiku/src/system/kernel/slab/Slab.cpp (revision 7200c6f499191933e4b04e1f9c9a269d10fc92c0)
1 /*
2  * Copyright 2008, Axel Dörfler. All Rights Reserved.
3  * Copyright 2007, Hugo Santos. All Rights Reserved.
4  *
5  * Distributed under the terms of the MIT License.
6  */
7 
8 
9 #include <Slab.h>
10 #include "slab_private.h"
11 
12 #include <algorithm>
13 #include <new>
14 #include <signal.h>
15 #include <stdlib.h>
16 #include <string.h>
17 
18 #include <KernelExport.h>
19 
20 #include <condition_variable.h>
21 #include <Depot.h>
22 #include <kernel.h>
23 #include <low_resource_manager.h>
24 #include <smp.h>
25 #include <tracing.h>
26 #include <util/AutoLock.h>
27 #include <util/DoublyLinkedList.h>
28 #include <util/OpenHashTable.h>
29 #include <util/khash.h>
30 #include <vm.h>
31 #include <vm_address_space.h>
32 
33 
34 // TODO: kMagazineCapacity should be dynamically tuned per cache.
35 
36 
37 //#define TRACE_SLAB
38 
39 #ifdef TRACE_SLAB
40 #define TRACE_CACHE(cache, format, args...) \
41 	dprintf("Cache[%p, %s] " format "\n", cache, cache->name , ##args)
42 #else
43 #define TRACE_CACHE(cache, format, args...) do { } while (0)
44 #endif
45 
46 #define COMPONENT_PARANOIA_LEVEL	OBJECT_CACHE_PARANOIA
47 #include <debug_paranoia.h>
48 
49 
50 #define CACHE_ALIGN_ON_SIZE	(30 << 1)
51 
52 static const int kMagazineCapacity = 32;
53 static const size_t kCacheColorPeriod = 8;
54 
55 struct object_link {
56 	struct object_link *next;
57 };
58 
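// A slab is a contiguous run of pages carved into equally sized objects.
// Free objects are kept on a singly linked list threaded through the
// object_link footer stored in the last bytes of each object (see
// link_to_object()/object_to_link() below).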
59 struct slab : DoublyLinkedListLinkImpl<slab> {
60 	void *pages;
61 	size_t size;		// total number of objects
62 	size_t count;		// free objects
63 	size_t offset;
64 	object_link *free;
65 };
66 
67 typedef DoublyLinkedList<slab> SlabList;
68 struct ResizeRequest;
69 
70 struct object_cache : DoublyLinkedListLinkImpl<object_cache> {
71 	char name[32];
72 	mutex lock;
73 	size_t object_size;
74 	size_t cache_color_cycle;
75 	SlabList empty, partial, full;
76 	size_t total_objects;		// total number of objects
77 	size_t used_count;			// used objects
78 	size_t empty_count;			// empty slabs
79 	size_t pressure;
80 	size_t min_object_reserve;	// minimum number of free objects
81 
82 	size_t slab_size;
83 	size_t usage, maximum;
84 	uint32 flags;
85 
86 	ResizeRequest *resize_request;
87 
88 	void *cookie;
89 	object_cache_constructor constructor;
90 	object_cache_destructor destructor;
91 	object_cache_reclaimer reclaimer;
92 
93 	status_t (*allocate_pages)(object_cache *cache, void **pages,
94 		uint32 flags, bool unlockWhileAllocating);
95 	void (*free_pages)(object_cache *cache, void *pages);
96 
97 	object_depot depot;
98 
99 	virtual slab *CreateSlab(uint32 flags, bool unlockWhileAllocating) = 0;
100 	virtual void ReturnSlab(slab *slab) = 0;
101 	virtual slab *ObjectSlab(void *object) const = 0;
102 
103 	slab *InitSlab(slab *slab, void *pages, size_t byteCount);
104 	void UninitSlab(slab *slab);
105 
106 	virtual status_t PrepareObject(slab *source, void *object) { return B_OK; }
107 	virtual void UnprepareObject(slab *source, void *object) {}
108 
109 	virtual ~object_cache() {}
110 
111 	bool Lock()		{ return mutex_lock(&lock) == B_OK; }
112 	void Unlock()	{ mutex_unlock(&lock); }
113 };
114 
115 typedef DoublyLinkedList<object_cache> ObjectCacheList;
116 
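// Cache for small objects (see create_object_cache_etc()): the slab header
// lives at the end of the slab's own pages, so ObjectSlab() can locate it by
// rounding the object address down to the slab size.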
117 struct SmallObjectCache : object_cache {
118 	slab *CreateSlab(uint32 flags, bool unlockWhileAllocating);
119 	void ReturnSlab(slab *slab);
120 	slab *ObjectSlab(void *object) const;
121 };
122 
123 
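// Cache for larger objects: slab headers are allocated separately, and each
// object is mapped to its slab through a hash table keyed by the object's
// address (see PrepareObject()/ObjectSlab()).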
124 struct HashedObjectCache : object_cache {
125 	struct Link : HashTableLink<Link> {
126 		const void *buffer;
127 		slab *parent;
128 	};
129 
130 	struct Definition {
131 		typedef HashedObjectCache	ParentType;
132 		typedef const void *		KeyType;
133 		typedef Link				ValueType;
134 
135 		Definition(HashedObjectCache *_parent) : parent(_parent) {}
136 		Definition(const Definition& definition) : parent(definition.parent) {}
137 
138 		size_t HashKey(const void *key) const
139 		{
140 			return (((const uint8 *)key) - ((const uint8 *)0))
141 				>> parent->lower_boundary;
142 		}
143 
144 		size_t Hash(Link *value) const { return HashKey(value->buffer); }
145 		bool Compare(const void *key, Link *value) const
146 			{ return value->buffer == key; }
147 		HashTableLink<Link> *GetLink(Link *value) const { return value; }
148 
149 		HashedObjectCache *parent;
150 	};
151 
152 	typedef OpenHashTable<Definition> HashTable;
153 
154 	HashedObjectCache()
155 		: hash_table(this) {}
156 
157 	slab *CreateSlab(uint32 flags, bool unlockWhileAllocating);
158 	void ReturnSlab(slab *slab);
159 	slab *ObjectSlab(void *object) const;
160 	status_t PrepareObject(slab *source, void *object);
161 	void UnprepareObject(slab *source, void *object);
162 
163 	HashTable hash_table;
164 	size_t lower_boundary;
165 };
166 
167 
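// A magazine is a fixed-size array of object pointers ("rounds"). Magazines
// are kept in per-CPU stores (loaded/previous) and exchanged with the
// depot's full/empty lists, following Bonwick's magazine scheme referenced
// further below.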
168 struct depot_magazine {
169 	struct depot_magazine *next;
170 	uint16 current_round, round_count;
171 	void *rounds[0];
172 };
173 
174 
175 struct depot_cpu_store {
176 	recursive_lock lock;
177 	struct depot_magazine *loaded, *previous;
178 };
179 
180 struct ResizeRequest : DoublyLinkedListLinkImpl<ResizeRequest> {
181 	ResizeRequest(object_cache* cache)
182 		:
183 		cache(cache),
184 		pending(false),
185 		delete_when_done(false)
186 	{
187 	}
188 
189 	object_cache*	cache;
190 	bool			pending;
191 	bool			delete_when_done;
192 };
193 
194 typedef DoublyLinkedList<ResizeRequest> ResizeRequestQueue;
195 
196 
197 static ObjectCacheList sObjectCaches;
198 static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");
199 
200 static uint8 *sInitialBegin, *sInitialLimit, *sInitialPointer;
201 static kernel_args *sKernelArgs;
202 
203 
204 static status_t object_cache_reserve_internal(object_cache *cache,
205 	size_t object_count, uint32 flags, bool unlockWhileAllocating);
206 static depot_magazine *alloc_magazine();
207 static void free_magazine(depot_magazine *magazine);
208 
209 static mutex sResizeRequestsLock
210 	= MUTEX_INITIALIZER("object cache resize requests");
211 static ResizeRequestQueue sResizeRequests;
212 static ConditionVariable sResizeRequestsCondition;
213 
214 
215 #if OBJECT_CACHE_TRACING
216 
217 
218 namespace ObjectCacheTracing {
219 
220 class ObjectCacheTraceEntry : public AbstractTraceEntry {
221 	public:
222 		ObjectCacheTraceEntry(object_cache* cache)
223 			:
224 			fCache(cache)
225 		{
226 		}
227 
228 	protected:
229 		object_cache*	fCache;
230 };
231 
232 
233 class Create : public ObjectCacheTraceEntry {
234 	public:
235 		Create(const char* name, size_t objectSize, size_t alignment,
236 				size_t maxByteUsage, uint32 flags, void *cookie,
237 				object_cache *cache)
238 			:
239 			ObjectCacheTraceEntry(cache),
240 			fObjectSize(objectSize),
241 			fAlignment(alignment),
242 			fMaxByteUsage(maxByteUsage),
243 			fFlags(flags),
244 			fCookie(cookie)
245 		{
246 			fName = alloc_tracing_buffer_strcpy(name, 64, false);
247 			Initialized();
248 		}
249 
250 		virtual void AddDump(TraceOutput& out)
251 		{
252 			out.Print("object cache create: name: \"%s\", object size: %lu, "
253 				"alignment: %lu, max usage: %lu, flags: 0x%lx, cookie: %p "
254 				"-> cache: %p", fName, fObjectSize, fAlignment, fMaxByteUsage,
255 					fFlags, fCookie, fCache);
256 		}
257 
258 	private:
259 		const char*	fName;
260 		size_t		fObjectSize;
261 		size_t		fAlignment;
262 		size_t		fMaxByteUsage;
263 		uint32		fFlags;
264 		void*		fCookie;
265 };
266 
267 
268 class Delete : public ObjectCacheTraceEntry {
269 	public:
270 		Delete(object_cache *cache)
271 			:
272 			ObjectCacheTraceEntry(cache)
273 		{
274 			Initialized();
275 		}
276 
277 		virtual void AddDump(TraceOutput& out)
278 		{
279 			out.Print("object cache delete: %p", fCache);
280 		}
281 };
282 
283 
284 class Alloc : public ObjectCacheTraceEntry {
285 	public:
286 		Alloc(object_cache *cache, uint32 flags, void* object)
287 			:
288 			ObjectCacheTraceEntry(cache),
289 			fFlags(flags),
290 			fObject(object)
291 		{
292 			Initialized();
293 		}
294 
295 		virtual void AddDump(TraceOutput& out)
296 		{
297 			out.Print("object cache alloc: cache: %p, flags: 0x%lx -> "
298 				"object: %p", fCache, fFlags, fObject);
299 		}
300 
301 	private:
302 		uint32		fFlags;
303 		void*		fObject;
304 };
305 
306 
307 class Free : public ObjectCacheTraceEntry {
308 	public:
309 		Free(object_cache *cache, void* object)
310 			:
311 			ObjectCacheTraceEntry(cache),
312 			fObject(object)
313 		{
314 			Initialized();
315 		}
316 
317 		virtual void AddDump(TraceOutput& out)
318 		{
319 			out.Print("object cache free: cache: %p, object: %p", fCache,
320 				fObject);
321 		}
322 
323 	private:
324 		void*		fObject;
325 };
326 
327 
328 class Reserve : public ObjectCacheTraceEntry {
329 	public:
330 		Reserve(object_cache *cache, size_t count, uint32 flags)
331 			:
332 			ObjectCacheTraceEntry(cache),
333 			fCount(count),
334 			fFlags(flags)
335 		{
336 			Initialized();
337 		}
338 
339 		virtual void AddDump(TraceOutput& out)
340 		{
341 			out.Print("object cache reserve: cache: %p, count: %lu, "
342 				"flags: 0x%lx", fCache, fCount, fFlags);
343 		}
344 
345 	private:
346 		size_t		fCount;
347 		uint32		fFlags;
348 };
349 
350 
351 }	// namespace ObjectCacheTracing
352 
353 #	define T(x)	new(std::nothrow) ObjectCacheTracing::x
354 
355 #else
356 #	define T(x)
357 #endif	// OBJECT_CACHE_TRACING
358 
359 
360 // #pragma mark -
361 
362 
363 template<typename Type> static Type *
364 _pop(Type* &head)
365 {
366 	Type *oldHead = head;
367 	head = head->next;
368 	return oldHead;
369 }
370 
371 
372 template<typename Type> static inline void
373 _push(Type* &head, Type *object)
374 {
375 	object->next = head;
376 	head = object;
377 }
378 
379 
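// The free list link of an object occupies the object's last
// sizeof(object_link) bytes; these two helpers convert between an object's
// start address and its embedded link.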
380 static inline void *
381 link_to_object(object_link *link, size_t objectSize)
382 {
383 	return ((uint8 *)link) - (objectSize - sizeof(object_link));
384 }
385 
386 
387 static inline object_link *
388 object_to_link(void *object, size_t objectSize)
389 {
390 	return (object_link *)(((uint8 *)object)
391 		+ (objectSize - sizeof(object_link)));
392 }
393 
394 
395 static inline depot_cpu_store *
396 object_depot_cpu(object_depot *depot)
397 {
398 	return &depot->stores[smp_get_current_cpu()];
399 }
400 
401 
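// Returns the index of the highest set bit of value (i.e. floor(log2(value))),
// or -1 if value is 0.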
402 static inline int
403 __fls0(size_t value)
404 {
405 	if (value == 0)
406 		return -1;
407 
408 	int bit;
409 	for (bit = 0; value != 1; bit++)
410 		value >>= 1;
411 	return bit;
412 }
413 
414 
415 static void *
416 internal_alloc(size_t size, uint32 flags)
417 {
418 	if (flags & CACHE_DURING_BOOT) {
419 		if ((sInitialPointer + size) > sInitialLimit)
420 			panic("internal_alloc: ran out of initial space");
421 
422 		uint8 *buffer = sInitialPointer;
423 		sInitialPointer += size;
424 		return buffer;
425 	}
426 
427 	// TODO: Support CACHE_DONT_SLEEP!
428 	return block_alloc(size);
429 }
430 
431 
432 static void
433 internal_free(void *_buffer)
434 {
435 	uint8 *buffer = (uint8 *)_buffer;
436 
437 	if (buffer >= sInitialBegin && buffer < sInitialLimit)
438 		return;
439 
440 	block_free(buffer);
441 }
442 
443 
444 static status_t
445 area_allocate_pages(object_cache *cache, void **pages, uint32 flags,
446 	bool unlockWhileAllocating)
447 {
448 	TRACE_CACHE(cache, "allocate pages (%lu, 0x0%lx)", cache->slab_size, flags);
449 
450 	uint32 lock = B_FULL_LOCK;
451 	if (cache->flags & CACHE_UNLOCKED_PAGES)
452 		lock = B_NO_LOCK;
453 
454 	uint32 addressSpec = B_ANY_KERNEL_ADDRESS;
455 	if ((cache->flags & CACHE_ALIGN_ON_SIZE) != 0
456 		&& cache->slab_size != B_PAGE_SIZE)
457 		addressSpec = B_ANY_KERNEL_BLOCK_ADDRESS;
458 
459 	if (unlockWhileAllocating)
460 		cache->Unlock();
461 
462 	// If we are allocating, it is because we need the pages immediately,
463 	// so we lock them. When moving the slab to the empty list we should
464 	// unlock them, and lock them again when taking one from the empty list.
465 	area_id areaId = vm_create_anonymous_area(vm_kernel_address_space_id(),
466 		cache->name, pages, addressSpec, cache->slab_size, lock,
467 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
468 		(flags & CACHE_DONT_SLEEP) != 0 ? CREATE_AREA_DONT_WAIT : 0, true);
469 
470 	if (unlockWhileAllocating)
471 		cache->Lock();
472 
473 	if (areaId < 0)
474 		return areaId;
475 
476 	cache->usage += cache->slab_size;
477 
478 	TRACE_CACHE(cache, "  ... = { %ld, %p }", areaId, *pages);
479 
480 	return B_OK;
481 }
482 
483 
484 static void
485 area_free_pages(object_cache *cache, void *pages)
486 {
487 	area_id id = area_for(pages);
488 
489 	TRACE_CACHE(cache, "delete pages %p (%ld)", pages, id);
490 
491 	if (id < B_OK)
492 		panic("object cache: freeing unknown area");
493 
494 	delete_area(id);
495 
496 	cache->usage -= cache->slab_size;
497 }
498 
499 
500 static status_t
501 early_allocate_pages(object_cache *cache, void **pages, uint32 flags,
502 	bool unlockWhileAllocating)
503 {
504 	TRACE_CACHE(cache, "early allocate pages (%lu, 0x0%lx)", cache->slab_size,
505 		flags);
506 
507 	if (unlockWhileAllocating)
508 		cache->Unlock();
509 
510 	addr_t base = vm_allocate_early(sKernelArgs, cache->slab_size,
511 		cache->slab_size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
512 
513 	if (unlockWhileAllocating)
514 		cache->Lock();
515 
516 	*pages = (void *)base;
517 
518 	cache->usage += cache->slab_size;
519 
520 	TRACE_CACHE(cache, "  ... = { %p }", *pages);
521 
522 	return B_OK;
523 }
524 
525 
526 static void
527 early_free_pages(object_cache *cache, void *pages)
528 {
529 	panic("memory pressure on bootup?");
530 }
531 
532 
533 static void
534 object_cache_low_memory(void *_self, uint32 resources, int32 level)
535 {
536 	if (level == B_NO_LOW_RESOURCE)
537 		return;
538 
539 	object_cache *cache = (object_cache *)_self;
540 
541 	// We call the reclaimer without holding the object cache lock to give
542 	// the owner a chance to return objects to the slabs. Since we only
543 	// register the low memory handler after the object cache has been fully
544 	// initialized, we should be fine unless some funky memory reordering
545 	// business is going on.
546 
547 	if (cache->reclaimer)
548 		cache->reclaimer(cache->cookie, level);
549 
550 	MutexLocker _(cache->lock);
551 	size_t minimumAllowed;
552 
553 	switch (level) {
554 	case B_LOW_RESOURCE_NOTE:
555 		minimumAllowed = cache->pressure / 2 + 1;
556 		break;
557 
558 	case B_LOW_RESOURCE_WARNING:
559 		cache->pressure /= 2;
560 		minimumAllowed = 0;
561 		break;
562 
563 	default:
564 		cache->pressure = 0;
565 		minimumAllowed = 0;
566 		break;
567 	}
568 
569 	// If the object cache has minimum object reserve, make sure that we don't
570 	// free too many slabs.
571 	if (cache->min_object_reserve > 0 && cache->empty_count > 0) {
572 		size_t objectsPerSlab = cache->empty.Head()->size;
573 		size_t freeObjects = cache->total_objects - cache->used_count;
574 
575 		if (cache->min_object_reserve + objectsPerSlab >= freeObjects)
576 			return;
577 
578 		size_t slabsToFree = (freeObjects - cache->min_object_reserve)
579 			/ objectsPerSlab;
580 
581 		if (cache->empty_count > minimumAllowed + slabsToFree)
582 			minimumAllowed = cache->empty_count - slabsToFree;
583 	}
584 
585 	if (cache->empty_count <= minimumAllowed)
586 		return;
587 
588 	TRACE_CACHE(cache, "cache: memory pressure, will release down to %lu.",
589 		minimumAllowed);
590 
591 	while (cache->empty_count > minimumAllowed) {
592 		cache->ReturnSlab(cache->empty.RemoveHead());
593 		cache->empty_count--;
594 	}
595 }
596 
597 
598 static void
599 object_cache_return_object_wrapper(object_depot *depot, void *object)
600 {
601 	// TODO: The offset calculation might be wrong because we hardcode a
602 	// SmallObjectCache instead of a base object_cache. Also, this likely
603 	// has unacceptable overhead.
604 	SmallObjectCache dummyCache;
605 	object_cache *cache = (object_cache *)(((uint8 *)depot)
606 		- offset_of_member(dummyCache, depot));
607 
608 	object_cache_free(cache, object);
609 }
610 
611 
612 static status_t
613 object_cache_init(object_cache *cache, const char *name, size_t objectSize,
614 	size_t alignment, size_t maximum, uint32 flags, void *cookie,
615 	object_cache_constructor constructor, object_cache_destructor destructor,
616 	object_cache_reclaimer reclaimer)
617 {
618 	strlcpy(cache->name, name, sizeof(cache->name));
619 
620 	mutex_init(&cache->lock, cache->name);
621 
622 	if (objectSize < sizeof(object_link))
623 		objectSize = sizeof(object_link);
624 
625 	if (alignment > 0 && (objectSize & (alignment - 1)))
626 		cache->object_size = objectSize + alignment
627 			- (objectSize & (alignment - 1));
628 	else
629 		cache->object_size = objectSize;
630 
631 	TRACE_CACHE(cache, "init %lu, %lu -> %lu", objectSize, alignment,
632 		cache->object_size);
633 
634 	cache->cache_color_cycle = 0;
635 	cache->total_objects = 0;
636 	cache->used_count = 0;
637 	cache->empty_count = 0;
638 	cache->pressure = 0;
639 	cache->min_object_reserve = 0;
640 
641 	cache->usage = 0;
642 	cache->maximum = maximum;
643 
644 	cache->flags = flags;
645 
646 	cache->resize_request = NULL;
647 
648 	// TODO: Depot destruction is obviously broken.
649 	// There is no gain in using the depot in single-CPU setups anyway.
650 	//if (smp_get_num_cpus() == 1)
651 		cache->flags |= CACHE_NO_DEPOT;
652 
653 	if (!(cache->flags & CACHE_NO_DEPOT)) {
654 		status_t status = object_depot_init(&cache->depot, flags,
655 			object_cache_return_object_wrapper);
656 		if (status < B_OK) {
657 			mutex_destroy(&cache->lock);
658 			return status;
659 		}
660 	}
661 
662 	cache->cookie = cookie;
663 	cache->constructor = constructor;
664 	cache->destructor = destructor;
665 	cache->reclaimer = reclaimer;
666 
667 	if (cache->flags & CACHE_DURING_BOOT) {
668 		cache->allocate_pages = early_allocate_pages;
669 		cache->free_pages = early_free_pages;
670 	} else {
671 		cache->allocate_pages = area_allocate_pages;
672 		cache->free_pages = area_free_pages;
673 	}
674 
675 	register_low_resource_handler(object_cache_low_memory, cache,
676 		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY, 5);
677 
678 	MutexLocker _(sObjectCacheListLock);
679 	sObjectCaches.Add(cache);
680 
681 	return B_OK;
682 }
683 
684 
685 static void
686 object_cache_commit_slab(object_cache *cache, slab *slab)
687 {
688 	void *pages = (void *)ROUNDOWN((addr_t)slab->pages, B_PAGE_SIZE);
689 	if (create_area(cache->name, &pages, B_EXACT_ADDRESS, cache->slab_size,
690 		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA) < B_OK)
691 		panic("failed to create_area()");
692 }
693 
694 
695 static void
696 object_cache_commit_pre_pages(object_cache *cache)
697 {
698 	SlabList::Iterator it = cache->full.GetIterator();
699 	while (it.HasNext())
700 		object_cache_commit_slab(cache, it.Next());
701 
702 	it = cache->partial.GetIterator();
703 	while (it.HasNext())
704 		object_cache_commit_slab(cache, it.Next());
705 
706 	it = cache->empty.GetIterator();
707 	while (it.HasNext())
708 		object_cache_commit_slab(cache, it.Next());
709 
710 	cache->allocate_pages = area_allocate_pages;
711 	cache->free_pages = area_free_pages;
712 }
713 
714 
715 static status_t
716 object_cache_resizer(void*)
717 {
718 	while (true) {
719 		MutexLocker locker(sResizeRequestsLock);
720 
721 		// wait for the next request
722 		while (sResizeRequests.IsEmpty()) {
723 			ConditionVariableEntry entry;
724 			sResizeRequestsCondition.Add(&entry);
725 			locker.Unlock();
726 			entry.Wait();
727 			locker.Lock();
728 		}
729 
730 		ResizeRequest* request = sResizeRequests.RemoveHead();
731 
732 		locker.Unlock();
733 
734 		// resize the cache, if necessary
735 
736 		object_cache* cache = request->cache;
737 
738 		MutexLocker cacheLocker(cache->lock);
739 
740 		size_t freeObjects = cache->total_objects - cache->used_count;
741 
742 		while (freeObjects < cache->min_object_reserve) {
743 			status_t error = object_cache_reserve_internal(cache,
744 				cache->min_object_reserve - freeObjects, 0, true);
745 			if (error != B_OK) {
746 				dprintf("object cache resizer: Failed to resize object cache "
747 					"%p!\n", cache);
748 				break;
749 			}
750 
751 			freeObjects = cache->total_objects - cache->used_count;
752 		}
753 
754 		request->pending = false;
755 
756 		if (request->delete_when_done)
757 			delete request;
758 	}
759 
760 	// can never get here
761 	return B_OK;
762 }
763 
764 
765 static void
766 increase_object_reserve(object_cache* cache)
767 {
768 	if (cache->resize_request == NULL || cache->resize_request->pending)
769 		return;
770 
771 	cache->resize_request->pending = true;
772 
773 	MutexLocker locker(sResizeRequestsLock);
774 	sResizeRequests.Add(cache->resize_request);
775 	sResizeRequestsCondition.NotifyAll();
776 }
777 
778 
779 static void
780 delete_cache(object_cache *cache)
781 {
782 	cache->~object_cache();
783 	internal_free(cache);
784 }
785 
786 
787 static SmallObjectCache *
788 create_small_object_cache(const char *name, size_t object_size,
789 	size_t alignment, size_t maximum, uint32 flags, void *cookie,
790 	object_cache_constructor constructor, object_cache_destructor destructor,
791 	object_cache_reclaimer reclaimer)
792 {
793 	void *buffer = internal_alloc(sizeof(SmallObjectCache), flags);
794 	if (buffer == NULL)
795 		return NULL;
796 
797 	SmallObjectCache *cache = new (buffer) SmallObjectCache();
798 
799 	if (object_cache_init(cache, name, object_size, alignment, maximum,
800 			flags | CACHE_ALIGN_ON_SIZE, cookie, constructor, destructor,
801 			reclaimer) < B_OK) {
802 		delete_cache(cache);
803 		return NULL;
804 	}
805 
806 	if ((flags & CACHE_LARGE_SLAB) != 0)
807 		cache->slab_size = max_c(16 * B_PAGE_SIZE, 1024 * object_size);
808 	else
809 		cache->slab_size = B_PAGE_SIZE;
810 
811 	return cache;
812 }
813 
814 
815 static HashedObjectCache *
816 create_hashed_object_cache(const char *name, size_t object_size,
817 	size_t alignment, size_t maximum, uint32 flags, void *cookie,
818 	object_cache_constructor constructor, object_cache_destructor destructor,
819 	object_cache_reclaimer reclaimer)
820 {
821 	void *buffer = internal_alloc(sizeof(HashedObjectCache), flags);
822 	if (buffer == NULL)
823 		return NULL;
824 
825 	HashedObjectCache *cache = new (buffer) HashedObjectCache();
826 
827 	if (object_cache_init(cache, name, object_size, alignment, maximum, flags,
828 		cookie, constructor, destructor, reclaimer) < B_OK) {
829 		delete_cache(cache);
830 		return NULL;
831 	}
832 
833 	if ((flags & CACHE_LARGE_SLAB) != 0)
834 		cache->slab_size = max_c(256 * B_PAGE_SIZE, 128 * object_size);
835 	else
836 		cache->slab_size = max_c(16 * B_PAGE_SIZE, 8 * object_size);
837 	cache->lower_boundary = __fls0(cache->object_size);
838 
839 	return cache;
840 }
841 
842 
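// Typical usage (illustrative sketch only; "my_object" and "sMyCache" are
// made-up names, and error handling is omitted):
//
//	static object_cache *sMyCache;
//
//	sMyCache = create_object_cache("my objects", sizeof(my_object), 8,
//		NULL, NULL, NULL);
//	my_object *object = (my_object *)object_cache_alloc(sMyCache, 0);
//	...
//	object_cache_free(sMyCache, object);
//	delete_object_cache(sMyCache);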
843 object_cache *
844 create_object_cache(const char *name, size_t object_size, size_t alignment,
845 	void *cookie, object_cache_constructor constructor,
846 	object_cache_destructor destructor)
847 {
848 	return create_object_cache_etc(name, object_size, alignment, 0, 0, cookie,
849 		constructor, destructor, NULL);
850 }
851 
852 
853 object_cache *
854 create_object_cache_etc(const char *name, size_t objectSize, size_t alignment,
855 	size_t maximum, uint32 flags, void *cookie,
856 	object_cache_constructor constructor, object_cache_destructor destructor,
857 	object_cache_reclaimer reclaimer)
858 {
859 	object_cache *cache;
860 
861 	if (objectSize == 0) {
862 		cache = NULL;
863 	} else if (objectSize <= 256) {
864 		cache = create_small_object_cache(name, objectSize, alignment, maximum,
865 			flags, cookie, constructor, destructor, reclaimer);
866 	} else {
867 		cache = create_hashed_object_cache(name, objectSize, alignment,
868 			maximum, flags, cookie, constructor, destructor, reclaimer);
869 	}
870 
871 	T(Create(name, objectSize, alignment, maximum, flags, cookie, cache));
872 	return cache;
873 }
874 
875 
876 void
877 delete_object_cache(object_cache *cache)
878 {
879 	T(Delete(cache));
880 
881 	{
882 		MutexLocker _(sObjectCacheListLock);
883 		sObjectCaches.Remove(cache);
884 	}
885 
886 	mutex_lock(&cache->lock);
887 
888 	if (!(cache->flags & CACHE_NO_DEPOT))
889 		object_depot_destroy(&cache->depot);
890 
891 	unregister_low_resource_handler(object_cache_low_memory, cache);
892 
893 	if (!cache->full.IsEmpty())
894 		panic("cache destroy: still has full slabs");
895 
896 	if (!cache->partial.IsEmpty())
897 		panic("cache destroy: still has partial slabs");
898 
899 	while (!cache->empty.IsEmpty())
900 		cache->ReturnSlab(cache->empty.RemoveHead());
901 
902 	mutex_destroy(&cache->lock);
903 	delete_cache(cache);
904 }
905 
906 
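// Makes sure the cache keeps at least objectCount free objects available.
// A reserve > 0 creates a ResizeRequest that the object cache resizer thread
// services asynchronously (see object_cache_resizer()).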
907 status_t
908 object_cache_set_minimum_reserve(object_cache *cache, size_t objectCount)
909 {
910 	MutexLocker _(cache->lock);
911 
912 	if (cache->min_object_reserve == objectCount)
913 		return B_OK;
914 
915 	if (cache->min_object_reserve == 0) {
916 		cache->resize_request = new(std::nothrow) ResizeRequest(cache);
917 		if (cache->resize_request == NULL)
918 			return B_NO_MEMORY;
919 	} else if (objectCount == 0) {
920 		if (cache->resize_request->pending)
921 			cache->resize_request->delete_when_done = true;
922 		else
923 			delete cache->resize_request;
924 
925 		cache->resize_request = NULL;
926 	}
927 
928 	cache->min_object_reserve = objectCount;
929 
930 	increase_object_reserve(cache);
931 
932 	return B_OK;
933 }
934 
935 
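// Allocation path: try the per-CPU depot first (unless CACHE_NO_DEPOT is
// set), then a partial slab, then an empty slab, and finally create a new
// slab via object_cache_reserve_internal().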
936 void *
937 object_cache_alloc(object_cache *cache, uint32 flags)
938 {
939 	if (!(cache->flags & CACHE_NO_DEPOT)) {
940 		void *object = object_depot_obtain(&cache->depot);
941 		if (object) {
942 			T(Alloc(cache, flags, object));
943 			return object;
944 		}
945 	}
946 
947 	MutexLocker _(cache->lock);
948 	slab *source;
949 
950 	if (cache->partial.IsEmpty()) {
951 		if (cache->empty.IsEmpty()) {
952 			if (object_cache_reserve_internal(cache, 1, flags, false) < B_OK) {
953 				T(Alloc(cache, flags, NULL));
954 				return NULL;
955 			}
956 
957 			cache->pressure++;
958 		}
959 
960 		source = cache->empty.RemoveHead();
961 		cache->empty_count--;
962 		cache->partial.Add(source);
963 	} else {
964 		source = cache->partial.Head();
965 	}
966 
967 	ParanoiaChecker _2(source);
968 
969 	object_link *link = _pop(source->free);
970 	source->count--;
971 	cache->used_count++;
972 
973 	if (cache->total_objects - cache->used_count < cache->min_object_reserve)
974 		increase_object_reserve(cache);
975 
976 	REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next,
977 		sizeof(void*));
978 
979 	TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.",
980 		link_to_object(link, cache->object_size), link, source, source->count);
981 
982 	if (source->count == 0) {
983 		cache->partial.Remove(source);
984 		cache->full.Add(source);
985 	}
986 
987 	void *object = link_to_object(link, cache->object_size);
988 	T(Alloc(cache, flags, object));
989 	return object;
990 }
991 
992 
993 static void
994 object_cache_return_to_slab(object_cache *cache, slab *source, void *object)
995 {
996 	if (source == NULL)
997 		panic("object_cache: free'd object has no slab");
998 
999 	ParanoiaChecker _(source);
1000 
1001 	object_link *link = object_to_link(object, cache->object_size);
1002 
1003 	TRACE_CACHE(cache, "returning %p (%p) to %p, %lu used (%lu empty slabs).",
1004 		object, link, source, source->size - source->count,
1005 		cache->empty_count);
1006 
1007 	_push(source->free, link);
1008 	source->count++;
1009 	cache->used_count--;
1010 
1011 	ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next, sizeof(void*));
1012 
1013 	if (source->count == source->size) {
1014 		cache->partial.Remove(source);
1015 
1016 		if (cache->empty_count < cache->pressure
1017 			&& cache->total_objects - cache->used_count - source->size
1018 				>= cache->min_object_reserve) {
1019 			cache->empty_count++;
1020 			cache->empty.Add(source);
1021 		} else {
1022 			cache->ReturnSlab(source);
1023 		}
1024 	} else if (source->count == 1) {
1025 		cache->full.Remove(source);
1026 		cache->partial.Add(source);
1027 	}
1028 }
1029 
1030 
1031 void
1032 object_cache_free(object_cache *cache, void *object)
1033 {
1034 	if (object == NULL)
1035 		return;
1036 
1037 	T(Free(cache, object));
1038 
1039 	if (!(cache->flags & CACHE_NO_DEPOT)) {
1040 		if (object_depot_store(&cache->depot, object))
1041 			return;
1042 	}
1043 
1044 	MutexLocker _(cache->lock);
1045 	object_cache_return_to_slab(cache, cache->ObjectSlab(object), object);
1046 }
1047 
1048 
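// Creates enough new (empty) slabs to hold roughly objectCount additional
// objects and adds them to the cache's empty list.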
1049 static status_t
1050 object_cache_reserve_internal(object_cache *cache, size_t objectCount,
1051 	uint32 flags, bool unlockWhileAllocating)
1052 {
1053 	size_t numBytes = objectCount * cache->object_size;
1054 	size_t slabCount = ((numBytes - 1) / cache->slab_size) + 1;
1055 		// TODO: This also counts the unusable space of each slab, which can
1056 		// add up.
1057 
1058 	while (slabCount > 0) {
1059 		slab *newSlab = cache->CreateSlab(flags, unlockWhileAllocating);
1060 		if (newSlab == NULL)
1061 			return B_NO_MEMORY;
1062 
1063 		cache->empty.Add(newSlab);
1064 		cache->empty_count++;
1065 		slabCount--;
1066 	}
1067 
1068 	return B_OK;
1069 }
1070 
1071 
1072 status_t
1073 object_cache_reserve(object_cache *cache, size_t objectCount, uint32 flags)
1074 {
1075 	if (objectCount == 0)
1076 		return B_OK;
1077 
1078 	T(Reserve(cache, objectCount, flags));
1079 
1080 	MutexLocker _(cache->lock);
1081 	return object_cache_reserve_internal(cache, objectCount, flags, false);
1082 }
1083 
1084 
1085 void
1086 object_cache_get_usage(object_cache *cache, size_t *_allocatedMemory)
1087 {
1088 	MutexLocker _(cache->lock);
1089 	*_allocatedMemory = cache->usage;
1090 }
1091 
1092 
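// Carves the given pages into objects, runs PrepareObject() and the
// constructor on each, and pushes them onto the slab's free list. The slab's
// starting offset is cycled ("cache coloring") so that objects of successive
// slabs map to different cache lines.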
1093 slab *
1094 object_cache::InitSlab(slab *slab, void *pages, size_t byteCount)
1095 {
1096 	TRACE_CACHE(this, "construct (%p, %p .. %p, %lu)", slab, pages,
1097 		((uint8 *)pages) + byteCount, byteCount);
1098 
1099 	slab->pages = pages;
1100 	slab->count = slab->size = byteCount / object_size;
1101 	slab->free = NULL;
1102 	total_objects += slab->size;
1103 
1104 	size_t spareBytes = byteCount - (slab->size * object_size);
1105 	slab->offset = cache_color_cycle;
1106 
1107 	if (slab->offset > spareBytes)
1108 		cache_color_cycle = slab->offset = 0;
1109 	else
1110 		cache_color_cycle += kCacheColorPeriod;
1111 
1112 	TRACE_CACHE(this, "  %lu objects, %lu spare bytes, offset %lu",
1113 		slab->size, spareBytes, slab->offset);
1114 
1115 	uint8 *data = ((uint8 *)pages) + slab->offset;
1116 
1117 	CREATE_PARANOIA_CHECK_SET(slab, "slab");
1118 
1119 	for (size_t i = 0; i < slab->size; i++) {
1120 		bool failedOnFirst = false;
1121 
1122 		status_t status = PrepareObject(slab, data);
1123 		if (status < B_OK)
1124 			failedOnFirst = true;
1125 		else if (constructor)
1126 			status = constructor(cookie, data);
1127 
1128 		if (status < B_OK) {
1129 			if (!failedOnFirst)
1130 				UnprepareObject(slab, data);
1131 
1132 			data = ((uint8 *)pages) + slab->offset;
1133 			for (size_t j = 0; j < i; j++) {
1134 				if (destructor)
1135 					destructor(cookie, data);
1136 				UnprepareObject(slab, data);
1137 				data += object_size;
1138 			}
1139 
1140 			DELETE_PARANOIA_CHECK_SET(slab);
1141 
1142 			return NULL;
1143 		}
1144 
1145 		_push(slab->free, object_to_link(data, object_size));
1146 
1147 		ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, slab,
1148 			&object_to_link(data, object_size)->next, sizeof(void*));
1149 
1150 		data += object_size;
1151 	}
1152 
1153 	return slab;
1154 }
1155 
1156 
1157 void
1158 object_cache::UninitSlab(slab *slab)
1159 {
1160 	TRACE_CACHE(this, "destruct %p", slab);
1161 
1162 	if (slab->count != slab->size)
1163 		panic("cache: destroying a slab which isn't empty.");
1164 
1165 	total_objects -= slab->size;
1166 
1167 	DELETE_PARANOIA_CHECK_SET(slab);
1168 
1169 	uint8 *data = ((uint8 *)slab->pages) + slab->offset;
1170 
1171 	for (size_t i = 0; i < slab->size; i++) {
1172 		if (destructor)
1173 			destructor(cookie, data);
1174 		UnprepareObject(slab, data);
1175 		data += object_size;
1176 	}
1177 }
1178 
1179 
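// For SmallObjectCache slabs the slab header is placed at the very end of
// the slab's pages; see SmallObjectCache::CreateSlab()/ObjectSlab().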
1180 static inline slab *
1181 slab_in_pages(const void *pages, size_t slab_size)
1182 {
1183 	return (slab *)(((uint8 *)pages) + slab_size - sizeof(slab));
1184 }
1185 
1186 
1187 static inline const void *
1188 lower_boundary(void *object, size_t byteCount)
1189 {
1190 	const uint8 *null = (uint8 *)NULL;
1191 	return null + ((((uint8 *)object) - null) & ~(byteCount - 1));
1192 }
1193 
1194 
1195 static inline bool
1196 check_cache_quota(object_cache *cache)
1197 {
1198 	if (cache->maximum == 0)
1199 		return true;
1200 
1201 	return (cache->usage + cache->slab_size) <= cache->maximum;
1202 }
1203 
1204 
1205 slab *
1206 SmallObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating)
1207 {
1208 	if (!check_cache_quota(this))
1209 		return NULL;
1210 
1211 	void *pages;
1212 
1213 	if (allocate_pages(this, &pages, flags, unlockWhileAllocating) < B_OK)
1214 		return NULL;
1215 
1216 	return InitSlab(slab_in_pages(pages, slab_size), pages,
1217 		slab_size - sizeof(slab));
1218 }
1219 
1220 
1221 void
1222 SmallObjectCache::ReturnSlab(slab *slab)
1223 {
1224 	UninitSlab(slab);
1225 	free_pages(this, slab->pages);
1226 }
1227 
1228 
1229 slab *
1230 SmallObjectCache::ObjectSlab(void *object) const
1231 {
1232 	return slab_in_pages(lower_boundary(object, slab_size), slab_size);
1233 }
1234 
1235 
1236 static slab *
1237 allocate_slab(uint32 flags)
1238 {
1239 	return (slab *)internal_alloc(sizeof(slab), flags);
1240 }
1241 
1242 
1243 static void
1244 free_slab(slab *slab)
1245 {
1246 	internal_free(slab);
1247 }
1248 
1249 
1250 static HashedObjectCache::Link *
1251 allocate_link(uint32 flags)
1252 {
1253 	return (HashedObjectCache::Link *)
1254 		internal_alloc(sizeof(HashedObjectCache::Link), flags);
1255 }
1256 
1257 
1258 static void
1259 free_link(HashedObjectCache::Link *link)
1260 {
1261 	internal_free(link);
1262 }
1263 
1264 
1265 slab *
1266 HashedObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating)
1267 {
1268 	if (!check_cache_quota(this))
1269 		return NULL;
1270 
1271 	if (unlockWhileAllocating)
1272 		Unlock();
1273 
1274 	slab *slab = allocate_slab(flags);
1275 
1276 	if (unlockWhileAllocating)
1277 		Lock();
1278 
1279 	if (slab == NULL)
1280 		return NULL;
1281 
1282 	void *pages;
1283 
1284 	if (allocate_pages(this, &pages, flags, unlockWhileAllocating) == B_OK) {
1285 		if (InitSlab(slab, pages, slab_size))
1286 			return slab;
1287 
1288 		free_pages(this, pages);
1289 	}
1290 
1291 	free_slab(slab);
1292 	return NULL;
1293 }
1294 
1295 
1296 void
1297 HashedObjectCache::ReturnSlab(slab *slab)
1298 {
1299 	UninitSlab(slab);
1300 	free_pages(this, slab->pages);
1301 }
1302 
1303 
1304 slab *
1305 HashedObjectCache::ObjectSlab(void *object) const
1306 {
1307 	Link *link = hash_table.Lookup(object);
1308 	if (link == NULL) {
1309 		panic("object cache: requested object %p missing from hash table",
1310 			object);
1311 	}
1312 	return link->parent;
1313 }
1314 
1315 
1316 status_t
1317 HashedObjectCache::PrepareObject(slab *source, void *object)
1318 {
1319 	Link *link = allocate_link(CACHE_DONT_SLEEP);
1320 	if (link == NULL)
1321 		return B_NO_MEMORY;
1322 
1323 	link->buffer = object;
1324 	link->parent = source;
1325 
1326 	hash_table.Insert(link);
1327 	return B_OK;
1328 }
1329 
1330 
1331 void
1332 HashedObjectCache::UnprepareObject(slab *source, void *object)
1333 {
1334 	Link *link = hash_table.Lookup(object);
1335 	if (link == NULL)
1336 		panic("object cache: requested object missing from hash table");
1337 	if (link->parent != source)
1338 		panic("object cache: slab mismatch");
1339 
1340 	hash_table.Remove(link);
1341 	free_link(link);
1342 }
1343 
1344 
1345 static inline bool
1346 is_magazine_empty(depot_magazine *magazine)
1347 {
1348 	return magazine->current_round == 0;
1349 }
1350 
1351 
1352 static inline bool
1353 is_magazine_full(depot_magazine *magazine)
1354 {
1355 	return magazine->current_round == magazine->round_count;
1356 }
1357 
1358 
1359 static inline void *
1360 pop_magazine(depot_magazine *magazine)
1361 {
1362 	return magazine->rounds[--magazine->current_round];
1363 }
1364 
1365 
1366 static inline bool
1367 push_magazine(depot_magazine *magazine, void *object)
1368 {
1369 	if (is_magazine_full(magazine))
1370 		return false;
1371 	magazine->rounds[magazine->current_round++] = object;
1372 	return true;
1373 }
1374 
1375 
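// Hands the caller's (empty) magazine to the depot and takes a full one in
// exchange, if the depot has any. The counterpart below trades a full
// magazine for an empty one, allocating a new empty magazine if necessary.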
1376 static bool
1377 exchange_with_full(object_depot *depot, depot_magazine* &magazine)
1378 {
1379 	RecursiveLocker _(depot->lock);
1380 
1381 	if (depot->full == NULL)
1382 		return false;
1383 
1384 	depot->full_count--;
1385 	depot->empty_count++;
1386 
1387 	_push(depot->empty, magazine);
1388 	magazine = _pop(depot->full);
1389 	return true;
1390 }
1391 
1392 
1393 static bool
1394 exchange_with_empty(object_depot *depot, depot_magazine* &magazine)
1395 {
1396 	RecursiveLocker _(depot->lock);
1397 
1398 	if (depot->empty == NULL) {
1399 		depot->empty = alloc_magazine();
1400 		if (depot->empty == NULL)
1401 			return false;
1402 	} else {
1403 		depot->empty_count--;
1404 	}
1405 
1406 	if (magazine) {
1407 		_push(depot->full, magazine);
1408 		depot->full_count++;
1409 	}
1410 
1411 	magazine = _pop(depot->empty);
1412 	return true;
1413 }
1414 
1415 
1416 static depot_magazine *
1417 alloc_magazine()
1418 {
1419 	depot_magazine *magazine = (depot_magazine *)internal_alloc(
1420 		sizeof(depot_magazine) + kMagazineCapacity * sizeof(void *), 0);
1421 	if (magazine) {
1422 		magazine->next = NULL;
1423 		magazine->current_round = 0;
1424 		magazine->round_count = kMagazineCapacity;
1425 	}
1426 
1427 	return magazine;
1428 }
1429 
1430 
1431 static void
1432 free_magazine(depot_magazine *magazine)
1433 {
1434 	internal_free(magazine);
1435 }
1436 
1437 
1438 static void
1439 empty_magazine(object_depot *depot, depot_magazine *magazine)
1440 {
1441 	for (uint16 i = 0; i < magazine->current_round; i++)
1442 		depot->return_object(depot, magazine->rounds[i]);
1443 	free_magazine(magazine);
1444 }
1445 
1446 
1447 status_t
1448 object_depot_init(object_depot *depot, uint32 flags,
1449 	void (*return_object)(object_depot *depot, void *object))
1450 {
1451 	depot->full = NULL;
1452 	depot->empty = NULL;
1453 	depot->full_count = depot->empty_count = 0;
1454 
1455 	recursive_lock_init(&depot->lock, "depot");
1456 
1457 	depot->stores = (depot_cpu_store *)internal_alloc(sizeof(depot_cpu_store)
1458 		* smp_get_num_cpus(), flags);
1459 	if (depot->stores == NULL) {
1460 		recursive_lock_destroy(&depot->lock);
1461 		return B_NO_MEMORY;
1462 	}
1463 
1464 	for (int i = 0; i < smp_get_num_cpus(); i++) {
1465 		recursive_lock_init(&depot->stores[i].lock, "cpu store");
1466 		depot->stores[i].loaded = depot->stores[i].previous = NULL;
1467 	}
1468 
1469 	depot->return_object = return_object;
1470 
1471 	return B_OK;
1472 }
1473 
1474 
1475 void
1476 object_depot_destroy(object_depot *depot)
1477 {
1478 	object_depot_make_empty(depot);
1479 
1480 	for (int i = 0; i < smp_get_num_cpus(); i++) {
1481 		recursive_lock_destroy(&depot->stores[i].lock);
1482 	}
1483 
1484 	internal_free(depot->stores);
1485 
1486 	recursive_lock_destroy(&depot->lock);
1487 }
1488 
1489 
1490 static void *
1491 object_depot_obtain_from_store(object_depot *depot, depot_cpu_store *store)
1492 {
1493 	RecursiveLocker _(store->lock);
1494 
1495 	// To better understand both the Alloc() and Free() logic, refer to
1496 	// Bonwick's "Magazines and Vmem" paper in the 2001 USENIX proceedings.
1497 
1498 	// In a nutshell, we try to get an object from the loaded magazine if it
1499 	// isn't empty, or from the previous magazine if that one is full, and
1500 	// finally from the slab if the depot has no full magazines at all.
1501 
1502 	if (store->loaded == NULL)
1503 		return NULL;
1504 
1505 	while (true) {
1506 		if (!is_magazine_empty(store->loaded))
1507 			return pop_magazine(store->loaded);
1508 
1509 		if (store->previous && (is_magazine_full(store->previous)
1510 			|| exchange_with_full(depot, store->previous)))
1511 			std::swap(store->previous, store->loaded);
1512 		else
1513 			return NULL;
1514 	}
1515 }
1516 
1517 
1518 static int
1519 object_depot_return_to_store(object_depot *depot, depot_cpu_store *store,
1520 	void *object)
1521 {
1522 	RecursiveLocker _(store->lock);
1523 
1524 	// We try to add the object to the loaded magazine if we have one and
1525 	// it isn't full, or to the previous one if that one is empty. If the
1526 	// magazine depot can't provide us with a new empty magazine, we return
1527 	// the object directly to the slab.
1528 
1529 	while (true) {
1530 		if (store->loaded && push_magazine(store->loaded, object))
1531 			return 1;
1532 
1533 		if ((store->previous && is_magazine_empty(store->previous))
1534 			|| exchange_with_empty(depot, store->previous))
1535 			std::swap(store->loaded, store->previous);
1536 		else
1537 			return 0;
1538 	}
1539 }
1540 
1541 
1542 void *
1543 object_depot_obtain(object_depot *depot)
1544 {
1545 	return object_depot_obtain_from_store(depot, object_depot_cpu(depot));
1546 }
1547 
1548 
1549 int
1550 object_depot_store(object_depot *depot, void *object)
1551 {
1552 	return object_depot_return_to_store(depot, object_depot_cpu(depot),
1553 		object);
1554 }
1555 
1556 
1557 void
1558 object_depot_make_empty(object_depot *depot)
1559 {
1560 	for (int i = 0; i < smp_get_num_cpus(); i++) {
1561 		depot_cpu_store *store = &depot->stores[i];
1562 
1563 		RecursiveLocker _(store->lock);
1564 
1565 		if (depot->stores[i].loaded)
1566 			empty_magazine(depot, depot->stores[i].loaded);
1567 		if (depot->stores[i].previous)
1568 			empty_magazine(depot, depot->stores[i].previous);
1569 		depot->stores[i].loaded = depot->stores[i].previous = NULL;
1570 	}
1571 
1572 	RecursiveLocker _(depot->lock);
1573 
1574 	while (depot->full)
1575 		empty_magazine(depot, _pop(depot->full));
1576 
1577 	while (depot->empty)
1578 		empty_magazine(depot, _pop(depot->empty));
1579 }
1580 
1581 
1582 static int
1583 dump_slabs(int argc, char *argv[])
1584 {
1585 	kprintf("%10s %22s %8s %8s %6s %8s %8s %8s\n", "address", "name",
1586 		"objsize", "usage", "empty", "usedobj", "total", "flags");
1587 
1588 	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
1589 
1590 	while (it.HasNext()) {
1591 		object_cache *cache = it.Next();
1592 
1593 		kprintf("%p %22s %8lu %8lu %6lu %8lu %8lu %8lx\n", cache, cache->name,
1594 			cache->object_size, cache->usage, cache->empty_count,
1595 			cache->used_count, cache->usage / cache->object_size,
1596 			cache->flags);
1597 	}
1598 
1599 	return 0;
1600 }
1601 
1602 
1603 static int
1604 dump_cache_info(int argc, char *argv[])
1605 {
1606 	if (argc < 2) {
1607 		kprintf("usage: cache_info [address]\n");
1608 		return 0;
1609 	}
1610 
1611 	object_cache *cache = (object_cache *)strtoul(argv[1], NULL, 16);
1612 
1613 	kprintf("name: %s\n", cache->name);
1614 	kprintf("lock: %p\n", &cache->lock);
1615 	kprintf("object_size: %lu\n", cache->object_size);
1616 	kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle);
1617 	kprintf("used_count: %lu\n", cache->used_count);
1618 	kprintf("empty_count: %lu\n", cache->empty_count);
1619 	kprintf("pressure: %lu\n", cache->pressure);
1620 	kprintf("slab_size: %lu\n", cache->slab_size);
1621 	kprintf("usage: %lu\n", cache->usage);
1622 	kprintf("maximum: %lu\n", cache->maximum);
1623 	kprintf("flags: 0x%lx\n", cache->flags);
1624 	kprintf("cookie: %p\n", cache->cookie);
1625 
1626 	return 0;
1627 }
1628 
1629 
1630 void
1631 slab_init(kernel_args *args, addr_t initialBase, size_t initialSize)
1632 {
1633 	dprintf("slab: init base %p + 0x%lx\n", (void *)initialBase, initialSize);
1634 
1635 	sInitialBegin = (uint8 *)initialBase;
1636 	sInitialLimit = sInitialBegin + initialSize;
1637 	sInitialPointer = sInitialBegin;
1638 
1639 	sKernelArgs = args;
1640 
1641 	new (&sObjectCaches) ObjectCacheList();
1642 
1643 	block_allocator_init_boot();
1644 
1645 	add_debugger_command("slabs", dump_slabs, "list all object caches");
1646 	add_debugger_command("cache_info", dump_cache_info,
1647 		"dump information about a specific cache");
1648 }
1649 
1650 
1651 void
1652 slab_init_post_sem()
1653 {
1654 	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
1655 
1656 	while (it.HasNext()) {
1657 		object_cache *cache = it.Next();
1658 		if (cache->allocate_pages == early_allocate_pages)
1659 			object_cache_commit_pre_pages(cache);
1660 	}
1661 
1662 	block_allocator_init_rest();
1663 }
1664 
1665 
1666 void
1667 slab_init_post_thread()
1668 {
1669 	new(&sResizeRequests) ResizeRequestQueue;
1670 	sResizeRequestsCondition.Init(&sResizeRequests, "object cache resizer");
1671 
1672 	thread_id objectCacheResizer = spawn_kernel_thread(object_cache_resizer,
1673 		"object cache resizer", B_URGENT_PRIORITY, NULL);
1674 	if (objectCacheResizer < 0) {
1675 		panic("slab_init_post_thread(): failed to spawn object cache resizer "
1676 			"thread\n");
1677 		return;
1678 	}
1679 
1680 	send_signal_etc(objectCacheResizer, SIGCONT, B_DO_NOT_RESCHEDULE);
1681 }
1682