xref: /haiku/src/system/kernel/slab/Slab.cpp (revision b6b0567fbd186f8ce8a0c90bdc7a7b5b4c649678)
1 /*
2  * Copyright 2008, Axel Dörfler. All Rights Reserved.
3  * Copyright 2007, Hugo Santos. All Rights Reserved.
4  *
5  * Distributed under the terms of the MIT License.
6  */
7 
8 
9 #include <Slab.h>
10 #include "slab_private.h"
11 
12 #include <algorithm>
13 #include <new>
14 #include <signal.h>
15 #include <stdlib.h>
16 #include <string.h>
17 
18 #include <KernelExport.h>
19 
20 #include <condition_variable.h>
21 #include <Depot.h>
22 #include <kernel.h>
23 #include <low_resource_manager.h>
24 #include <smp.h>
25 #include <tracing.h>
26 #include <util/AutoLock.h>
27 #include <util/DoublyLinkedList.h>
28 #include <util/OpenHashTable.h>
29 #include <util/khash.h>
30 #include <vm.h>
31 #include <vm_address_space.h>
32 
33 
34 // TODO kMagazineCapacity should be dynamically tuned per cache.
35 
36 
37 //#define TRACE_SLAB
38 
39 #ifdef TRACE_SLAB
40 #define TRACE_CACHE(cache, format, args...) \
41 	dprintf("Cache[%p, %s] " format "\n", cache, cache->name , ##args)
42 #else
43 #define TRACE_CACHE(cache, format, args...) do { } while (0)
44 #endif
45 
46 #define COMPONENT_PARANOIA_LEVEL	OBJECT_CACHE_PARANOIA
47 #include <debug_paranoia.h>
48 
49 
50 #define CACHE_ALIGN_ON_SIZE	(30 << 1)
51 
52 static const int kMagazineCapacity = 32;
53 static const size_t kCacheColorPeriod = 8;
54 
55 struct object_link {
56 	struct object_link *next;
57 };
58 
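// Bookkeeping for a single slab. Free objects are chained on an intrusive
// singly linked list: each object's object_link is stored in the object's
// last sizeof(object_link) bytes (see link_to_object()/object_to_link()
// below), so a free object needs no extra memory for the list node.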
59 struct slab : DoublyLinkedListLinkImpl<slab> {
60 	void *pages;
61 	size_t size;		// total number of objects
62 	size_t count;		// free objects
63 	size_t offset;
64 	object_link *free;
65 };
66 
67 typedef DoublyLinkedList<slab> SlabList;
68 struct ResizeRequest;
69 
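// Common state of an object cache. Slabs migrate between the three lists as
// their free counts change: a freshly created slab starts on the empty list,
// moves to partial once its first object is handed out, to full when no free
// objects remain, and back again as objects are returned (see
// object_cache_alloc() and object_cache_return_to_slab()). The per-CPU depot
// sitting in front of the cache is optional (CACHE_NO_DEPOT).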
70 struct object_cache : DoublyLinkedListLinkImpl<object_cache> {
71 	char name[32];
72 	mutex lock;
73 	size_t object_size;
74 	size_t cache_color_cycle;
75 	SlabList empty, partial, full;
76 	size_t total_objects;		// total number of objects
77 	size_t used_count;			// used objects
78 	size_t empty_count;			// empty slabs
79 	size_t pressure;
80 	size_t min_object_reserve;	// minimum number of free objects
81 
82 	size_t slab_size;
83 	size_t usage, maximum;
84 	uint32 flags;
85 
86 	ResizeRequest *resize_request;
87 
88 	void *cookie;
89 	object_cache_constructor constructor;
90 	object_cache_destructor destructor;
91 	object_cache_reclaimer reclaimer;
92 
93 	status_t (*allocate_pages)(object_cache *cache, void **pages,
94 		uint32 flags, bool unlockWhileAllocating);
95 	void (*free_pages)(object_cache *cache, void *pages);
96 
97 	object_depot depot;
98 
99 	virtual slab *CreateSlab(uint32 flags, bool unlockWhileAllocating) = 0;
100 	virtual void ReturnSlab(slab *slab) = 0;
101 	virtual slab *ObjectSlab(void *object) const = 0;
102 
103 	slab *InitSlab(slab *slab, void *pages, size_t byteCount);
104 	void UninitSlab(slab *slab);
105 
106 	virtual status_t PrepareObject(slab *source, void *object) { return B_OK; }
107 	virtual void UnprepareObject(slab *source, void *object) {}
108 
109 	virtual ~object_cache() {}
110 
111 	bool Lock()		{ return mutex_lock(&lock) == B_OK; }
112 	void Unlock()	{ mutex_unlock(&lock); }
113 };
114 
115 typedef DoublyLinkedList<object_cache> ObjectCacheList;
116 
117 struct SmallObjectCache : object_cache {
118 	slab *CreateSlab(uint32 flags, bool unlockWhileAllocating);
119 	void ReturnSlab(slab *slab);
120 	slab *ObjectSlab(void *object) const;
121 };
122 
123 
124 struct HashedObjectCache : object_cache {
125 	struct Link {
126 		const void*	buffer;
127 		slab*		parent;
128 		Link*		next;
129 	};
130 
131 	struct Definition {
132 		typedef HashedObjectCache	ParentType;
133 		typedef const void *		KeyType;
134 		typedef Link				ValueType;
135 
136 		Definition(HashedObjectCache *_parent) : parent(_parent) {}
137 		Definition(const Definition& definition) : parent(definition.parent) {}
138 
139 		size_t HashKey(const void *key) const
140 		{
141 			return (((const uint8 *)key) - ((const uint8 *)0))
142 				>> parent->lower_boundary;
143 		}
144 
145 		size_t Hash(Link *value) const { return HashKey(value->buffer); }
146 		bool Compare(const void *key, Link *value) const
147 			{ return value->buffer == key; }
148 		Link*& GetLink(Link *value) const { return value->next; }
149 
150 		HashedObjectCache *parent;
151 	};
152 
153 	typedef BOpenHashTable<Definition> HashTable;
154 
155 	HashedObjectCache()
156 		: hash_table(this) {}
157 
158 	slab *CreateSlab(uint32 flags, bool unlockWhileAllocating);
159 	void ReturnSlab(slab *slab);
160 	slab *ObjectSlab(void *object) const;
161 	status_t PrepareObject(slab *source, void *object);
162 	void UnprepareObject(slab *source, void *object);
163 
164 	HashTable hash_table;
165 	size_t lower_boundary;
166 };
167 
168 
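// A magazine holds up to round_count cached object pointers ("rounds").
// Every CPU keeps a loaded and a previous magazine (see depot_cpu_store
// below), while the depot itself keeps lists of full and empty magazines
// that the per-CPU stores exchange against. This is the magazine scheme
// from Bonwick's paper referenced in object_depot_obtain_from_store().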
169 struct depot_magazine {
170 	struct depot_magazine *next;
171 	uint16 current_round, round_count;
172 	void *rounds[0];
173 };
174 
175 
176 struct depot_cpu_store {
177 	recursive_lock lock;
178 	struct depot_magazine *loaded, *previous;
179 };
180 
181 struct ResizeRequest : DoublyLinkedListLinkImpl<ResizeRequest> {
182 	ResizeRequest(object_cache* cache)
183 		:
184 		cache(cache),
185 		pending(false),
186 		delete_when_done(false)
187 	{
188 	}
189 
190 	object_cache*	cache;
191 	bool			pending;
192 	bool			delete_when_done;
193 };
194 
195 typedef DoublyLinkedList<ResizeRequest> ResizeRequestQueue;
196 
197 
198 static ObjectCacheList sObjectCaches;
199 static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");
200 
201 static uint8 *sInitialBegin, *sInitialLimit, *sInitialPointer;
202 static kernel_args *sKernelArgs;
203 
204 
205 static status_t object_cache_reserve_internal(object_cache *cache,
206 	size_t object_count, uint32 flags, bool unlockWhileAllocating);
207 static depot_magazine *alloc_magazine();
208 static void free_magazine(depot_magazine *magazine);
209 
210 static mutex sResizeRequestsLock
211 	= MUTEX_INITIALIZER("object cache resize requests");
212 static ResizeRequestQueue sResizeRequests;
213 static ConditionVariable sResizeRequestsCondition;
214 
215 
216 #if OBJECT_CACHE_TRACING
217 
218 
219 namespace ObjectCacheTracing {
220 
221 class ObjectCacheTraceEntry : public AbstractTraceEntry {
222 	public:
223 		ObjectCacheTraceEntry(object_cache* cache)
224 			:
225 			fCache(cache)
226 		{
227 		}
228 
229 	protected:
230 		object_cache*	fCache;
231 };
232 
233 
234 class Create : public ObjectCacheTraceEntry {
235 	public:
236 		Create(const char* name, size_t objectSize, size_t alignment,
237 				size_t maxByteUsage, uint32 flags, void *cookie,
238 				object_cache *cache)
239 			:
240 			ObjectCacheTraceEntry(cache),
241 			fObjectSize(objectSize),
242 			fAlignment(alignment),
243 			fMaxByteUsage(maxByteUsage),
244 			fFlags(flags),
245 			fCookie(cookie)
246 		{
247 			fName = alloc_tracing_buffer_strcpy(name, 64, false);
248 			Initialized();
249 		}
250 
251 		virtual void AddDump(TraceOutput& out)
252 		{
253 			out.Print("object cache create: name: \"%s\", object size: %lu, "
254 				"alignment: %lu, max usage: %lu, flags: 0x%lx, cookie: %p "
255 				"-> cache: %p", fName, fObjectSize, fAlignment, fMaxByteUsage,
256 					fFlags, fCookie, fCache);
257 		}
258 
259 	private:
260 		const char*	fName;
261 		size_t		fObjectSize;
262 		size_t		fAlignment;
263 		size_t		fMaxByteUsage;
264 		uint32		fFlags;
265 		void*		fCookie;
266 };
267 
268 
269 class Delete : public ObjectCacheTraceEntry {
270 	public:
271 		Delete(object_cache *cache)
272 			:
273 			ObjectCacheTraceEntry(cache)
274 		{
275 			Initialized();
276 		}
277 
278 		virtual void AddDump(TraceOutput& out)
279 		{
280 			out.Print("object cache delete: %p", fCache);
281 		}
282 };
283 
284 
285 class Alloc : public ObjectCacheTraceEntry {
286 	public:
287 		Alloc(object_cache *cache, uint32 flags, void* object)
288 			:
289 			ObjectCacheTraceEntry(cache),
290 			fFlags(flags),
291 			fObject(object)
292 		{
293 			Initialized();
294 		}
295 
296 		virtual void AddDump(TraceOutput& out)
297 		{
298 			out.Print("object cache alloc: cache: %p, flags: 0x%lx -> "
299 				"object: %p", fCache, fFlags, fObject);
300 		}
301 
302 	private:
303 		uint32		fFlags;
304 		void*		fObject;
305 };
306 
307 
308 class Free : public ObjectCacheTraceEntry {
309 	public:
310 		Free(object_cache *cache, void* object)
311 			:
312 			ObjectCacheTraceEntry(cache),
313 			fObject(object)
314 		{
315 			Initialized();
316 		}
317 
318 		virtual void AddDump(TraceOutput& out)
319 		{
320 			out.Print("object cache free: cache: %p, object: %p", fCache,
321 				fObject);
322 		}
323 
324 	private:
325 		void*		fObject;
326 };
327 
328 
329 class Reserve : public ObjectCacheTraceEntry {
330 	public:
331 		Reserve(object_cache *cache, size_t count, uint32 flags)
332 			:
333 			ObjectCacheTraceEntry(cache),
334 			fCount(count),
335 			fFlags(flags)
336 		{
337 			Initialized();
338 		}
339 
340 		virtual void AddDump(TraceOutput& out)
341 		{
342 			out.Print("object cache reserve: cache: %p, count: %lu, "
343 				"flags: 0x%lx", fCache, fCount, fFlags);
344 		}
345 
346 	private:
347 		uint32		fCount;
348 		uint32		fFlags;
349 };
350 
351 
352 }	// namespace ObjectCacheTracing
353 
354 #	define T(x)	new(std::nothrow) ObjectCacheTracing::x
355 
356 #else
357 #	define T(x)
358 #endif	// OBJECT_CACHE_TRACING
359 
360 
361 // #pragma mark -
362 
363 
364 template<typename Type> static Type *
365 _pop(Type* &head)
366 {
367 	Type *oldHead = head;
368 	head = head->next;
369 	return oldHead;
370 }
371 
372 
373 template<typename Type> static inline void
374 _push(Type* &head, Type *object)
375 {
376 	object->next = head;
377 	head = object;
378 }
379 
380 
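// The free list link occupies the tail end of each object. Purely as an
// illustration: for a 64 byte object with 8 byte pointers the link sits at
// offset 56, so link_to_object() subtracts 56 bytes to get back to the
// object's start and object_to_link() adds them again.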
381 static inline void *
382 link_to_object(object_link *link, size_t objectSize)
383 {
384 	return ((uint8 *)link) - (objectSize - sizeof(object_link));
385 }
386 
387 
388 static inline object_link *
389 object_to_link(void *object, size_t objectSize)
390 {
391 	return (object_link *)(((uint8 *)object)
392 		+ (objectSize - sizeof(object_link)));
393 }
394 
395 
396 static inline depot_cpu_store *
397 object_depot_cpu(object_depot *depot)
398 {
399 	return &depot->stores[smp_get_current_cpu()];
400 }
401 
402 
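// Index of the highest set bit, i.e. floor(log2(value)), or -1 for 0.
// For example, __fls0(1) == 0 and __fls0(4096) == 12. HashedObjectCache uses
// this to compute lower_boundary, the shift applied when hashing object
// addresses.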
403 static inline int
404 __fls0(size_t value)
405 {
406 	if (value == 0)
407 		return -1;
408 
409 	int bit;
410 	for (bit = 0; value != 1; bit++)
411 		value >>= 1;
412 	return bit;
413 }
414 
415 
416 static void *
417 internal_alloc(size_t size, uint32 flags)
418 {
419 	if (flags & CACHE_DURING_BOOT) {
420 		if ((sInitialPointer + size) > sInitialLimit) {
421 			panic("internal_alloc: ran out of initial space");
422 			return NULL;
423 		}
424 
425 		uint8 *buffer = sInitialPointer;
426 		sInitialPointer += size;
427 		return buffer;
428 	}
429 
430 	// TODO: Support CACHE_DONT_SLEEP!
431 	return block_alloc(size);
432 }
433 
434 
435 static void
436 internal_free(void *_buffer)
437 {
438 	uint8 *buffer = (uint8 *)_buffer;
439 
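	// buffers from the boot-time bump allocator cannot be reclaimed;
	// they are simply left in place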
440 	if (buffer >= sInitialBegin && buffer < sInitialLimit)
441 		return;
442 
443 	block_free(buffer);
444 }
445 
446 
447 static status_t
448 area_allocate_pages(object_cache *cache, void **pages, uint32 flags,
449 	bool unlockWhileAllocating)
450 {
451 	TRACE_CACHE(cache, "allocate pages (%lu, 0x%lx)", cache->slab_size, flags);
452 
453 	uint32 lock = B_FULL_LOCK;
454 	if (cache->flags & CACHE_UNLOCKED_PAGES)
455 		lock = B_NO_LOCK;
456 
457 	uint32 addressSpec = B_ANY_KERNEL_ADDRESS;
458 	if ((cache->flags & CACHE_ALIGN_ON_SIZE) != 0
459 		&& cache->slab_size != B_PAGE_SIZE)
460 		addressSpec = B_ANY_KERNEL_BLOCK_ADDRESS;
461 
462 	if (unlockWhileAllocating)
463 		cache->Unlock();
464 
465 	// if we are allocating, it is because we need the pages immediately,
466 	// so we lock them. when moving the slab to the empty list we should
467 	// unlock them, and lock them again when getting one from the empty list.
468 	area_id areaId = create_area_etc(vm_kernel_address_space_id(),
469 		cache->name, pages, addressSpec, cache->slab_size, lock,
470 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
471 		(flags & CACHE_DONT_SLEEP) != 0 ? CREATE_AREA_DONT_WAIT : 0);
472 
473 	if (unlockWhileAllocating)
474 		cache->Lock();
475 
476 	if (areaId < 0)
477 		return areaId;
478 
479 	cache->usage += cache->slab_size;
480 
481 	TRACE_CACHE(cache, "  ... = { %ld, %p }", areaId, *pages);
482 
483 	return B_OK;
484 }
485 
486 
487 static void
488 area_free_pages(object_cache *cache, void *pages)
489 {
490 	area_id id = area_for(pages);
491 
492 	TRACE_CACHE(cache, "delete pages %p (%ld)", pages, id);
493 
494 	if (id < 0) {
495 		panic("object cache: freeing unknown area");
496 		return;
497 	}
498 
499 	delete_area(id);
500 
501 	cache->usage -= cache->slab_size;
502 }
503 
504 
505 static status_t
506 early_allocate_pages(object_cache *cache, void **pages, uint32 flags,
507 	bool unlockWhileAllocating)
508 {
509 	TRACE_CACHE(cache, "early allocate pages (%lu, 0x%lx)", cache->slab_size,
510 		flags);
511 
512 	if (unlockWhileAllocating)
513 		cache->Unlock();
514 
515 	addr_t base = vm_allocate_early(sKernelArgs, cache->slab_size,
516 		cache->slab_size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
517 
518 	if (unlockWhileAllocating)
519 		cache->Lock();
520 
521 	*pages = (void *)base;
522 
523 	cache->usage += cache->slab_size;
524 
525 	TRACE_CACHE(cache, "  ... = { %p }", *pages);
526 
527 	return B_OK;
528 }
529 
530 
531 static void
532 early_free_pages(object_cache *cache, void *pages)
533 {
534 	panic("memory pressure on bootup?");
535 }
536 
537 
538 static void
539 object_cache_low_memory(void *_self, uint32 resources, int32 level)
540 {
541 	if (level == B_NO_LOW_RESOURCE)
542 		return;
543 
544 	object_cache *cache = (object_cache *)_self;
545 
546 	// we are calling the reclaimer without the object cache lock
547 	// to give the owner a chance to return objects to the slabs.
548 	// As we only register the low memory handler after we complete
549 	// the init of the object cache, unless there is some funky
550 	// memory re-ordering business going on, we should be ok.
551 
552 	if (cache->reclaimer)
553 		cache->reclaimer(cache->cookie, level);
554 
555 	MutexLocker _(cache->lock);
556 	size_t minimumAllowed;
557 
558 	switch (level) {
559 	case B_LOW_RESOURCE_NOTE:
560 		minimumAllowed = cache->pressure / 2 + 1;
561 		break;
562 
563 	case B_LOW_RESOURCE_WARNING:
564 		cache->pressure /= 2;
565 		minimumAllowed = 0;
566 		break;
567 
568 	default:
569 		cache->pressure = 0;
570 		minimumAllowed = 0;
571 		break;
572 	}
573 
574 	// If the object cache has minimum object reserve, make sure that we don't
575 	// free too many slabs.
576 	if (cache->min_object_reserve > 0 && cache->empty_count > 0) {
577 		size_t objectsPerSlab = cache->empty.Head()->size;
578 		size_t freeObjects = cache->total_objects - cache->used_count;
579 
580 		if (cache->min_object_reserve + objectsPerSlab >= freeObjects)
581 			return;
582 
583 		size_t slabsToFree = (freeObjects - cache->min_object_reserve)
584 			/ objectsPerSlab;
585 
586 		if (cache->empty_count > minimumAllowed + slabsToFree)
587 			minimumAllowed = cache->empty_count - slabsToFree;
588 	}
589 
590 	if (cache->empty_count <= minimumAllowed)
591 		return;
592 
593 	TRACE_CACHE(cache, "cache: memory pressure, will release down to %lu.",
594 		minimumAllowed);
595 
596 	while (cache->empty_count > minimumAllowed) {
597 		cache->ReturnSlab(cache->empty.RemoveHead());
598 		cache->empty_count--;
599 	}
600 }
601 
602 
603 static void
604 object_cache_return_object_wrapper(object_depot *depot, void *object)
605 {
606 	// TODO: the offset calculation might be wrong because we hardcode a
607 	// SmallObjectCache instead of a base object_cache. Also this must
608 	// have an unacceptable overhead.
609 	SmallObjectCache dummyCache;
610 	object_cache *cache = (object_cache *)(((uint8 *)depot)
611 		- offset_of_member(dummyCache, depot));
612 
613 	object_cache_free(cache, object);
614 }
615 
616 
617 static status_t
618 object_cache_init(object_cache *cache, const char *name, size_t objectSize,
619 	size_t alignment, size_t maximum, uint32 flags, void *cookie,
620 	object_cache_constructor constructor, object_cache_destructor destructor,
621 	object_cache_reclaimer reclaimer)
622 {
623 	strlcpy(cache->name, name, sizeof(cache->name));
624 
625 	mutex_init(&cache->lock, cache->name);
626 
627 	if (objectSize < sizeof(object_link))
628 		objectSize = sizeof(object_link);
629 
630 	if (alignment > 0 && (objectSize & (alignment - 1)))
631 		cache->object_size = objectSize + alignment
632 			- (objectSize & (alignment - 1));
633 	else
634 		cache->object_size = objectSize;
635 
636 	TRACE_CACHE(cache, "init %lu, %lu -> %lu", objectSize, alignment,
637 		cache->object_size);
638 
639 	cache->cache_color_cycle = 0;
640 	cache->total_objects = 0;
641 	cache->used_count = 0;
642 	cache->empty_count = 0;
643 	cache->pressure = 0;
644 	cache->min_object_reserve = 0;
645 
646 	cache->usage = 0;
647 	cache->maximum = maximum;
648 
649 	cache->flags = flags;
650 
651 	cache->resize_request = NULL;
652 
653 	// TODO: depot destruction is obviously broken
654 	// no gain in using the depot in single cpu setups
655 	//if (smp_get_num_cpus() == 1)
656 		cache->flags |= CACHE_NO_DEPOT;
657 
658 	if (!(cache->flags & CACHE_NO_DEPOT)) {
659 		status_t status = object_depot_init(&cache->depot, flags,
660 			object_cache_return_object_wrapper);
661 		if (status < B_OK) {
662 			mutex_destroy(&cache->lock);
663 			return status;
664 		}
665 	}
666 
667 	cache->cookie = cookie;
668 	cache->constructor = constructor;
669 	cache->destructor = destructor;
670 	cache->reclaimer = reclaimer;
671 
672 	if (cache->flags & CACHE_DURING_BOOT) {
673 		cache->allocate_pages = early_allocate_pages;
674 		cache->free_pages = early_free_pages;
675 	} else {
676 		cache->allocate_pages = area_allocate_pages;
677 		cache->free_pages = area_free_pages;
678 	}
679 
680 	register_low_resource_handler(object_cache_low_memory, cache,
681 		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY, 5);
682 
683 	MutexLocker _(sObjectCacheListLock);
684 	sObjectCaches.Add(cache);
685 
686 	return B_OK;
687 }
688 
689 
690 static void
691 object_cache_commit_slab(object_cache *cache, slab *slab)
692 {
693 	void *pages = (void *)ROUNDDOWN((addr_t)slab->pages, B_PAGE_SIZE);
694 	if (create_area(cache->name, &pages, B_EXACT_ADDRESS, cache->slab_size,
695 		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA) < 0)
696 		panic("failed to create_area()");
697 }
698 
699 
700 static void
701 object_cache_commit_pre_pages(object_cache *cache)
702 {
703 	SlabList::Iterator it = cache->full.GetIterator();
704 	while (it.HasNext())
705 		object_cache_commit_slab(cache, it.Next());
706 
707 	it = cache->partial.GetIterator();
708 	while (it.HasNext())
709 		object_cache_commit_slab(cache, it.Next());
710 
711 	it = cache->empty.GetIterator();
712 	while (it.HasNext())
713 		object_cache_commit_slab(cache, it.Next());
714 
715 	cache->allocate_pages = area_allocate_pages;
716 	cache->free_pages = area_free_pages;
717 }
718 
719 
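// Kernel thread draining the resize request queue: whenever a cache's free
// object count has dropped below its min_object_reserve (requests are queued
// by increase_object_reserve()), new slabs are created until the reserve is
// met again.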
720 static status_t
721 object_cache_resizer(void*)
722 {
723 	while (true) {
724 		MutexLocker locker(sResizeRequestsLock);
725 
726 		// wait for the next request
727 		while (sResizeRequests.IsEmpty()) {
728 			ConditionVariableEntry entry;
729 			sResizeRequestsCondition.Add(&entry);
730 			locker.Unlock();
731 			entry.Wait();
732 			locker.Lock();
733 		}
734 
735 		ResizeRequest* request = sResizeRequests.RemoveHead();
736 
737 		locker.Unlock();
738 
739 		// resize the cache, if necessary
740 
741 		object_cache* cache = request->cache;
742 
743 		MutexLocker cacheLocker(cache->lock);
744 
745 		size_t freeObjects = cache->total_objects - cache->used_count;
746 
747 		while (freeObjects < cache->min_object_reserve) {
748 			status_t error = object_cache_reserve_internal(cache,
749 				cache->min_object_reserve - freeObjects, 0, true);
750 			if (error != B_OK) {
751 				dprintf("object cache resizer: Failed to resize object cache "
752 					"%p!\n", cache);
753 				break;
754 			}
755 
756 			freeObjects = cache->total_objects - cache->used_count;
757 		}
758 
759 		request->pending = false;
760 
761 		if (request->delete_when_done)
762 			delete request;
763 	}
764 
765 	// never reached
766 	return B_OK;
767 }
768 
769 
770 static void
771 increase_object_reserve(object_cache* cache)
772 {
773 	if (cache->resize_request->pending)
774 		return;
775 
776 	cache->resize_request->pending = true;
777 
778 	MutexLocker locker(sResizeRequestsLock);
779 	sResizeRequests.Add(cache->resize_request);
780 	sResizeRequestsCondition.NotifyAll();
781 }
782 
783 
784 static void
785 delete_cache(object_cache *cache)
786 {
787 	cache->~object_cache();
788 	internal_free(cache);
789 }
790 
791 
792 static SmallObjectCache *
793 create_small_object_cache(const char *name, size_t object_size,
794 	size_t alignment, size_t maximum, uint32 flags, void *cookie,
795 	object_cache_constructor constructor, object_cache_destructor destructor,
796 	object_cache_reclaimer reclaimer)
797 {
798 	void *buffer = internal_alloc(sizeof(SmallObjectCache), flags);
799 	if (buffer == NULL)
800 		return NULL;
801 
802 	SmallObjectCache *cache = new (buffer) SmallObjectCache();
803 
804 	if (object_cache_init(cache, name, object_size, alignment, maximum,
805 			flags | CACHE_ALIGN_ON_SIZE, cookie, constructor, destructor,
806 			reclaimer) < B_OK) {
807 		delete_cache(cache);
808 		return NULL;
809 	}
810 
811 	if ((flags & CACHE_LARGE_SLAB) != 0)
812 		cache->slab_size = max_c(16 * B_PAGE_SIZE, 1024 * object_size);
813 	else
814 		cache->slab_size = B_PAGE_SIZE;
815 
816 	return cache;
817 }
818 
819 
820 static HashedObjectCache *
821 create_hashed_object_cache(const char *name, size_t object_size,
822 	size_t alignment, size_t maximum, uint32 flags, void *cookie,
823 	object_cache_constructor constructor, object_cache_destructor destructor,
824 	object_cache_reclaimer reclaimer)
825 {
826 	void *buffer = internal_alloc(sizeof(HashedObjectCache), flags);
827 	if (buffer == NULL)
828 		return NULL;
829 
830 	HashedObjectCache *cache = new (buffer) HashedObjectCache();
831 
832 	if (object_cache_init(cache, name, object_size, alignment, maximum, flags,
833 		cookie, constructor, destructor, reclaimer) < B_OK) {
834 		delete_cache(cache);
835 		return NULL;
836 	}
837 
838 	if ((flags & CACHE_LARGE_SLAB) != 0)
839 		cache->slab_size = max_c(256 * B_PAGE_SIZE, 128 * object_size);
840 	else
841 		cache->slab_size = max_c(16 * B_PAGE_SIZE, 8 * object_size);
842 	cache->lower_boundary = __fls0(cache->object_size);
843 
844 	return cache;
845 }
846 
847 
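// A minimal usage sketch of the public interface; MyObject and the two hook
// functions are hypothetical and only illustrate the expected signatures:
//
//	static status_t
//	construct_my_object(void *cookie, void *object)
//	{
//		new(object) MyObject();
//		return B_OK;
//	}
//
//	static void
//	destruct_my_object(void *cookie, void *object)
//	{
//		((MyObject *)object)->~MyObject();
//	}
//
//	object_cache *cache = create_object_cache("my objects",
//		sizeof(MyObject), 0, NULL, construct_my_object, destruct_my_object);
//	MyObject *object = (MyObject *)object_cache_alloc(cache, 0);
//	// ... use the object ...
//	object_cache_free(cache, object);
//	delete_object_cache(cache);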
848 object_cache *
849 create_object_cache(const char *name, size_t object_size, size_t alignment,
850 	void *cookie, object_cache_constructor constructor,
851 	object_cache_destructor destructor)
852 {
853 	return create_object_cache_etc(name, object_size, alignment, 0, 0, cookie,
854 		constructor, destructor, NULL);
855 }
856 
857 
858 object_cache *
859 create_object_cache_etc(const char *name, size_t objectSize, size_t alignment,
860 	size_t maximum, uint32 flags, void *cookie,
861 	object_cache_constructor constructor, object_cache_destructor destructor,
862 	object_cache_reclaimer reclaimer)
863 {
864 	object_cache *cache;
865 
866 	if (objectSize == 0) {
867 		cache = NULL;
868 	} else if (objectSize <= 256) {
869 		cache = create_small_object_cache(name, objectSize, alignment, maximum,
870 			flags, cookie, constructor, destructor, reclaimer);
871 	} else {
872 		cache = create_hashed_object_cache(name, objectSize, alignment,
873 			maximum, flags, cookie, constructor, destructor, reclaimer);
874 	}
875 
876 	T(Create(name, objectSize, alignment, maximum, flags, cookie, cache));
877 	return cache;
878 }
879 
880 
881 void
882 delete_object_cache(object_cache *cache)
883 {
884 	T(Delete(cache));
885 
886 	{
887 		MutexLocker _(sObjectCacheListLock);
888 		sObjectCaches.Remove(cache);
889 	}
890 
891 	mutex_lock(&cache->lock);
892 
893 	if (!(cache->flags & CACHE_NO_DEPOT))
894 		object_depot_destroy(&cache->depot);
895 
896 	unregister_low_resource_handler(object_cache_low_memory, cache);
897 
898 	if (!cache->full.IsEmpty())
899 		panic("cache destroy: still has full slabs");
900 
901 	if (!cache->partial.IsEmpty())
902 		panic("cache destroy: still has partial slabs");
903 
904 	while (!cache->empty.IsEmpty())
905 		cache->ReturnSlab(cache->empty.RemoveHead());
906 
907 	mutex_destroy(&cache->lock);
908 	delete_cache(cache);
909 }
910 
911 
912 status_t
913 object_cache_set_minimum_reserve(object_cache *cache, size_t objectCount)
914 {
915 	MutexLocker _(cache->lock);
916 
917 	if (cache->min_object_reserve == objectCount)
918 		return B_OK;
919 
920 	if (cache->min_object_reserve == 0) {
921 		cache->resize_request = new(std::nothrow) ResizeRequest(cache);
922 		if (cache->resize_request == NULL)
923 			return B_NO_MEMORY;
924 	} else if (objectCount == 0) {
925 		if (cache->resize_request->pending)
926 			cache->resize_request->delete_when_done = true;
927 		else
928 			delete cache->resize_request;
929 
930 		cache->resize_request = NULL;
931 	}
932 
933 	cache->min_object_reserve = objectCount;
934 
935 	increase_object_reserve(cache);
936 
937 	return B_OK;
938 }
939 
940 
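// Allocation fast path: try the per-CPU depot first (unless CACHE_NO_DEPOT);
// on a miss take the cache lock and hand out an object from a partial slab,
// pulling a slab off the empty list or creating a new one via
// object_cache_reserve_internal() if necessary.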
941 void *
942 object_cache_alloc(object_cache *cache, uint32 flags)
943 {
944 	if (!(cache->flags & CACHE_NO_DEPOT)) {
945 		void *object = object_depot_obtain(&cache->depot);
946 		if (object) {
947 			T(Alloc(cache, flags, object));
948 			return object;
949 		}
950 	}
951 
952 	MutexLocker _(cache->lock);
953 	slab *source;
954 
955 	if (cache->partial.IsEmpty()) {
956 		if (cache->empty.IsEmpty()) {
957 			if (object_cache_reserve_internal(cache, 1, flags, false) < B_OK) {
958 				T(Alloc(cache, flags, NULL));
959 				return NULL;
960 			}
961 
962 			cache->pressure++;
963 		}
964 
965 		source = cache->empty.RemoveHead();
966 		cache->empty_count--;
967 		cache->partial.Add(source);
968 	} else {
969 		source = cache->partial.Head();
970 	}
971 
972 	ParanoiaChecker _2(source);
973 
974 	object_link *link = _pop(source->free);
975 	source->count--;
976 	cache->used_count++;
977 
978 	if (cache->total_objects - cache->used_count < cache->min_object_reserve)
979 		increase_object_reserve(cache);
980 
981 	REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next,
982 		sizeof(void*));
983 
984 	TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.",
985 		link_to_object(link, cache->object_size), link, source, source->count);
986 
987 	if (source->count == 0) {
988 		cache->partial.Remove(source);
989 		cache->full.Add(source);
990 	}
991 
992 	void *object = link_to_object(link, cache->object_size);
993 	T(Alloc(cache, flags, object));
994 	return object;
995 }
996 
997 
998 static void
999 object_cache_return_to_slab(object_cache *cache, slab *source, void *object)
1000 {
1001 	if (source == NULL) {
1002 		panic("object_cache: free'd object has no slab");
1003 		return;
1004 	}
1005 
1006 	ParanoiaChecker _(source);
1007 
1008 	object_link *link = object_to_link(object, cache->object_size);
1009 
1010 	TRACE_CACHE(cache, "returning %p (%p) to %p, %lu used (%lu empty slabs).",
1011 		object, link, source, source->size - source->count,
1012 		cache->empty_count);
1013 
1014 	_push(source->free, link);
1015 	source->count++;
1016 	cache->used_count--;
1017 
1018 	ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next, sizeof(void*));
1019 
1020 	if (source->count == source->size) {
1021 		cache->partial.Remove(source);
1022 
1023 		if (cache->empty_count < cache->pressure
1024 			&& cache->total_objects - cache->used_count - source->size
1025 				>= cache->min_object_reserve) {
1026 			cache->empty_count++;
1027 			cache->empty.Add(source);
1028 		} else {
1029 			cache->ReturnSlab(source);
1030 		}
1031 	} else if (source->count == 1) {
1032 		cache->full.Remove(source);
1033 		cache->partial.Add(source);
1034 	}
1035 }
1036 
1037 
1038 void
1039 object_cache_free(object_cache *cache, void *object)
1040 {
1041 	if (object == NULL)
1042 		return;
1043 
1044 	T(Free(cache, object));
1045 
1046 	if (!(cache->flags & CACHE_NO_DEPOT)) {
1047 		if (object_depot_store(&cache->depot, object))
1048 			return;
1049 	}
1050 
1051 	MutexLocker _(cache->lock);
1052 	object_cache_return_to_slab(cache, cache->ObjectSlab(object), object);
1053 }
1054 
1055 
1056 static status_t
1057 object_cache_reserve_internal(object_cache *cache, size_t objectCount,
1058 	uint32 flags, bool unlockWhileAllocating)
1059 {
1060 	size_t numBytes = objectCount * cache->object_size;
1061 	size_t slabCount = ((numBytes - 1) / cache->slab_size) + 1;
1062 		// TODO: This also counts the unusable space of each slab, which can
1063 		// add up.
1064 
1065 	while (slabCount > 0) {
1066 		slab *newSlab = cache->CreateSlab(flags, unlockWhileAllocating);
1067 		if (newSlab == NULL)
1068 			return B_NO_MEMORY;
1069 
1070 		cache->empty.Add(newSlab);
1071 		cache->empty_count++;
1072 		slabCount--;
1073 	}
1074 
1075 	return B_OK;
1076 }
1077 
1078 
1079 status_t
1080 object_cache_reserve(object_cache *cache, size_t objectCount, uint32 flags)
1081 {
1082 	if (objectCount == 0)
1083 		return B_OK;
1084 
1085 	T(Reserve(cache, objectCount, flags));
1086 
1087 	MutexLocker _(cache->lock);
1088 	return object_cache_reserve_internal(cache, objectCount, flags, false);
1089 }
1090 
1091 
1092 void
1093 object_cache_get_usage(object_cache *cache, size_t *_allocatedMemory)
1094 {
1095 	MutexLocker _(cache->lock);
1096 	*_allocatedMemory = cache->usage;
1097 }
1098 
1099 
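// Lays out byteCount bytes of pages as free objects of this cache and pushes
// them onto the slab's free list. The starting offset advances by
// kCacheColorPeriod bytes from one slab to the next (as long as it fits into
// the slab's spare bytes), a simple form of cache coloring so that the first
// objects of successive slabs don't all map to the same cache lines.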
1100 slab *
1101 object_cache::InitSlab(slab *slab, void *pages, size_t byteCount)
1102 {
1103 	TRACE_CACHE(this, "construct (%p, %p .. %p, %lu)", slab, pages,
1104 		((uint8 *)pages) + byteCount, byteCount);
1105 
1106 	slab->pages = pages;
1107 	slab->count = slab->size = byteCount / object_size;
1108 	slab->free = NULL;
1109 	total_objects += slab->size;
1110 
1111 	size_t spareBytes = byteCount - (slab->size * object_size);
1112 	slab->offset = cache_color_cycle;
1113 
1114 	if (slab->offset > spareBytes)
1115 		cache_color_cycle = slab->offset = 0;
1116 	else
1117 		cache_color_cycle += kCacheColorPeriod;
1118 
1119 	TRACE_CACHE(this, "  %lu objects, %lu spare bytes, offset %lu",
1120 		slab->size, spareBytes, slab->offset);
1121 
1122 	uint8 *data = ((uint8 *)pages) + slab->offset;
1123 
1124 	CREATE_PARANOIA_CHECK_SET(slab, "slab");
1125 
1126 	for (size_t i = 0; i < slab->size; i++) {
1127 		bool failedOnFirst = false;
1128 
1129 		status_t status = PrepareObject(slab, data);
1130 		if (status < B_OK)
1131 			failedOnFirst = true;
1132 		else if (constructor)
1133 			status = constructor(cookie, data);
1134 
1135 		if (status < B_OK) {
1136 			if (!failedOnFirst)
1137 				UnprepareObject(slab, data);
1138 
1139 			data = ((uint8 *)pages) + slab->offset;
1140 			for (size_t j = 0; j < i; j++) {
1141 				if (destructor)
1142 					destructor(cookie, data);
1143 				UnprepareObject(slab, data);
1144 				data += object_size;
1145 			}
1146 
1147 			DELETE_PARANOIA_CHECK_SET(slab);
1148 
1149 			return NULL;
1150 		}
1151 
1152 		_push(slab->free, object_to_link(data, object_size));
1153 
1154 		ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, slab,
1155 			&object_to_link(data, object_size)->next, sizeof(void*));
1156 
1157 		data += object_size;
1158 	}
1159 
1160 	return slab;
1161 }
1162 
1163 
1164 void
1165 object_cache::UninitSlab(slab *slab)
1166 {
1167 	TRACE_CACHE(this, "destruct %p", slab);
1168 
1169 	if (slab->count != slab->size)
1170 		panic("cache: destroying a slab which isn't empty.");
1171 
1172 	total_objects -= slab->size;
1173 
1174 	DELETE_PARANOIA_CHECK_SET(slab);
1175 
1176 	uint8 *data = ((uint8 *)slab->pages) + slab->offset;
1177 
1178 	for (size_t i = 0; i < slab->size; i++) {
1179 		if (destructor)
1180 			destructor(cookie, data);
1181 		UnprepareObject(slab, data);
1182 		data += object_size;
1183 	}
1184 }
1185 
1186 
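// Small-object caches store the slab header in-band, in the last
// sizeof(slab) bytes of the slab's pages (see SmallObjectCache::CreateSlab()),
// so ObjectSlab() only has to round an object's address down to the slab
// size. Hashed caches allocate the header separately and look it up through
// hash_table instead.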
1187 static inline slab *
1188 slab_in_pages(const void *pages, size_t slab_size)
1189 {
1190 	return (slab *)(((uint8 *)pages) + slab_size - sizeof(slab));
1191 }
1192 
1193 
1194 static inline const void *
1195 lower_boundary(void *object, size_t byteCount)
1196 {
1197 	const uint8 *null = (uint8 *)NULL;
1198 	return null + ((((uint8 *)object) - null) & ~(byteCount - 1));
1199 }
1200 
1201 
1202 static inline bool
1203 check_cache_quota(object_cache *cache)
1204 {
1205 	if (cache->maximum == 0)
1206 		return true;
1207 
1208 	return (cache->usage + cache->slab_size) <= cache->maximum;
1209 }
1210 
1211 
1212 slab *
1213 SmallObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating)
1214 {
1215 	if (!check_cache_quota(this))
1216 		return NULL;
1217 
1218 	void *pages;
1219 
1220 	if (allocate_pages(this, &pages, flags, unlockWhileAllocating) < B_OK)
1221 		return NULL;
1222 
1223 	return InitSlab(slab_in_pages(pages, slab_size), pages,
1224 		slab_size - sizeof(slab));
1225 }
1226 
1227 
1228 void
1229 SmallObjectCache::ReturnSlab(slab *slab)
1230 {
1231 	UninitSlab(slab);
1232 	free_pages(this, slab->pages);
1233 }
1234 
1235 
1236 slab *
1237 SmallObjectCache::ObjectSlab(void *object) const
1238 {
1239 	return slab_in_pages(lower_boundary(object, slab_size), slab_size);
1240 }
1241 
1242 
1243 static slab *
1244 allocate_slab(uint32 flags)
1245 {
1246 	return (slab *)internal_alloc(sizeof(slab), flags);
1247 }
1248 
1249 
1250 static void
1251 free_slab(slab *slab)
1252 {
1253 	internal_free(slab);
1254 }
1255 
1256 
1257 static HashedObjectCache::Link *
1258 allocate_link(uint32 flags)
1259 {
1260 	return (HashedObjectCache::Link *)
1261 		internal_alloc(sizeof(HashedObjectCache::Link), flags);
1262 }
1263 
1264 
1265 static void
1266 free_link(HashedObjectCache::Link *link)
1267 {
1268 	internal_free(link);
1269 }
1270 
1271 
1272 slab *
1273 HashedObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating)
1274 {
1275 	if (!check_cache_quota(this))
1276 		return NULL;
1277 
1278 	if (unlockWhileAllocating)
1279 		Unlock();
1280 
1281 	slab *slab = allocate_slab(flags);
1282 
1283 	if (unlockWhileAllocating)
1284 		Lock();
1285 
1286 	if (slab == NULL)
1287 		return NULL;
1288 
1289 	void *pages;
1290 
1291 	if (allocate_pages(this, &pages, flags, unlockWhileAllocating) == B_OK) {
1292 		if (InitSlab(slab, pages, slab_size))
1293 			return slab;
1294 
1295 		free_pages(this, pages);
1296 	}
1297 
1298 	free_slab(slab);
1299 	return NULL;
1300 }
1301 
1302 
1303 void
1304 HashedObjectCache::ReturnSlab(slab *slab)
1305 {
1306 	UninitSlab(slab);
1307 	free_pages(this, slab->pages);
1308 }
1309 
1310 
1311 slab *
1312 HashedObjectCache::ObjectSlab(void *object) const
1313 {
1314 	Link *link = hash_table.Lookup(object);
1315 	if (link == NULL) {
1316 		panic("object cache: requested object %p missing from hash table",
1317 			object);
1318 		return NULL;
1319 	}
1320 	return link->parent;
1321 }
1322 
1323 
1324 status_t
1325 HashedObjectCache::PrepareObject(slab *source, void *object)
1326 {
1327 	Link *link = allocate_link(CACHE_DONT_SLEEP);
1328 	if (link == NULL)
1329 		return B_NO_MEMORY;
1330 
1331 	link->buffer = object;
1332 	link->parent = source;
1333 
1334 	hash_table.Insert(link);
1335 	return B_OK;
1336 }
1337 
1338 
1339 void
1340 HashedObjectCache::UnprepareObject(slab *source, void *object)
1341 {
1342 	Link *link = hash_table.Lookup(object);
1343 	if (link == NULL) {
1344 		panic("object cache: requested object missing from hash table");
1345 		return;
1346 	}
1347 
1348 	if (link->parent != source) {
1349 		panic("object cache: slab mismatch");
1350 		return;
1351 	}
1352 
1353 	hash_table.Remove(link);
1354 	free_link(link);
1355 }
1356 
1357 
1358 static inline bool
1359 is_magazine_empty(depot_magazine *magazine)
1360 {
1361 	return magazine->current_round == 0;
1362 }
1363 
1364 
1365 static inline bool
1366 is_magazine_full(depot_magazine *magazine)
1367 {
1368 	return magazine->current_round == magazine->round_count;
1369 }
1370 
1371 
1372 static inline void *
1373 pop_magazine(depot_magazine *magazine)
1374 {
1375 	return magazine->rounds[--magazine->current_round];
1376 }
1377 
1378 
1379 static inline bool
1380 push_magazine(depot_magazine *magazine, void *object)
1381 {
1382 	if (is_magazine_full(magazine))
1383 		return false;
1384 	magazine->rounds[magazine->current_round++] = object;
1385 	return true;
1386 }
1387 
1388 
1389 static bool
1390 exchange_with_full(object_depot *depot, depot_magazine* &magazine)
1391 {
1392 	RecursiveLocker _(depot->lock);
1393 
1394 	if (depot->full == NULL)
1395 		return false;
1396 
1397 	depot->full_count--;
1398 	depot->empty_count++;
1399 
1400 	_push(depot->empty, magazine);
1401 	magazine = _pop(depot->full);
1402 	return true;
1403 }
1404 
1405 
1406 static bool
1407 exchange_with_empty(object_depot *depot, depot_magazine* &magazine)
1408 {
1409 	RecursiveLocker _(depot->lock);
1410 
1411 	if (depot->empty == NULL) {
1412 		depot->empty = alloc_magazine();
1413 		if (depot->empty == NULL)
1414 			return false;
1415 	} else {
1416 		depot->empty_count--;
1417 	}
1418 
1419 	if (magazine) {
1420 		_push(depot->full, magazine);
1421 		depot->full_count++;
1422 	}
1423 
1424 	magazine = _pop(depot->empty);
1425 	return true;
1426 }
1427 
1428 
1429 static depot_magazine *
1430 alloc_magazine()
1431 {
1432 	depot_magazine *magazine = (depot_magazine *)internal_alloc(
1433 		sizeof(depot_magazine) + kMagazineCapacity * sizeof(void *), 0);
1434 	if (magazine) {
1435 		magazine->next = NULL;
1436 		magazine->current_round = 0;
1437 		magazine->round_count = kMagazineCapacity;
1438 	}
1439 
1440 	return magazine;
1441 }
1442 
1443 
1444 static void
1445 free_magazine(depot_magazine *magazine)
1446 {
1447 	internal_free(magazine);
1448 }
1449 
1450 
1451 static void
1452 empty_magazine(object_depot *depot, depot_magazine *magazine)
1453 {
1454 	for (uint16 i = 0; i < magazine->current_round; i++)
1455 		depot->return_object(depot, magazine->rounds[i]);
1456 	free_magazine(magazine);
1457 }
1458 
1459 
1460 status_t
1461 object_depot_init(object_depot *depot, uint32 flags,
1462 	void (*return_object)(object_depot *depot, void *object))
1463 {
1464 	depot->full = NULL;
1465 	depot->empty = NULL;
1466 	depot->full_count = depot->empty_count = 0;
1467 
1468 	recursive_lock_init(&depot->lock, "depot");
1469 
1470 	depot->stores = (depot_cpu_store *)internal_alloc(sizeof(depot_cpu_store)
1471 		* smp_get_num_cpus(), flags);
1472 	if (depot->stores == NULL) {
1473 		recursive_lock_destroy(&depot->lock);
1474 		return B_NO_MEMORY;
1475 	}
1476 
1477 	for (int i = 0; i < smp_get_num_cpus(); i++) {
1478 		recursive_lock_init(&depot->stores[i].lock, "cpu store");
1479 		depot->stores[i].loaded = depot->stores[i].previous = NULL;
1480 	}
1481 
1482 	depot->return_object = return_object;
1483 
1484 	return B_OK;
1485 }
1486 
1487 
1488 void
1489 object_depot_destroy(object_depot *depot)
1490 {
1491 	object_depot_make_empty(depot);
1492 
1493 	for (int i = 0; i < smp_get_num_cpus(); i++) {
1494 		recursive_lock_destroy(&depot->stores[i].lock);
1495 	}
1496 
1497 	internal_free(depot->stores);
1498 
1499 	recursive_lock_destroy(&depot->lock);
1500 }
1501 
1502 
1503 static void *
1504 object_depot_obtain_from_store(object_depot *depot, depot_cpu_store *store)
1505 {
1506 	RecursiveLocker _(store->lock);
1507 
1508 	// To better understand both the Alloc() and Free() logic refer to
1509 	// Bonwick's ``Magazines and Vmem'' [in 2001 USENIX proceedings]
1510 
1511 	// In a nutshell, we try to get an object from the loaded magazine
1512 	// if it's not empty, or from the previous magazine if it's full
1513 	// and finally from the Slab if the magazine depot has no full magazines.
1514 
1515 	if (store->loaded == NULL)
1516 		return NULL;
1517 
1518 	while (true) {
1519 		if (!is_magazine_empty(store->loaded))
1520 			return pop_magazine(store->loaded);
1521 
1522 		if (store->previous && (is_magazine_full(store->previous)
1523 			|| exchange_with_full(depot, store->previous)))
1524 			std::swap(store->previous, store->loaded);
1525 		else
1526 			return NULL;
1527 	}
1528 }
1529 
1530 
1531 static int
1532 object_depot_return_to_store(object_depot *depot, depot_cpu_store *store,
1533 	void *object)
1534 {
1535 	RecursiveLocker _(store->lock);
1536 
1537 	// We try to add the object to the loaded magazine if we have one
1538 	// and it's not full, or to the previous one if it is empty. If
1539 	// the magazine depot doesn't provide us with a new empty magazine
1540 	// we return the object directly to the slab.
1541 
1542 	while (true) {
1543 		if (store->loaded && push_magazine(store->loaded, object))
1544 			return 1;
1545 
1546 		if ((store->previous && is_magazine_empty(store->previous))
1547 			|| exchange_with_empty(depot, store->previous))
1548 			std::swap(store->loaded, store->previous);
1549 		else
1550 			return 0;
1551 	}
1552 }
1553 
1554 
1555 void *
1556 object_depot_obtain(object_depot *depot)
1557 {
1558 	return object_depot_obtain_from_store(depot, object_depot_cpu(depot));
1559 }
1560 
1561 
1562 int
1563 object_depot_store(object_depot *depot, void *object)
1564 {
1565 	return object_depot_return_to_store(depot, object_depot_cpu(depot),
1566 		object);
1567 }
1568 
1569 
1570 void
1571 object_depot_make_empty(object_depot *depot)
1572 {
1573 	for (int i = 0; i < smp_get_num_cpus(); i++) {
1574 		depot_cpu_store *store = &depot->stores[i];
1575 
1576 		RecursiveLocker _(store->lock);
1577 
1578 		if (depot->stores[i].loaded)
1579 			empty_magazine(depot, depot->stores[i].loaded);
1580 		if (depot->stores[i].previous)
1581 			empty_magazine(depot, depot->stores[i].previous);
1582 		depot->stores[i].loaded = depot->stores[i].previous = NULL;
1583 	}
1584 
1585 	RecursiveLocker _(depot->lock);
1586 
1587 	while (depot->full)
1588 		empty_magazine(depot, _pop(depot->full));
1589 
1590 	while (depot->empty)
1591 		empty_magazine(depot, _pop(depot->empty));
1592 }
1593 
1594 
1595 static int
1596 dump_slabs(int argc, char *argv[])
1597 {
1598 	kprintf("%10s %22s %8s %8s %6s %8s %8s %8s\n", "address", "name",
1599 		"objsize", "usage", "empty", "usedobj", "total", "flags");
1600 
1601 	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
1602 
1603 	while (it.HasNext()) {
1604 		object_cache *cache = it.Next();
1605 
1606 		kprintf("%p %22s %8lu %8lu %6lu %8lu %8lu %8lx\n", cache, cache->name,
1607 			cache->object_size, cache->usage, cache->empty_count,
1608 			cache->used_count, cache->usage / cache->object_size,
1609 			cache->flags);
1610 	}
1611 
1612 	return 0;
1613 }
1614 
1615 
1616 static int
1617 dump_cache_info(int argc, char *argv[])
1618 {
1619 	if (argc < 2) {
1620 		kprintf("usage: cache_info [address]\n");
1621 		return 0;
1622 	}
1623 
1624 	object_cache *cache = (object_cache *)strtoul(argv[1], NULL, 16);
1625 
1626 	kprintf("name: %s\n", cache->name);
1627 	kprintf("lock: %p\n", &cache->lock);
1628 	kprintf("object_size: %lu\n", cache->object_size);
1629 	kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle);
1630 	kprintf("used_count: %lu\n", cache->used_count);
1631 	kprintf("empty_count: %lu\n", cache->empty_count);
1632 	kprintf("pressure: %lu\n", cache->pressure);
1633 	kprintf("slab_size: %lu\n", cache->slab_size);
1634 	kprintf("usage: %lu\n", cache->usage);
1635 	kprintf("maximum: %lu\n", cache->maximum);
1636 	kprintf("flags: 0x%lx\n", cache->flags);
1637 	kprintf("cookie: %p\n", cache->cookie);
1638 
1639 	return 0;
1640 }
1641 
1642 
1643 void
1644 slab_init(kernel_args *args, addr_t initialBase, size_t initialSize)
1645 {
1646 	dprintf("slab: init base %p + 0x%lx\n", (void *)initialBase, initialSize);
1647 
1648 	sInitialBegin = (uint8 *)initialBase;
1649 	sInitialLimit = sInitialBegin + initialSize;
1650 	sInitialPointer = sInitialBegin;
1651 
1652 	sKernelArgs = args;
1653 
1654 	new (&sObjectCaches) ObjectCacheList();
1655 
1656 	block_allocator_init_boot();
1657 
1658 	add_debugger_command("slabs", dump_slabs, "list all object caches");
1659 	add_debugger_command("cache_info", dump_cache_info,
1660 		"dump information about a specific cache");
1661 }
1662 
1663 
1664 void
1665 slab_init_post_sem()
1666 {
1667 	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
1668 
1669 	while (it.HasNext()) {
1670 		object_cache *cache = it.Next();
1671 		if (cache->allocate_pages == early_allocate_pages)
1672 			object_cache_commit_pre_pages(cache);
1673 	}
1674 
1675 	block_allocator_init_rest();
1676 }
1677 
1678 
1679 void
1680 slab_init_post_thread()
1681 {
1682 	new(&sResizeRequests) ResizeRequestQueue;
1683 	sResizeRequestsCondition.Init(&sResizeRequests, "object cache resizer");
1684 
1685 	thread_id objectCacheResizer = spawn_kernel_thread(object_cache_resizer,
1686 		"object cache resizer", B_URGENT_PRIORITY, NULL);
1687 	if (objectCacheResizer < 0) {
1688 		panic("slab_init_post_thread(): failed to spawn object cache resizer "
1689 			"thread\n");
1690 		return;
1691 	}
1692 
1693 	send_signal_etc(objectCacheResizer, SIGCONT, B_DO_NOT_RESCHEDULE);
1694 }
1695