xref: /haiku/src/system/kernel/slab/Slab.cpp (revision 746cac055adc6ac3308c7bc2d29040fb95689cc9)
1 /*
2  * Copyright 2008, Axel Dörfler. All Rights Reserved.
3  * Copyright 2007, Hugo Santos. All Rights Reserved.
4  *
5  * Distributed under the terms of the MIT License.
6  */
7 
8 
9 #include <Slab.h>
10 #include "slab_private.h"
11 
12 #include <algorithm>
13 #include <new>
14 #include <signal.h>
15 #include <stdlib.h>
16 #include <string.h>
17 
18 #include <KernelExport.h>
19 
20 #include <condition_variable.h>
21 #include <Depot.h>
22 #include <kernel.h>
23 #include <low_resource_manager.h>
24 #include <smp.h>
25 #include <tracing.h>
26 #include <util/AutoLock.h>
27 #include <util/DoublyLinkedList.h>
28 #include <util/OpenHashTable.h>
29 #include <vm.h>
30 #include <vm_address_space.h>
31 
32 
33 // TODO: kMagazineCapacity should be dynamically tuned per cache.
34 
35 
36 //#define TRACE_SLAB
37 
38 #ifdef TRACE_SLAB
39 #define TRACE_CACHE(cache, format, args...) \
40 	dprintf("Cache[%p, %s] " format "\n", cache, cache->name , ##args)
41 #else
42 #define TRACE_CACHE(cache, format, args...) do { } while (0)
43 #endif
44 
45 #define COMPONENT_PARANOIA_LEVEL	OBJECT_CACHE_PARANOIA
46 #include <debug_paranoia.h>
47 
48 
49 #define CACHE_ALIGN_ON_SIZE	(30 << 1)
50 
51 static const int kMagazineCapacity = 32;
52 static const size_t kCacheColorPeriod = 8;
53 
54 struct object_link {
55 	struct object_link *next;
56 };
57 
58 struct slab : DoublyLinkedListLinkImpl<slab> {
59 	void *pages;
60 	size_t size;		// total number of objects
61 	size_t count;		// free objects
62 	size_t offset;
63 	object_link *free;
64 };
65 
66 typedef DoublyLinkedList<slab> SlabList;
67 struct ResizeRequest;
68 
69 struct object_cache : DoublyLinkedListLinkImpl<object_cache> {
70 	char name[32];
71 	mutex lock;
72 	size_t object_size;
73 	size_t cache_color_cycle;
74 	SlabList empty, partial, full;
75 	size_t total_objects;		// total number of objects
76 	size_t used_count;			// used objects
77 	size_t empty_count;			// empty slabs
78 	size_t pressure;
79 	size_t min_object_reserve;	// minimum number of free objects
80 
81 	size_t slab_size;
82 	size_t usage, maximum;
83 	uint32 flags;
84 
85 	ResizeRequest *resize_request;
86 
87 	void *cookie;
88 	object_cache_constructor constructor;
89 	object_cache_destructor destructor;
90 	object_cache_reclaimer reclaimer;
91 
92 	status_t (*allocate_pages)(object_cache *cache, void **pages,
93 		uint32 flags, bool unlockWhileAllocating);
94 	void (*free_pages)(object_cache *cache, void *pages);
95 
96 	object_depot depot;
97 
98 	virtual slab *CreateSlab(uint32 flags, bool unlockWhileAllocating) = 0;
99 	virtual void ReturnSlab(slab *slab) = 0;
100 	virtual slab *ObjectSlab(void *object) const = 0;
101 
102 	slab *InitSlab(slab *slab, void *pages, size_t byteCount);
103 	void UninitSlab(slab *slab);
104 
105 	virtual status_t PrepareObject(slab *source, void *object) { return B_OK; }
106 	virtual void UnprepareObject(slab *source, void *object) {}
107 
108 	virtual ~object_cache() {}
109 
110 	bool Lock()		{ return mutex_lock(&lock) == B_OK; }
111 	void Unlock()	{ mutex_unlock(&lock); }
112 };
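
// An object_cache keeps its slabs on three lists: "empty" (every object
// free), "partial" (some objects free) and "full" (every object in use).
// object_cache_alloc() serves requests from a partial slab, pulling a slab
// off the empty list when no partial one exists; frees move slabs back
// towards the empty list, which object_cache_low_memory() trims when the
// low resource manager signals memory pressure.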
113 
114 typedef DoublyLinkedList<object_cache> ObjectCacheList;
115 
116 struct SmallObjectCache : object_cache {
117 	slab *CreateSlab(uint32 flags, bool unlockWhileAllocating);
118 	void ReturnSlab(slab *slab);
119 	slab *ObjectSlab(void *object) const;
120 };
121 
122 
123 struct HashedObjectCache : object_cache {
124 	struct Link : HashTableLink<Link> {
125 		const void *buffer;
126 		slab *parent;
127 	};
128 
129 	struct Definition {
130 		typedef HashedObjectCache	ParentType;
131 		typedef const void *		KeyType;
132 		typedef Link				ValueType;
133 
134 		Definition(HashedObjectCache *_parent) : parent(_parent) {}
135 		Definition(const Definition& definition) : parent(definition.parent) {}
136 
137 		size_t HashKey(const void *key) const
138 		{
139 			return (((const uint8 *)key) - ((const uint8 *)0))
140 				>> parent->lower_boundary;
141 		}
142 
143 		size_t Hash(Link *value) const { return HashKey(value->buffer); }
144 		bool Compare(const void *key, Link *value) const
145 			{ return value->buffer == key; }
146 		HashTableLink<Link> *GetLink(Link *value) const { return value; }
147 
148 		HashedObjectCache *parent;
149 	};
150 
151 	typedef OpenHashTable<Definition> HashTable;
152 
153 	HashedObjectCache()
154 		: hash_table(this) {}
155 
156 	slab *CreateSlab(uint32 flags, bool unlockWhileAllocating);
157 	void ReturnSlab(slab *slab);
158 	slab *ObjectSlab(void *object) const;
159 	status_t PrepareObject(slab *source, void *object);
160 	void UnprepareObject(slab *source, void *object);
161 
162 	HashTable hash_table;
163 	size_t lower_boundary;
164 };
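
// HashedObjectCache serves the larger objects (see create_object_cache_etc()
// below): their slab header cannot be found by simply rounding the object
// address down, so PrepareObject() records an object -> slab Link in
// hash_table and ObjectSlab() looks it up again when the object is freed.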
165 
166 
167 struct depot_magazine {
168 	struct depot_magazine *next;
169 	uint16 current_round, round_count;
170 	void *rounds[0];
171 };
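
// The zero-length "rounds" array is allocated inline: alloc_magazine() below
// reserves sizeof(depot_magazine) + kMagazineCapacity * sizeof(void *) bytes,
// so each magazine currently caches up to 32 object pointers.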
172 
173 
174 struct depot_cpu_store {
175 	recursive_lock lock;
176 	struct depot_magazine *loaded, *previous;
177 };
178 
179 struct ResizeRequest : DoublyLinkedListLinkImpl<ResizeRequest> {
180 	ResizeRequest(object_cache* cache)
181 		:
182 		cache(cache),
183 		pending(false),
184 		delete_when_done(false)
185 	{
186 	}
187 
188 	object_cache*	cache;
189 	bool			pending;
190 	bool			delete_when_done;
191 };
192 
193 typedef DoublyLinkedList<ResizeRequest> ResizeRequestQueue;
194 
195 
196 static ObjectCacheList sObjectCaches;
197 static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");
198 
199 static uint8 *sInitialBegin, *sInitialLimit, *sInitialPointer;
200 static kernel_args *sKernelArgs;
201 
202 
203 static status_t object_cache_reserve_internal(object_cache *cache,
204 	size_t object_count, uint32 flags, bool unlockWhileAllocating);
205 static depot_magazine *alloc_magazine();
206 static void free_magazine(depot_magazine *magazine);
207 
208 static mutex sResizeRequestsLock
209 	= MUTEX_INITIALIZER("object cache resize requests");
210 static ResizeRequestQueue sResizeRequests;
211 static ConditionVariable sResizeRequestsCondition;
212 
213 
214 #if OBJECT_CACHE_TRACING
215 
216 
217 namespace ObjectCacheTracing {
218 
219 class ObjectCacheTraceEntry : public AbstractTraceEntry {
220 	public:
221 		ObjectCacheTraceEntry(object_cache* cache)
222 			:
223 			fCache(cache)
224 		{
225 		}
226 
227 	protected:
228 		object_cache*	fCache;
229 };
230 
231 
232 class Create : public ObjectCacheTraceEntry {
233 	public:
234 		Create(const char* name, size_t objectSize, size_t alignment,
235 				size_t maxByteUsage, uint32 flags, void *cookie,
236 				object_cache *cache)
237 			:
238 			ObjectCacheTraceEntry(cache),
239 			fObjectSize(objectSize),
240 			fAlignment(alignment),
241 			fMaxByteUsage(maxByteUsage),
242 			fFlags(flags),
243 			fCookie(cookie)
244 		{
245 			fName = alloc_tracing_buffer_strcpy(name, 64, false);
246 			Initialized();
247 		}
248 
249 		virtual void AddDump(TraceOutput& out)
250 		{
251 			out.Print("object cache create: name: \"%s\", object size: %lu, "
252 				"alignment: %lu, max usage: %lu, flags: 0x%lx, cookie: %p "
253 				"-> cache: %p", fName, fObjectSize, fAlignment, fMaxByteUsage,
254 					fFlags, fCookie, fCache);
255 		}
256 
257 	private:
258 		const char*	fName;
259 		size_t		fObjectSize;
260 		size_t		fAlignment;
261 		size_t		fMaxByteUsage;
262 		uint32		fFlags;
263 		void*		fCookie;
264 };
265 
266 
267 class Delete : public ObjectCacheTraceEntry {
268 	public:
269 		Delete(object_cache *cache)
270 			:
271 			ObjectCacheTraceEntry(cache)
272 		{
273 			Initialized();
274 		}
275 
276 		virtual void AddDump(TraceOutput& out)
277 		{
278 			out.Print("object cache delete: %p", fCache);
279 		}
280 };
281 
282 
283 class Alloc : public ObjectCacheTraceEntry {
284 	public:
285 		Alloc(object_cache *cache, uint32 flags, void* object)
286 			:
287 			ObjectCacheTraceEntry(cache),
288 			fFlags(flags),
289 			fObject(object)
290 		{
291 			Initialized();
292 		}
293 
294 		virtual void AddDump(TraceOutput& out)
295 		{
296 			out.Print("object cache alloc: cache: %p, flags: 0x%lx -> "
297 				"object: %p", fCache, fFlags, fObject);
298 		}
299 
300 	private:
301 		uint32		fFlags;
302 		void*		fObject;
303 };
304 
305 
306 class Free : public ObjectCacheTraceEntry {
307 	public:
308 		Free(object_cache *cache, void* object)
309 			:
310 			ObjectCacheTraceEntry(cache),
311 			fObject(object)
312 		{
313 			Initialized();
314 		}
315 
316 		virtual void AddDump(TraceOutput& out)
317 		{
318 			out.Print("object cache free: cache: %p, object: %p", fCache,
319 				fObject);
320 		}
321 
322 	private:
323 		void*		fObject;
324 };
325 
326 
327 class Reserve : public ObjectCacheTraceEntry {
328 	public:
329 		Reserve(object_cache *cache, size_t count, uint32 flags)
330 			:
331 			ObjectCacheTraceEntry(cache),
332 			fCount(count),
333 			fFlags(flags)
334 		{
335 			Initialized();
336 		}
337 
338 		virtual void AddDump(TraceOutput& out)
339 		{
340 			out.Print("object cache reserve: cache: %p, count: %lu, "
341 				"flags: 0x%lx", fCache, fCount, fFlags);
342 		}
343 
344 	private:
345 		uint32		fCount;
346 		uint32		fFlags;
347 };
348 
349 
350 }	// namespace ObjectCacheTracing
351 
352 #	define T(x)	new(std::nothrow) ObjectCacheTracing::x
353 
354 #else
355 #	define T(x)
356 #endif	// OBJECT_CACHE_TRACING
357 
358 
359 // #pragma mark -
360 
361 
362 template<typename Type> static Type *
363 _pop(Type* &head)
364 {
365 	Type *oldHead = head;
366 	head = head->next;
367 	return oldHead;
368 }
369 
370 
371 template<typename Type> static inline void
372 _push(Type* &head, Type *object)
373 {
374 	object->next = head;
375 	head = object;
376 }
377 
378 
379 static inline void *
380 link_to_object(object_link *link, size_t objectSize)
381 {
382 	return ((uint8 *)link) - (objectSize - sizeof(object_link));
383 }
384 
385 
386 static inline object_link *
387 object_to_link(void *object, size_t objectSize)
388 {
389 	return (object_link *)(((uint8 *)object)
390 		+ (objectSize - sizeof(object_link)));
391 }
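
// Free objects are threaded through an object_link that occupies the last
// sizeof(object_link) bytes of each object slot: for an object at address p
// the link lives at p + object_size - sizeof(object_link). link_to_object()
// and object_to_link() convert between the two views.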
392 
393 
394 static inline depot_cpu_store *
395 object_depot_cpu(object_depot *depot)
396 {
397 	return &depot->stores[smp_get_current_cpu()];
398 }
399 
400 
401 static inline int
402 __fls0(size_t value)
403 {
404 	if (value == 0)
405 		return -1;
406 
407 	int bit;
408 	for (bit = 0; value != 1; bit++)
409 		value >>= 1;
410 	return bit;
411 }
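
// __fls0() returns the index of the highest set bit, i.e. floor(log2(value)),
// or -1 for 0 (for example __fls0(1) == 0 and __fls0(4096) == 12). It is used
// below to derive HashedObjectCache::lower_boundary from the object size.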
412 
413 
414 static void *
415 internal_alloc(size_t size, uint32 flags)
416 {
417 	if (flags & CACHE_DURING_BOOT) {
418 		if ((sInitialPointer + size) > sInitialLimit)
419 			panic("internal_alloc: ran out of initial space");
420 
421 		uint8 *buffer = sInitialPointer;
422 		sInitialPointer += size;
423 		return buffer;
424 	}
425 
426 	// TODO: Support CACHE_DONT_SLEEP!
427 	return block_alloc(size);
428 }
429 
430 
431 static void
432 internal_free(void *_buffer)
433 {
434 	uint8 *buffer = (uint8 *)_buffer;
435 
436 	if (buffer >= sInitialBegin && buffer < sInitialLimit)
437 		return;
438 
439 	block_free(buffer);
440 }
441 
442 
443 static status_t
444 area_allocate_pages(object_cache *cache, void **pages, uint32 flags,
445 	bool unlockWhileAllocating)
446 {
447 	TRACE_CACHE(cache, "allocate pages (%lu, 0x0%lx)", cache->slab_size, flags);
448 
449 	uint32 lock = B_FULL_LOCK;
450 	if (cache->flags & CACHE_UNLOCKED_PAGES)
451 		lock = B_NO_LOCK;
452 
453 	uint32 addressSpec = B_ANY_KERNEL_ADDRESS;
454 	if ((cache->flags & CACHE_ALIGN_ON_SIZE) != 0
455 		&& cache->slab_size != B_PAGE_SIZE)
456 		addressSpec = B_ANY_KERNEL_BLOCK_ADDRESS;
457 
458 	if (unlockWhileAllocating)
459 		cache->Unlock();
460 
461 	// if we are allocating, it is because we need the pages immediately,
462 	// so we lock them. When moving the slab to the empty list we should
463 	// unlock them, and lock them again when getting one from the empty list.
464 	area_id areaId = vm_create_anonymous_area(vm_kernel_address_space_id(),
465 		cache->name, pages, addressSpec, cache->slab_size, lock,
466 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
467 		(flags & CACHE_DONT_SLEEP) != 0 ? CREATE_AREA_DONT_WAIT : 0, true);
468 
469 	if (unlockWhileAllocating)
470 		cache->Lock();
471 
472 	if (areaId < 0)
473 		return areaId;
474 
475 	cache->usage += cache->slab_size;
476 
477 	TRACE_CACHE(cache, "  ... = { %ld, %p }", areaId, *pages);
478 
479 	return B_OK;
480 }
481 
482 
483 static void
484 area_free_pages(object_cache *cache, void *pages)
485 {
486 	area_id id = area_for(pages);
487 
488 	TRACE_CACHE(cache, "delete pages %p (%ld)", pages, id);
489 
490 	if (id < B_OK)
491 		panic("object cache: freeing unknown area");
492 
493 	delete_area(id);
494 
495 	cache->usage -= cache->slab_size;
496 }
497 
498 
499 static status_t
500 early_allocate_pages(object_cache *cache, void **pages, uint32 flags,
501 	bool unlockWhileAllocating)
502 {
503 	TRACE_CACHE(cache, "early allocate pages (%lu, 0x0%lx)", cache->slab_size,
504 		flags);
505 
506 	if (unlockWhileAllocating)
507 		cache->Unlock();
508 
509 	addr_t base = vm_allocate_early(sKernelArgs, cache->slab_size,
510 		cache->slab_size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
511 
512 	if (unlockWhileAllocating)
513 		cache->Lock();
514 
515 	*pages = (void *)base;
516 
517 	cache->usage += cache->slab_size;
518 
519 	TRACE_CACHE(cache, "  ... = { %p }", *pages);
520 
521 	return B_OK;
522 }
523 
524 
525 static void
526 early_free_pages(object_cache *cache, void *pages)
527 {
528 	panic("memory pressure on bootup?");
529 }
530 
531 
532 static void
533 object_cache_low_memory(void *_self, uint32 resources, int32 level)
534 {
535 	if (level == B_NO_LOW_RESOURCE)
536 		return;
537 
538 	object_cache *cache = (object_cache *)_self;
539 
540 	// we are calling the reclaimer without the object cache lock
541 	// to give the owner a chance to return objects to the slabs.
542 	// As we only register the low memory handler after we complete
543 	// the init of the object cache, unless there is some funky
544 	// memory re-ordering business going on, we should be OK.
545 
546 	if (cache->reclaimer)
547 		cache->reclaimer(cache->cookie, level);
548 
549 	MutexLocker _(cache->lock);
550 	size_t minimumAllowed;
551 
552 	switch (level) {
553 	case B_LOW_RESOURCE_NOTE:
554 		minimumAllowed = cache->pressure / 2 + 1;
555 		break;
556 
557 	case B_LOW_RESOURCE_WARNING:
558 		cache->pressure /= 2;
559 		minimumAllowed = 0;
560 		break;
561 
562 	default:
563 		cache->pressure = 0;
564 		minimumAllowed = 0;
565 		break;
566 	}
567 
568 	// If the object cache has minimum object reserve, make sure that we don't
569 	// free too many slabs.
570 	if (cache->min_object_reserve > 0 && cache->empty_count > 0) {
571 		size_t objectsPerSlab = cache->empty.Head()->size;
572 		size_t freeObjects = cache->total_objects - cache->used_count;
573 
574 		if (cache->min_object_reserve + objectsPerSlab >= freeObjects)
575 			return;
576 
577 		size_t slabsToFree = (freeObjects - cache->min_object_reserve)
578 			/ objectsPerSlab;
579 
580 		if (cache->empty_count > minimumAllowed + slabsToFree)
581 			minimumAllowed = cache->empty_count - slabsToFree;
582 	}
583 
584 	if (cache->empty_count <= minimumAllowed)
585 		return;
586 
587 	TRACE_CACHE(cache, "cache: memory pressure, will release down to %lu.",
588 		minimumAllowed);
589 
590 	while (cache->empty_count > minimumAllowed) {
591 		cache->ReturnSlab(cache->empty.RemoveHead());
592 		cache->empty_count--;
593 	}
594 }
595 
596 
597 static void
598 object_cache_return_object_wrapper(object_depot *depot, void *object)
599 {
600 	object_cache *cache = (object_cache *)(((uint8 *)depot)
601 		- offsetof(object_cache, depot));
602 
603 	object_cache_free(cache, object);
604 }
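
// The depot only hands back its object_depot pointer; since the depot is
// embedded in object_cache, subtracting offsetof(object_cache, depot)
// recovers the owning cache (the usual container_of idiom).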
605 
606 
607 static status_t
608 object_cache_init(object_cache *cache, const char *name, size_t objectSize,
609 	size_t alignment, size_t maximum, uint32 flags, void *cookie,
610 	object_cache_constructor constructor, object_cache_destructor destructor,
611 	object_cache_reclaimer reclaimer)
612 {
613 	strlcpy(cache->name, name, sizeof(cache->name));
614 
615 	mutex_init(&cache->lock, cache->name);
616 
617 	if (objectSize < sizeof(object_link))
618 		objectSize = sizeof(object_link);
619 
620 	if (alignment > 0 && (objectSize & (alignment - 1)))
621 		cache->object_size = objectSize + alignment
622 			- (objectSize & (alignment - 1));
623 	else
624 		cache->object_size = objectSize;
625 
626 	TRACE_CACHE(cache, "init %lu, %lu -> %lu", objectSize, alignment,
627 		cache->object_size);
628 
629 	cache->cache_color_cycle = 0;
630 	cache->total_objects = 0;
631 	cache->used_count = 0;
632 	cache->empty_count = 0;
633 	cache->pressure = 0;
634 	cache->min_object_reserve = 0;
635 
636 	cache->usage = 0;
637 	cache->maximum = maximum;
638 
639 	cache->flags = flags;
640 
641 	cache->resize_request = NULL;
642 
643 	// TODO: depot destruction is obviously broken
644 	// no gain in using the depot in single cpu setups
645 	//if (smp_get_num_cpus() == 1)
646 		cache->flags |= CACHE_NO_DEPOT;
647 
648 	if (!(cache->flags & CACHE_NO_DEPOT)) {
649 		status_t status = object_depot_init(&cache->depot, flags,
650 			object_cache_return_object_wrapper);
651 		if (status < B_OK) {
652 			mutex_destroy(&cache->lock);
653 			return status;
654 		}
655 	}
656 
657 	cache->cookie = cookie;
658 	cache->constructor = constructor;
659 	cache->destructor = destructor;
660 	cache->reclaimer = reclaimer;
661 
662 	if (cache->flags & CACHE_DURING_BOOT) {
663 		cache->allocate_pages = early_allocate_pages;
664 		cache->free_pages = early_free_pages;
665 	} else {
666 		cache->allocate_pages = area_allocate_pages;
667 		cache->free_pages = area_free_pages;
668 	}
669 
670 	register_low_resource_handler(object_cache_low_memory, cache,
671 		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY, 5);
672 
673 	MutexLocker _(sObjectCacheListLock);
674 	sObjectCaches.Add(cache);
675 
676 	return B_OK;
677 }
678 
679 
680 static void
681 object_cache_commit_slab(object_cache *cache, slab *slab)
682 {
683 	void *pages = (void *)ROUNDOWN((addr_t)slab->pages, B_PAGE_SIZE);
684 	if (create_area(cache->name, &pages, B_EXACT_ADDRESS, cache->slab_size,
685 		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA) < B_OK)
686 		panic("failed to create_area()");
687 }
688 
689 
690 static void
691 object_cache_commit_pre_pages(object_cache *cache)
692 {
693 	SlabList::Iterator it = cache->full.GetIterator();
694 	while (it.HasNext())
695 		object_cache_commit_slab(cache, it.Next());
696 
697 	it = cache->partial.GetIterator();
698 	while (it.HasNext())
699 		object_cache_commit_slab(cache, it.Next());
700 
701 	it = cache->empty.GetIterator();
702 	while (it.HasNext())
703 		object_cache_commit_slab(cache, it.Next());
704 
705 	cache->allocate_pages = area_allocate_pages;
706 	cache->free_pages = area_free_pages;
707 }
708 
709 
710 static status_t
711 object_cache_resizer(void*)
712 {
713 	while (true) {
714 		MutexLocker locker(sResizeRequestsLock);
715 
716 		// wait for the next request
717 		while (sResizeRequests.IsEmpty()) {
718 			ConditionVariableEntry entry;
719 			sResizeRequestsCondition.Add(&entry);
720 			locker.Unlock();
721 			entry.Wait();
722 			locker.Lock();
723 		}
724 
725 		ResizeRequest* request = sResizeRequests.RemoveHead();
726 
727 		locker.Unlock();
728 
729 		// resize the cache, if necessary
730 
731 		object_cache* cache = request->cache;
732 
733 		MutexLocker cacheLocker(cache->lock);
734 
735 		size_t freeObjects = cache->total_objects - cache->used_count;
736 
737 		while (freeObjects < cache->min_object_reserve) {
738 			status_t error = object_cache_reserve_internal(cache,
739 				cache->min_object_reserve - freeObjects, 0, true);
740 			if (error != B_OK) {
741 				dprintf("object cache resizer: Failed to resize object cache "
742 					"%p!\n", cache);
743 				break;
744 			}
745 
746 			freeObjects = cache->total_objects - cache->used_count;
747 		}
748 
749 		request->pending = false;
750 
751 		if (request->delete_when_done)
752 			delete request;
753 	}
754 }
755 
756 
757 static void
758 increase_object_reserve(object_cache* cache)
759 {
760 	if (cache->resize_request == NULL || cache->resize_request->pending)
761 		return;
762 
763 	cache->resize_request->pending = true;
764 
765 	MutexLocker locker(sResizeRequestsLock);
766 	sResizeRequests.Add(cache->resize_request);
767 	sResizeRequestsCondition.NotifyAll();
768 }
769 
770 
771 static void
772 delete_cache(object_cache *cache)
773 {
774 	cache->~object_cache();
775 	internal_free(cache);
776 }
777 
778 
779 static SmallObjectCache *
780 create_small_object_cache(const char *name, size_t object_size,
781 	size_t alignment, size_t maximum, uint32 flags, void *cookie,
782 	object_cache_constructor constructor, object_cache_destructor destructor,
783 	object_cache_reclaimer reclaimer)
784 {
785 	void *buffer = internal_alloc(sizeof(SmallObjectCache), flags);
786 	if (buffer == NULL)
787 		return NULL;
788 
789 	SmallObjectCache *cache = new (buffer) SmallObjectCache();
790 
791 	if (object_cache_init(cache, name, object_size, alignment, maximum,
792 			flags | CACHE_ALIGN_ON_SIZE, cookie, constructor, destructor,
793 			reclaimer) < B_OK) {
794 		delete_cache(cache);
795 		return NULL;
796 	}
797 
798 	if ((flags & CACHE_LARGE_SLAB) != 0)
799 		cache->slab_size = max_c(16 * B_PAGE_SIZE, 1024 * object_size);
800 	else
801 		cache->slab_size = B_PAGE_SIZE;
802 
803 	return cache;
804 }
805 
806 
807 static HashedObjectCache *
808 create_hashed_object_cache(const char *name, size_t object_size,
809 	size_t alignment, size_t maximum, uint32 flags, void *cookie,
810 	object_cache_constructor constructor, object_cache_destructor destructor,
811 	object_cache_reclaimer reclaimer)
812 {
813 	void *buffer = internal_alloc(sizeof(HashedObjectCache), flags);
814 	if (buffer == NULL)
815 		return NULL;
816 
817 	HashedObjectCache *cache = new (buffer) HashedObjectCache();
818 
819 	if (object_cache_init(cache, name, object_size, alignment, maximum, flags,
820 		cookie, constructor, destructor, reclaimer) < B_OK) {
821 		delete_cache(cache);
822 		return NULL;
823 	}
824 
825 	if ((flags & CACHE_LARGE_SLAB) != 0)
826 		cache->slab_size = max_c(256 * B_PAGE_SIZE, 128 * object_size);
827 	else
828 		cache->slab_size = max_c(16 * B_PAGE_SIZE, 8 * object_size);
829 	cache->lower_boundary = __fls0(cache->object_size);
830 
831 	return cache;
832 }
833 
834 
835 object_cache *
836 create_object_cache(const char *name, size_t object_size, size_t alignment,
837 	void *cookie, object_cache_constructor constructor,
838 	object_cache_destructor destructor)
839 {
840 	return create_object_cache_etc(name, object_size, alignment, 0, 0, cookie,
841 		constructor, destructor, NULL);
842 }
843 
844 
845 object_cache *
846 create_object_cache_etc(const char *name, size_t objectSize, size_t alignment,
847 	size_t maximum, uint32 flags, void *cookie,
848 	object_cache_constructor constructor, object_cache_destructor destructor,
849 	object_cache_reclaimer reclaimer)
850 {
851 	object_cache *cache;
852 
853 	if (objectSize == 0) {
854 		cache = NULL;
855 	} else if (objectSize <= 256) {
856 		cache = create_small_object_cache(name, objectSize, alignment, maximum,
857 			flags, cookie, constructor, destructor, reclaimer);
858 	} else {
859 		cache = create_hashed_object_cache(name, objectSize, alignment,
860 			maximum, flags, cookie, constructor, destructor, reclaimer);
861 	}
862 
863 	T(Create(name, objectSize, alignment, maximum, flags, cookie, cache));
864 	return cache;
865 }
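
// Illustrative sketch only (not part of this file), using a hypothetical
// type my_object: a client typically creates a cache once and then allocates
// and frees objects through it. Objects of up to 256 bytes are backed by a
// SmallObjectCache, larger ones by a HashedObjectCache.
//
//	object_cache *cache = create_object_cache("my objects",
//		sizeof(my_object), 8, NULL, NULL, NULL);
//	my_object *object = (my_object *)object_cache_alloc(cache, 0);
//	// ... use the object ...
//	object_cache_free(cache, object);
//	delete_object_cache(cache);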
866 
867 
868 void
869 delete_object_cache(object_cache *cache)
870 {
871 	T(Delete(cache));
872 
873 	{
874 		MutexLocker _(sObjectCacheListLock);
875 		sObjectCaches.Remove(cache);
876 	}
877 
878 	mutex_lock(&cache->lock);
879 
880 	if (!(cache->flags & CACHE_NO_DEPOT))
881 		object_depot_destroy(&cache->depot);
882 
883 	unregister_low_resource_handler(object_cache_low_memory, cache);
884 
885 	if (!cache->full.IsEmpty())
886 		panic("cache destroy: still has full slabs");
887 
888 	if (!cache->partial.IsEmpty())
889 		panic("cache destroy: still has partial slabs");
890 
891 	while (!cache->empty.IsEmpty())
892 		cache->ReturnSlab(cache->empty.RemoveHead());
893 
894 	mutex_destroy(&cache->lock);
895 	delete_cache(cache);
896 }
897 
898 
899 status_t
900 object_cache_set_minimum_reserve(object_cache *cache, size_t objectCount)
901 {
902 	MutexLocker _(cache->lock);
903 
904 	if (cache->min_object_reserve == objectCount)
905 		return B_OK;
906 
907 	if (cache->min_object_reserve == 0) {
908 		cache->resize_request = new(std::nothrow) ResizeRequest(cache);
909 		if (cache->resize_request == NULL)
910 			return B_NO_MEMORY;
911 	} else if (objectCount == 0) {
912 		if (cache->resize_request->pending)
913 			cache->resize_request->delete_when_done = true;
914 		else
915 			delete cache->resize_request;
916 
917 		cache->resize_request = NULL;
918 	}
919 
920 	cache->min_object_reserve = objectCount;
921 
922 	increase_object_reserve(cache);
923 
924 	return B_OK;
925 }
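
// A minimum reserve makes the object cache resizer thread keep at least that
// many free objects available: increase_object_reserve() queues the cache's
// ResizeRequest, and object_cache_resizer() then calls
// object_cache_reserve_internal() until the reserve is met again.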
926 
927 
928 void *
929 object_cache_alloc(object_cache *cache, uint32 flags)
930 {
931 	if (!(cache->flags & CACHE_NO_DEPOT)) {
932 		void *object = object_depot_obtain(&cache->depot);
933 		if (object) {
934 			T(Alloc(cache, flags, object));
935 			return object;
936 		}
937 	}
938 
939 	MutexLocker _(cache->lock);
940 	slab *source;
941 
942 	if (cache->partial.IsEmpty()) {
943 		if (cache->empty.IsEmpty()) {
944 			if (object_cache_reserve_internal(cache, 1, flags, false) < B_OK) {
945 				T(Alloc(cache, flags, NULL));
946 				return NULL;
947 			}
948 
949 			cache->pressure++;
950 		}
951 
952 		source = cache->empty.RemoveHead();
953 		cache->empty_count--;
954 		cache->partial.Add(source);
955 	} else {
956 		source = cache->partial.Head();
957 	}
958 
959 	ParanoiaChecker _2(source);
960 
961 	object_link *link = _pop(source->free);
962 	source->count--;
963 	cache->used_count++;
964 
965 	if (cache->total_objects - cache->used_count < cache->min_object_reserve)
966 		increase_object_reserve(cache);
967 
968 	REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next,
969 		sizeof(void*));
970 
971 	TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.",
972 		link_to_object(link, cache->object_size), link, source, source->count);
973 
974 	if (source->count == 0) {
975 		cache->partial.Remove(source);
976 		cache->full.Add(source);
977 	}
978 
979 	void *object = link_to_object(link, cache->object_size);
980 	T(Alloc(cache, flags, object));
981 	return object;
982 }
983 
984 
985 static void
986 object_cache_return_to_slab(object_cache *cache, slab *source, void *object)
987 {
988 	if (source == NULL)
989 		panic("object_cache: free'd object has no slab");
990 
991 	ParanoiaChecker _(source);
992 
993 	object_link *link = object_to_link(object, cache->object_size);
994 
995 	TRACE_CACHE(cache, "returning %p (%p) to %p, %lu used (%lu empty slabs).",
996 		object, link, source, source->size - source->count,
997 		cache->empty_count);
998 
999 	_push(source->free, link);
1000 	source->count++;
1001 	cache->used_count--;
1002 
1003 	ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next, sizeof(void*));
1004 
1005 	if (source->count == source->size) {
1006 		cache->partial.Remove(source);
1007 
1008 		if (cache->empty_count < cache->pressure
1009 			&& cache->total_objects - cache->used_count - source->size
1010 				>= cache->min_object_reserve) {
1011 			cache->empty_count++;
1012 			cache->empty.Add(source);
1013 		} else {
1014 			cache->ReturnSlab(source);
1015 		}
1016 	} else if (source->count == 1) {
1017 		cache->full.Remove(source);
1018 		cache->partial.Add(source);
1019 	}
1020 }
1021 
1022 
1023 void
1024 object_cache_free(object_cache *cache, void *object)
1025 {
1026 	if (object == NULL)
1027 		return;
1028 
1029 	T(Free(cache, object));
1030 
1031 	if (!(cache->flags & CACHE_NO_DEPOT)) {
1032 		if (object_depot_store(&cache->depot, object))
1033 			return;
1034 	}
1035 
1036 	MutexLocker _(cache->lock);
1037 	object_cache_return_to_slab(cache, cache->ObjectSlab(object), object);
1038 }
1039 
1040 
1041 static status_t
1042 object_cache_reserve_internal(object_cache *cache, size_t objectCount,
1043 	uint32 flags, bool unlockWhileAllocating)
1044 {
1045 	size_t numBytes = objectCount * cache->object_size;
1046 	size_t slabCount = ((numBytes - 1) / cache->slab_size) + 1;
1047 		// TODO: This also counts the unusable space of each slab, which can
1048 		// sum up.
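	// The division rounds up; e.g. reserving 50 objects of 100 bytes with a
	// 4096 byte slab yields numBytes == 5000 and slabCount == 2 (values are
	// purely illustrative).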
1049 
1050 	while (slabCount > 0) {
1051 		slab *newSlab = cache->CreateSlab(flags, unlockWhileAllocating);
1052 		if (newSlab == NULL)
1053 			return B_NO_MEMORY;
1054 
1055 		cache->empty.Add(newSlab);
1056 		cache->empty_count++;
1057 		slabCount--;
1058 	}
1059 
1060 	return B_OK;
1061 }
1062 
1063 
1064 status_t
1065 object_cache_reserve(object_cache *cache, size_t objectCount, uint32 flags)
1066 {
1067 	if (objectCount == 0)
1068 		return B_OK;
1069 
1070 	T(Reserve(cache, objectCount, flags));
1071 
1072 	MutexLocker _(cache->lock);
1073 	return object_cache_reserve_internal(cache, objectCount, flags, false);
1074 }
1075 
1076 
1077 void
1078 object_cache_get_usage(object_cache *cache, size_t *_allocatedMemory)
1079 {
1080 	MutexLocker _(cache->lock);
1081 	*_allocatedMemory = cache->usage;
1082 }
1083 
1084 
1085 slab *
1086 object_cache::InitSlab(slab *slab, void *pages, size_t byteCount)
1087 {
1088 	TRACE_CACHE(this, "construct (%p, %p .. %p, %lu)", slab, pages,
1089 		((uint8 *)pages) + byteCount, byteCount);
1090 
1091 	slab->pages = pages;
1092 	slab->count = slab->size = byteCount / object_size;
1093 	slab->free = NULL;
1094 	total_objects += slab->size;
1095 
1096 	size_t spareBytes = byteCount - (slab->size * object_size);
1097 	slab->offset = cache_color_cycle;
1098 
1099 	if (slab->offset > spareBytes)
1100 		cache_color_cycle = slab->offset = 0;
1101 	else
1102 		cache_color_cycle += kCacheColorPeriod;
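	// Simple cache coloring: consecutive slabs start their objects at offsets
	// 0, kCacheColorPeriod, 2 * kCacheColorPeriod, ... within the slab's
	// spare bytes, wrapping back to 0 once the spare space is exceeded, so
	// that objects of different slabs don't all share the same cache lines.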
1103 
1104 	TRACE_CACHE(this, "  %lu objects, %lu spare bytes, offset %lu",
1105 		slab->size, spareBytes, slab->offset);
1106 
1107 	uint8 *data = ((uint8 *)pages) + slab->offset;
1108 
1109 	CREATE_PARANOIA_CHECK_SET(slab, "slab");
1110 
1111 	for (size_t i = 0; i < slab->size; i++) {
1112 		bool failedOnFirst = false;
1113 
1114 		status_t status = PrepareObject(slab, data);
1115 		if (status < B_OK)
1116 			failedOnFirst = true;
1117 		else if (constructor)
1118 			status = constructor(cookie, data);
1119 
1120 		if (status < B_OK) {
1121 			if (!failedOnFirst)
1122 				UnprepareObject(slab, data);
1123 
1124 			data = ((uint8 *)pages) + slab->offset;
1125 			for (size_t j = 0; j < i; j++) {
1126 				if (destructor)
1127 					destructor(cookie, data);
1128 				UnprepareObject(slab, data);
1129 				data += object_size;
1130 			}
1131 
1132 			DELETE_PARANOIA_CHECK_SET(slab);
1133 
1134 			return NULL;
1135 		}
1136 
1137 		_push(slab->free, object_to_link(data, object_size));
1138 
1139 		ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, slab,
1140 			&object_to_link(data, object_size)->next, sizeof(void*));
1141 
1142 		data += object_size;
1143 	}
1144 
1145 	return slab;
1146 }
1147 
1148 
1149 void
1150 object_cache::UninitSlab(slab *slab)
1151 {
1152 	TRACE_CACHE(this, "destruct %p", slab);
1153 
1154 	if (slab->count != slab->size)
1155 		panic("cache: destroying a slab which isn't empty.");
1156 
1157 	total_objects -= slab->size;
1158 
1159 	DELETE_PARANOIA_CHECK_SET(slab);
1160 
1161 	uint8 *data = ((uint8 *)slab->pages) + slab->offset;
1162 
1163 	for (size_t i = 0; i < slab->size; i++) {
1164 		if (destructor)
1165 			destructor(cookie, data);
1166 		UnprepareObject(slab, data);
1167 		data += object_size;
1168 	}
1169 }
1170 
1171 
1172 static inline slab *
1173 slab_in_pages(const void *pages, size_t slab_size)
1174 {
1175 	return (slab *)(((uint8 *)pages) + slab_size - sizeof(slab));
1176 }
1177 
1178 
1179 static inline const void *
1180 lower_boundary(void *object, size_t byteCount)
1181 {
1182 	const uint8 *null = (uint8 *)NULL;
1183 	return null + ((((uint8 *)object) - null) & ~(byteCount - 1));
1184 }
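
// For a SmallObjectCache the slab header lives in the last sizeof(slab) bytes
// of the slab's page run (CreateSlab() below passes slab_size - sizeof(slab)
// as the usable byte count), so ObjectSlab() can recover it by rounding the
// object address down to the slab boundary and applying slab_in_pages().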
1185 
1186 
1187 static inline bool
1188 check_cache_quota(object_cache *cache)
1189 {
1190 	if (cache->maximum == 0)
1191 		return true;
1192 
1193 	return (cache->usage + cache->slab_size) <= cache->maximum;
1194 }
1195 
1196 
1197 slab *
1198 SmallObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating)
1199 {
1200 	if (!check_cache_quota(this))
1201 		return NULL;
1202 
1203 	void *pages;
1204 
1205 	if (allocate_pages(this, &pages, flags, unlockWhileAllocating) < B_OK)
1206 		return NULL;
1207 
1208 	return InitSlab(slab_in_pages(pages, slab_size), pages,
1209 		slab_size - sizeof(slab));
1210 }
1211 
1212 
1213 void
1214 SmallObjectCache::ReturnSlab(slab *slab)
1215 {
1216 	UninitSlab(slab);
1217 	free_pages(this, slab->pages);
1218 }
1219 
1220 
1221 slab *
1222 SmallObjectCache::ObjectSlab(void *object) const
1223 {
1224 	return slab_in_pages(lower_boundary(object, slab_size), slab_size);
1225 }
1226 
1227 
1228 static slab *
1229 allocate_slab(uint32 flags)
1230 {
1231 	return (slab *)internal_alloc(sizeof(slab), flags);
1232 }
1233 
1234 
1235 static void
1236 free_slab(slab *slab)
1237 {
1238 	internal_free(slab);
1239 }
1240 
1241 
1242 static HashedObjectCache::Link *
1243 allocate_link(uint32 flags)
1244 {
1245 	return (HashedObjectCache::Link *)
1246 		internal_alloc(sizeof(HashedObjectCache::Link), flags);
1247 }
1248 
1249 
1250 static void
1251 free_link(HashedObjectCache::Link *link)
1252 {
1253 	internal_free(link);
1254 }
1255 
1256 
1257 slab *
1258 HashedObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating)
1259 {
1260 	if (!check_cache_quota(this))
1261 		return NULL;
1262 
1263 	if (unlockWhileAllocating)
1264 		Unlock();
1265 
1266 	slab *slab = allocate_slab(flags);
1267 
1268 	if (unlockWhileAllocating)
1269 		Lock();
1270 
1271 	if (slab == NULL)
1272 		return NULL;
1273 
1274 	void *pages;
1275 
1276 	if (allocate_pages(this, &pages, flags, unlockWhileAllocating) == B_OK) {
1277 		if (InitSlab(slab, pages, slab_size))
1278 			return slab;
1279 
1280 		free_pages(this, pages);
1281 	}
1282 
1283 	free_slab(slab);
1284 	return NULL;
1285 }
1286 
1287 
1288 void
1289 HashedObjectCache::ReturnSlab(slab *slab)
1290 {
1291 	UninitSlab(slab);
1292 	free_pages(this, slab->pages);
1293 }
1294 
1295 
1296 slab *
1297 HashedObjectCache::ObjectSlab(void *object) const
1298 {
1299 	Link *link = hash_table.Lookup(object);
1300 	if (link == NULL) {
1301 		panic("object cache: requested object %p missing from hash table",
1302 			object);
1303 	}
1304 	return link->parent;
1305 }
1306 
1307 
1308 status_t
1309 HashedObjectCache::PrepareObject(slab *source, void *object)
1310 {
1311 	Link *link = allocate_link(CACHE_DONT_SLEEP);
1312 	if (link == NULL)
1313 		return B_NO_MEMORY;
1314 
1315 	link->buffer = object;
1316 	link->parent = source;
1317 
1318 	hash_table.Insert(link);
1319 	return B_OK;
1320 }
1321 
1322 
1323 void
1324 HashedObjectCache::UnprepareObject(slab *source, void *object)
1325 {
1326 	Link *link = hash_table.Lookup(object);
1327 	if (link == NULL)
1328 		panic("object cache: requested object missing from hash table");
1329 	if (link->parent != source)
1330 		panic("object cache: slab mismatch");
1331 
1332 	hash_table.Remove(link);
1333 	free_link(link);
1334 }
1335 
1336 
1337 static inline bool
1338 is_magazine_empty(depot_magazine *magazine)
1339 {
1340 	return magazine->current_round == 0;
1341 }
1342 
1343 
1344 static inline bool
1345 is_magazine_full(depot_magazine *magazine)
1346 {
1347 	return magazine->current_round == magazine->round_count;
1348 }
1349 
1350 
1351 static inline void *
1352 pop_magazine(depot_magazine *magazine)
1353 {
1354 	return magazine->rounds[--magazine->current_round];
1355 }
1356 
1357 
1358 static inline bool
1359 push_magazine(depot_magazine *magazine, void *object)
1360 {
1361 	if (is_magazine_full(magazine))
1362 		return false;
1363 	magazine->rounds[magazine->current_round++] = object;
1364 	return true;
1365 }
1366 
1367 
1368 static bool
1369 exchange_with_full(object_depot *depot, depot_magazine* &magazine)
1370 {
1371 	RecursiveLocker _(depot->lock);
1372 
1373 	if (depot->full == NULL)
1374 		return false;
1375 
1376 	depot->full_count--;
1377 	depot->empty_count++;
1378 
1379 	_push(depot->empty, magazine);
1380 	magazine = _pop(depot->full);
1381 	return true;
1382 }
1383 
1384 
1385 static bool
1386 exchange_with_empty(object_depot *depot, depot_magazine* &magazine)
1387 {
1388 	RecursiveLocker _(depot->lock);
1389 
1390 	if (depot->empty == NULL) {
1391 		depot->empty = alloc_magazine();
1392 		if (depot->empty == NULL)
1393 			return false;
1394 	} else {
1395 		depot->empty_count--;
1396 	}
1397 
1398 	if (magazine) {
1399 		_push(depot->full, magazine);
1400 		depot->full_count++;
1401 	}
1402 
1403 	magazine = _pop(depot->empty);
1404 	return true;
1405 }
1406 
1407 
1408 static depot_magazine *
1409 alloc_magazine()
1410 {
1411 	depot_magazine *magazine = (depot_magazine *)internal_alloc(
1412 		sizeof(depot_magazine) + kMagazineCapacity * sizeof(void *), 0);
1413 	if (magazine) {
1414 		magazine->next = NULL;
1415 		magazine->current_round = 0;
1416 		magazine->round_count = kMagazineCapacity;
1417 	}
1418 
1419 	return magazine;
1420 }
1421 
1422 
1423 static void
1424 free_magazine(depot_magazine *magazine)
1425 {
1426 	internal_free(magazine);
1427 }
1428 
1429 
1430 static void
1431 empty_magazine(object_depot *depot, depot_magazine *magazine)
1432 {
1433 	for (uint16 i = 0; i < magazine->current_round; i++)
1434 		depot->return_object(depot, magazine->rounds[i]);
1435 	free_magazine(magazine);
1436 }
1437 
1438 
1439 status_t
1440 object_depot_init(object_depot *depot, uint32 flags,
1441 	void (*return_object)(object_depot *depot, void *object))
1442 {
1443 	depot->full = NULL;
1444 	depot->empty = NULL;
1445 	depot->full_count = depot->empty_count = 0;
1446 
1447 	recursive_lock_init(&depot->lock, "depot");
1448 
1449 	depot->stores = (depot_cpu_store *)internal_alloc(sizeof(depot_cpu_store)
1450 		* smp_get_num_cpus(), flags);
1451 	if (depot->stores == NULL) {
1452 		recursive_lock_destroy(&depot->lock);
1453 		return B_NO_MEMORY;
1454 	}
1455 
1456 	for (int i = 0; i < smp_get_num_cpus(); i++) {
1457 		recursive_lock_init(&depot->stores[i].lock, "cpu store");
1458 		depot->stores[i].loaded = depot->stores[i].previous = NULL;
1459 	}
1460 
1461 	depot->return_object = return_object;
1462 
1463 	return B_OK;
1464 }
1465 
1466 
1467 void
1468 object_depot_destroy(object_depot *depot)
1469 {
1470 	object_depot_make_empty(depot);
1471 
1472 	for (int i = 0; i < smp_get_num_cpus(); i++) {
1473 		recursive_lock_destroy(&depot->stores[i].lock);
1474 	}
1475 
1476 	internal_free(depot->stores);
1477 
1478 	recursive_lock_destroy(&depot->lock);
1479 }
1480 
1481 
1482 static void *
1483 object_depot_obtain_from_store(object_depot *depot, depot_cpu_store *store)
1484 {
1485 	RecursiveLocker _(store->lock);
1486 
1487 	// To better understand both the Alloc() and Free() logic refer to
1488 	// Bonwick's ``Magazines and Vmem'' [in 2001 USENIX proceedings]
1489 
1490 	// In a nutshell, we try to get an object from the loaded magazine
1491 	// if it's not empty, or from the previous magazine if it's full
1492 	// and finally from the Slab if the magazine depot has no full magazines.
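	// For example: an empty loaded magazine is swapped with a full previous
	// one; if both are empty, we try to trade the previous magazine for a
	// full one from the depot, and only if that fails does the caller fall
	// back to the slab layer.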
1493 
1494 	if (store->loaded == NULL)
1495 		return NULL;
1496 
1497 	while (true) {
1498 		if (!is_magazine_empty(store->loaded))
1499 			return pop_magazine(store->loaded);
1500 
1501 		if (store->previous && (is_magazine_full(store->previous)
1502 			|| exchange_with_full(depot, store->previous)))
1503 			std::swap(store->previous, store->loaded);
1504 		else
1505 			return NULL;
1506 	}
1507 }
1508 
1509 
1510 static int
1511 object_depot_return_to_store(object_depot *depot, depot_cpu_store *store,
1512 	void *object)
1513 {
1514 	RecursiveLocker _(store->lock);
1515 
1516 	// We try to add the object to the loaded magazine if we have one
1517 	// and it's not full, or to the previous one if it is empty. If
1518 	// the magazine depot doesn't provide us with a new empty magazine
1519 	// we return the object directly to the slab.
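	// This mirrors the obtain path above: a full loaded magazine is swapped
	// with an empty previous one, or the previous magazine is returned to the
	// depot in exchange for an empty one (allocating a fresh magazine if the
	// depot has none); if that fails we return 0 and object_cache_free()
	// falls back to the slab.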
1520 
1521 	while (true) {
1522 		if (store->loaded && push_magazine(store->loaded, object))
1523 			return 1;
1524 
1525 		if ((store->previous && is_magazine_empty(store->previous))
1526 			|| exchange_with_empty(depot, store->previous))
1527 			std::swap(store->loaded, store->previous);
1528 		else
1529 			return 0;
1530 	}
1531 }
1532 
1533 
1534 void *
1535 object_depot_obtain(object_depot *depot)
1536 {
1537 	return object_depot_obtain_from_store(depot, object_depot_cpu(depot));
1538 }
1539 
1540 
1541 int
1542 object_depot_store(object_depot *depot, void *object)
1543 {
1544 	return object_depot_return_to_store(depot, object_depot_cpu(depot),
1545 		object);
1546 }
1547 
1548 
1549 void
1550 object_depot_make_empty(object_depot *depot)
1551 {
1552 	for (int i = 0; i < smp_get_num_cpus(); i++) {
1553 		depot_cpu_store *store = &depot->stores[i];
1554 
1555 		RecursiveLocker _(store->lock);
1556 
1557 		if (depot->stores[i].loaded)
1558 			empty_magazine(depot, depot->stores[i].loaded);
1559 		if (depot->stores[i].previous)
1560 			empty_magazine(depot, depot->stores[i].previous);
1561 		depot->stores[i].loaded = depot->stores[i].previous = NULL;
1562 	}
1563 
1564 	RecursiveLocker _(depot->lock);
1565 
1566 	while (depot->full)
1567 		empty_magazine(depot, _pop(depot->full));
1568 
1569 	while (depot->empty)
1570 		empty_magazine(depot, _pop(depot->empty));
1571 }
1572 
1573 
1574 static int
1575 dump_slabs(int argc, char *argv[])
1576 {
1577 	kprintf("%10s %22s %8s %8s %6s %8s %8s %8s\n", "address", "name",
1578 		"objsize", "usage", "empty", "usedobj", "total", "flags");
1579 
1580 	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
1581 
1582 	while (it.HasNext()) {
1583 		object_cache *cache = it.Next();
1584 
1585 		kprintf("%p %22s %8lu %8lu %6lu %8lu %8lu %8lx\n", cache, cache->name,
1586 			cache->object_size, cache->usage, cache->empty_count,
1587 			cache->used_count, cache->usage / cache->object_size,
1588 			cache->flags);
1589 	}
1590 
1591 	return 0;
1592 }
1593 
1594 
1595 static int
1596 dump_cache_info(int argc, char *argv[])
1597 {
1598 	if (argc < 2) {
1599 		kprintf("usage: cache_info <address>\n");
1600 		return 0;
1601 	}
1602 
1603 	object_cache *cache = (object_cache *)strtoul(argv[1], NULL, 16);
1604 
1605 	kprintf("name: %s\n", cache->name);
1606 	kprintf("lock: %p\n", &cache->lock);
1607 	kprintf("object_size: %lu\n", cache->object_size);
1608 	kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle);
1609 	kprintf("used_count: %lu\n", cache->used_count);
1610 	kprintf("empty_count: %lu\n", cache->empty_count);
1611 	kprintf("pressure: %lu\n", cache->pressure);
1612 	kprintf("slab_size: %lu\n", cache->slab_size);
1613 	kprintf("usage: %lu\n", cache->usage);
1614 	kprintf("maximum: %lu\n", cache->maximum);
1615 	kprintf("flags: 0x%lx\n", cache->flags);
1616 	kprintf("cookie: %p\n", cache->cookie);
1617 
1618 	return 0;
1619 }
1620 
1621 
1622 void
1623 slab_init(kernel_args *args, addr_t initialBase, size_t initialSize)
1624 {
1625 	dprintf("slab: init base %p + 0x%lx\n", (void *)initialBase, initialSize);
1626 
1627 	sInitialBegin = (uint8 *)initialBase;
1628 	sInitialLimit = sInitialBegin + initialSize;
1629 	sInitialPointer = sInitialBegin;
1630 
1631 	sKernelArgs = args;
1632 
1633 	new (&sObjectCaches) ObjectCacheList();
1634 
1635 	block_allocator_init_boot();
1636 
1637 	add_debugger_command("slabs", dump_slabs, "list all object caches");
1638 	add_debugger_command("cache_info", dump_cache_info,
1639 		"dump information about a specific cache");
1640 }
1641 
1642 
1643 void
1644 slab_init_post_sem()
1645 {
1646 	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
1647 
1648 	while (it.HasNext()) {
1649 		object_cache *cache = it.Next();
1650 		if (cache->allocate_pages == early_allocate_pages)
1651 			object_cache_commit_pre_pages(cache);
1652 	}
1653 
1654 	block_allocator_init_rest();
1655 }
1656 
1657 
1658 void
1659 slab_init_post_thread()
1660 {
1661 	new(&sResizeRequests) ResizeRequestQueue;
1662 	sResizeRequestsCondition.Init(&sResizeRequests, "object cache resizer");
1663 
1664 	thread_id objectCacheResizer = spawn_kernel_thread(object_cache_resizer,
1665 		"object cache resizer", B_URGENT_PRIORITY, NULL);
1666 	if (objectCacheResizer < 0) {
1667 		panic("slab_init_post_thread(): failed to spawn object cache resizer "
1668 			"thread\n");
1669 		return;
1670 	}
1671 
1672 	send_signal_etc(objectCacheResizer, SIGCONT, B_DO_NOT_RESCHEDULE);
1673 }
1674