xref: /haiku/src/system/kernel/slab/Slab.cpp (revision 35ead8815b679605a9b4db8130613ea100f4b14c)
1 /*
2  * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
3  * Copyright 2008-2010, Axel Dörfler. All Rights Reserved.
4  * Copyright 2007, Hugo Santos. All Rights Reserved.
5  *
6  * Distributed under the terms of the MIT License.
7  */
8 
9 
10 #include <slab/Slab.h>
11 
12 #include <algorithm>
13 #include <new>
14 #include <signal.h>
15 #include <stdlib.h>
16 #include <string.h>
17 
18 #include <KernelExport.h>
19 
20 #include <condition_variable.h>
21 #include <kernel.h>
22 #include <low_resource_manager.h>
23 #include <slab/ObjectDepot.h>
24 #include <smp.h>
25 #include <tracing.h>
26 #include <util/AutoLock.h>
27 #include <util/DoublyLinkedList.h>
28 #include <util/khash.h>
29 #include <vm/vm.h>
30 #include <vm/VMAddressSpace.h>
31 
32 #include "HashedObjectCache.h"
33 #include "MemoryManager.h"
34 #include "slab_private.h"
35 #include "SmallObjectCache.h"
36 
37 
// List type holding all registered object caches (uses ObjectCache's
// default link member).
typedef DoublyLinkedList<ObjectCache> ObjectCacheList;

// Queue of caches with pending maintenance (resize/delete) requests. It
// uses the dedicated maintenance_link member, so a cache can be on this
// queue and on sObjectCaches at the same time.
typedef DoublyLinkedList<ObjectCache,
	DoublyLinkedListMemberGetLink<ObjectCache, &ObjectCache::maintenance_link> >
		MaintenanceQueue;

// All live object caches; guarded by sObjectCacheListLock.
static ObjectCacheList sObjectCaches;
static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");

// sMaintenanceLock guards sMaintenanceQueue and the caches'
// maintenance_* flags; sMaintenanceCondition wakes the maintainer thread.
static mutex sMaintenanceLock
	= MUTEX_INITIALIZER("object cache resize requests");
static MaintenanceQueue sMaintenanceQueue;
static ConditionVariable sMaintenanceCondition;
51 
52 
#if SLAB_OBJECT_CACHE_TRACING


namespace SlabObjectCacheTracing {

// Common base for all object cache trace entries: remembers which cache
// the traced operation was performed on.
class ObjectCacheTraceEntry : public AbstractTraceEntry {
	public:
		ObjectCacheTraceEntry(ObjectCache* cache)
			:
			fCache(cache)
		{
		}

	protected:
		ObjectCache*	fCache;
};


// Traces object cache creation (see create_object_cache_etc()).
class Create : public ObjectCacheTraceEntry {
	public:
		Create(const char* name, size_t objectSize, size_t alignment,
				size_t maxByteUsage, uint32 flags, void* cookie,
				ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache),
			fObjectSize(objectSize),
			fAlignment(alignment),
			fMaxByteUsage(maxByteUsage),
			fFlags(flags),
			fCookie(cookie)
		{
			// Copy the name into the tracing buffer; the caller's string
			// may be gone by the time the entry is dumped.
			fName = alloc_tracing_buffer_strcpy(name, 64, false);
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache create: name: \"%s\", object size: %lu, "
				"alignment: %lu, max usage: %lu, flags: 0x%lx, cookie: %p "
				"-> cache: %p", fName, fObjectSize, fAlignment, fMaxByteUsage,
					fFlags, fCookie, fCache);
		}

	private:
		const char*	fName;
		size_t		fObjectSize;
		size_t		fAlignment;
		size_t		fMaxByteUsage;
		uint32		fFlags;
		void*		fCookie;
};


// Traces object cache deletion (see delete_object_cache()).
class Delete : public ObjectCacheTraceEntry {
	public:
		Delete(ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache delete: %p", fCache);
		}
};


// Traces a single object allocation (see object_cache_alloc()).
class Alloc : public ObjectCacheTraceEntry {
	public:
		Alloc(ObjectCache* cache, uint32 flags, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fFlags(flags),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache alloc: cache: %p, flags: 0x%lx -> "
				"object: %p", fCache, fFlags, fObject);
		}

	private:
		uint32		fFlags;
		void*		fObject;
};


// Traces a single object release (see object_cache_free()).
class Free : public ObjectCacheTraceEntry {
	public:
		Free(ObjectCache* cache, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache free: cache: %p, object: %p", fCache,
				fObject);
		}

	private:
		void*		fObject;
};


// Traces an explicit reservation request (see object_cache_reserve()).
class Reserve : public ObjectCacheTraceEntry {
	public:
		Reserve(ObjectCache* cache, size_t count, uint32 flags)
			:
			ObjectCacheTraceEntry(cache),
			fCount(count),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache reserve: cache: %p, count: %lu, "
				"flags: 0x%lx", fCache, fCount, fFlags);
		}

	private:
		size_t		fCount;
			// size_t (previously uint32): the constructor receives a
			// size_t and the value is printed with %lu; storing it in a
			// uint32 would truncate on 64-bit builds. This also matches
			// how Create stores its size_t parameters.
		uint32		fFlags;
};


}	// namespace SlabObjectCacheTracing

#	define T(x)	new(std::nothrow) SlabObjectCacheTracing::x

#else
#	define T(x)
#endif	// SLAB_OBJECT_CACHE_TRACING
196 
197 
198 // #pragma mark -
199 
200 
// KDL helper: print one table row for a slab — header address, backing
// pages, object capacity, count (the slab's remaining free objects; it is
// decremented on allocation in object_cache_alloc()), cache color offset,
// and the head of the free list.
static void
dump_slab(::slab* slab)
{
	kprintf("  %p  %p  %6" B_PRIuSIZE " %6" B_PRIuSIZE " %6" B_PRIuSIZE "  %p\n",
		slab, slab->pages, slab->size, slab->count, slab->offset, slab->free);
}
207 
208 
209 static int
210 dump_slabs(int argc, char* argv[])
211 {
212 	kprintf("%10s %22s %8s %8s %6s %8s %8s %8s\n", "address", "name",
213 		"objsize", "usage", "empty", "usedobj", "total", "flags");
214 
215 	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
216 
217 	while (it.HasNext()) {
218 		ObjectCache* cache = it.Next();
219 
220 		kprintf("%p %22s %8lu %8lu %6lu %8lu %8lu %8lx\n", cache, cache->name,
221 			cache->object_size, cache->usage, cache->empty_count,
222 			cache->used_count, cache->total_objects, cache->flags);
223 	}
224 
225 	return 0;
226 }
227 
228 
229 static int
230 dump_cache_info(int argc, char* argv[])
231 {
232 	if (argc < 2) {
233 		kprintf("usage: cache_info [address]\n");
234 		return 0;
235 	}
236 
237 	ObjectCache* cache = (ObjectCache*)parse_expression(argv[1]);
238 
239 	kprintf("name:              %s\n", cache->name);
240 	kprintf("lock:              %p\n", &cache->lock);
241 	kprintf("object_size:       %lu\n", cache->object_size);
242 	kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle);
243 	kprintf("total_objects:     %lu\n", cache->total_objects);
244 	kprintf("used_count:        %lu\n", cache->used_count);
245 	kprintf("empty_count:       %lu\n", cache->empty_count);
246 	kprintf("pressure:          %lu\n", cache->pressure);
247 	kprintf("slab_size:         %lu\n", cache->slab_size);
248 	kprintf("usage:             %lu\n", cache->usage);
249 	kprintf("maximum:           %lu\n", cache->maximum);
250 	kprintf("flags:             0x%lx\n", cache->flags);
251 	kprintf("cookie:            %p\n", cache->cookie);
252 	kprintf("resize entry don't wait: %p\n", cache->resize_entry_dont_wait);
253 	kprintf("resize entry can wait:   %p\n", cache->resize_entry_can_wait);
254 
255 	kprintf("  slab        chunk         size   used offset  free\n");
256 
257 	SlabList::Iterator iterator = cache->empty.GetIterator();
258 	if (iterator.HasNext())
259 		kprintf("empty:\n");
260 	while (::slab* slab = iterator.Next())
261 		dump_slab(slab);
262 
263 	iterator = cache->partial.GetIterator();
264 	if (iterator.HasNext())
265 		kprintf("partial:\n");
266 	while (::slab* slab = iterator.Next())
267 		dump_slab(slab);
268 
269 	iterator = cache->full.GetIterator();
270 	if (iterator.HasNext())
271 		kprintf("full:\n");
272 	while (::slab* slab = iterator.Next())
273 		dump_slab(slab);
274 
275 	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
276 		kprintf("depot:\n");
277 		dump_object_depot(&cache->depot);
278 	}
279 
280 	return 0;
281 }
282 
283 
284 // #pragma mark -
285 
286 
287 void
288 request_memory_manager_maintenance()
289 {
290 	MutexLocker locker(sMaintenanceLock);
291 	sMaintenanceCondition.NotifyAll();
292 }
293 
294 
295 // #pragma mark -
296 
297 
// Final teardown of a cache. The caller guarantees no-one references the
// cache anymore (it has been removed from sObjectCaches and any pending
// maintenance has been unscheduled or handed over to us).
static void
delete_object_cache_internal(object_cache* cache)
{
	// First flush any objects cached in the depot back to the slabs; the
	// depot has its own locking, so this happens before taking cache->lock.
	if (!(cache->flags & CACHE_NO_DEPOT))
		object_depot_destroy(&cache->depot, 0);

	mutex_lock(&cache->lock);

	// Any remaining allocated object is a caller bug — all objects must
	// have been freed before deleting the cache.
	if (!cache->full.IsEmpty())
		panic("cache destroy: still has full slabs");

	if (!cache->partial.IsEmpty())
		panic("cache destroy: still has partial slabs");

	// Return the memory of the (now only empty) slabs.
	while (!cache->empty.IsEmpty())
		cache->ReturnSlab(cache->empty.RemoveHead(), 0);

	mutex_destroy(&cache->lock);
	cache->Delete();
}
318 
319 
320 static void
321 increase_object_reserve(ObjectCache* cache)
322 {
323 	MutexLocker locker(sMaintenanceLock);
324 
325 	cache->maintenance_resize = true;
326 
327 	if (!cache->maintenance_pending) {
328 		cache->maintenance_pending = true;
329 		sMaintenanceQueue.Add(cache);
330 		sMaintenanceCondition.NotifyAll();
331 	}
332 }
333 
334 
/*!	Makes sure that \a objectCount objects can be allocated.
	The caller must hold \c cache->lock. The lock may be dropped
	temporarily, both while waiting for another thread's resize to finish
	and while creating new slabs.
*/
static status_t
object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
	uint32 flags)
{
	// If someone else is already adding slabs, we wait for that to be finished
	// first.
	thread_id thread = find_thread(NULL);
	while (true) {
		// Enough free objects already?
		if (objectCount <= cache->total_objects - cache->used_count)
			return B_OK;

		ObjectCacheResizeEntry* resizeEntry = NULL;
		if (cache->resize_entry_dont_wait != NULL) {
			resizeEntry = cache->resize_entry_dont_wait;
			// Waiting for our own resize would deadlock.
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;
			// Note: We could still have reentered the function, i.e.
			// resize_entry_can_wait would be ours. That doesn't matter much,
			// though, since after the don't-wait thread has done its job
			// everyone will be happy.
		} else if (cache->resize_entry_can_wait != NULL) {
			resizeEntry = cache->resize_entry_can_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;

			// A don't-wait request must not block on a can-wait resizer
			// (which may sleep for memory) — add slabs ourselves instead.
			if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0)
				break;
		} else
			break;

		// Wait for the active resizer to finish, with the cache unlocked,
		// then re-evaluate from the top.
		ConditionVariableEntry entry;
		resizeEntry->condition.Add(&entry);

		cache->Unlock();
		entry.Wait();
		cache->Lock();
	}

	// prepare the resize entry others can wait on
	ObjectCacheResizeEntry*& resizeEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;

	// The entry lives on our stack; it is unpublished (the cache field is
	// reset to NULL) on every exit path below, so waiters never see a
	// dangling pointer.
	ObjectCacheResizeEntry myResizeEntry;
	resizeEntry = &myResizeEntry;
	resizeEntry->condition.Init(cache, "wait for slabs");
	resizeEntry->thread = thread;

	// add new slabs until there are as many free ones as requested
	while (objectCount > cache->total_objects - cache->used_count) {
		// CreateSlab() may drop cache->lock while allocating.
		slab* newSlab = cache->CreateSlab(flags);
		if (newSlab == NULL) {
			// Wake waiters even on failure; they re-check the free object
			// count themselves.
			resizeEntry->condition.NotifyAll();
			resizeEntry = NULL;
			return B_NO_MEMORY;
		}

		cache->usage += cache->slab_size;
		cache->total_objects += newSlab->size;

		cache->empty.Add(newSlab);
		cache->empty_count++;
	}

	resizeEntry->condition.NotifyAll();
	resizeEntry = NULL;

	return B_OK;
}
406 
407 
// Low resource handler (registered in slab_init_post_sem()): walks all
// object caches and returns empty slabs to the memory manager, the more
// aggressively the more severe the shortage (\a level) is.
static void
object_cache_low_memory(void* dummy, uint32 resources, int32 level)
{
	if (level == B_NO_LOW_RESOURCE)
		return;

	MutexLocker cacheListLocker(sObjectCacheListLock);

	// Append the first cache to the end of the queue. We assume that it is
	// one of the caches that will never be deleted and thus we use it as a
	// marker.
	ObjectCache* firstCache = sObjectCaches.RemoveHead();
	sObjectCaches.Add(firstCache);
	cacheListLocker.Unlock();

	ObjectCache* cache;
	do {
		cacheListLocker.Lock();

		// Rotate the list head to the tail, so we can keep iterating even
		// though the list lock is dropped while we work on each cache.
		cache = sObjectCaches.RemoveHead();
		sObjectCaches.Add(cache);

		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_pending || cache->maintenance_in_progress) {
			// We don't want to mess with caches in maintenance.
			continue;
		}

		// Mark the cache busy so the maintainer thread and
		// delete_object_cache() keep their hands off it meanwhile.
		cache->maintenance_pending = true;
		cache->maintenance_in_progress = true;

		maintenanceLocker.Unlock();
		cacheListLocker.Unlock();

		// We are calling the reclaimer without the object cache lock
		// to give the owner a chance to return objects to the slabs.

		if (cache->reclaimer)
			cache->reclaimer(cache->cookie, level);

		// Flush depot magazines back to the slabs, so empty slabs can
		// actually be detected below.
		if ((cache->flags & CACHE_NO_DEPOT) == 0)
			object_depot_make_empty(&cache->depot, 0);

		MutexLocker cacheLocker(cache->lock);
		size_t minimumAllowed;

		// How many empty slabs we may keep depends on the severity level.
		switch (level) {
			case B_LOW_RESOURCE_NOTE:
				minimumAllowed = cache->pressure / 2 + 1;
				break;

			case B_LOW_RESOURCE_WARNING:
				cache->pressure /= 2;
				minimumAllowed = 0;
				break;

			default:
				cache->pressure = 0;
				minimumAllowed = 0;
				break;
		}

		while (cache->empty_count > minimumAllowed) {
			// make sure we respect the cache's minimum object reserve
			size_t objectsPerSlab = cache->empty.Head()->size;
			size_t freeObjects = cache->total_objects - cache->used_count;
			if (freeObjects < cache->min_object_reserve + objectsPerSlab)
				break;

			cache->ReturnSlab(cache->empty.RemoveHead(), 0);
			cache->empty_count--;
		}

		cacheLocker.Unlock();

		// Check whether in the meantime someone has really requested
		// maintenance for the cache.
		maintenanceLocker.Lock();

		if (cache->maintenance_delete) {
			// delete_object_cache() ran while we worked on the cache;
			// finish the deletion on its behalf.
			delete_object_cache_internal(cache);
			continue;
		}

		cache->maintenance_in_progress = false;

		// A resize request arrived in the meantime — hand the cache over
		// to the maintainer thread (maintenance_pending stays set).
		if (cache->maintenance_resize)
			sMaintenanceQueue.Add(cache);
		else
			cache->maintenance_pending = false;
	} while (cache != firstCache);
}
500 
501 
/*!	Body of the "object cache resizer" kernel thread (spawned in
	slab_init_post_thread()): services resize and delete requests queued on
	sMaintenanceQueue and performs memory manager maintenance when idle.
*/
static status_t
object_cache_maintainer(void*)
{
	while (true) {
		MutexLocker locker(sMaintenanceLock);

		// wait for the next request
		while (sMaintenanceQueue.IsEmpty()) {
			// perform memory manager maintenance, if needed
			if (MemoryManager::MaintenanceNeeded()) {
				locker.Unlock();
				MemoryManager::PerformMaintenance();
				locker.Lock();
				continue;
			}

			// Sleep until request_memory_manager_maintenance() or
			// increase_object_reserve() notifies us.
			ConditionVariableEntry entry;
			sMaintenanceCondition.Add(&entry);
			locker.Unlock();
			entry.Wait();
			locker.Lock();
		}

		ObjectCache* cache = sMaintenanceQueue.RemoveHead();

		// Process this cache's requests until none are left; new requests
		// may be flagged while we work with the maintenance lock dropped.
		while (true) {
			bool resizeRequested = cache->maintenance_resize;
			bool deleteRequested = cache->maintenance_delete;

			if (!resizeRequested && !deleteRequested) {
				cache->maintenance_pending = false;
				cache->maintenance_in_progress = false;
				break;
			}

			cache->maintenance_resize = false;
			cache->maintenance_in_progress = true;

			locker.Unlock();

			if (deleteRequested) {
				delete_object_cache_internal(cache);
				break;
			}

			// resize the cache, if necessary

			MutexLocker cacheLocker(cache->lock);

			if (resizeRequested) {
				status_t error = object_cache_reserve_internal(cache,
					cache->min_object_reserve, 0);
				if (error != B_OK) {
					dprintf("object cache resizer: Failed to resize object "
						"cache %p!\n", cache);
					// NOTE(review): maintenance_pending/in_progress remain
					// set on this error path — verify this is intentional.
					break;
				}
			}

			locker.Lock();
		}
	}

	// never can get here
	return B_OK;
}
568 
569 
570 // #pragma mark - public API
571 
572 
/*!	Creates an object cache with default settings: no maximum byte usage,
	default magazine configuration, no flags, and no reclaimer. See
	create_object_cache_etc() for the full-control variant.
*/
object_cache*
create_object_cache(const char* name, size_t object_size, size_t alignment,
	void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor)
{
	return create_object_cache_etc(name, object_size, alignment, 0, 0, 0, 0,
		cookie, constructor, destructor, NULL);
}
581 
582 
583 object_cache*
584 create_object_cache_etc(const char* name, size_t objectSize, size_t alignment,
585 	size_t maximum, size_t magazineCapacity, size_t maxMagazineCount,
586 	uint32 flags, void* cookie, object_cache_constructor constructor,
587 	object_cache_destructor destructor, object_cache_reclaimer reclaimer)
588 {
589 	ObjectCache* cache;
590 
591 	if (objectSize == 0) {
592 		cache = NULL;
593 	} else if (objectSize <= 256) {
594 		cache = SmallObjectCache::Create(name, objectSize, alignment, maximum,
595 			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
596 			destructor, reclaimer);
597 	} else {
598 		cache = HashedObjectCache::Create(name, objectSize, alignment, maximum,
599 			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
600 			destructor, reclaimer);
601 	}
602 
603 	if (cache != NULL) {
604 		MutexLocker _(sObjectCacheListLock);
605 		sObjectCaches.Add(cache);
606 	}
607 
608 	T(Create(name, objectSize, alignment, maximum, flags, cookie, cache));
609 	return cache;
610 }
611 
612 
/*!	Destroys \a cache. All of its objects must have been freed beforehand.
	If the maintainer thread is currently working on the cache, the actual
	deletion is deferred to it (via the maintenance_delete flag).
*/
void
delete_object_cache(object_cache* cache)
{
	T(Delete(cache));

	// Unpublish the cache, so the low memory handler won't pick it up.
	{
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Remove(cache);
	}

	MutexLocker cacheLocker(cache->lock);

	{
		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_in_progress) {
			// The maintainer thread is working with the cache. Just mark it
			// to be deleted.
			cache->maintenance_delete = true;
			return;
		}

		// unschedule maintenance
		if (cache->maintenance_pending)
			sMaintenanceQueue.Remove(cache);
	}

	// at this point no-one should have a reference to the cache anymore
	cacheLocker.Unlock();

	delete_object_cache_internal(cache);
}
644 
645 
646 status_t
647 object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
648 {
649 	MutexLocker _(cache->lock);
650 
651 	if (cache->min_object_reserve == objectCount)
652 		return B_OK;
653 
654 	cache->min_object_reserve = objectCount;
655 
656 	increase_object_reserve(cache);
657 
658 	return B_OK;
659 }
660 
661 
/*!	Allocates one object from \a cache, honoring \a flags (e.g.
	CACHE_DONT_WAIT_FOR_MEMORY). Returns NULL on failure.
*/
void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
	// Fast path: take a cached object from the depot, if the cache has one.
	if (!(cache->flags & CACHE_NO_DEPOT)) {
		void* object = object_depot_obtain(&cache->depot);
		if (object) {
			T(Alloc(cache, flags, object));
			return object;
		}
	}

	MutexLocker _(cache->lock);
	slab* source = NULL;

	// Find a slab with a free object: prefer a partial slab, then promote
	// an empty one to the partial list; create new slabs as a last resort.
	while (true) {
		source = cache->partial.Head();
		if (source != NULL)
			break;

		source = cache->empty.RemoveHead();
		if (source != NULL) {
			cache->empty_count--;
			cache->partial.Add(source);
			break;
		}

		if (object_cache_reserve_internal(cache, 1, flags) != B_OK) {
			T(Alloc(cache, flags, NULL));
			return NULL;
		}

		// The reserve call may have dropped cache->lock temporarily, so we
		// loop and re-inspect the slab lists rather than assuming state.
		cache->pressure++;
	}

	ParanoiaChecker _2(source);

	// Pop the first free object off the slab's free list.
	object_link* link = _pop(source->free);
	source->count--;
	cache->used_count++;

	// Refill asynchronously if we dropped below the minimum reserve.
	if (cache->total_objects - cache->used_count < cache->min_object_reserve)
		increase_object_reserve(cache);

	REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next,
		sizeof(void*));

	TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.",
		link_to_object(link, cache->object_size), link, source, source->count);

	// count reached 0 == no free objects left: move the slab to the full list.
	if (source->count == 0) {
		cache->partial.Remove(source);
		cache->full.Add(source);
	}

	void* object = link_to_object(link, cache->object_size);
	T(Alloc(cache, flags, object));
	return object;
}
720 
721 
722 void
723 object_cache_free(object_cache* cache, void* object, uint32 flags)
724 {
725 	if (object == NULL)
726 		return;
727 
728 	T(Free(cache, object));
729 
730 	if (!(cache->flags & CACHE_NO_DEPOT)) {
731 		object_depot_store(&cache->depot, object, flags);
732 		return;
733 	}
734 
735 	MutexLocker _(cache->lock);
736 	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
737 }
738 
739 
740 status_t
741 object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
742 {
743 	if (objectCount == 0)
744 		return B_OK;
745 
746 	T(Reserve(cache, objectCount, flags));
747 
748 	MutexLocker _(cache->lock);
749 	return object_cache_reserve_internal(cache, objectCount, flags);
750 }
751 
752 
753 void
754 object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
755 {
756 	MutexLocker _(cache->lock);
757 	*_allocatedMemory = cache->usage;
758 }
759 
760 
/*!	Earliest slab subsystem initialization step during boot. */
void
slab_init(kernel_args* args)
{
	MemoryManager::Init(args);

	// Construct the static list in place: global C++ constructors have not
	// been run at this point of the boot process.
	new (&sObjectCaches) ObjectCacheList();

	block_allocator_init_boot();
}
770 
771 
/*!	Boot initialization step once areas can be created: finishes memory
	manager setup and registers the slab kernel debugger commands.
*/
void
slab_init_post_area()
{
	MemoryManager::InitPostArea();

	add_debugger_command("slabs", dump_slabs, "list all object caches");
	add_debugger_command("slab_cache", dump_cache_info,
		"dump information about a specific object cache");
	add_debugger_command("slab_depot", dump_object_depot,
		"dump contents of an object depot");
	add_debugger_command("slab_magazine", dump_depot_magazine,
		"dump contents of a depot magazine");
}
785 
786 
/*!	Boot initialization step once semaphores work: from now on the slab
	allocator reacts to memory pressure.
*/
void
slab_init_post_sem()
{
	// Shrink the caches when pages, memory, or address space run low.
	register_low_resource_handler(object_cache_low_memory, NULL,
		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY
			| B_KERNEL_RESOURCE_ADDRESS_SPACE, 5);

	block_allocator_init_rest();
}
796 
797 
/*!	Boot initialization step once threads can be spawned: starts the
	object cache maintainer thread (see object_cache_maintainer()).
*/
void
slab_init_post_thread()
{
	// As with sObjectCaches, construct the static queue in place.
	new(&sMaintenanceQueue) MaintenanceQueue;
	sMaintenanceCondition.Init(&sMaintenanceQueue, "object cache maintainer");

	thread_id objectCacheResizer = spawn_kernel_thread(object_cache_maintainer,
		"object cache resizer", B_URGENT_PRIORITY, NULL);
	if (objectCacheResizer < 0) {
		panic("slab_init_post_thread(): failed to spawn object cache resizer "
			"thread\n");
		return;
	}

	// Resume the newly spawned (suspended) thread; B_DO_NOT_RESCHEDULE
	// avoids an immediate context switch.
	send_signal_etc(objectCacheResizer, SIGCONT, B_DO_NOT_RESCHEDULE);
}
814