xref: /haiku/src/system/kernel/slab/Slab.cpp (revision 0044a8c39ab5721051b6279506d1a8c511e20453)
/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2008-2010, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include <slab/Slab.h>

#include <algorithm>
#include <new>
#include <stdlib.h>
#include <string.h>

#include <KernelExport.h>

#include <condition_variable.h>
#include <kernel.h>
#include <low_resource_manager.h>
#include <slab/ObjectDepot.h>
#include <smp.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <util/khash.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "HashedObjectCache.h"
#include "MemoryManager.h"
#include "slab_private.h"
#include "SmallObjectCache.h"


typedef DoublyLinkedList<ObjectCache> ObjectCacheList;

typedef DoublyLinkedList<ObjectCache,
	DoublyLinkedListMemberGetLink<ObjectCache, &ObjectCache::maintenance_link> >
		MaintenanceQueue;

static ObjectCacheList sObjectCaches;
static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");

static mutex sMaintenanceLock
	= MUTEX_INITIALIZER("object cache resize requests");
static MaintenanceQueue sMaintenanceQueue;
static ConditionVariable sMaintenanceCondition;


#if SLAB_OBJECT_CACHE_TRACING


namespace SlabObjectCacheTracing {

class ObjectCacheTraceEntry
	: public TRACE_ENTRY_SELECTOR(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE) {
	public:
		ObjectCacheTraceEntry(ObjectCache* cache)
			:
			TraceEntryBase(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE, 0, true),
			fCache(cache)
		{
		}

	protected:
		ObjectCache*	fCache;
};


class Create : public ObjectCacheTraceEntry {
	public:
		Create(const char* name, size_t objectSize, size_t alignment,
				size_t maxByteUsage, uint32 flags, void* cookie,
				ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache),
			fObjectSize(objectSize),
			fAlignment(alignment),
			fMaxByteUsage(maxByteUsage),
			fFlags(flags),
			fCookie(cookie)
		{
			fName = alloc_tracing_buffer_strcpy(name, 64, false);
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache create: name: \"%s\", object size: %lu, "
				"alignment: %lu, max usage: %lu, flags: 0x%lx, cookie: %p "
				"-> cache: %p", fName, fObjectSize, fAlignment, fMaxByteUsage,
					fFlags, fCookie, fCache);
		}

	private:
		const char*	fName;
		size_t		fObjectSize;
		size_t		fAlignment;
		size_t		fMaxByteUsage;
		uint32		fFlags;
		void*		fCookie;
};


class Delete : public ObjectCacheTraceEntry {
	public:
		Delete(ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache delete: %p", fCache);
		}
};


class Alloc : public ObjectCacheTraceEntry {
	public:
		Alloc(ObjectCache* cache, uint32 flags, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fFlags(flags),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache alloc: cache: %p, flags: 0x%lx -> "
				"object: %p", fCache, fFlags, fObject);
		}

	private:
		uint32		fFlags;
		void*		fObject;
};


class Free : public ObjectCacheTraceEntry {
	public:
		Free(ObjectCache* cache, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache free: cache: %p, object: %p", fCache,
				fObject);
		}

	private:
		void*		fObject;
};


class Reserve : public ObjectCacheTraceEntry {
	public:
		Reserve(ObjectCache* cache, size_t count, uint32 flags)
			:
			ObjectCacheTraceEntry(cache),
			fCount(count),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache reserve: cache: %p, count: %lu, "
				"flags: 0x%lx", fCache, fCount, fFlags);
		}

	private:
		uint32		fCount;
		uint32		fFlags;
};


}	// namespace SlabObjectCacheTracing

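// T() creates a tracing entry for the given event; when tracing is disabled,
// it expands to nothing.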
#	define T(x)	new(std::nothrow) SlabObjectCacheTracing::x

#else
#	define T(x)
#endif	// SLAB_OBJECT_CACHE_TRACING


// #pragma mark -


static void
dump_slab(::slab* slab)
{
	kprintf("  %p  %p  %6" B_PRIuSIZE " %6" B_PRIuSIZE " %6" B_PRIuSIZE "  %p\n",
		slab, slab->pages, slab->size, slab->count, slab->offset, slab->free);
}


static int
dump_slabs(int argc, char* argv[])
{
	kprintf("%10s %22s %8s %8s %8s %6s %8s %8s %8s\n", "address", "name",
		"objsize", "align", "usage", "empty", "usedobj", "total", "flags");

	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();

	while (it.HasNext()) {
		ObjectCache* cache = it.Next();

		kprintf("%p %22s %8lu %8" B_PRIuSIZE " %8lu %6lu %8lu %8lu %8lx\n",
			cache, cache->name, cache->object_size, cache->alignment,
			cache->usage, cache->empty_count, cache->used_count,
			cache->total_objects, cache->flags);
	}

	return 0;
}


static int
dump_cache_info(int argc, char* argv[])
{
	if (argc < 2) {
		kprintf("usage: slab_cache <address>\n");
		return 0;
	}

	ObjectCache* cache = (ObjectCache*)parse_expression(argv[1]);

	kprintf("name:              %s\n", cache->name);
	kprintf("lock:              %p\n", &cache->lock);
	kprintf("object_size:       %lu\n", cache->object_size);
	kprintf("alignment:         %" B_PRIuSIZE "\n", cache->alignment);
	kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle);
	kprintf("total_objects:     %lu\n", cache->total_objects);
	kprintf("used_count:        %lu\n", cache->used_count);
	kprintf("empty_count:       %lu\n", cache->empty_count);
	kprintf("pressure:          %lu\n", cache->pressure);
	kprintf("slab_size:         %lu\n", cache->slab_size);
	kprintf("usage:             %lu\n", cache->usage);
	kprintf("maximum:           %lu\n", cache->maximum);
	kprintf("flags:             0x%lx\n", cache->flags);
	kprintf("cookie:            %p\n", cache->cookie);
	kprintf("resize entry don't wait: %p\n", cache->resize_entry_dont_wait);
	kprintf("resize entry can wait:   %p\n", cache->resize_entry_can_wait);

	kprintf("  slab        chunk         size   used offset  free\n");

	SlabList::Iterator iterator = cache->empty.GetIterator();
	if (iterator.HasNext())
		kprintf("empty:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->partial.GetIterator();
	if (iterator.HasNext())
		kprintf("partial:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->full.GetIterator();
	if (iterator.HasNext())
		kprintf("full:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		kprintf("depot:\n");
		dump_object_depot(&cache->depot);
	}

	return 0;
}


// #pragma mark -


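/*!	Wakes up the maintainer thread, so that it performs memory manager
	maintenance (see MemoryManager::PerformMaintenance()).
*/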
void
request_memory_manager_maintenance()
{
	MutexLocker locker(sMaintenanceLock);
	sMaintenanceCondition.NotifyAll();
}


// #pragma mark -


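/*!	Destroys \a cache. The depot (if the cache has one) is destroyed first.
	The cache must not contain any full or partial slabs anymore (we panic
	otherwise); remaining empty slabs are returned to the memory manager.
*/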
static void
delete_object_cache_internal(object_cache* cache)
{
	if (!(cache->flags & CACHE_NO_DEPOT))
		object_depot_destroy(&cache->depot, 0);

	mutex_lock(&cache->lock);

	if (!cache->full.IsEmpty())
		panic("cache destroy: still has full slabs");

	if (!cache->partial.IsEmpty())
		panic("cache destroy: still has partial slabs");

	while (!cache->empty.IsEmpty())
		cache->ReturnSlab(cache->empty.RemoveHead(), 0);

	mutex_destroy(&cache->lock);
	cache->Delete();
}


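/*!	Queues \a cache for a resize by the maintainer thread, so that its
	minimum object reserve is refilled asynchronously.
*/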
static void
increase_object_reserve(ObjectCache* cache)
{
	MutexLocker locker(sMaintenanceLock);

	cache->maintenance_resize = true;

	if (!cache->maintenance_pending) {
		cache->maintenance_pending = true;
		sMaintenanceQueue.Add(cache);
		sMaintenanceCondition.NotifyAll();
	}
}


/*!	Makes sure that \a objectCount objects can be allocated.
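
	The caller must hold the cache's lock. The lock may be released
	temporarily while waiting for another thread that is already in the
	process of adding slabs.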
*/
static status_t
object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
	uint32 flags)
{
	// If someone else is already adding slabs, we wait for that to be finished
	// first.
	thread_id thread = find_thread(NULL);
	while (true) {
		if (objectCount <= cache->total_objects - cache->used_count)
			return B_OK;

		ObjectCacheResizeEntry* resizeEntry = NULL;
		if (cache->resize_entry_dont_wait != NULL) {
			resizeEntry = cache->resize_entry_dont_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;
			// Note: We could still have reentered the function, i.e.
			// resize_entry_can_wait would be ours. That doesn't matter much,
			// though, since after the don't-wait thread has done its job
			// everyone will be happy.
		} else if (cache->resize_entry_can_wait != NULL) {
			resizeEntry = cache->resize_entry_can_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;

			if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0)
				break;
		} else
			break;

		ConditionVariableEntry entry;
		resizeEntry->condition.Add(&entry);

		cache->Unlock();
		entry.Wait();
		cache->Lock();
	}

	// prepare the resize entry others can wait on
	ObjectCacheResizeEntry*& resizeEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;

	ObjectCacheResizeEntry myResizeEntry;
	resizeEntry = &myResizeEntry;
	resizeEntry->condition.Init(cache, "wait for slabs");
	resizeEntry->thread = thread;

	// add new slabs until there are as many free ones as requested
	while (objectCount > cache->total_objects - cache->used_count) {
		slab* newSlab = cache->CreateSlab(flags);
		if (newSlab == NULL) {
			resizeEntry->condition.NotifyAll();
			resizeEntry = NULL;
			return B_NO_MEMORY;
		}

		cache->usage += cache->slab_size;
		cache->total_objects += newSlab->size;

		cache->empty.Add(newSlab);
		cache->empty_count++;
	}

	resizeEntry->condition.NotifyAll();
	resizeEntry = NULL;

	return B_OK;
}


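/*!	Low resource handler: Iterates through all object caches, using the first
	cache as an end marker (it is assumed to be one of the caches that are
	never deleted). For each cache the reclaimer callback (if any) is invoked,
	the depot is emptied, and empty slabs are freed down to a limit that
	depends on the cache's pressure and the given resource \a level.
*/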
static void
object_cache_low_memory(void* dummy, uint32 resources, int32 level)
{
	if (level == B_NO_LOW_RESOURCE)
		return;

	MutexLocker cacheListLocker(sObjectCacheListLock);

	// Append the first cache to the end of the queue. We assume that it is
	// one of the caches that will never be deleted and thus we use it as a
	// marker.
	ObjectCache* firstCache = sObjectCaches.RemoveHead();
	sObjectCaches.Add(firstCache);
	cacheListLocker.Unlock();

	ObjectCache* cache;
	do {
		cacheListLocker.Lock();

		cache = sObjectCaches.RemoveHead();
		sObjectCaches.Add(cache);

		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_pending || cache->maintenance_in_progress) {
			// We don't want to mess with caches in maintenance.
			continue;
		}

		cache->maintenance_pending = true;
		cache->maintenance_in_progress = true;

		maintenanceLocker.Unlock();
		cacheListLocker.Unlock();

		// We are calling the reclaimer without the object cache lock
		// to give the owner a chance to return objects to the slabs.

		if (cache->reclaimer)
			cache->reclaimer(cache->cookie, level);

		if ((cache->flags & CACHE_NO_DEPOT) == 0)
			object_depot_make_empty(&cache->depot, 0);

		MutexLocker cacheLocker(cache->lock);
		size_t minimumAllowed;

		switch (level) {
			case B_LOW_RESOURCE_NOTE:
				minimumAllowed = cache->pressure / 2 + 1;
				cache->pressure -= cache->pressure / 8;
				break;

			case B_LOW_RESOURCE_WARNING:
				cache->pressure /= 2;
				minimumAllowed = 0;
				break;

			default:
				cache->pressure = 0;
				minimumAllowed = 0;
				break;
		}

		while (cache->empty_count > minimumAllowed) {
			// make sure we respect the cache's minimum object reserve
			size_t objectsPerSlab = cache->empty.Head()->size;
			size_t freeObjects = cache->total_objects - cache->used_count;
			if (freeObjects < cache->min_object_reserve + objectsPerSlab)
				break;

			cache->ReturnSlab(cache->empty.RemoveHead(), 0);
			cache->empty_count--;
		}

		cacheLocker.Unlock();

		// Check whether in the meantime someone has really requested
		// maintenance for the cache.
		maintenanceLocker.Lock();

		if (cache->maintenance_delete) {
			delete_object_cache_internal(cache);
			continue;
		}

		cache->maintenance_in_progress = false;

		if (cache->maintenance_resize)
			sMaintenanceQueue.Add(cache);
		else
			cache->maintenance_pending = false;
	} while (cache != firstCache);
}


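/*!	The main function of the maintainer thread. It waits for resize or delete
	requests queued in sMaintenanceQueue (e.g. via increase_object_reserve())
	and also performs memory manager maintenance, if needed.
*/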
static status_t
object_cache_maintainer(void*)
{
	while (true) {
		MutexLocker locker(sMaintenanceLock);

		// wait for the next request
		while (sMaintenanceQueue.IsEmpty()) {
			// perform memory manager maintenance, if needed
			if (MemoryManager::MaintenanceNeeded()) {
				locker.Unlock();
				MemoryManager::PerformMaintenance();
				locker.Lock();
				continue;
			}

			ConditionVariableEntry entry;
			sMaintenanceCondition.Add(&entry);
			locker.Unlock();
			entry.Wait();
			locker.Lock();
		}

		ObjectCache* cache = sMaintenanceQueue.RemoveHead();

		while (true) {
			bool resizeRequested = cache->maintenance_resize;
			bool deleteRequested = cache->maintenance_delete;

			if (!resizeRequested && !deleteRequested) {
				cache->maintenance_pending = false;
				cache->maintenance_in_progress = false;
				break;
			}

			cache->maintenance_resize = false;
			cache->maintenance_in_progress = true;

			locker.Unlock();

			if (deleteRequested) {
				delete_object_cache_internal(cache);
				break;
			}

			// resize the cache, if necessary

			MutexLocker cacheLocker(cache->lock);

			if (resizeRequested) {
				status_t error = object_cache_reserve_internal(cache,
					cache->min_object_reserve, 0);
				if (error != B_OK) {
					dprintf("object cache resizer: Failed to resize object "
						"cache %p!\n", cache);
					break;
				}
			}

			locker.Lock();
		}
	}

	// we can never get here
	return B_OK;
}


// #pragma mark - public API


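/*!	Creates an object cache for objects of size \a object_size, using the
	defaults of create_object_cache_etc().

	A minimal usage sketch (illustrative only; \c my_object is a hypothetical
	type, and the constructor/destructor hooks are simply left NULL):

	\code
	object_cache* cache = create_object_cache("my objects",
		sizeof(my_object), 0, NULL, NULL, NULL);

	my_object* object = (my_object*)object_cache_alloc(cache, 0);
	// ... use the object ...
	object_cache_free(cache, object, 0);

	delete_object_cache(cache);
	\endcode
*/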
object_cache*
create_object_cache(const char* name, size_t object_size, size_t alignment,
	void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor)
{
	return create_object_cache_etc(name, object_size, alignment, 0, 0, 0, 0,
		cookie, constructor, destructor, NULL);
}


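/*!	Creates an object cache with the given parameters. Objects of up to
	256 bytes are served by a SmallObjectCache, larger objects by a
	HashedObjectCache; an \a objectSize of 0 yields NULL. On success the new
	cache is added to the global cache list.
*/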
object_cache*
create_object_cache_etc(const char* name, size_t objectSize, size_t alignment,
	size_t maximum, size_t magazineCapacity, size_t maxMagazineCount,
	uint32 flags, void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor, object_cache_reclaimer reclaimer)
{
	ObjectCache* cache;

	if (objectSize == 0) {
		cache = NULL;
	} else if (objectSize <= 256) {
		cache = SmallObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	} else {
		cache = HashedObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	}

	if (cache != NULL) {
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Add(cache);
	}

	T(Create(name, objectSize, alignment, maximum, flags, cookie, cache));
	return cache;
}


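/*!	Deletes \a cache. If the maintainer thread is currently working with the
	cache, the actual deletion is deferred to that thread by merely setting
	the cache's maintenance_delete flag.
*/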
void
delete_object_cache(object_cache* cache)
{
	T(Delete(cache));

	{
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Remove(cache);
	}

	MutexLocker cacheLocker(cache->lock);

	{
		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_in_progress) {
			// The maintainer thread is working with the cache. Just mark it
			// to be deleted.
			cache->maintenance_delete = true;
			return;
		}

		// unschedule maintenance
		if (cache->maintenance_pending)
			sMaintenanceQueue.Remove(cache);
	}

	// at this point no-one should have a reference to the cache anymore
	cacheLocker.Unlock();

	delete_object_cache_internal(cache);
}


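/*!	Sets the minimum number of objects that shall remain allocatable in
	\a cache at all times. The reserve is filled up asynchronously by the
	maintainer thread.
*/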
status_t
object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
{
	MutexLocker _(cache->lock);

	if (cache->min_object_reserve == objectCount)
		return B_OK;

	cache->min_object_reserve = objectCount;

	increase_object_reserve(cache);

	return B_OK;
}


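/*!	Allocates an object from \a cache. Unless the cache works without a depot
	(CACHE_NO_DEPOT), the depot is tried first; only on a depot miss the
	object is taken from a partial or empty slab under the cache's lock,
	creating a new slab, if necessary.
*/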
void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
	if (!(cache->flags & CACHE_NO_DEPOT)) {
		void* object = object_depot_obtain(&cache->depot);
		if (object) {
			T(Alloc(cache, flags, object));
			return fill_allocated_block(object, cache->object_size);
		}
	}

	MutexLocker _(cache->lock);
	slab* source = NULL;

	while (true) {
		source = cache->partial.Head();
		if (source != NULL)
			break;

		source = cache->empty.RemoveHead();
		if (source != NULL) {
			cache->empty_count--;
			cache->partial.Add(source);
			break;
		}

		if (object_cache_reserve_internal(cache, 1, flags) != B_OK) {
			T(Alloc(cache, flags, NULL));
			return NULL;
		}

		cache->pressure++;
	}

	ParanoiaChecker _2(source);

	object_link* link = _pop(source->free);
	source->count--;
	cache->used_count++;

	if (cache->total_objects - cache->used_count < cache->min_object_reserve)
		increase_object_reserve(cache);

	REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next,
		sizeof(void*));

	TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.",
		link_to_object(link, cache->object_size), link, source, source->count);

	if (source->count == 0) {
		cache->partial.Remove(source);
		cache->full.Add(source);
	}

	void* object = link_to_object(link, cache->object_size);
	T(Alloc(cache, flags, object));
	return fill_allocated_block(object, cache->object_size);
}


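/*!	Returns \a object to \a cache. Unless the cache works without a depot,
	the object is stored in the depot rather than being returned to its slab
	directly. With PARANOID_KERNEL_FREE enabled, probable double frees (a
	0xdeadbeef pattern at the start of the object) are detected.
*/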
void
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
	if (object == NULL)
		return;

	T(Free(cache, object));

#if PARANOID_KERNEL_FREE
	// TODO: allow forcing the check even if we don't find deadbeef
	if (*(uint32*)object == 0xdeadbeef) {
		if (!cache->AssertObjectNotFreed(object))
			return;

		if ((cache->flags & CACHE_NO_DEPOT) == 0) {
			if (object_depot_contains_object(&cache->depot, object)) {
				panic("object_cache: object %p is already freed", object);
				return;
			}
		}
	}

	fill_freed_block(object, cache->object_size);
#endif

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		object_depot_store(&cache->depot, object, flags);
		return;
	}

	MutexLocker _(cache->lock);
	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
}


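/*!	Makes sure that \a objectCount objects can be allocated from \a cache.
	New slabs are created right away, if necessary (see
	object_cache_reserve_internal()).
*/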
status_t
object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
{
	if (objectCount == 0)
		return B_OK;

	T(Reserve(cache, objectCount, flags));

	MutexLocker _(cache->lock);
	return object_cache_reserve_internal(cache, objectCount, flags);
}


void
object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
{
	MutexLocker _(cache->lock);
	*_allocatedMemory = cache->usage;
}


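/*!	First stage of the slab allocator's boot-time initialization. The later
	stages follow below: slab_init_post_area(), slab_init_post_sem(), and
	slab_init_post_thread().
*/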
void
slab_init(kernel_args* args)
{
	MemoryManager::Init(args);

	new (&sObjectCaches) ObjectCacheList();

	block_allocator_init_boot();
}


void
slab_init_post_area()
{
	MemoryManager::InitPostArea();

	add_debugger_command("slabs", dump_slabs, "list all object caches");
	add_debugger_command("slab_cache", dump_cache_info,
		"dump information about a specific object cache");
	add_debugger_command("slab_depot", dump_object_depot,
		"dump contents of an object depot");
	add_debugger_command("slab_magazine", dump_depot_magazine,
		"dump contents of a depot magazine");
}


void
slab_init_post_sem()
{
	register_low_resource_handler(object_cache_low_memory, NULL,
		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY
			| B_KERNEL_RESOURCE_ADDRESS_SPACE, 5);

	block_allocator_init_rest();
}


void
slab_init_post_thread()
{
	new(&sMaintenanceQueue) MaintenanceQueue;
	sMaintenanceCondition.Init(&sMaintenanceQueue, "object cache maintainer");

	thread_id objectCacheResizer = spawn_kernel_thread(object_cache_maintainer,
		"object cache resizer", B_URGENT_PRIORITY, NULL);
	if (objectCacheResizer < 0) {
		panic("slab_init_post_thread(): failed to spawn object cache resizer "
			"thread\n");
		return;
	}

	resume_thread(objectCacheResizer);
}
835