/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2008-2010, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include <slab/Slab.h>

#include <algorithm>
#include <new>
#include <stdlib.h>
#include <string.h>

#include <KernelExport.h>

#include <condition_variable.h>
#include <elf.h>
#include <kernel.h>
#include <low_resource_manager.h>
#include <slab/ObjectDepot.h>
#include <smp.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <util/khash.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "HashedObjectCache.h"
#include "MemoryManager.h"
#include "slab_debug.h"
#include "slab_private.h"
#include "SmallObjectCache.h"


typedef DoublyLinkedList<ObjectCache> ObjectCacheList;

typedef DoublyLinkedList<ObjectCache,
	DoublyLinkedListMemberGetLink<ObjectCache, &ObjectCache::maintenance_link> >
		MaintenanceQueue;

static ObjectCacheList sObjectCaches;
static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");

static mutex sMaintenanceLock
	= MUTEX_INITIALIZER("object cache resize requests");
static MaintenanceQueue sMaintenanceQueue;
static ConditionVariable sMaintenanceCondition;
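
// Caches needing background work (resizing up to their minimum object
// reserve, or deferred deletion) are queued on sMaintenanceQueue and picked
// up by the maintainer thread spawned in slab_init_post_thread().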


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

struct caller_info {
	addr_t		caller;
	size_t		count;
	size_t		size;
};

static const int32 kCallerInfoTableSize = 1024;
static caller_info sCallerInfoTable[kCallerInfoTableSize];
static int32 sCallerInfoCount = 0;

static caller_info* get_caller_info(addr_t caller);


RANGE_MARKER_FUNCTION_PROTOTYPES(slab_allocator)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabHashedObjectCache)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabMemoryManager)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectCache)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectDepot)
RANGE_MARKER_FUNCTION_PROTOTYPES(Slab)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabSmallObjectCache)


static const addr_t kSlabCodeAddressRanges[] = {
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(slab_allocator),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabHashedObjectCache),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabMemoryManager),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectCache),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectDepot),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(Slab),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabSmallObjectCache)
};

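// Each RANGE_MARKER_FUNCTION_ADDRESS_RANGE() entry above contributes a begin
// and an end address, so the table holds twice as many elements as ranges.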
static const uint32 kSlabCodeAddressRangeCount
	= sizeof(kSlabCodeAddressRanges) / sizeof(kSlabCodeAddressRanges[0]) / 2;

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE


RANGE_MARKER_FUNCTION_BEGIN(Slab)


#if SLAB_OBJECT_CACHE_TRACING


namespace SlabObjectCacheTracing {

class ObjectCacheTraceEntry
	: public TRACE_ENTRY_SELECTOR(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE) {
	public:
		ObjectCacheTraceEntry(ObjectCache* cache)
			:
			TraceEntryBase(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE, 0, true),
			fCache(cache)
		{
		}

	protected:
		ObjectCache*	fCache;
};


class Create : public ObjectCacheTraceEntry {
	public:
		Create(const char* name, size_t objectSize, size_t alignment,
				size_t maxByteUsage, uint32 flags, void* cookie,
				ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache),
			fObjectSize(objectSize),
			fAlignment(alignment),
			fMaxByteUsage(maxByteUsage),
			fFlags(flags),
			fCookie(cookie)
		{
			fName = alloc_tracing_buffer_strcpy(name, 64, false);
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache create: name: \"%s\", object size: %lu, "
				"alignment: %lu, max usage: %lu, flags: 0x%lx, cookie: %p "
				"-> cache: %p", fName, fObjectSize, fAlignment, fMaxByteUsage,
					fFlags, fCookie, fCache);
		}

	private:
		const char*	fName;
		size_t		fObjectSize;
		size_t		fAlignment;
		size_t		fMaxByteUsage;
		uint32		fFlags;
		void*		fCookie;
};


class Delete : public ObjectCacheTraceEntry {
	public:
		Delete(ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache delete: %p", fCache);
		}
};


class Alloc : public ObjectCacheTraceEntry {
	public:
		Alloc(ObjectCache* cache, uint32 flags, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fFlags(flags),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache alloc: cache: %p, flags: 0x%lx -> "
				"object: %p", fCache, fFlags, fObject);
		}

	private:
		uint32		fFlags;
		void*		fObject;
};


class Free : public ObjectCacheTraceEntry {
	public:
		Free(ObjectCache* cache, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache free: cache: %p, object: %p", fCache,
				fObject);
		}

	private:
		void*		fObject;
};


class Reserve : public ObjectCacheTraceEntry {
	public:
		Reserve(ObjectCache* cache, size_t count, uint32 flags)
			:
			ObjectCacheTraceEntry(cache),
			fCount(count),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache reserve: cache: %p, count: %lu, "
				"flags: 0x%lx", fCache, fCount, fFlags);
		}

	private:
		size_t		fCount;
		uint32		fFlags;
};


}	// namespace SlabObjectCacheTracing

#	define T(x)	new(std::nothrow) SlabObjectCacheTracing::x

#else
#	define T(x)
#endif	// SLAB_OBJECT_CACHE_TRACING
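
// With tracing enabled, T(Alloc(cache, flags, object)) expands to
// new(std::nothrow) SlabObjectCacheTracing::Alloc(cache, flags, object);
// with tracing disabled it expands to nothing, so call sites compile away.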


// #pragma mark -


static void
dump_slab(::slab* slab)
{
	kprintf("  %p  %p  %6" B_PRIuSIZE " %6" B_PRIuSIZE " %6" B_PRIuSIZE "  %p\n",
		slab, slab->pages, slab->size, slab->count, slab->offset, slab->free);
}


static int
dump_slabs(int argc, char* argv[])
{
	kprintf("%10s %22s %8s %8s %8s %6s %8s %8s %8s\n", "address", "name",
		"objsize", "align", "usage", "empty", "usedobj", "total", "flags");

	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();

	while (it.HasNext()) {
		ObjectCache* cache = it.Next();

		kprintf("%p %22s %8lu %8" B_PRIuSIZE " %8lu %6lu %8lu %8lu %8lx\n",
			cache, cache->name, cache->object_size, cache->alignment,
			cache->usage, cache->empty_count, cache->used_count,
			cache->total_objects, cache->flags);
	}

	return 0;
}


static int
dump_cache_info(int argc, char* argv[])
{
	if (argc < 2) {
		kprintf("usage: slab_cache <address>\n");
		return 0;
	}

	ObjectCache* cache = (ObjectCache*)parse_expression(argv[1]);

	kprintf("name:              %s\n", cache->name);
	kprintf("lock:              %p\n", &cache->lock);
	kprintf("object_size:       %lu\n", cache->object_size);
	kprintf("alignment:         %" B_PRIuSIZE "\n", cache->alignment);
	kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle);
	kprintf("total_objects:     %lu\n", cache->total_objects);
	kprintf("used_count:        %lu\n", cache->used_count);
	kprintf("empty_count:       %lu\n", cache->empty_count);
	kprintf("pressure:          %lu\n", cache->pressure);
	kprintf("slab_size:         %lu\n", cache->slab_size);
	kprintf("usage:             %lu\n", cache->usage);
	kprintf("maximum:           %lu\n", cache->maximum);
	kprintf("flags:             0x%lx\n", cache->flags);
	kprintf("cookie:            %p\n", cache->cookie);
	kprintf("resize entry don't wait: %p\n", cache->resize_entry_dont_wait);
	kprintf("resize entry can wait:   %p\n", cache->resize_entry_can_wait);

	kprintf("  slab        chunk         size   used offset  free\n");

	SlabList::Iterator iterator = cache->empty.GetIterator();
	if (iterator.HasNext())
		kprintf("empty:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->partial.GetIterator();
	if (iterator.HasNext())
		kprintf("partial:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->full.GetIterator();
	if (iterator.HasNext())
		kprintf("full:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		kprintf("depot:\n");
		dump_object_depot(&cache->depot);
	}

	return 0;
}


// #pragma mark - AllocationTrackingCallback


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

AllocationTrackingCallback::~AllocationTrackingCallback()
{
}

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE


// #pragma mark -


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

namespace {

class AllocationCollectorCallback : public AllocationTrackingCallback {
public:
	AllocationCollectorCallback(bool resetInfos)
		:
		fResetInfos(resetInfos)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		addr_t caller = 0;
		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();

		if (traceEntry != NULL && info->IsTraceEntryValid()) {
			caller = tracing_find_caller_in_stack_trace(
				traceEntry->StackTrace(), kSlabCodeAddressRanges,
				kSlabCodeAddressRangeCount);
		}

		caller_info* callerInfo = get_caller_info(caller);
		if (callerInfo == NULL) {
			kprintf("out of space for caller infos\n");
			return false;
		}

		callerInfo->count++;
		callerInfo->size += allocationSize;

		if (fResetInfos)
			info->Clear();

		return true;
	}

private:
	bool	fResetInfos;
};


class AllocationInfoPrinterCallback : public AllocationTrackingCallback {
public:
	AllocationInfoPrinterCallback(bool printStackTrace, addr_t addressFilter,
		team_id teamFilter, thread_id threadFilter)
		:
		fPrintStackTrace(printStackTrace),
		fAddressFilter(addressFilter),
		fTeamFilter(teamFilter),
		fThreadFilter(threadFilter)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		if (fAddressFilter != 0 && (addr_t)allocation != fAddressFilter)
			return true;

		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
		if (traceEntry != NULL && !info->IsTraceEntryValid())
			traceEntry = NULL;

		if (traceEntry != NULL) {
			if (fTeamFilter != -1 && traceEntry->TeamID() != fTeamFilter)
				return true;
			if (fThreadFilter != -1 && traceEntry->ThreadID() != fThreadFilter)
				return true;
		} else {
			// we need the info if we have filters set
			if (fTeamFilter != -1 || fThreadFilter != -1)
				return true;
		}

		kprintf("allocation %p, size: %" B_PRIuSIZE, allocation,
			allocationSize);

		if (traceEntry != NULL) {
			kprintf(", team: %" B_PRId32 ", thread %" B_PRId32
				", time %" B_PRId64 "\n", traceEntry->TeamID(),
				traceEntry->ThreadID(), traceEntry->Time());

			if (fPrintStackTrace)
				tracing_print_stack_trace(traceEntry->StackTrace());
		} else
			kprintf("\n");

		return true;
	}

private:
	bool		fPrintStackTrace;
	addr_t		fAddressFilter;
	team_id		fTeamFilter;
	thread_id	fThreadFilter;
};


class AllocationDetailPrinterCallback : public AllocationTrackingCallback {
public:
	AllocationDetailPrinterCallback(addr_t caller)
		:
		fCaller(caller)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		addr_t caller = 0;
		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
		if (traceEntry != NULL && !info->IsTraceEntryValid())
			traceEntry = NULL;

		if (traceEntry != NULL) {
			caller = tracing_find_caller_in_stack_trace(
				traceEntry->StackTrace(), kSlabCodeAddressRanges,
				kSlabCodeAddressRangeCount);
		}

		if (caller != fCaller)
			return true;

		kprintf("allocation %p, size: %" B_PRIuSIZE "\n", allocation,
			allocationSize);
		if (traceEntry != NULL)
			tracing_print_stack_trace(traceEntry->StackTrace());

		return true;
	}

private:
	addr_t	fCaller;
};

}	// unnamed namespace


static caller_info*
get_caller_info(addr_t caller)
{
	// find the caller info
	for (int32 i = 0; i < sCallerInfoCount; i++) {
		if (caller == sCallerInfoTable[i].caller)
			return &sCallerInfoTable[i];
	}

	// not found, add a new entry, if there are free slots
	if (sCallerInfoCount >= kCallerInfoTableSize)
		return NULL;

	caller_info* info = &sCallerInfoTable[sCallerInfoCount++];
	info->caller = caller;
	info->count = 0;
	info->size = 0;

	return info;
}

static int
caller_info_compare_size(const void* _a, const void* _b)
{
	const caller_info* a = (const caller_info*)_a;
	const caller_info* b = (const caller_info*)_b;
	// compare explicitly instead of subtracting -- a size_t difference
	// doesn't necessarily fit into an int
	if (a->size != b->size)
		return b->size > a->size ? 1 : -1;
	return 0;
}


static int
caller_info_compare_count(const void* _a, const void* _b)
{
	const caller_info* a = (const caller_info*)_a;
	const caller_info* b = (const caller_info*)_b;
	if (a->count != b->count)
		return b->count > a->count ? 1 : -1;
	return 0;
}


#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

static bool
analyze_allocation_callers(ObjectCache* cache, slab* slab,
	AllocationTrackingCallback& callback)
{
	for (uint32 i = 0; i < slab->size; i++) {
		if (!callback.ProcessTrackingInfo(&slab->tracking[i],
				cache->ObjectAtIndex(slab, i), cache->object_size)) {
			return false;
		}
	}

	return true;
}


static bool
analyze_allocation_callers(ObjectCache* cache, const SlabList& slabList,
	AllocationTrackingCallback& callback)
{
	for (SlabList::ConstIterator it = slabList.GetIterator();
			slab* slab = it.Next();) {
		if (!analyze_allocation_callers(cache, slab, callback))
			return false;
	}

	return true;
}


static bool
analyze_allocation_callers(ObjectCache* cache,
	AllocationTrackingCallback& callback)
{
	return analyze_allocation_callers(cache, cache->full, callback)
		&& analyze_allocation_callers(cache, cache->partial, callback);
}

#endif	// SLAB_OBJECT_CACHE_ALLOCATION_TRACKING


static int
dump_allocation_infos(int argc, char **argv)
{
	ObjectCache* cache = NULL;
	slab* slab = NULL;
	addr_t addressFilter = 0;
	team_id teamFilter = -1;
	thread_id threadFilter = -1;
	bool printStackTraces = false;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "--stacktrace") == 0)
			printStackTraces = true;
		else if (strcmp(argv[i], "-a") == 0) {
			uint64 address;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &address, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			addressFilter = address;
		} else if (strcmp(argv[i], "-o") == 0) {
			uint64 cacheAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			cache = (ObjectCache*)(addr_t)cacheAddress;
		} else if (strcasecmp(argv[i], "-s") == 0) {
			uint64 slabAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &slabAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			void* slabPages = (void*)slabAddress;
			// argv[i] holds the address argument now; the flag ("-s" or
			// "-S") is one position back
			if (strcmp(argv[i - 1], "-s") == 0) {
				slab = (struct slab*)(addr_t)slabAddress;
				slabPages = slab->pages;
			}

			cache = MemoryManager::DebugObjectCacheForAddress(slabPages);
			if (cache == NULL) {
				kprintf("Couldn't find object cache for address %p.\n",
					slabPages);
				return 0;
			}

			if (slab == NULL) {
				slab = cache->ObjectSlab(slabPages);

				if (slab == NULL) {
					kprintf("Couldn't find slab for address %p.\n", slabPages);
					return 0;
				}
			}
		} else if (strcmp(argv[i], "--team") == 0) {
			uint64 team;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &team, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			teamFilter = team;
		} else if (strcmp(argv[i], "--thread") == 0) {
			uint64 thread;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &thread, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			threadFilter = thread;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	AllocationInfoPrinterCallback callback(printStackTraces, addressFilter,
		teamFilter, threadFilter);

	if (slab != NULL || cache != NULL) {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		if (slab != NULL) {
			if (!analyze_allocation_callers(cache, slab, callback))
				return 0;
		} else if (cache != NULL) {
			if (!analyze_allocation_callers(cache, callback))
				return 0;
		}
#else
		kprintf("Object cache allocation tracking not available. "
			"SLAB_OBJECT_CACHE_TRACING (%d) and "
			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
		return 0;
#endif
	} else {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
				it.HasNext();) {
			if (!analyze_allocation_callers(it.Next(), callback))
				return 0;
		}
#endif

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
		if (!MemoryManager::AnalyzeAllocationCallers(callback))
			return 0;
#endif
	}

	return 0;
}


static int
dump_allocations_per_caller(int argc, char **argv)
{
	bool sortBySize = true;
	bool resetAllocationInfos = false;
	bool printDetails = false;
	ObjectCache* cache = NULL;
	addr_t caller = 0;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "-c") == 0) {
			sortBySize = false;
		} else if (strcmp(argv[i], "-d") == 0) {
			uint64 callerAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &callerAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			caller = callerAddress;
			printDetails = true;
		} else if (strcmp(argv[i], "-o") == 0) {
			uint64 cacheAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			cache = (ObjectCache*)(addr_t)cacheAddress;
		} else if (strcmp(argv[i], "-r") == 0) {
			resetAllocationInfos = true;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	sCallerInfoCount = 0;

	AllocationCollectorCallback collectorCallback(resetAllocationInfos);
	AllocationDetailPrinterCallback detailsCallback(caller);
	AllocationTrackingCallback& callback = printDetails
		? (AllocationTrackingCallback&)detailsCallback
		: (AllocationTrackingCallback&)collectorCallback;

	if (cache != NULL) {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		if (!analyze_allocation_callers(cache, callback))
			return 0;
#else
		kprintf("Object cache allocation tracking not available. "
			"SLAB_OBJECT_CACHE_TRACING (%d) and "
			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
		return 0;
#endif
	} else {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
				it.HasNext();) {
			if (!analyze_allocation_callers(it.Next(), callback))
				return 0;
		}
#endif

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
		if (!MemoryManager::AnalyzeAllocationCallers(callback))
			return 0;
#endif
	}

	if (printDetails)
		return 0;

	// sort the array
	qsort(sCallerInfoTable, sCallerInfoCount, sizeof(caller_info),
		sortBySize ? &caller_info_compare_size : &caller_info_compare_count);

	kprintf("%" B_PRId32 " different callers, sorted by %s...\n\n",
		sCallerInfoCount, sortBySize ? "size" : "count");

	size_t totalAllocationSize = 0;
	size_t totalAllocationCount = 0;

	kprintf("     count        size      caller\n");
	kprintf("----------------------------------\n");
	for (int32 i = 0; i < sCallerInfoCount; i++) {
		caller_info& info = sCallerInfoTable[i];
		kprintf("%10" B_PRIuSIZE "  %10" B_PRIuSIZE "  %p", info.count,
			info.size, (void*)info.caller);

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(info.caller, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			kprintf("  %s + %#" B_PRIxADDR " (%s)%s\n", symbol,
				info.caller - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			kprintf("\n");

		totalAllocationCount += info.count;
		totalAllocationSize += info.size;
	}

	kprintf("\ntotal allocations: %" B_PRIuSIZE ", %" B_PRIuSIZE " bytes\n",
		totalAllocationCount, totalAllocationSize);

	return 0;
}

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE


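/*!	Records an allocation with the tracing/tracking infrastructure, if
	enabled. With allocation tracking, the entry is attached to the object's
	tracking info, where the "allocation_infos" and "allocations_per_caller"
	debugger commands can find it later.
*/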
void
add_alloc_tracing_entry(ObjectCache* cache, uint32 flags, void* object)
{
#if SLAB_OBJECT_CACHE_TRACING
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
	cache->TrackingInfoFor(object)->Init(T(Alloc(cache, flags, object)));
#else
	T(Alloc(cache, flags, object));
#endif
#endif
}


// #pragma mark -


void
request_memory_manager_maintenance()
{
	MutexLocker locker(sMaintenanceLock);
	sMaintenanceCondition.NotifyAll();
}


// #pragma mark -


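/*!	Actually tears down the cache: empties the depot, returns the remaining
	empty slabs, and deletes the cache object. The caller must make sure that
	the cache is no longer in use; full or partial slabs at this point mean
	leaked objects and cause a panic.
*/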
static void
delete_object_cache_internal(object_cache* cache)
{
	if (!(cache->flags & CACHE_NO_DEPOT))
		object_depot_destroy(&cache->depot, 0);

	mutex_lock(&cache->lock);

	if (!cache->full.IsEmpty())
		panic("cache destroy: still has full slabs");

	if (!cache->partial.IsEmpty())
		panic("cache destroy: still has partial slabs");

	while (!cache->empty.IsEmpty())
		cache->ReturnSlab(cache->empty.RemoveHead(), 0);

	mutex_destroy(&cache->lock);
	cache->Delete();
}


static void
increase_object_reserve(ObjectCache* cache)
{
	MutexLocker locker(sMaintenanceLock);

	cache->maintenance_resize = true;

	if (!cache->maintenance_pending) {
		cache->maintenance_pending = true;
		sMaintenanceQueue.Add(cache);
		sMaintenanceCondition.NotifyAll();
	}
}


/*!	Makes sure that \a objectCount objects can be allocated.
	Must be called with the cache's lock held. If several threads reserve
	concurrently, only one of them creates the missing slabs while the others
	wait on the respective resize entry's condition variable; separate
	entries are used for callers that may wait for memory and for those that
	may not (CACHE_DONT_WAIT_FOR_MEMORY).
*/
static status_t
object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
	uint32 flags)
{
	// If someone else is already adding slabs, we wait for that to be finished
	// first.
	thread_id thread = find_thread(NULL);
	while (true) {
		if (objectCount <= cache->total_objects - cache->used_count)
			return B_OK;

		ObjectCacheResizeEntry* resizeEntry = NULL;
		if (cache->resize_entry_dont_wait != NULL) {
			resizeEntry = cache->resize_entry_dont_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;
			// Note: We could still have reentered the function, i.e.
			// resize_entry_can_wait would be ours. That doesn't matter much,
			// though, since after the don't-wait thread has done its job
			// everyone will be happy.
		} else if (cache->resize_entry_can_wait != NULL) {
			resizeEntry = cache->resize_entry_can_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;

			if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0)
				break;
		} else
			break;

		ConditionVariableEntry entry;
		resizeEntry->condition.Add(&entry);

		cache->Unlock();
		entry.Wait();
		cache->Lock();
	}

	// prepare the resize entry others can wait on
	ObjectCacheResizeEntry*& resizeEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;

	ObjectCacheResizeEntry myResizeEntry;
	resizeEntry = &myResizeEntry;
	resizeEntry->condition.Init(cache, "wait for slabs");
	resizeEntry->thread = thread;

	// add new slabs until there are as many free ones as requested
	while (objectCount > cache->total_objects - cache->used_count) {
		slab* newSlab = cache->CreateSlab(flags);
		if (newSlab == NULL) {
			resizeEntry->condition.NotifyAll();
			resizeEntry = NULL;
			return B_NO_MEMORY;
		}

		cache->usage += cache->slab_size;
		cache->total_objects += newSlab->size;

		cache->empty.Add(newSlab);
		cache->empty_count++;
	}

	resizeEntry->condition.NotifyAll();
	resizeEntry = NULL;

	return B_OK;
}


static void
object_cache_low_memory(void* dummy, uint32 resources, int32 level)
{
	if (level == B_NO_LOW_RESOURCE)
		return;

	MutexLocker cacheListLocker(sObjectCacheListLock);

	// Append the first cache to the end of the queue. We assume that it is
	// one of the caches that will never be deleted and thus we use it as a
	// marker.
	ObjectCache* firstCache = sObjectCaches.RemoveHead();
	sObjectCaches.Add(firstCache);
	cacheListLocker.Unlock();

	ObjectCache* cache;
	do {
		cacheListLocker.Lock();

		cache = sObjectCaches.RemoveHead();
		sObjectCaches.Add(cache);

		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_pending || cache->maintenance_in_progress) {
			// We don't want to mess with caches in maintenance.
			continue;
		}

		cache->maintenance_pending = true;
		cache->maintenance_in_progress = true;

		maintenanceLocker.Unlock();
		cacheListLocker.Unlock();

		// We are calling the reclaimer without the object cache lock
		// to give the owner a chance to return objects to the slabs.

		if (cache->reclaimer)
			cache->reclaimer(cache->cookie, level);

		if ((cache->flags & CACHE_NO_DEPOT) == 0)
			object_depot_make_empty(&cache->depot, 0);

		MutexLocker cacheLocker(cache->lock);
		size_t minimumAllowed;

		switch (level) {
			case B_LOW_RESOURCE_NOTE:
				minimumAllowed = cache->pressure / 2 + 1;
				cache->pressure -= cache->pressure / 8;
				break;

			case B_LOW_RESOURCE_WARNING:
				cache->pressure /= 2;
				minimumAllowed = 0;
				break;

			default:
				cache->pressure = 0;
				minimumAllowed = 0;
				break;
		}

		while (cache->empty_count > minimumAllowed) {
			// make sure we respect the cache's minimum object reserve
			size_t objectsPerSlab = cache->empty.Head()->size;
			size_t freeObjects = cache->total_objects - cache->used_count;
			if (freeObjects < cache->min_object_reserve + objectsPerSlab)
				break;

			cache->ReturnSlab(cache->empty.RemoveHead(), 0);
			cache->empty_count--;
		}

		cacheLocker.Unlock();

		// Check whether in the meantime someone has really requested
		// maintenance for the cache.
		maintenanceLocker.Lock();

		if (cache->maintenance_delete) {
			delete_object_cache_internal(cache);
			continue;
		}

		cache->maintenance_in_progress = false;

		if (cache->maintenance_resize)
			sMaintenanceQueue.Add(cache);
		else
			cache->maintenance_pending = false;
	} while (cache != firstCache);
}


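/*!	The main loop of the maintainer thread: processes resize and delete
	requests queued on sMaintenanceQueue and, while the queue is empty,
	performs memory manager maintenance as needed. Never returns.
*/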
static status_t
object_cache_maintainer(void*)
{
	while (true) {
		MutexLocker locker(sMaintenanceLock);

		// wait for the next request
		while (sMaintenanceQueue.IsEmpty()) {
			// perform memory manager maintenance, if needed
			if (MemoryManager::MaintenanceNeeded()) {
				locker.Unlock();
				MemoryManager::PerformMaintenance();
				locker.Lock();
				continue;
			}

			ConditionVariableEntry entry;
			sMaintenanceCondition.Add(&entry);
			locker.Unlock();
			entry.Wait();
			locker.Lock();
		}

		ObjectCache* cache = sMaintenanceQueue.RemoveHead();

		while (true) {
			bool resizeRequested = cache->maintenance_resize;
			bool deleteRequested = cache->maintenance_delete;

			if (!resizeRequested && !deleteRequested) {
				cache->maintenance_pending = false;
				cache->maintenance_in_progress = false;
				break;
			}

			cache->maintenance_resize = false;
			cache->maintenance_in_progress = true;

			locker.Unlock();

			if (deleteRequested) {
				delete_object_cache_internal(cache);
				break;
			}

			// resize the cache, if necessary

			MutexLocker cacheLocker(cache->lock);

			if (resizeRequested) {
				status_t error = object_cache_reserve_internal(cache,
					cache->min_object_reserve, 0);
				if (error != B_OK) {
					dprintf("object cache resizer: Failed to resize object "
						"cache %p!\n", cache);
					break;
				}
			}

			locker.Lock();
		}
	}

	// never reached
	return B_OK;
}


// #pragma mark - public API


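/*	A minimal usage sketch of the public API (illustrative only; my_object is
	a placeholder type, not something defined in this file):

	object_cache* cache = create_object_cache("my objects",
		sizeof(my_object), 0, NULL, NULL, NULL);
	my_object* object = (my_object*)object_cache_alloc(cache, 0);
	// ... use the object ...
	object_cache_free(cache, object, 0);
	delete_object_cache(cache);
*/

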
object_cache*
create_object_cache(const char* name, size_t object_size, size_t alignment,
	void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor)
{
	return create_object_cache_etc(name, object_size, alignment, 0, 0, 0, 0,
		cookie, constructor, destructor, NULL);
}


object_cache*
create_object_cache_etc(const char* name, size_t objectSize, size_t alignment,
	size_t maximum, size_t magazineCapacity, size_t maxMagazineCount,
	uint32 flags, void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor, object_cache_reclaimer reclaimer)
{
	ObjectCache* cache;

	if (objectSize == 0) {
		cache = NULL;
	} else if (objectSize <= 256) {
		cache = SmallObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	} else {
		cache = HashedObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	}

	if (cache != NULL) {
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Add(cache);
	}

	T(Create(name, objectSize, alignment, maximum, flags, cookie, cache));
	return cache;
}


void
delete_object_cache(object_cache* cache)
{
	T(Delete(cache));

	{
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Remove(cache);
	}

	MutexLocker cacheLocker(cache->lock);

	{
		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_in_progress) {
			// The maintainer thread is working with the cache. Just mark it
			// to be deleted.
			cache->maintenance_delete = true;
			return;
		}

		// unschedule maintenance
		if (cache->maintenance_pending)
			sMaintenanceQueue.Remove(cache);
	}

	// at this point no-one should have a reference to the cache anymore
	cacheLocker.Unlock();

	delete_object_cache_internal(cache);
}


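/*!	Sets the minimum number of objects that are kept allocatable without
	creating new slabs on demand, which is useful for code that may have to
	allocate with CACHE_DONT_WAIT_FOR_MEMORY. The reserve is replenished in
	the background by the maintainer thread. For example,
	object_cache_set_minimum_reserve(cache, 32) keeps at least 32 objects
	ready (32 being an arbitrary example value).
*/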
status_t
object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
{
	MutexLocker _(cache->lock);

	if (cache->min_object_reserve == objectCount)
		return B_OK;

	cache->min_object_reserve = objectCount;

	increase_object_reserve(cache);

	return B_OK;
}


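/*!	Allocates an object from the given cache. The fast path obtains an object
	from the depot without taking the cache's lock; only when the depot is
	empty (or disabled via CACHE_NO_DEPOT) is an object taken from a partial
	or empty slab, creating a new slab first if necessary.
*/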
void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
	if (!(cache->flags & CACHE_NO_DEPOT)) {
		void* object = object_depot_obtain(&cache->depot);
		if (object) {
			add_alloc_tracing_entry(cache, flags, object);
			return fill_allocated_block(object, cache->object_size);
		}
	}

	MutexLocker _(cache->lock);
	slab* source = NULL;

	while (true) {
		source = cache->partial.Head();
		if (source != NULL)
			break;

		source = cache->empty.RemoveHead();
		if (source != NULL) {
			cache->empty_count--;
			cache->partial.Add(source);
			break;
		}

		if (object_cache_reserve_internal(cache, 1, flags) != B_OK) {
			T(Alloc(cache, flags, NULL));
			return NULL;
		}

		cache->pressure++;
	}

	ParanoiaChecker _2(source);

	object_link* link = _pop(source->free);
	source->count--;
	cache->used_count++;

	if (cache->total_objects - cache->used_count < cache->min_object_reserve)
		increase_object_reserve(cache);

	REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next,
		sizeof(void*));

	TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.",
		link_to_object(link, cache->object_size), link, source, source->count);

	if (source->count == 0) {
		cache->partial.Remove(source);
		cache->full.Add(source);
	}

	void* object = link_to_object(link, cache->object_size);
	add_alloc_tracing_entry(cache, flags, object);
	return fill_allocated_block(object, cache->object_size);
}


void
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
	if (object == NULL)
		return;

	T(Free(cache, object));

#if PARANOID_KERNEL_FREE
	// TODO: allow forcing the check even if we don't find deadbeef
	if (*(uint32*)object == 0xdeadbeef) {
		if (!cache->AssertObjectNotFreed(object))
			return;

		if ((cache->flags & CACHE_NO_DEPOT) == 0) {
			if (object_depot_contains_object(&cache->depot, object)) {
				panic("object_cache: object %p is already freed", object);
				return;
			}
		}
	}

	fill_freed_block(object, cache->object_size);
#endif

#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
	cache->TrackingInfoFor(object)->Clear();
#endif

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		object_depot_store(&cache->depot, object, flags);
		return;
	}

	MutexLocker _(cache->lock);
	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
}


status_t
object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
{
	if (objectCount == 0)
		return B_OK;

	T(Reserve(cache, objectCount, flags));

	MutexLocker _(cache->lock);
	return object_cache_reserve_internal(cache, objectCount, flags);
}


void
object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
{
	MutexLocker _(cache->lock);
	*_allocatedMemory = cache->usage;
}


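/*	Boot-time initialization happens in stages, called in this order:
	slab_init() sets up the memory manager and the cache list,
	slab_init_post_area() adds the debugger commands, slab_init_post_sem()
	registers the low resource handler, and slab_init_post_thread() spawns
	the maintainer thread.
*/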
void
slab_init(kernel_args* args)
{
	MemoryManager::Init(args);

	new (&sObjectCaches) ObjectCacheList();

	block_allocator_init_boot();
}


void
slab_init_post_area()
{
	MemoryManager::InitPostArea();

	add_debugger_command("slabs", dump_slabs, "list all object caches");
	add_debugger_command("slab_cache", dump_cache_info,
		"dump information about a specific object cache");
	add_debugger_command("slab_depot", dump_object_depot,
		"dump contents of an object depot");
	add_debugger_command("slab_magazine", dump_depot_magazine,
		"dump contents of a depot magazine");
#if SLAB_ALLOCATION_TRACKING_AVAILABLE
	add_debugger_command_etc("allocations_per_caller",
		&dump_allocations_per_caller,
		"Dump current slab allocations summed up per caller",
		"[ -c ] [ -d <caller> ] [ -o <object cache> ] [ -r ]\n"
		"The current allocations will be summed up by caller (their count\n"
		"and size), printed in decreasing order by size or, if \"-c\" is\n"
		"specified, by allocation count. If given, <object cache> specifies\n"
		"the address of the object cache for which to print the allocations.\n"
		"If \"-d\" is given, each allocation for caller <caller> is printed\n"
		"including the respective stack trace.\n"
		"If \"-r\" is given, the allocation infos are reset after gathering\n"
		"the information, so the next command invocation will only show the\n"
		"allocations made after the reset.\n", 0);
	add_debugger_command_etc("allocation_infos",
		&dump_allocation_infos,
		"Dump current slab allocations",
		"[ --stacktrace ] [ -o <object cache> | -s <slab> | -S <address> ] "
		"[ -a <allocation> ] [ --team <team ID> ] [ --thread <thread ID> ]\n"
		"The current allocations filtered by optional values will be printed.\n"
		"If given, <object cache> specifies the address of the object cache\n"
		"or <slab> specifies the address of a slab, for which to print the\n"
		"allocations. Alternatively <address> specifies any address within\n"
		"a slab allocation range.\n"
		"The optional \"-a\" address filters for a specific allocation.\n"
		"With \"--team\" and \"--thread\", allocations can be filtered by\n"
		"specific teams and/or threads (these only work if a corresponding\n"
		"tracing entry is still available).\n"
		"If \"--stacktrace\" is given, then stack traces of the allocation\n"
		"callers are printed, where available.\n", 0);
#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
}


void
slab_init_post_sem()
{
	register_low_resource_handler(object_cache_low_memory, NULL,
		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY
			| B_KERNEL_RESOURCE_ADDRESS_SPACE, 5);

	block_allocator_init_rest();
}


void
slab_init_post_thread()
{
	new(&sMaintenanceQueue) MaintenanceQueue;
	sMaintenanceCondition.Init(&sMaintenanceQueue, "object cache maintainer");

	thread_id objectCacheResizer = spawn_kernel_thread(object_cache_maintainer,
		"object cache resizer", B_URGENT_PRIORITY, NULL);
	if (objectCacheResizer < 0) {
		panic("slab_init_post_thread(): failed to spawn object cache resizer "
			"thread\n");
		return;
	}

	resume_thread(objectCacheResizer);
}


RANGE_MARKER_FUNCTION_END(Slab)
1419