xref: /haiku/src/system/kernel/slab/Slab.cpp (revision 6e434fd80e4640c64031faf5e49720c5672fc470)
1 /*
2  * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
3  * Copyright 2008-2010, Axel Dörfler. All Rights Reserved.
4  * Copyright 2007, Hugo Santos. All Rights Reserved.
5  *
6  * Distributed under the terms of the MIT License.
7  */
8 
9 
10 #include <slab/Slab.h>
11 
12 #include <algorithm>
13 #include <new>
14 #include <stdlib.h>
15 #include <string.h>
16 
17 #include <KernelExport.h>
18 
19 #include <condition_variable.h>
20 #include <elf.h>
21 #include <kernel.h>
22 #include <low_resource_manager.h>
23 #include <slab/ObjectDepot.h>
24 #include <smp.h>
25 #include <tracing.h>
26 #include <util/AutoLock.h>
27 #include <util/DoublyLinkedList.h>
28 #include <util/khash.h>
29 #include <vm/vm.h>
30 #include <vm/VMAddressSpace.h>
31 
32 #include "HashedObjectCache.h"
33 #include "MemoryManager.h"
34 #include "slab_debug.h"
35 #include "slab_private.h"
36 #include "SmallObjectCache.h"
37 
38 
39 #if !USE_GUARDED_HEAP_FOR_OBJECT_CACHE
40 
41 
42 typedef DoublyLinkedList<ObjectCache> ObjectCacheList;
43 
44 typedef DoublyLinkedList<ObjectCache,
45 	DoublyLinkedListMemberGetLink<ObjectCache, &ObjectCache::maintenance_link> >
46 		MaintenanceQueue;
47 
48 static ObjectCacheList sObjectCaches;
49 static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");
50 
51 static mutex sMaintenanceLock
52 	= MUTEX_INITIALIZER("object cache resize requests");
53 static MaintenanceQueue sMaintenanceQueue;
54 static ConditionVariable sMaintenanceCondition;
55 
56 
57 #if SLAB_ALLOCATION_TRACKING_AVAILABLE
58 
59 struct caller_info {
60 	addr_t		caller;
61 	size_t		count;
62 	size_t		size;
63 };
64 
65 static const int32 kCallerInfoTableSize = 1024;
66 static caller_info sCallerInfoTable[kCallerInfoTableSize];
67 static int32 sCallerInfoCount = 0;
68 
69 static caller_info* get_caller_info(addr_t caller);
70 
71 
72 RANGE_MARKER_FUNCTION_PROTOTYPES(slab_allocator)
73 RANGE_MARKER_FUNCTION_PROTOTYPES(SlabHashedObjectCache)
74 RANGE_MARKER_FUNCTION_PROTOTYPES(SlabMemoryManager)
75 RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectCache)
76 RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectDepot)
77 RANGE_MARKER_FUNCTION_PROTOTYPES(Slab)
78 RANGE_MARKER_FUNCTION_PROTOTYPES(SlabSmallObjectCache)
79 
80 
81 static const addr_t kSlabCodeAddressRanges[] = {
82 	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(slab_allocator),
83 	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabHashedObjectCache),
84 	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabMemoryManager),
85 	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectCache),
86 	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectDepot),
87 	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(Slab),
88 	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabSmallObjectCache)
89 };
90 
91 static const uint32 kSlabCodeAddressRangeCount
92 	= sizeof(kSlabCodeAddressRanges) / sizeof(kSlabCodeAddressRanges[0]) / 2;
93 
94 #endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
95 
96 
97 RANGE_MARKER_FUNCTION_BEGIN(Slab)
98 
99 
100 #if SLAB_OBJECT_CACHE_TRACING
101 
102 
103 namespace SlabObjectCacheTracing {
104 
105 class ObjectCacheTraceEntry
106 	: public TRACE_ENTRY_SELECTOR(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE) {
107 	public:
108 		ObjectCacheTraceEntry(ObjectCache* cache)
109 			:
110 			TraceEntryBase(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE, 0, true),
111 			fCache(cache)
112 		{
113 		}
114 
115 	protected:
116 		ObjectCache*	fCache;
117 };
118 
119 
120 class Create : public ObjectCacheTraceEntry {
121 	public:
122 		Create(const char* name, size_t objectSize, size_t alignment,
123 				size_t maxByteUsage, uint32 flags, void* cookie,
124 				ObjectCache* cache)
125 			:
126 			ObjectCacheTraceEntry(cache),
127 			fObjectSize(objectSize),
128 			fAlignment(alignment),
129 			fMaxByteUsage(maxByteUsage),
130 			fFlags(flags),
131 			fCookie(cookie)
132 		{
133 			fName = alloc_tracing_buffer_strcpy(name, 64, false);
134 			Initialized();
135 		}
136 
137 		virtual void AddDump(TraceOutput& out)
138 		{
139 			out.Print("object cache create: name: \"%s\", object size: %lu, "
140 				"alignment: %lu, max usage: %lu, flags: 0x%lx, cookie: %p "
141 				"-> cache: %p", fName, fObjectSize, fAlignment, fMaxByteUsage,
142 					fFlags, fCookie, fCache);
143 		}
144 
145 	private:
146 		const char*	fName;
147 		size_t		fObjectSize;
148 		size_t		fAlignment;
149 		size_t		fMaxByteUsage;
150 		uint32		fFlags;
151 		void*		fCookie;
152 };
153 
154 
155 class Delete : public ObjectCacheTraceEntry {
156 	public:
157 		Delete(ObjectCache* cache)
158 			:
159 			ObjectCacheTraceEntry(cache)
160 		{
161 			Initialized();
162 		}
163 
164 		virtual void AddDump(TraceOutput& out)
165 		{
166 			out.Print("object cache delete: %p", fCache);
167 		}
168 };
169 
170 
171 class Alloc : public ObjectCacheTraceEntry {
172 	public:
173 		Alloc(ObjectCache* cache, uint32 flags, void* object)
174 			:
175 			ObjectCacheTraceEntry(cache),
176 			fFlags(flags),
177 			fObject(object)
178 		{
179 			Initialized();
180 		}
181 
182 		virtual void AddDump(TraceOutput& out)
183 		{
184 			out.Print("object cache alloc: cache: %p, flags: 0x%lx -> "
185 				"object: %p", fCache, fFlags, fObject);
186 		}
187 
188 	private:
189 		uint32		fFlags;
190 		void*		fObject;
191 };
192 
193 
194 class Free : public ObjectCacheTraceEntry {
195 	public:
196 		Free(ObjectCache* cache, void* object)
197 			:
198 			ObjectCacheTraceEntry(cache),
199 			fObject(object)
200 		{
201 			Initialized();
202 		}
203 
204 		virtual void AddDump(TraceOutput& out)
205 		{
206 			out.Print("object cache free: cache: %p, object: %p", fCache,
207 				fObject);
208 		}
209 
210 	private:
211 		void*		fObject;
212 };
213 
214 
215 class Reserve : public ObjectCacheTraceEntry {
216 	public:
217 		Reserve(ObjectCache* cache, size_t count, uint32 flags)
218 			:
219 			ObjectCacheTraceEntry(cache),
220 			fCount(count),
221 			fFlags(flags)
222 		{
223 			Initialized();
224 		}
225 
226 		virtual void AddDump(TraceOutput& out)
227 		{
228 			out.Print("object cache reserve: cache: %p, count: %lu, "
229 				"flags: 0x%lx", fCache, fCount, fFlags);
230 		}
231 
232 	private:
233 		uint32		fCount;
234 		uint32		fFlags;
235 };
236 
237 
238 }	// namespace SlabObjectCacheTracing
239 
240 #	define T(x)	new(std::nothrow) SlabObjectCacheTracing::x
241 
242 #else
243 #	define T(x)
244 #endif	// SLAB_OBJECT_CACHE_TRACING
245 
246 
247 // #pragma mark -
248 
249 
250 static void
251 dump_slab(::slab* slab)
252 {
253 	kprintf("  %p  %p  %6" B_PRIuSIZE " %6" B_PRIuSIZE " %6" B_PRIuSIZE "  %p\n",
254 		slab, slab->pages, slab->size, slab->count, slab->offset, slab->free);
255 }
256 
257 
258 static int
259 dump_slabs(int argc, char* argv[])
260 {
261 	kprintf("%10s %22s %8s %8s %8s %6s %8s %8s %8s\n", "address", "name",
262 		"objsize", "align", "usage", "empty", "usedobj", "total", "flags");
263 
264 	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
265 
266 	while (it.HasNext()) {
267 		ObjectCache* cache = it.Next();
268 
269 		kprintf("%p %22s %8lu %8" B_PRIuSIZE " %8lu %6lu %8lu %8lu %8lx\n",
270 			cache, cache->name, cache->object_size, cache->alignment,
271 			cache->usage, cache->empty_count, cache->used_count,
272 			cache->total_objects, cache->flags);
273 	}
274 
275 	return 0;
276 }
277 
278 
279 static int
280 dump_cache_info(int argc, char* argv[])
281 {
282 	if (argc < 2) {
283 		kprintf("usage: slab_cache [address]\n");
284 		return 0;
285 	}
286 
287 	ObjectCache* cache = (ObjectCache*)parse_expression(argv[1]);
288 
289 	kprintf("name:              %s\n", cache->name);
290 	kprintf("lock:              %p\n", &cache->lock);
291 	kprintf("object_size:       %lu\n", cache->object_size);
292 	kprintf("alignment:         %" B_PRIuSIZE "\n", cache->alignment);
293 	kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle);
294 	kprintf("total_objects:     %lu\n", cache->total_objects);
295 	kprintf("used_count:        %lu\n", cache->used_count);
296 	kprintf("empty_count:       %lu\n", cache->empty_count);
297 	kprintf("pressure:          %lu\n", cache->pressure);
298 	kprintf("slab_size:         %lu\n", cache->slab_size);
299 	kprintf("usage:             %lu\n", cache->usage);
300 	kprintf("maximum:           %lu\n", cache->maximum);
301 	kprintf("flags:             0x%lx\n", cache->flags);
302 	kprintf("cookie:            %p\n", cache->cookie);
303 	kprintf("resize entry don't wait: %p\n", cache->resize_entry_dont_wait);
304 	kprintf("resize entry can wait:   %p\n", cache->resize_entry_can_wait);
305 
306 	kprintf("  slab        chunk         size   used offset  free\n");
307 
308 	SlabList::Iterator iterator = cache->empty.GetIterator();
309 	if (iterator.HasNext())
310 		kprintf("empty:\n");
311 	while (::slab* slab = iterator.Next())
312 		dump_slab(slab);
313 
314 	iterator = cache->partial.GetIterator();
315 	if (iterator.HasNext())
316 		kprintf("partial:\n");
317 	while (::slab* slab = iterator.Next())
318 		dump_slab(slab);
319 
320 	iterator = cache->full.GetIterator();
321 	if (iterator.HasNext())
322 		kprintf("full:\n");
323 	while (::slab* slab = iterator.Next())
324 		dump_slab(slab);
325 
326 	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
327 		kprintf("depot:\n");
328 		dump_object_depot(&cache->depot);
329 	}
330 
331 	return 0;
332 }
333 
334 
335 // #pragma mark - AllocationTrackingCallback
336 
337 
338 #if SLAB_ALLOCATION_TRACKING_AVAILABLE
339 
340 AllocationTrackingCallback::~AllocationTrackingCallback()
341 {
342 }
343 
344 #endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
345 
346 
347 // #pragma mark -
348 
349 
350 #if SLAB_ALLOCATION_TRACKING_AVAILABLE
351 
352 namespace {
353 
354 class AllocationCollectorCallback : public AllocationTrackingCallback {
355 public:
356 	AllocationCollectorCallback(bool resetInfos)
357 		:
358 		fResetInfos(resetInfos)
359 	{
360 	}
361 
362 	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
363 		void* allocation, size_t allocationSize)
364 	{
365 		if (!info->IsInitialized())
366 			return true;
367 
368 		addr_t caller = 0;
369 		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
370 
371 		if (traceEntry != NULL && info->IsTraceEntryValid()) {
372 			caller = tracing_find_caller_in_stack_trace(
373 				traceEntry->StackTrace(), kSlabCodeAddressRanges,
374 				kSlabCodeAddressRangeCount);
375 		}
376 
377 		caller_info* callerInfo = get_caller_info(caller);
378 		if (callerInfo == NULL) {
379 			kprintf("out of space for caller infos\n");
380 			return false;
381 		}
382 
383 		callerInfo->count++;
384 		callerInfo->size += allocationSize;
385 
386 		if (fResetInfos)
387 			info->Clear();
388 
389 		return true;
390 	}
391 
392 private:
393 	bool	fResetInfos;
394 };
395 
396 
397 class AllocationInfoPrinterCallback : public AllocationTrackingCallback {
398 public:
399 	AllocationInfoPrinterCallback(bool printStackTrace, addr_t addressFilter,
400 		team_id teamFilter, thread_id threadFilter)
401 		:
402 		fPrintStackTrace(printStackTrace),
403 		fAddressFilter(addressFilter),
404 		fTeamFilter(teamFilter),
405 		fThreadFilter(threadFilter)
406 	{
407 	}
408 
409 	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
410 		void* allocation, size_t allocationSize)
411 	{
412 		if (!info->IsInitialized())
413 			return true;
414 
415 		if (fAddressFilter != 0 && (addr_t)allocation != fAddressFilter)
416 			return true;
417 
418 		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
419 		if (traceEntry != NULL && !info->IsTraceEntryValid())
420 			traceEntry = NULL;
421 
422 		if (traceEntry != NULL) {
423 			if (fTeamFilter != -1 && traceEntry->TeamID() != fTeamFilter)
424 				return true;
425 			if (fThreadFilter != -1 && traceEntry->ThreadID() != fThreadFilter)
426 				return true;
427 		} else {
428 			// we need the info if we have filters set
429 			if (fTeamFilter != -1 || fThreadFilter != -1)
430 				return true;
431 		}
432 
433 		kprintf("allocation %p, size: %" B_PRIuSIZE, allocation,
434 			allocationSize);
435 
436 		if (traceEntry != NULL) {
437 			kprintf(", team: %" B_PRId32 ", thread %" B_PRId32
438 				", time %" B_PRId64 "\n", traceEntry->TeamID(),
439 				traceEntry->ThreadID(), traceEntry->Time());
440 
441 			if (fPrintStackTrace)
442 				tracing_print_stack_trace(traceEntry->StackTrace());
443 		} else
444 			kprintf("\n");
445 
446 		return true;
447 	}
448 
449 private:
450 	bool		fPrintStackTrace;
451 	addr_t		fAddressFilter;
452 	team_id		fTeamFilter;
453 	thread_id	fThreadFilter;
454 };
455 
456 
457 class AllocationDetailPrinterCallback : public AllocationTrackingCallback {
458 public:
459 	AllocationDetailPrinterCallback(addr_t caller)
460 		:
461 		fCaller(caller)
462 	{
463 	}
464 
465 	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
466 		void* allocation, size_t allocationSize)
467 	{
468 		if (!info->IsInitialized())
469 			return true;
470 
471 		addr_t caller = 0;
472 		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
473 		if (traceEntry != NULL && !info->IsTraceEntryValid())
474 			traceEntry = NULL;
475 
476 		if (traceEntry != NULL) {
477 			caller = tracing_find_caller_in_stack_trace(
478 				traceEntry->StackTrace(), kSlabCodeAddressRanges,
479 				kSlabCodeAddressRangeCount);
480 		}
481 
482 		if (caller != fCaller)
483 			return true;
484 
485 		kprintf("allocation %p, size: %" B_PRIuSIZE "\n", allocation,
486 			allocationSize);
487 		if (traceEntry != NULL)
488 			tracing_print_stack_trace(traceEntry->StackTrace());
489 
490 		return true;
491 	}
492 
493 private:
494 	addr_t	fCaller;
495 };
496 
497 }	// unnamed namespace
498 
499 static caller_info*
500 get_caller_info(addr_t caller)
501 {
502 	// find the caller info
503 	for (int32 i = 0; i < sCallerInfoCount; i++) {
504 		if (caller == sCallerInfoTable[i].caller)
505 			return &sCallerInfoTable[i];
506 	}
507 
508 	// not found, add a new entry, if there are free slots
509 	if (sCallerInfoCount >= kCallerInfoTableSize)
510 		return NULL;
511 
512 	caller_info* info = &sCallerInfoTable[sCallerInfoCount++];
513 	info->caller = caller;
514 	info->count = 0;
515 	info->size = 0;
516 
517 	return info;
518 }
519 
520 
521 static int
522 caller_info_compare_size(const void* _a, const void* _b)
523 {
524 	const caller_info* a = (const caller_info*)_a;
525 	const caller_info* b = (const caller_info*)_b;
526 	return (int)(b->size - a->size);
527 }
528 
529 
530 static int
531 caller_info_compare_count(const void* _a, const void* _b)
532 {
533 	const caller_info* a = (const caller_info*)_a;
534 	const caller_info* b = (const caller_info*)_b;
535 	return (int)(b->count - a->count);
536 }
537 
538 
539 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
540 
541 static bool
542 analyze_allocation_callers(ObjectCache* cache, slab* slab,
543 	AllocationTrackingCallback& callback)
544 {
545 	for (uint32 i = 0; i < slab->size; i++) {
546 		if (!callback.ProcessTrackingInfo(&slab->tracking[i],
547 				cache->ObjectAtIndex(slab, i), cache->object_size)) {
548 			return false;
549 		}
550 	}
551 
552 	return true;
553 }
554 
555 
556 static bool
557 analyze_allocation_callers(ObjectCache* cache, const SlabList& slabList,
558 	AllocationTrackingCallback& callback)
559 {
560 	for (SlabList::ConstIterator it = slabList.GetIterator();
561 			slab* slab = it.Next();) {
562 		if (!analyze_allocation_callers(cache, slab, callback))
563 			return false;
564 	}
565 
566 	return true;
567 }
568 
569 
570 static bool
571 analyze_allocation_callers(ObjectCache* cache,
572 	AllocationTrackingCallback& callback)
573 {
574 	return analyze_allocation_callers(cache, cache->full, callback)
575 		&& analyze_allocation_callers(cache, cache->partial, callback);
576 }
577 
578 #endif	// SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
579 
580 
581 static int
582 dump_allocation_infos(int argc, char **argv)
583 {
584 	ObjectCache* cache = NULL;
585 	slab* slab = NULL;
586 	addr_t addressFilter = 0;
587 	team_id teamFilter = -1;
588 	thread_id threadFilter = -1;
589 	bool printStackTraces = false;
590 
591 	for (int32 i = 1; i < argc; i++) {
592 		if (strcmp(argv[i], "--stacktrace") == 0)
593 			printStackTraces = true;
594 		else if (strcmp(argv[i], "-a") == 0) {
595 			uint64 address;
596 			if (++i >= argc
597 				|| !evaluate_debug_expression(argv[i], &address, true)) {
598 				print_debugger_command_usage(argv[0]);
599 				return 0;
600 			}
601 
602 			addressFilter = address;
603 		} else if (strcmp(argv[i], "-o") == 0) {
604 			uint64 cacheAddress;
605 			if (++i >= argc
606 				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
607 				print_debugger_command_usage(argv[0]);
608 				return 0;
609 			}
610 
611 			cache = (ObjectCache*)(addr_t)cacheAddress;
612 		} else if (strcasecmp(argv[i], "-s") == 0) {
613 			uint64 slabAddress;
614 			if (++i >= argc
615 				|| !evaluate_debug_expression(argv[i], &slabAddress, true)) {
616 				print_debugger_command_usage(argv[0]);
617 				return 0;
618 			}
619 
620 			void* slabPages = (void*)slabAddress;
621 			if (strcmp(argv[i - 1], "-s") == 0) {
622 				slab = (struct slab*)(addr_t)slabAddress;
623 				slabPages = slab->pages;
624 			}
625 
626 			cache = MemoryManager::DebugObjectCacheForAddress(slabPages);
627 			if (cache == NULL) {
628 				kprintf("Couldn't find object cache for address %p.\n",
629 					slabPages);
630 				return 0;
631 			}
632 
633 			if (slab == NULL) {
634 				slab = cache->ObjectSlab(slabPages);
635 
636 				if (slab == NULL) {
637 					kprintf("Couldn't find slab for address %p.\n", slabPages);
638 					return 0;
639 				}
640 			}
641 		} else if (strcmp(argv[i], "--team") == 0) {
642 			uint64 team;
643 			if (++i >= argc
644 				|| !evaluate_debug_expression(argv[i], &team, true)) {
645 				print_debugger_command_usage(argv[0]);
646 				return 0;
647 			}
648 
649 			teamFilter = team;
650 		} else if (strcmp(argv[i], "--thread") == 0) {
651 			uint64 thread;
652 			if (++i >= argc
653 				|| !evaluate_debug_expression(argv[i], &thread, true)) {
654 				print_debugger_command_usage(argv[0]);
655 				return 0;
656 			}
657 
658 			threadFilter = thread;
659 		} else {
660 			print_debugger_command_usage(argv[0]);
661 			return 0;
662 		}
663 	}
664 
665 	AllocationInfoPrinterCallback callback(printStackTraces, addressFilter,
666 		teamFilter, threadFilter);
667 
668 	if (slab != NULL || cache != NULL) {
669 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
670 		if (slab != NULL) {
671 			if (!analyze_allocation_callers(cache, slab, callback))
672 				return 0;
673 		} else if (cache != NULL) {
674 			if (!analyze_allocation_callers(cache, callback))
675 				return 0;
676 		}
677 #else
678 		kprintf("Object cache allocation tracking not available. "
679 			"SLAB_OBJECT_CACHE_TRACING (%d) and "
680 			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
681 			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
682 		return 0;
683 #endif
684 	} else {
685 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
686 
687 		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
688 				it.HasNext();) {
689 			if (!analyze_allocation_callers(it.Next(), callback))
690 				return 0;
691 		}
692 #endif
693 
694 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
695 		if (!MemoryManager::AnalyzeAllocationCallers(callback))
696 			return 0;
697 #endif
698 	}
699 
700 	return 0;
701 }
702 
703 
704 static int
705 dump_allocations_per_caller(int argc, char **argv)
706 {
707 	bool sortBySize = true;
708 	bool resetAllocationInfos = false;
709 	bool printDetails = false;
710 	ObjectCache* cache = NULL;
711 	addr_t caller = 0;
712 
713 	for (int32 i = 1; i < argc; i++) {
714 		if (strcmp(argv[i], "-c") == 0) {
715 			sortBySize = false;
716 		} else if (strcmp(argv[i], "-d") == 0) {
717 			uint64 callerAddress;
718 			if (++i >= argc
719 				|| !evaluate_debug_expression(argv[i], &callerAddress, true)) {
720 				print_debugger_command_usage(argv[0]);
721 				return 0;
722 			}
723 
724 			caller = callerAddress;
725 			printDetails = true;
726 		} else if (strcmp(argv[i], "-o") == 0) {
727 			uint64 cacheAddress;
728 			if (++i >= argc
729 				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
730 				print_debugger_command_usage(argv[0]);
731 				return 0;
732 			}
733 
734 			cache = (ObjectCache*)(addr_t)cacheAddress;
735 		} else if (strcmp(argv[i], "-r") == 0) {
736 			resetAllocationInfos = true;
737 		} else {
738 			print_debugger_command_usage(argv[0]);
739 			return 0;
740 		}
741 	}
742 
743 	sCallerInfoCount = 0;
744 
745 	AllocationCollectorCallback collectorCallback(resetAllocationInfos);
746 	AllocationDetailPrinterCallback detailsCallback(caller);
747 	AllocationTrackingCallback& callback = printDetails
748 		? (AllocationTrackingCallback&)detailsCallback
749 		: (AllocationTrackingCallback&)collectorCallback;
750 
751 	if (cache != NULL) {
752 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
753 		if (!analyze_allocation_callers(cache, callback))
754 			return 0;
755 #else
756 		kprintf("Object cache allocation tracking not available. "
757 			"SLAB_OBJECT_CACHE_TRACING (%d) and "
758 			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
759 			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
760 		return 0;
761 #endif
762 	} else {
763 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
764 
765 		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
766 				it.HasNext();) {
767 			if (!analyze_allocation_callers(it.Next(), callback))
768 				return 0;
769 		}
770 #endif
771 
772 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
773 		if (!MemoryManager::AnalyzeAllocationCallers(callback))
774 			return 0;
775 #endif
776 	}
777 
778 	if (printDetails)
779 		return 0;
780 
781 	// sort the array
782 	qsort(sCallerInfoTable, sCallerInfoCount, sizeof(caller_info),
783 		sortBySize ? &caller_info_compare_size : &caller_info_compare_count);
784 
785 	kprintf("%ld different callers, sorted by %s...\n\n", sCallerInfoCount,
786 		sortBySize ? "size" : "count");
787 
788 	size_t totalAllocationSize = 0;
789 	size_t totalAllocationCount = 0;
790 
791 	kprintf("     count        size      caller\n");
792 	kprintf("----------------------------------\n");
793 	for (int32 i = 0; i < sCallerInfoCount; i++) {
794 		caller_info& info = sCallerInfoTable[i];
795 		kprintf("%10" B_PRIuSIZE "  %10" B_PRIuSIZE "  %p", info.count,
796 			info.size, (void*)info.caller);
797 
798 		const char* symbol;
799 		const char* imageName;
800 		bool exactMatch;
801 		addr_t baseAddress;
802 
803 		if (elf_debug_lookup_symbol_address(info.caller, &baseAddress, &symbol,
804 				&imageName, &exactMatch) == B_OK) {
805 			kprintf("  %s + %#" B_PRIxADDR " (%s)%s\n", symbol,
806 				info.caller - baseAddress, imageName,
807 				exactMatch ? "" : " (nearest)");
808 		} else
809 			kprintf("\n");
810 
811 		totalAllocationCount += info.count;
812 		totalAllocationSize += info.size;
813 	}
814 
815 	kprintf("\ntotal allocations: %" B_PRIuSIZE ", %" B_PRIuSIZE " bytes\n",
816 		totalAllocationCount, totalAllocationSize);
817 
818 	return 0;
819 }
820 
821 #endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
822 
823 
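/*!	Records a tracing entry for the allocation of \a object from \a cache.
	When per-object allocation tracking is enabled, the trace entry is also
	attached to the object's tracking info, which requires taking the cache
	lock; otherwise only the plain trace entry is written.
*/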
824 void
825 add_alloc_tracing_entry(ObjectCache* cache, uint32 flags, void* object)
826 {
827 #if SLAB_OBJECT_CACHE_TRACING
828 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
829 	MutexLocker _(cache->lock);
830 	cache->TrackingInfoFor(object)->Init(T(Alloc(cache, flags, object)));
831 #else
832 	T(Alloc(cache, flags, object));
833 #endif
834 #endif
835 }
836 
837 
838 // #pragma mark -
839 
840 
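/*!	Wakes up the object cache maintainer thread so that it can perform
	memory manager maintenance (see object_cache_maintainer()).
*/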
841 void
842 request_memory_manager_maintenance()
843 {
844 	MutexLocker locker(sMaintenanceLock);
845 	sMaintenanceCondition.NotifyAll();
846 }
847 
848 
849 // #pragma mark -
850 
851 
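/*!	Destroys \a cache: destroys its depot (unless CACHE_NO_DEPOT is set),
	returns all empty slabs and deletes the cache object itself. Remaining
	full or partial slabs indicate leaked objects and cause a panic.
*/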
852 static void
853 delete_object_cache_internal(object_cache* cache)
854 {
855 	if (!(cache->flags & CACHE_NO_DEPOT))
856 		object_depot_destroy(&cache->depot, 0);
857 
858 	mutex_lock(&cache->lock);
859 
860 	if (!cache->full.IsEmpty())
861 		panic("cache destroy: still has full slabs");
862 
863 	if (!cache->partial.IsEmpty())
864 		panic("cache destroy: still has partial slabs");
865 
866 	while (!cache->empty.IsEmpty())
867 		cache->ReturnSlab(cache->empty.RemoveHead(), 0);
868 
869 	mutex_destroy(&cache->lock);
870 	cache->Delete();
871 }
872 
873 
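/*!	Schedules \a cache for a resize by the maintainer thread, so that its
	minimum object reserve can be replenished asynchronously.
*/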
874 static void
875 increase_object_reserve(ObjectCache* cache)
876 {
877 	MutexLocker locker(sMaintenanceLock);
878 
879 	cache->maintenance_resize = true;
880 
881 	if (!cache->maintenance_pending) {
882 		cache->maintenance_pending = true;
883 		sMaintenanceQueue.Add(cache);
884 		sMaintenanceCondition.NotifyAll();
885 	}
886 }
887 
888 
889 /*!	Makes sure that \a objectCount objects can be allocated.
	The cache's lock must be held when calling this function.
890 */
891 static status_t
892 object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
893 	uint32 flags)
894 {
895 	// If someone else is already adding slabs, we wait for that to be finished
896 	// first.
897 	thread_id thread = find_thread(NULL);
898 	while (true) {
899 		if (objectCount <= cache->total_objects - cache->used_count)
900 			return B_OK;
901 
902 		ObjectCacheResizeEntry* resizeEntry = NULL;
903 		if (cache->resize_entry_dont_wait != NULL) {
904 			resizeEntry = cache->resize_entry_dont_wait;
905 			if (thread == resizeEntry->thread)
906 				return B_WOULD_BLOCK;
907 			// Note: We could still have reentered the function, i.e.
908 			// resize_entry_can_wait would be ours. That doesn't matter much,
909 			// though, since after the don't-wait thread has done its job
910 			// everyone will be happy.
911 		} else if (cache->resize_entry_can_wait != NULL) {
912 			resizeEntry = cache->resize_entry_can_wait;
913 			if (thread == resizeEntry->thread)
914 				return B_WOULD_BLOCK;
915 
916 			if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0)
917 				break;
918 		} else
919 			break;
920 
921 		ConditionVariableEntry entry;
922 		resizeEntry->condition.Add(&entry);
923 
924 		cache->Unlock();
925 		entry.Wait();
926 		cache->Lock();
927 	}
928 
929 	// prepare the resize entry others can wait on
930 	ObjectCacheResizeEntry*& resizeEntry
931 		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
932 			? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;
933 
934 	ObjectCacheResizeEntry myResizeEntry;
935 	resizeEntry = &myResizeEntry;
936 	resizeEntry->condition.Init(cache, "wait for slabs");
937 	resizeEntry->thread = thread;
938 
939 	// add new slabs until there are as many free ones as requested
940 	while (objectCount > cache->total_objects - cache->used_count) {
941 		slab* newSlab = cache->CreateSlab(flags);
942 		if (newSlab == NULL) {
943 			resizeEntry->condition.NotifyAll();
944 			resizeEntry = NULL;
945 			return B_NO_MEMORY;
946 		}
947 
948 		cache->usage += cache->slab_size;
949 		cache->total_objects += newSlab->size;
950 
951 		cache->empty.Add(newSlab);
952 		cache->empty_count++;
953 	}
954 
955 	resizeEntry->condition.NotifyAll();
956 	resizeEntry = NULL;
957 
958 	return B_OK;
959 }
960 
961 
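/*!	Low resource handler: walks all object caches, gives their owners a
	chance to reclaim objects, empties the depots and returns empty slabs,
	keeping more or fewer of them depending on the severity \a level and the
	cache's recent allocation pressure.
*/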
962 static void
963 object_cache_low_memory(void* dummy, uint32 resources, int32 level)
964 {
965 	if (level == B_NO_LOW_RESOURCE)
966 		return;
967 
968 	MutexLocker cacheListLocker(sObjectCacheListLock);
969 
970 	// Append the first cache to the end of the queue. We assume that it is
971 	// one of the caches that will never be deleted and thus we use it as a
972 	// marker.
973 	ObjectCache* firstCache = sObjectCaches.RemoveHead();
974 	sObjectCaches.Add(firstCache);
975 	cacheListLocker.Unlock();
976 
977 	ObjectCache* cache;
978 	do {
979 		cacheListLocker.Lock();
980 
981 		cache = sObjectCaches.RemoveHead();
982 		sObjectCaches.Add(cache);
983 
984 		MutexLocker maintenanceLocker(sMaintenanceLock);
985 		if (cache->maintenance_pending || cache->maintenance_in_progress) {
986 			// We don't want to mess with caches in maintenance.
987 			continue;
988 		}
989 
990 		cache->maintenance_pending = true;
991 		cache->maintenance_in_progress = true;
992 
993 		maintenanceLocker.Unlock();
994 		cacheListLocker.Unlock();
995 
996 		// We are calling the reclaimer without the object cache lock
997 		// to give the owner a chance to return objects to the slabs.
998 
999 		if (cache->reclaimer)
1000 			cache->reclaimer(cache->cookie, level);
1001 
1002 		if ((cache->flags & CACHE_NO_DEPOT) == 0)
1003 			object_depot_make_empty(&cache->depot, 0);
1004 
1005 		MutexLocker cacheLocker(cache->lock);
1006 		size_t minimumAllowed;
1007 
1008 		switch (level) {
1009 			case B_LOW_RESOURCE_NOTE:
1010 				minimumAllowed = cache->pressure / 2 + 1;
1011 				cache->pressure -= cache->pressure / 8;
1012 				break;
1013 
1014 			case B_LOW_RESOURCE_WARNING:
1015 				cache->pressure /= 2;
1016 				minimumAllowed = 0;
1017 				break;
1018 
1019 			default:
1020 				cache->pressure = 0;
1021 				minimumAllowed = 0;
1022 				break;
1023 		}
1024 
1025 		while (cache->empty_count > minimumAllowed) {
1026 			// make sure we respect the cache's minimum object reserve
1027 			size_t objectsPerSlab = cache->empty.Head()->size;
1028 			size_t freeObjects = cache->total_objects - cache->used_count;
1029 			if (freeObjects < cache->min_object_reserve + objectsPerSlab)
1030 				break;
1031 
1032 			cache->ReturnSlab(cache->empty.RemoveHead(), 0);
1033 			cache->empty_count--;
1034 		}
1035 
1036 		cacheLocker.Unlock();
1037 
1038 		// Check whether in the meantime someone has really requested
1039 		// maintenance for the cache.
1040 		maintenanceLocker.Lock();
1041 
1042 		if (cache->maintenance_delete) {
1043 			delete_object_cache_internal(cache);
1044 			continue;
1045 		}
1046 
1047 		cache->maintenance_in_progress = false;
1048 
1049 		if (cache->maintenance_resize)
1050 			sMaintenanceQueue.Add(cache);
1051 		else
1052 			cache->maintenance_pending = false;
1053 	} while (cache != firstCache);
1054 }
1055 
1056 
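/*!	Main loop of the "object cache resizer" thread: performs memory manager
	maintenance while idle, and processes queued object cache requests,
	i.e. resizing a cache to its minimum object reserve or deleting it.
*/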
1057 static status_t
1058 object_cache_maintainer(void*)
1059 {
1060 	while (true) {
1061 		MutexLocker locker(sMaintenanceLock);
1062 
1063 		// wait for the next request
1064 		while (sMaintenanceQueue.IsEmpty()) {
1065 			// perform memory manager maintenance, if needed
1066 			if (MemoryManager::MaintenanceNeeded()) {
1067 				locker.Unlock();
1068 				MemoryManager::PerformMaintenance();
1069 				locker.Lock();
1070 				continue;
1071 			}
1072 
1073 			ConditionVariableEntry entry;
1074 			sMaintenanceCondition.Add(&entry);
1075 			locker.Unlock();
1076 			entry.Wait();
1077 			locker.Lock();
1078 		}
1079 
1080 		ObjectCache* cache = sMaintenanceQueue.RemoveHead();
1081 
1082 		while (true) {
1083 			bool resizeRequested = cache->maintenance_resize;
1084 			bool deleteRequested = cache->maintenance_delete;
1085 
1086 			if (!resizeRequested && !deleteRequested) {
1087 				cache->maintenance_pending = false;
1088 				cache->maintenance_in_progress = false;
1089 				break;
1090 			}
1091 
1092 			cache->maintenance_resize = false;
1093 			cache->maintenance_in_progress = true;
1094 
1095 			locker.Unlock();
1096 
1097 			if (deleteRequested) {
1098 				delete_object_cache_internal(cache);
1099 				break;
1100 			}
1101 
1102 			// resize the cache, if necessary
1103 
1104 			MutexLocker cacheLocker(cache->lock);
1105 
1106 			if (resizeRequested) {
1107 				status_t error = object_cache_reserve_internal(cache,
1108 					cache->min_object_reserve, 0);
1109 				if (error != B_OK) {
1110 					dprintf("object cache resizer: Failed to resize object "
1111 						"cache %p!\n", cache);
1112 					break;
1113 				}
1114 			}
1115 
1116 			locker.Lock();
1117 		}
1118 	}
1119 
1120 	// we never get here
1121 	return B_OK;
1122 }
1123 
1124 
1125 // #pragma mark - public API
1126 
1127 
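/*!	Creates an object cache for objects of size \a object_size; all the
	additional create_object_cache_etc() parameters are passed as 0 (no
	maximum, default magazine parameters, no flags, no reclaimer).

	Minimal usage sketch (illustrative only -- "my_object" and the calling
	code are hypothetical, not part of this file):

		object_cache* cache = create_object_cache("my objects",
			sizeof(my_object), 0, NULL, NULL, NULL);
		my_object* object = (my_object*)object_cache_alloc(cache, 0);
		// ... use the object ...
		object_cache_free(cache, object, 0);
		delete_object_cache(cache);
*/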
1128 object_cache*
1129 create_object_cache(const char* name, size_t object_size, size_t alignment,
1130 	void* cookie, object_cache_constructor constructor,
1131 	object_cache_destructor destructor)
1132 {
1133 	return create_object_cache_etc(name, object_size, alignment, 0, 0, 0, 0,
1134 		cookie, constructor, destructor, NULL);
1135 }
1136 
1137 
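/*!	Creates an object cache for objects of size \a objectSize. Objects of up
	to 256 bytes are backed by a SmallObjectCache, larger ones by a
	HashedObjectCache; an \a objectSize of 0 yields NULL. On success the
	cache is registered in the global cache list.
*/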
1138 object_cache*
1139 create_object_cache_etc(const char* name, size_t objectSize, size_t alignment,
1140 	size_t maximum, size_t magazineCapacity, size_t maxMagazineCount,
1141 	uint32 flags, void* cookie, object_cache_constructor constructor,
1142 	object_cache_destructor destructor, object_cache_reclaimer reclaimer)
1143 {
1144 	ObjectCache* cache;
1145 
1146 	if (objectSize == 0) {
1147 		cache = NULL;
1148 	} else if (objectSize <= 256) {
1149 		cache = SmallObjectCache::Create(name, objectSize, alignment, maximum,
1150 			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
1151 			destructor, reclaimer);
1152 	} else {
1153 		cache = HashedObjectCache::Create(name, objectSize, alignment, maximum,
1154 			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
1155 			destructor, reclaimer);
1156 	}
1157 
1158 	if (cache != NULL) {
1159 		MutexLocker _(sObjectCacheListLock);
1160 		sObjectCaches.Add(cache);
1161 	}
1162 
1163 	T(Create(name, objectSize, alignment, maximum, flags, cookie, cache));
1164 	return cache;
1165 }
1166 
1167 
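/*!	Removes \a cache from the global list and destroys it. If the maintainer
	thread is currently working with the cache, actual destruction is
	deferred to that thread; otherwise any pending maintenance is cancelled
	and the cache is deleted right away.
*/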
1168 void
1169 delete_object_cache(object_cache* cache)
1170 {
1171 	T(Delete(cache));
1172 
1173 	{
1174 		MutexLocker _(sObjectCacheListLock);
1175 		sObjectCaches.Remove(cache);
1176 	}
1177 
1178 	MutexLocker cacheLocker(cache->lock);
1179 
1180 	{
1181 		MutexLocker maintenanceLocker(sMaintenanceLock);
1182 		if (cache->maintenance_in_progress) {
1183 			// The maintainer thread is working with the cache. Just mark it
1184 			// to be deleted.
1185 			cache->maintenance_delete = true;
1186 			return;
1187 		}
1188 
1189 		// unschedule maintenance
1190 		if (cache->maintenance_pending)
1191 			sMaintenanceQueue.Remove(cache);
1192 	}
1193 
1194 	// at this point no-one should have a reference to the cache anymore
1195 	cacheLocker.Unlock();
1196 
1197 	delete_object_cache_internal(cache);
1198 }
1199 
1200 
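/*!	Sets the minimum number of objects the cache keeps in reserve and
	schedules a resize through the maintainer thread.
*/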
1201 status_t
1202 object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
1203 {
1204 	MutexLocker _(cache->lock);
1205 
1206 	if (cache->min_object_reserve == objectCount)
1207 		return B_OK;
1208 
1209 	cache->min_object_reserve = objectCount;
1210 
1211 	increase_object_reserve(cache);
1212 
1213 	return B_OK;
1214 }
1215 
1216 
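/*!	Allocates an object from \a cache. The object is taken from the depot
	if possible; otherwise it comes from a partial or empty slab, creating
	new slabs as needed (honoring \a flags, e.g. CACHE_DONT_WAIT_FOR_MEMORY).
	Returns NULL if no memory could be obtained.
*/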
1217 void*
1218 object_cache_alloc(object_cache* cache, uint32 flags)
1219 {
1220 	if (!(cache->flags & CACHE_NO_DEPOT)) {
1221 		void* object = object_depot_obtain(&cache->depot);
1222 		if (object) {
1223 			add_alloc_tracing_entry(cache, flags, object);
1224 			return fill_allocated_block(object, cache->object_size);
1225 		}
1226 	}
1227 
1228 	MutexLocker locker(cache->lock);
1229 	slab* source = NULL;
1230 
1231 	while (true) {
1232 		source = cache->partial.Head();
1233 		if (source != NULL)
1234 			break;
1235 
1236 		source = cache->empty.RemoveHead();
1237 		if (source != NULL) {
1238 			cache->empty_count--;
1239 			cache->partial.Add(source);
1240 			break;
1241 		}
1242 
1243 		if (object_cache_reserve_internal(cache, 1, flags) != B_OK) {
1244 			T(Alloc(cache, flags, NULL));
1245 			return NULL;
1246 		}
1247 
1248 		cache->pressure++;
1249 	}
1250 
1251 	ParanoiaChecker _2(source);
1252 
1253 	object_link* link = _pop(source->free);
1254 	source->count--;
1255 	cache->used_count++;
1256 
1257 	if (cache->total_objects - cache->used_count < cache->min_object_reserve)
1258 		increase_object_reserve(cache);
1259 
1260 	REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next,
1261 		sizeof(void*));
1262 
1263 	TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.",
1264 		link_to_object(link, cache->object_size), link, source, source->count);
1265 
1266 	if (source->count == 0) {
1267 		cache->partial.Remove(source);
1268 		cache->full.Add(source);
1269 	}
1270 
1271 	void* object = link_to_object(link, cache->object_size);
1272 	locker.Unlock();
1273 
1274 	add_alloc_tracing_entry(cache, flags, object);
1275 	return fill_allocated_block(object, cache->object_size);
1276 }
1277 
1278 
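/*!	Returns \a object to \a cache. Unless the cache was created with
	CACHE_NO_DEPOT, the object is put back into the depot; otherwise it is
	returned to its slab directly. With PARANOID_KERNEL_FREE enabled, a
	simple double-free check is performed first.
*/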
1279 void
1280 object_cache_free(object_cache* cache, void* object, uint32 flags)
1281 {
1282 	if (object == NULL)
1283 		return;
1284 
1285 	T(Free(cache, object));
1286 
1287 #if PARANOID_KERNEL_FREE
1288 	// TODO: allow forcing the check even if we don't find deadbeef
1289 	if (*(uint32*)object == 0xdeadbeef) {
1290 		if (!cache->AssertObjectNotFreed(object))
1291 			return;
1292 
1293 		if ((cache->flags & CACHE_NO_DEPOT) == 0) {
1294 			if (object_depot_contains_object(&cache->depot, object)) {
1295 				panic("object_cache: object %p is already freed", object);
1296 				return;
1297 			}
1298 		}
1299 	}
1300 
1301 	fill_freed_block(object, cache->object_size);
1302 #endif
1303 
1304 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
1305 	mutex_lock(&cache->lock);
1306 	cache->TrackingInfoFor(object)->Clear();
1307 	mutex_unlock(&cache->lock);
1308 #endif
1309 
1310 	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
1311 		object_depot_store(&cache->depot, object, flags);
1312 		return;
1313 	}
1314 
1315 	MutexLocker _(cache->lock);
1316 	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
1317 }
1318 
1319 
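/*!	Makes sure that \a objectCount objects are kept available in \a cache,
	adding new slabs as necessary (see object_cache_reserve_internal()).
*/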
1320 status_t
1321 object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
1322 {
1323 	if (objectCount == 0)
1324 		return B_OK;
1325 
1326 	T(Reserve(cache, objectCount, flags));
1327 
1328 	MutexLocker _(cache->lock);
1329 	return object_cache_reserve_internal(cache, objectCount, flags);
1330 }
1331 
1332 
1333 void
1334 object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
1335 {
1336 	MutexLocker _(cache->lock);
1337 	*_allocatedMemory = cache->usage;
1338 }
1339 
1340 
1341 void
1342 slab_init(kernel_args* args)
1343 {
1344 	MemoryManager::Init(args);
1345 
1346 	new (&sObjectCaches) ObjectCacheList();
1347 
1348 	block_allocator_init_boot();
1349 }
1350 
1351 
1352 void
1353 slab_init_post_area()
1354 {
1355 	MemoryManager::InitPostArea();
1356 
1357 	add_debugger_command("slabs", dump_slabs, "list all object caches");
1358 	add_debugger_command("slab_cache", dump_cache_info,
1359 		"dump information about a specific object cache");
1360 	add_debugger_command("slab_depot", dump_object_depot,
1361 		"dump contents of an object depot");
1362 	add_debugger_command("slab_magazine", dump_depot_magazine,
1363 		"dump contents of a depot magazine");
1364 #if SLAB_ALLOCATION_TRACKING_AVAILABLE
1365 	add_debugger_command_etc("allocations_per_caller",
1366 		&dump_allocations_per_caller,
1367 		"Dump current slab allocations summed up per caller",
1368 		"[ -c ] [ -d <caller> ] [ -o <object cache> ] [ -r ]\n"
1369 		"The current allocations will be summed up by caller (their count and\n"
1370 		"size), printed in decreasing order by size or, if \"-c\" is\n"
1371 		"specified, by allocation count. If given, <object cache> specifies\n"
1372 		"the address of the object cache for which to print the allocations.\n"
1373 		"If \"-d\" is given, each allocation for caller <caller> is printed\n"
1374 		"including the respective stack trace.\n"
1375 		"If \"-r\" is given, the allocation infos are reset after gathering\n"
1376 		"the information, so the next command invocation will only show the\n"
1377 		"allocations made after the reset.\n", 0);
1378 	add_debugger_command_etc("allocation_infos",
1379 		&dump_allocation_infos,
1380 		"Dump current slab allocations",
1381 		"[ --stacktrace ] [ -o <object cache> | -s <slab> | -S <address> ] "
1382 		"[ -a <allocation> ] [ --team <team ID> ] [ --thread <thread ID> ]\n"
1383 		"The current allocations filtered by optional values will be printed.\n"
1384 		"If given, <object cache> specifies the address of the object cache\n"
1385 		"or <slab> specifies the address of a slab, for which to print the\n"
1386 		"allocations. Alternatively <address> specifies any address within\n"
1387 		"a slab allocation range.\n"
1388 		"The optional \"-a\" address filters for a specific allocation.\n"
1389 		"With \"--team\" and \"--thread\", allocations can be filtered by\n"
1390 		"specific teams and/or threads (these filters only work if a\n"
1391 		"corresponding tracing entry is still available).\n"
1392 		"If \"--stacktrace\" is given, then stack traces of the allocation\n"
1393 		"callers are printed, where available.\n", 0);
1394 #endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
1395 }
1396 
1397 
1398 void
1399 slab_init_post_sem()
1400 {
1401 	register_low_resource_handler(object_cache_low_memory, NULL,
1402 		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY
1403 			| B_KERNEL_RESOURCE_ADDRESS_SPACE, 5);
1404 
1405 	block_allocator_init_rest();
1406 }
1407 
1408 
1409 void
1410 slab_init_post_thread()
1411 {
1412 	new(&sMaintenanceQueue) MaintenanceQueue;
1413 	sMaintenanceCondition.Init(&sMaintenanceQueue, "object cache maintainer");
1414 
1415 	thread_id objectCacheResizer = spawn_kernel_thread(object_cache_maintainer,
1416 		"object cache resizer", B_URGENT_PRIORITY, NULL);
1417 	if (objectCacheResizer < 0) {
1418 		panic("slab_init_post_thread(): failed to spawn object cache resizer "
1419 			"thread\n");
1420 		return;
1421 	}
1422 
1423 	resume_thread(objectCacheResizer);
1424 }
1425 
1426 
1427 RANGE_MARKER_FUNCTION_END(Slab)
1428 
1429 
1430 #endif	// !USE_GUARDED_HEAP_FOR_OBJECT_CACHE
1431