// xref: /haiku/src/system/kernel/slab/Slab.cpp
// (revision eea5774f46bba925156498abf9cb1a1165647bf7)
/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2008-2010, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include <slab/Slab.h>

#include <algorithm>
#include <new>
#include <stdlib.h>
#include <string.h>

#include <KernelExport.h>

#include <condition_variable.h>
#include <elf.h>
#include <kernel.h>
#include <low_resource_manager.h>
#include <slab/ObjectDepot.h>
#include <smp.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "HashedObjectCache.h"
#include "MemoryManager.h"
#include "slab_debug.h"
#include "slab_private.h"
#include "SmallObjectCache.h"


#if !USE_GUARDED_HEAP_FOR_OBJECT_CACHE


typedef DoublyLinkedList<ObjectCache> ObjectCacheList;

typedef DoublyLinkedList<ObjectCache,
	DoublyLinkedListMemberGetLink<ObjectCache, &ObjectCache::maintenance_link> >
		MaintenanceQueue;

static ObjectCacheList sObjectCaches;
static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");

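// Caches that need to be resized or deleted asynchronously are queued here
// and are processed by the maintainer thread (see object_cache_maintainer()).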
static mutex sMaintenanceLock
	= MUTEX_INITIALIZER("object cache resize requests");
static MaintenanceQueue sMaintenanceQueue;
static ConditionVariable sMaintenanceCondition;


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

struct caller_info {
	addr_t		caller;
	size_t		count;
	size_t		size;
};

static const int32 kCallerInfoTableSize = 1024;
static caller_info sCallerInfoTable[kCallerInfoTableSize];
static int32 sCallerInfoCount = 0;

static caller_info* get_caller_info(addr_t caller);


RANGE_MARKER_FUNCTION_PROTOTYPES(slab_allocator)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabHashedObjectCache)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabMemoryManager)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectCache)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectDepot)
RANGE_MARKER_FUNCTION_PROTOTYPES(Slab)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabSmallObjectCache)


static const addr_t kSlabCodeAddressRanges[] = {
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(slab_allocator),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabHashedObjectCache),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabMemoryManager),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectCache),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectDepot),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(Slab),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabSmallObjectCache)
};

static const uint32 kSlabCodeAddressRangeCount
	= B_COUNT_OF(kSlabCodeAddressRanges) / 2;

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE


RANGE_MARKER_FUNCTION_BEGIN(Slab)


#if SLAB_OBJECT_CACHE_TRACING


namespace SlabObjectCacheTracing {

class ObjectCacheTraceEntry
	: public TRACE_ENTRY_SELECTOR(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE) {
	public:
		ObjectCacheTraceEntry(ObjectCache* cache)
			:
			TraceEntryBase(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE, 0, true),
			fCache(cache)
		{
		}

	protected:
		ObjectCache*	fCache;
};


class Create : public ObjectCacheTraceEntry {
	public:
		Create(const char* name, size_t objectSize, size_t alignment,
				size_t maxByteUsage, uint32 flags, void* cookie,
				ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache),
			fObjectSize(objectSize),
			fAlignment(alignment),
			fMaxByteUsage(maxByteUsage),
			fFlags(flags),
			fCookie(cookie)
		{
			fName = alloc_tracing_buffer_strcpy(name, 64, false);
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache create: name: \"%s\", object size: "
				"%" B_PRIuSIZE ", alignment: %" B_PRIuSIZE ", max usage: "
				"%" B_PRIuSIZE ", flags: 0x%" B_PRIx32 ", cookie: %p -> cache: %p",
					fName, fObjectSize, fAlignment, fMaxByteUsage, fFlags,
					fCookie, fCache);
		}

	private:
		const char*	fName;
		size_t		fObjectSize;
		size_t		fAlignment;
		size_t		fMaxByteUsage;
		uint32		fFlags;
		void*		fCookie;
};


class Delete : public ObjectCacheTraceEntry {
	public:
		Delete(ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache delete: %p", fCache);
		}
};


class Alloc : public ObjectCacheTraceEntry {
	public:
		Alloc(ObjectCache* cache, uint32 flags, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fFlags(flags),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache alloc: cache: %p, flags: 0x%" B_PRIx32
				" -> object: %p", fCache, fFlags, fObject);
		}

	private:
		uint32		fFlags;
		void*		fObject;
};


class Free : public ObjectCacheTraceEntry {
	public:
		Free(ObjectCache* cache, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache free: cache: %p, object: %p", fCache,
				fObject);
		}

	private:
		void*		fObject;
};

class Reserve : public ObjectCacheTraceEntry {
	public:
		Reserve(ObjectCache* cache, size_t count, uint32 flags)
			:
			ObjectCacheTraceEntry(cache),
			fCount(count),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache reserve: cache: %p, count: %" B_PRIuSIZE
				", flags: 0x%" B_PRIx32, fCache, fCount, fFlags);
		}

	private:
		size_t		fCount;
		uint32		fFlags;
};

}	// namespace SlabObjectCacheTracing

#	define T(x)	new(std::nothrow) SlabObjectCacheTracing::x

#else
#	define T(x)
#endif	// SLAB_OBJECT_CACHE_TRACING


// #pragma mark -

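// KDL helper: prints a one-line summary of \a slab for dump_cache_info().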
static void
dump_slab(::slab* slab)
{
	kprintf("  %p  %p  %6" B_PRIuSIZE " %6" B_PRIuSIZE " %6" B_PRIuSIZE "  %p\n",
		slab, slab->pages, slab->size, slab->count, slab->offset, slab->free);
}

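// KDL command "slabs": lists all registered object caches together with
// their usage statistics.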
static int
dump_slabs(int argc, char* argv[])
{
	kprintf("%*s %22s %8s %8s %8s %6s %8s %8s %8s\n",
		B_PRINTF_POINTER_WIDTH + 2, "address", "name", "objsize", "align",
		"usage", "empty", "usedobj", "total", "flags");

	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();

	while (it.HasNext()) {
		ObjectCache* cache = it.Next();

		kprintf("%p %22s %8" B_PRIuSIZE " %8" B_PRIuSIZE " %8" B_PRIuSIZE
			" %6" B_PRIuSIZE " %8" B_PRIuSIZE " %8" B_PRIuSIZE " %8" B_PRIx32
			"\n", cache, cache->name, cache->object_size, cache->alignment,
			cache->usage, cache->empty_count, cache->used_count,
			cache->total_objects, cache->flags);
	}

	return 0;
}

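// KDL command "slab_cache": dumps the state of a single object cache,
// including its empty, partial, and full slab lists and, unless
// CACHE_NO_DEPOT is set, its depot.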
static int
dump_cache_info(int argc, char* argv[])
{
	if (argc < 2) {
		kprintf("usage: slab_cache <address>\n");
		return 0;
	}

	ObjectCache* cache = (ObjectCache*)parse_expression(argv[1]);

	kprintf("name:              %s\n", cache->name);
	kprintf("lock:              %p\n", &cache->lock);
	kprintf("object_size:       %" B_PRIuSIZE "\n", cache->object_size);
	kprintf("alignment:         %" B_PRIuSIZE "\n", cache->alignment);
	kprintf("cache_color_cycle: %" B_PRIuSIZE "\n", cache->cache_color_cycle);
	kprintf("total_objects:     %" B_PRIuSIZE "\n", cache->total_objects);
	kprintf("used_count:        %" B_PRIuSIZE "\n", cache->used_count);
	kprintf("empty_count:       %" B_PRIuSIZE "\n", cache->empty_count);
	kprintf("pressure:          %" B_PRIuSIZE "\n", cache->pressure);
	kprintf("slab_size:         %" B_PRIuSIZE "\n", cache->slab_size);
	kprintf("usage:             %" B_PRIuSIZE "\n", cache->usage);
	kprintf("maximum:           %" B_PRIuSIZE "\n", cache->maximum);
	kprintf("flags:             0x%" B_PRIx32 "\n", cache->flags);
	kprintf("cookie:            %p\n", cache->cookie);
	kprintf("resize entry don't wait: %p\n", cache->resize_entry_dont_wait);
	kprintf("resize entry can wait:   %p\n", cache->resize_entry_can_wait);

	kprintf("  %-*s    %-*s      size   used offset  free\n",
		B_PRINTF_POINTER_WIDTH, "slab", B_PRINTF_POINTER_WIDTH, "chunk");

	SlabList::Iterator iterator = cache->empty.GetIterator();
	if (iterator.HasNext())
		kprintf("empty:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->partial.GetIterator();
	if (iterator.HasNext())
		kprintf("partial:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->full.GetIterator();
	if (iterator.HasNext())
		kprintf("full:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		kprintf("depot:\n");
		dump_object_depot(&cache->depot);
	}

	return 0;
}

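// KDL command "slab_object": looks up which object cache the given address
// belongs to.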
static int
dump_object_info(int argc, char* argv[])
{
	if (argc < 2) {
		kprintf("usage: slab_object <address>\n");
		return 0;
	}

	void* object = (void*)parse_expression(argv[1]);
	ObjectCache* cache = MemoryManager::DebugObjectCacheForAddress(object);
	if (cache == NULL) {
		kprintf("%p does not seem to be in an object_cache\n", object);
		return 1;
	}

	kprintf("address %p\n", object);
	kprintf("\tobject_cache\t%p (%s)\n", cache, cache->name);
	return 0;
}

// #pragma mark - AllocationTrackingCallback


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

AllocationTrackingCallback::~AllocationTrackingCallback()
{
}

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE


// #pragma mark -


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

namespace {

class AllocationCollectorCallback : public AllocationTrackingCallback {
public:
	AllocationCollectorCallback(bool resetInfos)
		:
		fResetInfos(resetInfos)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		addr_t caller = 0;
		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();

		if (traceEntry != NULL && info->IsTraceEntryValid()) {
			caller = tracing_find_caller_in_stack_trace(
				traceEntry->StackTrace(), kSlabCodeAddressRanges,
				kSlabCodeAddressRangeCount);
		}

		caller_info* callerInfo = get_caller_info(caller);
		if (callerInfo == NULL) {
			kprintf("out of space for caller infos\n");
			return false;
		}

		callerInfo->count++;
		callerInfo->size += allocationSize;

		if (fResetInfos)
			info->Clear();

		return true;
	}

private:
	bool	fResetInfos;
};


class AllocationInfoPrinterCallback : public AllocationTrackingCallback {
public:
	AllocationInfoPrinterCallback(bool printStackTrace, addr_t addressFilter,
		team_id teamFilter, thread_id threadFilter)
		:
		fPrintStackTrace(printStackTrace),
		fAddressFilter(addressFilter),
		fTeamFilter(teamFilter),
		fThreadFilter(threadFilter)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		if (fAddressFilter != 0 && (addr_t)allocation != fAddressFilter)
			return true;

		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
		if (traceEntry != NULL && !info->IsTraceEntryValid())
			traceEntry = NULL;

		if (traceEntry != NULL) {
			if (fTeamFilter != -1 && traceEntry->TeamID() != fTeamFilter)
				return true;
			if (fThreadFilter != -1 && traceEntry->ThreadID() != fThreadFilter)
				return true;
		} else {
			// Without the trace entry we cannot apply the team/thread
			// filters, so skip this allocation if any are set.
			if (fTeamFilter != -1 || fThreadFilter != -1)
				return true;
		}

		kprintf("allocation %p, size: %" B_PRIuSIZE, allocation,
			allocationSize);

		if (traceEntry != NULL) {
			kprintf(", team: %" B_PRId32 ", thread %" B_PRId32
				", time %" B_PRId64 "\n", traceEntry->TeamID(),
				traceEntry->ThreadID(), traceEntry->Time());

			if (fPrintStackTrace)
				tracing_print_stack_trace(traceEntry->StackTrace());
		} else
			kprintf("\n");

		return true;
	}

private:
	bool		fPrintStackTrace;
	addr_t		fAddressFilter;
	team_id		fTeamFilter;
	thread_id	fThreadFilter;
};


class AllocationDetailPrinterCallback : public AllocationTrackingCallback {
public:
	AllocationDetailPrinterCallback(addr_t caller)
		:
		fCaller(caller)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		addr_t caller = 0;
		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
		if (traceEntry != NULL && !info->IsTraceEntryValid())
			traceEntry = NULL;

		if (traceEntry != NULL) {
			caller = tracing_find_caller_in_stack_trace(
				traceEntry->StackTrace(), kSlabCodeAddressRanges,
				kSlabCodeAddressRangeCount);
		}

		if (caller != fCaller)
			return true;

		kprintf("allocation %p, size: %" B_PRIuSIZE "\n", allocation,
			allocationSize);
		if (traceEntry != NULL)
			tracing_print_stack_trace(traceEntry->StackTrace());

		return true;
	}

private:
	addr_t	fCaller;
};

}	// unnamed namespace

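
// Returns the caller_info table entry for \a caller, adding a new one if
// necessary. Returns NULL when the table is full.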
static caller_info*
get_caller_info(addr_t caller)
{
	// find the caller info
	for (int32 i = 0; i < sCallerInfoCount; i++) {
		if (caller == sCallerInfoTable[i].caller)
			return &sCallerInfoTable[i];
	}

	// not found, add a new entry, if there are free slots
	if (sCallerInfoCount >= kCallerInfoTableSize)
		return NULL;

	caller_info* info = &sCallerInfoTable[sCallerInfoCount++];
	info->caller = caller;
	info->count = 0;
	info->size = 0;

	return info;
}

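// qsort() comparators for the caller info table; both sort in decreasing
// order. The explicit comparisons (instead of returning the difference)
// avoid overflow when the values don't fit into an int.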
static int
caller_info_compare_size(const void* _a, const void* _b)
{
	const caller_info* a = (const caller_info*)_a;
	const caller_info* b = (const caller_info*)_b;
	if (a->size != b->size)
		return a->size < b->size ? 1 : -1;
	return 0;
}


static int
caller_info_compare_count(const void* _a, const void* _b)
{
	const caller_info* a = (const caller_info*)_a;
	const caller_info* b = (const caller_info*)_b;
	if (a->count != b->count)
		return a->count < b->count ? 1 : -1;
	return 0;
}

#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

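// Feeds the tracking info of every object in \a slab (respectively in all
// full and partial slabs of \a cache) to \a callback. Returns false if the
// callback aborted the iteration.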
static bool
analyze_allocation_callers(ObjectCache* cache, slab* slab,
	AllocationTrackingCallback& callback)
{
	for (uint32 i = 0; i < slab->size; i++) {
		if (!callback.ProcessTrackingInfo(&slab->tracking[i],
				cache->ObjectAtIndex(slab, i), cache->object_size)) {
			return false;
		}
	}

	return true;
}


static bool
analyze_allocation_callers(ObjectCache* cache, const SlabList& slabList,
	AllocationTrackingCallback& callback)
{
	for (SlabList::ConstIterator it = slabList.GetIterator();
			slab* slab = it.Next();) {
		if (!analyze_allocation_callers(cache, slab, callback))
			return false;
	}

	return true;
}


static bool
analyze_allocation_callers(ObjectCache* cache,
	AllocationTrackingCallback& callback)
{
	return analyze_allocation_callers(cache, cache->full, callback)
		&& analyze_allocation_callers(cache, cache->partial, callback);
}

#endif	// SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

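// KDL command "allocation_infos": prints the current slab allocations,
// optionally filtered by object cache, slab, address, team, and/or thread.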
static int
dump_allocation_infos(int argc, char **argv)
{
	ObjectCache* cache = NULL;
	slab* slab = NULL;
	addr_t addressFilter = 0;
	team_id teamFilter = -1;
	thread_id threadFilter = -1;
	bool printStackTraces = false;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "--stacktrace") == 0)
			printStackTraces = true;
		else if (strcmp(argv[i], "-a") == 0) {
			uint64 address;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &address, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			addressFilter = address;
		} else if (strcmp(argv[i], "-o") == 0) {
			uint64 cacheAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			cache = (ObjectCache*)(addr_t)cacheAddress;
		} else if (strcasecmp(argv[i], "-s") == 0) {
			uint64 slabAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &slabAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			// Note: the strcasecmp() above matched both "-s" and "-S"; after
			// the ++i the flag itself is at argv[i - 1].
			void* slabPages = (void*)(addr_t)slabAddress;
			if (strcmp(argv[i - 1], "-s") == 0) {
				slab = (struct slab*)(addr_t)slabAddress;
				slabPages = slab->pages;
			}
			cache = MemoryManager::DebugObjectCacheForAddress(slabPages);
			if (cache == NULL) {
				kprintf("Couldn't find object cache for address %p.\n",
					slabPages);
				return 0;
			}

			if (slab == NULL) {
				slab = cache->ObjectSlab(slabPages);

				if (slab == NULL) {
					kprintf("Couldn't find slab for address %p.\n", slabPages);
					return 0;
				}
			}
		} else if (strcmp(argv[i], "--team") == 0) {
			uint64 team;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &team, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			teamFilter = team;
		} else if (strcmp(argv[i], "--thread") == 0) {
			uint64 thread;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &thread, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			threadFilter = thread;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	AllocationInfoPrinterCallback callback(printStackTraces, addressFilter,
		teamFilter, threadFilter);

	if (slab != NULL || cache != NULL) {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		if (slab != NULL) {
			if (!analyze_allocation_callers(cache, slab, callback))
				return 0;
		} else if (cache != NULL) {
			if (!analyze_allocation_callers(cache, callback))
				return 0;
		}
#else
		kprintf("Object cache allocation tracking not available. "
			"SLAB_OBJECT_CACHE_TRACING (%d) and "
			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
		return 0;
#endif
	} else {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
				it.HasNext();) {
			if (!analyze_allocation_callers(it.Next(), callback))
				return 0;
		}
#endif

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
		if (!MemoryManager::AnalyzeAllocationCallers(callback))
			return 0;
#endif
	}

	return 0;
}

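// KDL command "allocations_per_caller": sums up the current slab allocations
// per calling function and prints a sorted summary, or, with "-d", the
// details for a single caller.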
static int
dump_allocations_per_caller(int argc, char **argv)
{
	bool sortBySize = true;
	bool resetAllocationInfos = false;
	bool printDetails = false;
	ObjectCache* cache = NULL;
	addr_t caller = 0;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "-c") == 0) {
			sortBySize = false;
		} else if (strcmp(argv[i], "-d") == 0) {
			uint64 callerAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &callerAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			caller = callerAddress;
			printDetails = true;
		} else if (strcmp(argv[i], "-o") == 0) {
			uint64 cacheAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			cache = (ObjectCache*)(addr_t)cacheAddress;
		} else if (strcmp(argv[i], "-r") == 0) {
			resetAllocationInfos = true;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	sCallerInfoCount = 0;

	AllocationCollectorCallback collectorCallback(resetAllocationInfos);
	AllocationDetailPrinterCallback detailsCallback(caller);
	AllocationTrackingCallback& callback = printDetails
		? (AllocationTrackingCallback&)detailsCallback
		: (AllocationTrackingCallback&)collectorCallback;

	if (cache != NULL) {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		if (!analyze_allocation_callers(cache, callback))
			return 0;
#else
		kprintf("Object cache allocation tracking not available. "
			"SLAB_OBJECT_CACHE_TRACING (%d) and "
			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
		return 0;
#endif
	} else {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
				it.HasNext();) {
			if (!analyze_allocation_callers(it.Next(), callback))
				return 0;
		}
#endif

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
		if (!MemoryManager::AnalyzeAllocationCallers(callback))
			return 0;
#endif
	}

	if (printDetails)
		return 0;

	// sort the array
	qsort(sCallerInfoTable, sCallerInfoCount, sizeof(caller_info),
		sortBySize ? &caller_info_compare_size : &caller_info_compare_count);

	kprintf("%" B_PRId32 " different callers, sorted by %s...\n\n",
		sCallerInfoCount, sortBySize ? "size" : "count");

	size_t totalAllocationSize = 0;
	size_t totalAllocationCount = 0;

	kprintf("     count        size      caller\n");
	kprintf("----------------------------------\n");
	for (int32 i = 0; i < sCallerInfoCount; i++) {
		caller_info& info = sCallerInfoTable[i];
		kprintf("%10" B_PRIuSIZE "  %10" B_PRIuSIZE "  %p", info.count,
			info.size, (void*)info.caller);

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(info.caller, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			kprintf("  %s + %#" B_PRIxADDR " (%s)%s\n", symbol,
				info.caller - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			kprintf("\n");

		totalAllocationCount += info.count;
		totalAllocationSize += info.size;
	}

	kprintf("\ntotal allocations: %" B_PRIuSIZE ", %" B_PRIuSIZE " bytes\n",
		totalAllocationCount, totalAllocationSize);

	return 0;
}

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE

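// Records an allocation in the tracing/allocation tracking facilities; a
// no-op unless SLAB_OBJECT_CACHE_TRACING is enabled.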
void
add_alloc_tracing_entry(ObjectCache* cache, uint32 flags, void* object)
{
#if SLAB_OBJECT_CACHE_TRACING
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
	MutexLocker _(cache->lock);
	cache->TrackingInfoFor(object)->Init(T(Alloc(cache, flags, object)));
#else
	T(Alloc(cache, flags, object));
#endif
#endif
}


// #pragma mark -

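// Wakes the maintainer thread so it can perform any pending MemoryManager
// maintenance.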
void
request_memory_manager_maintenance()
{
	MutexLocker locker(sMaintenanceLock);
	sMaintenanceCondition.NotifyAll();
}


// #pragma mark -

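// Destroys the depot and the (empty) slabs of \a cache and frees the cache
// itself. Full or partial slabs at this point are a bug and cause a panic.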
static void
delete_object_cache_internal(object_cache* cache)
{
	if (!(cache->flags & CACHE_NO_DEPOT))
		object_depot_destroy(&cache->depot, 0);

	mutex_lock(&cache->lock);

	if (!cache->full.IsEmpty())
		panic("cache destroy: still has full slabs");

	if (!cache->partial.IsEmpty())
		panic("cache destroy: still has partial slabs");

	while (!cache->empty.IsEmpty())
		cache->ReturnSlab(cache->empty.RemoveHead(), 0);

	mutex_destroy(&cache->lock);
	cache->Delete();
}

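// Queues \a cache for the maintainer thread, which will refill the cache's
// minimum object reserve asynchronously.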
static void
increase_object_reserve(ObjectCache* cache)
{
	MutexLocker locker(sMaintenanceLock);

	cache->maintenance_resize = true;

	if (!cache->maintenance_pending) {
		cache->maintenance_pending = true;
		sMaintenanceQueue.Add(cache);
		sMaintenanceCondition.NotifyAll();
	}
}

/*!	Makes sure that \a objectCount objects can be allocated.
	The cache's lock must be held when calling this function.
*/
static status_t
object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
	uint32 flags)
{
	// If someone else is already adding slabs, we wait for that to be finished
	// first.
	thread_id thread = find_thread(NULL);
	while (true) {
		if (objectCount <= cache->total_objects - cache->used_count)
			return B_OK;

		ObjectCacheResizeEntry* resizeEntry = NULL;
		if (cache->resize_entry_dont_wait != NULL) {
			resizeEntry = cache->resize_entry_dont_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;
			// Note: We could still have reentered the function, i.e.
			// resize_entry_can_wait would be ours. That doesn't matter much,
			// though, since after the don't-wait thread has done its job
			// everyone will be happy.
		} else if (cache->resize_entry_can_wait != NULL) {
			resizeEntry = cache->resize_entry_can_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;

			if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0)
				break;
		} else
			break;

		resizeEntry->condition.Wait(&cache->lock);
	}

	// prepare the resize entry others can wait on
	ObjectCacheResizeEntry*& resizeEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;

	ObjectCacheResizeEntry myResizeEntry;
	resizeEntry = &myResizeEntry;
	resizeEntry->condition.Init(cache, "wait for slabs");
	resizeEntry->thread = thread;

	// add new slabs until there are as many free ones as requested
	while (objectCount > cache->total_objects - cache->used_count) {
		slab* newSlab = cache->CreateSlab(flags);
		if (newSlab == NULL) {
			resizeEntry->condition.NotifyAll();
			resizeEntry = NULL;
			return B_NO_MEMORY;
		}

		cache->usage += cache->slab_size;
		cache->total_objects += newSlab->size;

		cache->empty.Add(newSlab);
		cache->empty_count++;
	}

	resizeEntry->condition.NotifyAll();
	resizeEntry = NULL;

	return B_OK;
}

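// Low resource handler: rotates through all object caches, gives their
// owners a chance to reclaim objects, empties the depots, and returns empty
// slabs as far as each cache's pressure and minimum object reserve allow.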
static void
object_cache_low_memory(void* dummy, uint32 resources, int32 level)
{
	if (level == B_NO_LOW_RESOURCE)
		return;

	MutexLocker cacheListLocker(sObjectCacheListLock);

	// Append the first cache to the end of the queue. We assume that it is
	// one of the caches that will never be deleted and thus we use it as a
	// marker.
	ObjectCache* firstCache = sObjectCaches.RemoveHead();
	sObjectCaches.Add(firstCache);
	cacheListLocker.Unlock();

	ObjectCache* cache;
	do {
		cacheListLocker.Lock();

		cache = sObjectCaches.RemoveHead();
		sObjectCaches.Add(cache);

		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_pending || cache->maintenance_in_progress) {
			// We don't want to mess with caches in maintenance.
			continue;
		}

		cache->maintenance_pending = true;
		cache->maintenance_in_progress = true;

		maintenanceLocker.Unlock();
		cacheListLocker.Unlock();

		// We are calling the reclaimer without the object cache lock
		// to give the owner a chance to return objects to the slabs.

		if (cache->reclaimer)
			cache->reclaimer(cache->cookie, level);

		if ((cache->flags & CACHE_NO_DEPOT) == 0)
			object_depot_make_empty(&cache->depot, 0);

		MutexLocker cacheLocker(cache->lock);
		size_t minimumAllowed;

		switch (level) {
			case B_LOW_RESOURCE_NOTE:
				minimumAllowed = cache->pressure / 2 + 1;
				cache->pressure -= cache->pressure / 8;
				break;

			case B_LOW_RESOURCE_WARNING:
				cache->pressure /= 2;
				minimumAllowed = 0;
				break;

			default:
				cache->pressure = 0;
				minimumAllowed = 0;
				break;
		}

		while (cache->empty_count > minimumAllowed) {
			// make sure we respect the cache's minimum object reserve
			size_t objectsPerSlab = cache->empty.Head()->size;
			size_t freeObjects = cache->total_objects - cache->used_count;
			if (freeObjects < cache->min_object_reserve + objectsPerSlab)
				break;

			cache->ReturnSlab(cache->empty.RemoveHead(), 0);
			cache->empty_count--;
		}

		cacheLocker.Unlock();

		// Check whether in the meantime someone has really requested
		// maintenance for the cache.
		maintenanceLocker.Lock();

		if (cache->maintenance_delete) {
			delete_object_cache_internal(cache);
			continue;
		}

		cache->maintenance_in_progress = false;

		if (cache->maintenance_resize)
			sMaintenanceQueue.Add(cache);
		else
			cache->maintenance_pending = false;
	} while (cache != firstCache);
}

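// The maintainer thread's main loop: processes resize and delete requests
// from sMaintenanceQueue, and performs memory manager maintenance whenever
// the queue is empty.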
static status_t
object_cache_maintainer(void*)
{
	while (true) {
		MutexLocker locker(sMaintenanceLock);

		// wait for the next request
		while (sMaintenanceQueue.IsEmpty()) {
			// perform memory manager maintenance, if needed
			if (MemoryManager::MaintenanceNeeded()) {
				locker.Unlock();
				MemoryManager::PerformMaintenance();
				locker.Lock();
				continue;
			}

			sMaintenanceCondition.Wait(locker.Get());
		}

		ObjectCache* cache = sMaintenanceQueue.RemoveHead();

		while (true) {
			bool resizeRequested = cache->maintenance_resize;
			bool deleteRequested = cache->maintenance_delete;

			if (!resizeRequested && !deleteRequested) {
				cache->maintenance_pending = false;
				cache->maintenance_in_progress = false;
				break;
			}

			cache->maintenance_resize = false;
			cache->maintenance_in_progress = true;

			locker.Unlock();

			if (deleteRequested) {
				delete_object_cache_internal(cache);
				break;
			}

			// resize the cache, if necessary

			MutexLocker cacheLocker(cache->lock);

			if (resizeRequested) {
				status_t error = object_cache_reserve_internal(cache,
					cache->min_object_reserve, 0);
				if (error != B_OK) {
					dprintf("object cache resizer: Failed to resize object "
						"cache %p!\n", cache);
					break;
				}
			}

			locker.Lock();
		}
	}

	// never reached
	return B_OK;
}


// #pragma mark - public API

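/*!	Creates an object cache with default depot and magazine parameters; a
	convenience wrapper around create_object_cache_etc().

	A minimal usage sketch ("my_object" is a hypothetical type):

	object_cache* cache = create_object_cache("my objects",
		sizeof(my_object), 0, NULL, NULL, NULL);
	my_object* object = (my_object*)object_cache_alloc(cache, 0);
	// ... use the object ...
	object_cache_free(cache, object, 0);
	delete_object_cache(cache);
*/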
object_cache*
create_object_cache(const char* name, size_t object_size, size_t alignment,
	void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor)
{
	return create_object_cache_etc(name, object_size, alignment, 0, 0, 0, 0,
		cookie, constructor, destructor, NULL);
}

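/*!	Creates an object cache for objects of size \a objectSize. Objects up to
	256 bytes are served by a SmallObjectCache, larger ones by a
	HashedObjectCache. Returns NULL if \a objectSize is 0 or creating the
	cache fails.
*/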
object_cache*
create_object_cache_etc(const char* name, size_t objectSize, size_t alignment,
	size_t maximum, size_t magazineCapacity, size_t maxMagazineCount,
	uint32 flags, void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor, object_cache_reclaimer reclaimer)
{
	ObjectCache* cache;

	if (objectSize == 0) {
		cache = NULL;
	} else if (objectSize <= 256) {
		cache = SmallObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	} else {
		cache = HashedObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	}

	if (cache != NULL) {
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Add(cache);
	}

	T(Create(name, objectSize, alignment, maximum, flags, cookie, cache));
	return cache;
}

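/*!	Deletes \a cache. If the maintainer thread is currently working on the
	cache, the actual deletion is deferred to that thread; otherwise the
	cache is destroyed immediately. All objects must have been freed before
	calling this.
*/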
void
delete_object_cache(object_cache* cache)
{
	T(Delete(cache));

	{
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Remove(cache);
	}

	MutexLocker cacheLocker(cache->lock);

	{
		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_in_progress) {
			// The maintainer thread is working with the cache. Just mark it
			// to be deleted.
			cache->maintenance_delete = true;
			return;
		}

		// unschedule maintenance
		if (cache->maintenance_pending)
			sMaintenanceQueue.Remove(cache);
	}

	// at this point no-one should have a reference to the cache anymore
	cacheLocker.Unlock();

	delete_object_cache_internal(cache);
}

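/*!	Sets the minimum number of objects \a cache keeps allocatable and, if the
	reserve changed, schedules an asynchronous resize to satisfy it.
*/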
status_t
object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
{
	MutexLocker _(cache->lock);

	if (cache->min_object_reserve == objectCount)
		return B_OK;

	cache->min_object_reserve = objectCount;

	increase_object_reserve(cache);

	return B_OK;
}

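/*!	Allocates an object from \a cache. The depot is tried first (unless
	CACHE_NO_DEPOT is set); on a miss the object comes from a partial or
	empty slab, creating a new slab if necessary. Returns NULL on failure.
*/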
void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
	if (!(cache->flags & CACHE_NO_DEPOT)) {
		void* object = object_depot_obtain(&cache->depot);
		if (object) {
			add_alloc_tracing_entry(cache, flags, object);
			return fill_allocated_block(object, cache->object_size);
		}
	}

	MutexLocker locker(cache->lock);
	slab* source = NULL;

	while (true) {
		source = cache->partial.Head();
		if (source != NULL)
			break;

		source = cache->empty.RemoveHead();
		if (source != NULL) {
			cache->empty_count--;
			cache->partial.Add(source);
			break;
		}

		if (object_cache_reserve_internal(cache, 1, flags) != B_OK) {
			T(Alloc(cache, flags, NULL));
			return NULL;
		}

		cache->pressure++;
	}

	ParanoiaChecker _2(source);

	object_link* link = _pop(source->free);
	source->count--;
	cache->used_count++;

	if (cache->total_objects - cache->used_count < cache->min_object_reserve)
		increase_object_reserve(cache);

	REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next,
		sizeof(void*));

	TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.",
		link_to_object(link, cache->object_size), link, source, source->count);

	if (source->count == 0) {
		cache->partial.Remove(source);
		cache->full.Add(source);
	}

	void* object = link_to_object(link, cache->object_size);
	locker.Unlock();

	add_alloc_tracing_entry(cache, flags, object);
	return fill_allocated_block(object, cache->object_size);
}

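/*!	Returns \a object to \a cache. Unless CACHE_NO_DEPOT is set, the object
	is stored in the depot for later reuse; otherwise it is returned to its
	slab directly. A NULL \a object is silently ignored.
*/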
void
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
	if (object == NULL)
		return;

	T(Free(cache, object));

#if PARANOID_KERNEL_FREE
	// TODO: allow forcing the check even if we don't find deadbeef
	if (*(uint32*)object == 0xdeadbeef) {
		if (!cache->AssertObjectNotFreed(object))
			return;

		if ((cache->flags & CACHE_NO_DEPOT) == 0) {
			if (object_depot_contains_object(&cache->depot, object)) {
				panic("object_cache: object %p is already freed", object);
				return;
			}
		}
	}

	fill_freed_block(object, cache->object_size);
#endif

#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
	mutex_lock(&cache->lock);
	cache->TrackingInfoFor(object)->Clear();
	mutex_unlock(&cache->lock);
#endif

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		object_depot_store(&cache->depot, object, flags);
		return;
	}

	MutexLocker _(cache->lock);
	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
}

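/*!	Pre-allocates slabs so that at least \a objectCount objects of \a cache
	can be allocated without further slab creation.
*/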
status_t
object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
{
	if (objectCount == 0)
		return B_OK;

	T(Reserve(cache, objectCount, flags));

	MutexLocker _(cache->lock);
	return object_cache_reserve_internal(cache, objectCount, flags);
}


void
object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
{
	MutexLocker _(cache->lock);
	*_allocatedMemory = cache->usage;
}

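// Initialization happens in stages, matching the boot process: slab_init()
// runs during early boot, slab_init_post_area() once areas are available,
// slab_init_post_sem() once semaphores work, and slab_init_post_thread()
// once kernel threads can be spawned.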
void
slab_init(kernel_args* args)
{
	MemoryManager::Init(args);

	new (&sObjectCaches) ObjectCacheList();

	block_allocator_init_boot();
}


void
slab_init_post_area()
{
	MemoryManager::InitPostArea();

	add_debugger_command("slabs", dump_slabs, "list all object caches");
	add_debugger_command("slab_cache", dump_cache_info,
		"dump information about a specific object cache");
	add_debugger_command("slab_depot", dump_object_depot,
		"dump contents of an object depot");
	add_debugger_command("slab_magazine", dump_depot_magazine,
		"dump contents of a depot magazine");
	add_debugger_command("slab_object", dump_object_info,
		"dump information about an object in an object_cache");
#if SLAB_ALLOCATION_TRACKING_AVAILABLE
	add_debugger_command_etc("allocations_per_caller",
		&dump_allocations_per_caller,
		"Dump current slab allocations summed up per caller",
		"[ -c ] [ -d <caller> ] [ -o <object cache> ] [ -r ]\n"
		"The current allocations will be summed up by caller (their count\n"
		"and size) and printed in decreasing order by size or, if \"-c\" is\n"
		"specified, by allocation count. If given, <object cache> specifies\n"
		"the address of the object cache for which to print the allocations.\n"
		"If \"-d\" is given, each allocation for caller <caller> is printed\n"
		"including the respective stack trace.\n"
		"If \"-r\" is given, the allocation infos are reset after gathering\n"
		"the information, so the next command invocation will only show the\n"
		"allocations made after the reset.\n", 0);
	add_debugger_command_etc("allocation_infos",
		&dump_allocation_infos,
		"Dump current slab allocations",
		"[ --stacktrace ] [ -o <object cache> | -s <slab> | -S <address> ] "
		"[ -a <allocation> ] [ --team <team ID> ] [ --thread <thread ID> ]\n"
		"The current allocations, filtered by the optional values, will be\n"
		"printed. If given, <object cache> specifies the address of the\n"
		"object cache, or <slab> specifies the address of a slab, for which\n"
		"to print the allocations. Alternatively <address> specifies any\n"
		"address within a slab allocation range.\n"
		"The optional \"-a\" address filters for a specific allocation.\n"
		"With \"--team\" and \"--thread\", allocations can be filtered by\n"
		"team and/or thread (these only work if a corresponding tracing\n"
		"entry is still available).\n"
		"If \"--stacktrace\" is given, stack traces of the allocation\n"
		"callers are printed, where available.\n", 0);
#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
}


void
slab_init_post_sem()
{
	register_low_resource_handler(object_cache_low_memory, NULL,
		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY
			| B_KERNEL_RESOURCE_ADDRESS_SPACE, 5);

	block_allocator_init_rest();
}


void
slab_init_post_thread()
{
	new(&sMaintenanceQueue) MaintenanceQueue;
	sMaintenanceCondition.Init(&sMaintenanceQueue, "object cache maintainer");

	thread_id objectCacheResizer = spawn_kernel_thread(object_cache_maintainer,
		"object cache resizer", B_URGENT_PRIORITY, NULL);
	if (objectCacheResizer < 0) {
		panic("slab_init_post_thread(): failed to spawn object cache resizer "
			"thread\n");
		return;
	}

	resume_thread(objectCacheResizer);
}


RANGE_MARKER_FUNCTION_END(Slab)


#endif	// !USE_GUARDED_HEAP_FOR_OBJECT_CACHE
1447