xref: /haiku/src/system/kernel/slab/Slab.cpp (revision 85892ec52f476b254d75e2bb2e6560e72faa567c)
1 /*
2  * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
3  * Copyright 2008-2010, Axel Dörfler. All Rights Reserved.
4  * Copyright 2007, Hugo Santos. All Rights Reserved.
5  *
6  * Distributed under the terms of the MIT License.
7  */
8 
9 
10 #include <slab/Slab.h>
11 
12 #include <algorithm>
13 #include <new>
14 #include <stdlib.h>
15 #include <string.h>
16 
17 #include <KernelExport.h>
18 
19 #include <condition_variable.h>
20 #include <elf.h>
21 #include <kernel.h>
22 #include <low_resource_manager.h>
23 #include <slab/ObjectDepot.h>
24 #include <smp.h>
25 #include <tracing.h>
26 #include <util/AutoLock.h>
27 #include <util/DoublyLinkedList.h>
28 #include <vm/vm.h>
29 #include <vm/VMAddressSpace.h>
30 
31 #include "HashedObjectCache.h"
32 #include "MemoryManager.h"
33 #include "slab_debug.h"
34 #include "slab_private.h"
35 #include "SmallObjectCache.h"
36 
37 
38 #if !USE_GUARDED_HEAP_FOR_OBJECT_CACHE
39 
40 
41 typedef DoublyLinkedList<ObjectCache> ObjectCacheList;
42 
43 typedef DoublyLinkedList<ObjectCache,
44 	DoublyLinkedListMemberGetLink<ObjectCache, &ObjectCache::maintenance_link> >
45 		MaintenanceQueue;
46 
47 static ObjectCacheList sObjectCaches;
48 static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");
49 
50 static mutex sMaintenanceLock
51 	= MUTEX_INITIALIZER("object cache resize requests");
52 static MaintenanceQueue sMaintenanceQueue;
53 static ConditionVariable sMaintenanceCondition;
54 
55 
56 #if SLAB_ALLOCATION_TRACKING_AVAILABLE
57 
58 struct caller_info {
59 	addr_t		caller;
60 	size_t		count;
61 	size_t		size;
62 };
63 
64 static const int32 kCallerInfoTableSize = 1024;
65 static caller_info sCallerInfoTable[kCallerInfoTableSize];
66 static int32 sCallerInfoCount = 0;
67 
68 static caller_info* get_caller_info(addr_t caller);
69 
70 
71 RANGE_MARKER_FUNCTION_PROTOTYPES(slab_allocator)
72 RANGE_MARKER_FUNCTION_PROTOTYPES(SlabHashedObjectCache)
73 RANGE_MARKER_FUNCTION_PROTOTYPES(SlabMemoryManager)
74 RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectCache)
75 RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectDepot)
76 RANGE_MARKER_FUNCTION_PROTOTYPES(Slab)
77 RANGE_MARKER_FUNCTION_PROTOTYPES(SlabSmallObjectCache)
78 
79 
80 static const addr_t kSlabCodeAddressRanges[] = {
81 	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(slab_allocator),
82 	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabHashedObjectCache),
83 	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabMemoryManager),
84 	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectCache),
85 	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectDepot),
86 	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(Slab),
87 	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabSmallObjectCache)
88 };
89 
90 static const uint32 kSlabCodeAddressRangeCount
91 	= sizeof(kSlabCodeAddressRanges) / sizeof(kSlabCodeAddressRanges[0]) / 2;
92 
93 #endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
94 
95 
96 RANGE_MARKER_FUNCTION_BEGIN(Slab)
97 
98 
99 #if SLAB_OBJECT_CACHE_TRACING
100 
101 
102 namespace SlabObjectCacheTracing {
103 
104 class ObjectCacheTraceEntry
105 	: public TRACE_ENTRY_SELECTOR(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE) {
106 	public:
107 		ObjectCacheTraceEntry(ObjectCache* cache)
108 			:
109 			TraceEntryBase(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE, 0, true),
110 			fCache(cache)
111 		{
112 		}
113 
114 	protected:
115 		ObjectCache*	fCache;
116 };
117 
118 
119 class Create : public ObjectCacheTraceEntry {
120 	public:
121 		Create(const char* name, size_t objectSize, size_t alignment,
122 				size_t maxByteUsage, uint32 flags, void* cookie,
123 				ObjectCache* cache)
124 			:
125 			ObjectCacheTraceEntry(cache),
126 			fObjectSize(objectSize),
127 			fAlignment(alignment),
128 			fMaxByteUsage(maxByteUsage),
129 			fFlags(flags),
130 			fCookie(cookie)
131 		{
132 			fName = alloc_tracing_buffer_strcpy(name, 64, false);
133 			Initialized();
134 		}
135 
136 		virtual void AddDump(TraceOutput& out)
137 		{
138 			out.Print("object cache create: name: \"%s\", object size: %lu, "
139 				"alignment: %lu, max usage: %lu, flags: 0x%lx, cookie: %p "
140 				"-> cache: %p", fName, fObjectSize, fAlignment, fMaxByteUsage,
141 					fFlags, fCookie, fCache);
142 		}
143 
144 	private:
145 		const char*	fName;
146 		size_t		fObjectSize;
147 		size_t		fAlignment;
148 		size_t		fMaxByteUsage;
149 		uint32		fFlags;
150 		void*		fCookie;
151 };
152 
153 
154 class Delete : public ObjectCacheTraceEntry {
155 	public:
156 		Delete(ObjectCache* cache)
157 			:
158 			ObjectCacheTraceEntry(cache)
159 		{
160 			Initialized();
161 		}
162 
163 		virtual void AddDump(TraceOutput& out)
164 		{
165 			out.Print("object cache delete: %p", fCache);
166 		}
167 };
168 
169 
170 class Alloc : public ObjectCacheTraceEntry {
171 	public:
172 		Alloc(ObjectCache* cache, uint32 flags, void* object)
173 			:
174 			ObjectCacheTraceEntry(cache),
175 			fFlags(flags),
176 			fObject(object)
177 		{
178 			Initialized();
179 		}
180 
181 		virtual void AddDump(TraceOutput& out)
182 		{
183 			out.Print("object cache alloc: cache: %p, flags: 0x%lx -> "
184 				"object: %p", fCache, fFlags, fObject);
185 		}
186 
187 	private:
188 		uint32		fFlags;
189 		void*		fObject;
190 };
191 
192 
193 class Free : public ObjectCacheTraceEntry {
194 	public:
195 		Free(ObjectCache* cache, void* object)
196 			:
197 			ObjectCacheTraceEntry(cache),
198 			fObject(object)
199 		{
200 			Initialized();
201 		}
202 
203 		virtual void AddDump(TraceOutput& out)
204 		{
205 			out.Print("object cache free: cache: %p, object: %p", fCache,
206 				fObject);
207 		}
208 
209 	private:
210 		void*		fObject;
211 };
212 
213 
214 class Reserve : public ObjectCacheTraceEntry {
215 	public:
216 		Reserve(ObjectCache* cache, size_t count, uint32 flags)
217 			:
218 			ObjectCacheTraceEntry(cache),
219 			fCount(count),
220 			fFlags(flags)
221 		{
222 			Initialized();
223 		}
224 
225 		virtual void AddDump(TraceOutput& out)
226 		{
227 			out.Print("object cache reserve: cache: %p, count: %lu, "
228 				"flags: 0x%lx", fCache, fCount, fFlags);
229 		}
230 
231 	private:
232 		uint32		fCount;
233 		uint32		fFlags;
234 };
235 
236 
237 }	// namespace SlabObjectCacheTracing
238 
239 #	define T(x)	new(std::nothrow) SlabObjectCacheTracing::x
240 
241 #else
242 #	define T(x)
243 #endif	// SLAB_OBJECT_CACHE_TRACING
244 
245 
246 // #pragma mark -
247 
248 
249 static void
250 dump_slab(::slab* slab)
251 {
252 	kprintf("  %p  %p  %6" B_PRIuSIZE " %6" B_PRIuSIZE " %6" B_PRIuSIZE "  %p\n",
253 		slab, slab->pages, slab->size, slab->count, slab->offset, slab->free);
254 }
255 
256 
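/*!	KDL command handler for "slabs": prints a one line summary (address, name,
	object size, alignment, usage, empty slab count, used and total object
	counts, and flags) for every registered object cache.
*/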
257 static int
258 dump_slabs(int argc, char* argv[])
259 {
260 	kprintf("%*s %22s %8s %8s %8s %6s %8s %8s %8s\n",
261 		B_PRINTF_POINTER_WIDTH + 2, "address", "name", "objsize", "align",
262 		"usage", "empty", "usedobj", "total", "flags");
263 
264 	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
265 
266 	while (it.HasNext()) {
267 		ObjectCache* cache = it.Next();
268 
269 		kprintf("%p %22s %8" B_PRIuSIZE " %8" B_PRIuSIZE " %8" B_PRIuSIZE " %6"
270 			B_PRIuSIZE " %8" B_PRIuSIZE " %8" B_PRIuSIZE " %8" B_PRIx32 "\n",
271 			cache, cache->name, cache->object_size, cache->alignment, cache->usage,
272 			cache->empty_count, cache->used_count, cache->total_objects, cache->flags);
273 	}
274 
275 	return 0;
276 }
277 
278 
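/*!	KDL command handler for "slab_cache": dumps the fields of the object cache
	at the given address, followed by its empty, partial and full slab lists
	and, unless CACHE_NO_DEPOT is set, its depot.
*/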
279 static int
280 dump_cache_info(int argc, char* argv[])
281 {
282 	if (argc < 2) {
283 		kprintf("usage: slab_cache <address>\n");
284 		return 0;
285 	}
286 
287 	ObjectCache* cache = (ObjectCache*)parse_expression(argv[1]);
288 
289 	kprintf("name:              %s\n", cache->name);
290 	kprintf("lock:              %p\n", &cache->lock);
291 	kprintf("object_size:       %" B_PRIuSIZE "\n", cache->object_size);
292 	kprintf("alignment:         %" B_PRIuSIZE "\n", cache->alignment);
293 	kprintf("cache_color_cycle: %" B_PRIuSIZE "\n", cache->cache_color_cycle);
294 	kprintf("total_objects:     %" B_PRIuSIZE "\n", cache->total_objects);
295 	kprintf("used_count:        %" B_PRIuSIZE "\n", cache->used_count);
296 	kprintf("empty_count:       %" B_PRIuSIZE "\n", cache->empty_count);
297 	kprintf("pressure:          %" B_PRIuSIZE "\n", cache->pressure);
298 	kprintf("slab_size:         %" B_PRIuSIZE "\n", cache->slab_size);
299 	kprintf("usage:             %" B_PRIuSIZE "\n", cache->usage);
300 	kprintf("maximum:           %" B_PRIuSIZE "\n", cache->maximum);
301 	kprintf("flags:             0x%" B_PRIx32 "\n", cache->flags);
302 	kprintf("cookie:            %p\n", cache->cookie);
303 	kprintf("resize entry don't wait: %p\n", cache->resize_entry_dont_wait);
304 	kprintf("resize entry can wait:   %p\n", cache->resize_entry_can_wait);
305 
306 	kprintf("  %-*s    %-*s      size   used offset  free\n",
307 		B_PRINTF_POINTER_WIDTH, "slab", B_PRINTF_POINTER_WIDTH, "chunk");
308 
309 	SlabList::Iterator iterator = cache->empty.GetIterator();
310 	if (iterator.HasNext())
311 		kprintf("empty:\n");
312 	while (::slab* slab = iterator.Next())
313 		dump_slab(slab);
314 
315 	iterator = cache->partial.GetIterator();
316 	if (iterator.HasNext())
317 		kprintf("partial:\n");
318 	while (::slab* slab = iterator.Next())
319 		dump_slab(slab);
320 
321 	iterator = cache->full.GetIterator();
322 	if (iterator.HasNext())
323 		kprintf("full:\n");
324 	while (::slab* slab = iterator.Next())
325 		dump_slab(slab);
326 
327 	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
328 		kprintf("depot:\n");
329 		dump_object_depot(&cache->depot);
330 	}
331 
332 	return 0;
333 }
334 
335 
336 // #pragma mark - AllocationTrackingCallback
337 
338 
339 #if SLAB_ALLOCATION_TRACKING_AVAILABLE
340 
341 AllocationTrackingCallback::~AllocationTrackingCallback()
342 {
343 }
344 
345 #endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
346 
347 
348 // #pragma mark -
349 
350 
351 #if SLAB_ALLOCATION_TRACKING_AVAILABLE
352 
353 namespace {
354 
355 class AllocationCollectorCallback : public AllocationTrackingCallback {
356 public:
357 	AllocationCollectorCallback(bool resetInfos)
358 		:
359 		fResetInfos(resetInfos)
360 	{
361 	}
362 
363 	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
364 		void* allocation, size_t allocationSize)
365 	{
366 		if (!info->IsInitialized())
367 			return true;
368 
369 		addr_t caller = 0;
370 		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
371 
372 		if (traceEntry != NULL && info->IsTraceEntryValid()) {
373 			caller = tracing_find_caller_in_stack_trace(
374 				traceEntry->StackTrace(), kSlabCodeAddressRanges,
375 				kSlabCodeAddressRangeCount);
376 		}
377 
378 		caller_info* callerInfo = get_caller_info(caller);
379 		if (callerInfo == NULL) {
380 			kprintf("out of space for caller infos\n");
381 			return false;
382 		}
383 
384 		callerInfo->count++;
385 		callerInfo->size += allocationSize;
386 
387 		if (fResetInfos)
388 			info->Clear();
389 
390 		return true;
391 	}
392 
393 private:
394 	bool	fResetInfos;
395 };
396 
397 
398 class AllocationInfoPrinterCallback : public AllocationTrackingCallback {
399 public:
400 	AllocationInfoPrinterCallback(bool printStackTrace, addr_t addressFilter,
401 		team_id teamFilter, thread_id threadFilter)
402 		:
403 		fPrintStackTrace(printStackTrace),
404 		fAddressFilter(addressFilter),
405 		fTeamFilter(teamFilter),
406 		fThreadFilter(threadFilter)
407 	{
408 	}
409 
410 	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
411 		void* allocation, size_t allocationSize)
412 	{
413 		if (!info->IsInitialized())
414 			return true;
415 
416 		if (fAddressFilter != 0 && (addr_t)allocation != fAddressFilter)
417 			return true;
418 
419 		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
420 		if (traceEntry != NULL && !info->IsTraceEntryValid())
421 			traceEntry = NULL;
422 
423 		if (traceEntry != NULL) {
424 			if (fTeamFilter != -1 && traceEntry->TeamID() != fTeamFilter)
425 				return true;
426 			if (fThreadFilter != -1 && traceEntry->ThreadID() != fThreadFilter)
427 				return true;
428 		} else {
429 			// we need the info if we have filters set
430 			if (fTeamFilter != -1 || fThreadFilter != -1)
431 				return true;
432 		}
433 
434 		kprintf("allocation %p, size: %" B_PRIuSIZE, allocation,
435 			allocationSize);
436 
437 		if (traceEntry != NULL) {
438 			kprintf(", team: %" B_PRId32 ", thread %" B_PRId32
439 				", time %" B_PRId64 "\n", traceEntry->TeamID(),
440 				traceEntry->ThreadID(), traceEntry->Time());
441 
442 			if (fPrintStackTrace)
443 				tracing_print_stack_trace(traceEntry->StackTrace());
444 		} else
445 			kprintf("\n");
446 
447 		return true;
448 	}
449 
450 private:
451 	bool		fPrintStackTrace;
452 	addr_t		fAddressFilter;
453 	team_id		fTeamFilter;
454 	thread_id	fThreadFilter;
455 };
456 
457 
458 class AllocationDetailPrinterCallback : public AllocationTrackingCallback {
459 public:
460 	AllocationDetailPrinterCallback(addr_t caller)
461 		:
462 		fCaller(caller)
463 	{
464 	}
465 
466 	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
467 		void* allocation, size_t allocationSize)
468 	{
469 		if (!info->IsInitialized())
470 			return true;
471 
472 		addr_t caller = 0;
473 		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
474 		if (traceEntry != NULL && !info->IsTraceEntryValid())
475 			traceEntry = NULL;
476 
477 		if (traceEntry != NULL) {
478 			caller = tracing_find_caller_in_stack_trace(
479 				traceEntry->StackTrace(), kSlabCodeAddressRanges,
480 				kSlabCodeAddressRangeCount);
481 		}
482 
483 		if (caller != fCaller)
484 			return true;
485 
486 		kprintf("allocation %p, size: %" B_PRIuSIZE "\n", allocation,
487 			allocationSize);
488 		if (traceEntry != NULL)
489 			tracing_print_stack_trace(traceEntry->StackTrace());
490 
491 		return true;
492 	}
493 
494 private:
495 	addr_t	fCaller;
496 };
497 
498 }	// unnamed namespace
499 
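/*!	Returns the caller_info entry for \a caller, adding a new entry to the
	fixed size sCallerInfoTable if none exists yet. Returns NULL when the
	table is full.
*/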
500 static caller_info*
501 get_caller_info(addr_t caller)
502 {
503 	// find the caller info
504 	for (int32 i = 0; i < sCallerInfoCount; i++) {
505 		if (caller == sCallerInfoTable[i].caller)
506 			return &sCallerInfoTable[i];
507 	}
508 
509 	// not found, add a new entry, if there are free slots
510 	if (sCallerInfoCount >= kCallerInfoTableSize)
511 		return NULL;
512 
513 	caller_info* info = &sCallerInfoTable[sCallerInfoCount++];
514 	info->caller = caller;
515 	info->count = 0;
516 	info->size = 0;
517 
518 	return info;
519 }
520 
521 
522 static int
523 caller_info_compare_size(const void* _a, const void* _b)
524 {
525 	const caller_info* a = (const caller_info*)_a;
526 	const caller_info* b = (const caller_info*)_b;
527 	return b->size > a->size ? 1 : (b->size < a->size ? -1 : 0);
528 }
529 
530 
531 static int
532 caller_info_compare_count(const void* _a, const void* _b)
533 {
534 	const caller_info* a = (const caller_info*)_a;
535 	const caller_info* b = (const caller_info*)_b;
536 	return b->count > a->count ? 1 : (b->count < a->count ? -1 : 0);
537 }
538 
539 
540 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
541 
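/*!	Invokes \a callback for the tracking info of every object in \a slab. The
	overloads below do the same for a whole slab list and for the full and
	partial lists of a cache, respectively. Returns false as soon as the
	callback does.
*/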
542 static bool
543 analyze_allocation_callers(ObjectCache* cache, slab* slab,
544 	AllocationTrackingCallback& callback)
545 {
546 	for (uint32 i = 0; i < slab->size; i++) {
547 		if (!callback.ProcessTrackingInfo(&slab->tracking[i],
548 				cache->ObjectAtIndex(slab, i), cache->object_size)) {
549 			return false;
550 		}
551 	}
552 
553 	return true;
554 }
555 
556 
557 static bool
558 analyze_allocation_callers(ObjectCache* cache, const SlabList& slabList,
559 	AllocationTrackingCallback& callback)
560 {
561 	for (SlabList::ConstIterator it = slabList.GetIterator();
562 			slab* slab = it.Next();) {
563 		if (!analyze_allocation_callers(cache, slab, callback))
564 			return false;
565 	}
566 
567 	return true;
568 }
569 
570 
571 static bool
572 analyze_allocation_callers(ObjectCache* cache,
573 	AllocationTrackingCallback& callback)
574 {
575 	return analyze_allocation_callers(cache, cache->full, callback)
576 		&& analyze_allocation_callers(cache, cache->partial, callback);
577 }
578 
579 #endif	// SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
580 
581 
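/*!	KDL command handler for "allocation_infos": prints the tracked slab
	allocations, optionally filtered by object cache (-o), slab (-s/-S),
	allocation address (-a), team or thread, with optional stack traces.
*/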
582 static int
583 dump_allocation_infos(int argc, char **argv)
584 {
585 	ObjectCache* cache = NULL;
586 	slab* slab = NULL;
587 	addr_t addressFilter = 0;
588 	team_id teamFilter = -1;
589 	thread_id threadFilter = -1;
590 	bool printStackTraces = false;
591 
592 	for (int32 i = 1; i < argc; i++) {
593 		if (strcmp(argv[i], "--stacktrace") == 0)
594 			printStackTraces = true;
595 		else if (strcmp(argv[i], "-a") == 0) {
596 			uint64 address;
597 			if (++i >= argc
598 				|| !evaluate_debug_expression(argv[i], &address, true)) {
599 				print_debugger_command_usage(argv[0]);
600 				return 0;
601 			}
602 
603 			addressFilter = address;
604 		} else if (strcmp(argv[i], "-o") == 0) {
605 			uint64 cacheAddress;
606 			if (++i >= argc
607 				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
608 				print_debugger_command_usage(argv[0]);
609 				return 0;
610 			}
611 
612 			cache = (ObjectCache*)(addr_t)cacheAddress;
613 		} else if (strcasecmp(argv[i], "-s") == 0) {
614 			uint64 slabAddress;
615 			if (++i >= argc
616 				|| !evaluate_debug_expression(argv[i], &slabAddress, true)) {
617 				print_debugger_command_usage(argv[0]);
618 				return 0;
619 			}
620 
621 			void* slabPages = (void*)(addr_t)slabAddress;
622 			if (strcmp(argv[i - 1], "-s") == 0) {
623 				slab = (struct slab*)(addr_t)slabAddress;
624 				slabPages = slab->pages;
625 			}
626 
627 			cache = MemoryManager::DebugObjectCacheForAddress(slabPages);
628 			if (cache == NULL) {
629 				kprintf("Couldn't find object cache for address %p.\n",
630 					slabPages);
631 				return 0;
632 			}
633 
634 			if (slab == NULL) {
635 				slab = cache->ObjectSlab(slabPages);
636 
637 				if (slab == NULL) {
638 					kprintf("Couldn't find slab for address %p.\n", slabPages);
639 					return 0;
640 				}
641 			}
642 		} else if (strcmp(argv[i], "--team") == 0) {
643 			uint64 team;
644 			if (++i >= argc
645 				|| !evaluate_debug_expression(argv[i], &team, true)) {
646 				print_debugger_command_usage(argv[0]);
647 				return 0;
648 			}
649 
650 			teamFilter = team;
651 		} else if (strcmp(argv[i], "--thread") == 0) {
652 			uint64 thread;
653 			if (++i >= argc
654 				|| !evaluate_debug_expression(argv[i], &thread, true)) {
655 				print_debugger_command_usage(argv[0]);
656 				return 0;
657 			}
658 
659 			threadFilter = thread;
660 		} else {
661 			print_debugger_command_usage(argv[0]);
662 			return 0;
663 		}
664 	}
665 
666 	AllocationInfoPrinterCallback callback(printStackTraces, addressFilter,
667 		teamFilter, threadFilter);
668 
669 	if (slab != NULL || cache != NULL) {
670 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
671 		if (slab != NULL) {
672 			if (!analyze_allocation_callers(cache, slab, callback))
673 				return 0;
674 		} else if (cache != NULL) {
675 			if (!analyze_allocation_callers(cache, callback))
676 				return 0;
677 		}
678 #else
679 		kprintf("Object cache allocation tracking not available. "
680 			"SLAB_OBJECT_CACHE_TRACING (%d) and "
681 			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
682 			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
683 		return 0;
684 #endif
685 	} else {
686 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
687 
688 		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
689 				it.HasNext();) {
690 			if (!analyze_allocation_callers(it.Next(), callback))
691 				return 0;
692 		}
693 #endif
694 
695 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
696 		if (!MemoryManager::AnalyzeAllocationCallers(callback))
697 			return 0;
698 #endif
699 	}
700 
701 	return 0;
702 }
703 
704 
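/*!	KDL command handler for "allocations_per_caller": sums up the tracked slab
	allocations per calling function and prints them sorted by total size, or
	by count if "-c" is given. With "-d" every allocation of a single caller
	is printed in detail instead.
*/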
705 static int
706 dump_allocations_per_caller(int argc, char **argv)
707 {
708 	bool sortBySize = true;
709 	bool resetAllocationInfos = false;
710 	bool printDetails = false;
711 	ObjectCache* cache = NULL;
712 	addr_t caller = 0;
713 
714 	for (int32 i = 1; i < argc; i++) {
715 		if (strcmp(argv[i], "-c") == 0) {
716 			sortBySize = false;
717 		} else if (strcmp(argv[i], "-d") == 0) {
718 			uint64 callerAddress;
719 			if (++i >= argc
720 				|| !evaluate_debug_expression(argv[i], &callerAddress, true)) {
721 				print_debugger_command_usage(argv[0]);
722 				return 0;
723 			}
724 
725 			caller = callerAddress;
726 			printDetails = true;
727 		} else if (strcmp(argv[i], "-o") == 0) {
728 			uint64 cacheAddress;
729 			if (++i >= argc
730 				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
731 				print_debugger_command_usage(argv[0]);
732 				return 0;
733 			}
734 
735 			cache = (ObjectCache*)(addr_t)cacheAddress;
736 		} else if (strcmp(argv[i], "-r") == 0) {
737 			resetAllocationInfos = true;
738 		} else {
739 			print_debugger_command_usage(argv[0]);
740 			return 0;
741 		}
742 	}
743 
744 	sCallerInfoCount = 0;
745 
746 	AllocationCollectorCallback collectorCallback(resetAllocationInfos);
747 	AllocationDetailPrinterCallback detailsCallback(caller);
748 	AllocationTrackingCallback& callback = printDetails
749 		? (AllocationTrackingCallback&)detailsCallback
750 		: (AllocationTrackingCallback&)collectorCallback;
751 
752 	if (cache != NULL) {
753 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
754 		if (!analyze_allocation_callers(cache, callback))
755 			return 0;
756 #else
757 		kprintf("Object cache allocation tracking not available. "
758 			"SLAB_OBJECT_CACHE_TRACING (%d) and "
759 			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
760 			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
761 		return 0;
762 #endif
763 	} else {
764 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
765 
766 		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
767 				it.HasNext();) {
768 			if (!analyze_allocation_callers(it.Next(), callback))
769 				return 0;
770 		}
771 #endif
772 
773 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
774 		if (!MemoryManager::AnalyzeAllocationCallers(callback))
775 			return 0;
776 #endif
777 	}
778 
779 	if (printDetails)
780 		return 0;
781 
782 	// sort the array
783 	qsort(sCallerInfoTable, sCallerInfoCount, sizeof(caller_info),
784 		sortBySize ? &caller_info_compare_size : &caller_info_compare_count);
785 
786 	kprintf("%" B_PRId32 " different callers, sorted by %s...\n\n",
787 		sCallerInfoCount, sortBySize ? "size" : "count");
788 
789 	size_t totalAllocationSize = 0;
790 	size_t totalAllocationCount = 0;
791 
792 	kprintf("     count        size      caller\n");
793 	kprintf("----------------------------------\n");
794 	for (int32 i = 0; i < sCallerInfoCount; i++) {
795 		caller_info& info = sCallerInfoTable[i];
796 		kprintf("%10" B_PRIuSIZE "  %10" B_PRIuSIZE "  %p", info.count,
797 			info.size, (void*)info.caller);
798 
799 		const char* symbol;
800 		const char* imageName;
801 		bool exactMatch;
802 		addr_t baseAddress;
803 
804 		if (elf_debug_lookup_symbol_address(info.caller, &baseAddress, &symbol,
805 				&imageName, &exactMatch) == B_OK) {
806 			kprintf("  %s + %#" B_PRIxADDR " (%s)%s\n", symbol,
807 				info.caller - baseAddress, imageName,
808 				exactMatch ? "" : " (nearest)");
809 		} else
810 			kprintf("\n");
811 
812 		totalAllocationCount += info.count;
813 		totalAllocationSize += info.size;
814 	}
815 
816 	kprintf("\ntotal allocations: %" B_PRIuSIZE ", %" B_PRIuSIZE " bytes\n",
817 		totalAllocationCount, totalAllocationSize);
818 
819 	return 0;
820 }
821 
822 #endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
823 
824 
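/*!	Records an allocation tracing entry for \a object, if object cache tracing
	is enabled; a no-op otherwise.
*/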
825 static void
826 add_alloc_tracing_entry(ObjectCache* cache, uint32 flags, void* object)
827 {
828 #if SLAB_OBJECT_CACHE_TRACING
829 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
830 	MutexLocker _(cache->lock);
831 	cache->TrackingInfoFor(object)->Init(T(Alloc(cache, flags, object)));
832 #else
833 	T(Alloc(cache, flags, object));
834 #endif
835 #endif
836 }
837 
838 
839 // #pragma mark -
840 
841 
842 void
843 request_memory_manager_maintenance()
844 {
845 	MutexLocker locker(sMaintenanceLock);
846 	sMaintenanceCondition.NotifyAll();
847 }
848 
849 
850 // #pragma mark -
851 
852 
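/*!	Actually destroys \a cache: empties its depot, panics if any full or
	partial slabs remain, returns the remaining empty slabs and finally
	deletes the cache object itself.
*/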
853 static void
854 delete_object_cache_internal(object_cache* cache)
855 {
856 	if (!(cache->flags & CACHE_NO_DEPOT))
857 		object_depot_destroy(&cache->depot, 0);
858 
859 	mutex_lock(&cache->lock);
860 
861 	if (!cache->full.IsEmpty())
862 		panic("cache destroy: still has full slabs");
863 
864 	if (!cache->partial.IsEmpty())
865 		panic("cache destroy: still has partial slabs");
866 
867 	while (!cache->empty.IsEmpty())
868 		cache->ReturnSlab(cache->empty.RemoveHead(), 0);
869 
870 	mutex_destroy(&cache->lock);
871 	cache->Delete();
872 }
873 
874 
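/*!	Schedules \a cache for a resize by the maintainer thread, so that its
	minimum object reserve can be refilled asynchronously.
*/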
875 static void
876 increase_object_reserve(ObjectCache* cache)
877 {
878 	MutexLocker locker(sMaintenanceLock);
879 
880 	cache->maintenance_resize = true;
881 
882 	if (!cache->maintenance_pending) {
883 		cache->maintenance_pending = true;
884 		sMaintenanceQueue.Add(cache);
885 		sMaintenanceCondition.NotifyAll();
886 	}
887 }
888 
889 
890 /*!	Makes sure that at least \a objectCount objects can be allocated, i.e.
891 	are free in \a cache, creating new slabs as necessary. */
892 static status_t
893 object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
894 	uint32 flags)
895 {
896 	// If someone else is already adding slabs, we wait for that to be finished
897 	// first.
898 	thread_id thread = find_thread(NULL);
899 	while (true) {
900 		if (objectCount <= cache->total_objects - cache->used_count)
901 			return B_OK;
902 
903 		ObjectCacheResizeEntry* resizeEntry = NULL;
904 		if (cache->resize_entry_dont_wait != NULL) {
905 			resizeEntry = cache->resize_entry_dont_wait;
906 			if (thread == resizeEntry->thread)
907 				return B_WOULD_BLOCK;
908 			// Note: We could still have reentered the function, i.e.
909 			// resize_entry_can_wait would be ours. That doesn't matter much,
910 			// though, since after the don't-wait thread has done its job
911 			// everyone will be happy.
912 		} else if (cache->resize_entry_can_wait != NULL) {
913 			resizeEntry = cache->resize_entry_can_wait;
914 			if (thread == resizeEntry->thread)
915 				return B_WOULD_BLOCK;
916 
917 			if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0)
918 				break;
919 		} else
920 			break;
921 
922 		ConditionVariableEntry entry;
923 		resizeEntry->condition.Add(&entry);
924 
925 		cache->Unlock();
926 		entry.Wait();
927 		cache->Lock();
928 	}
929 
930 	// prepare the resize entry others can wait on
931 	ObjectCacheResizeEntry*& resizeEntry
932 		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
933 			? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;
934 
935 	ObjectCacheResizeEntry myResizeEntry;
936 	resizeEntry = &myResizeEntry;
937 	resizeEntry->condition.Init(cache, "wait for slabs");
938 	resizeEntry->thread = thread;
939 
940 	// add new slabs until there are as many free ones as requested
941 	while (objectCount > cache->total_objects - cache->used_count) {
942 		slab* newSlab = cache->CreateSlab(flags);
943 		if (newSlab == NULL) {
944 			resizeEntry->condition.NotifyAll();
945 			resizeEntry = NULL;
946 			return B_NO_MEMORY;
947 		}
948 
949 		cache->usage += cache->slab_size;
950 		cache->total_objects += newSlab->size;
951 
952 		cache->empty.Add(newSlab);
953 		cache->empty_count++;
954 	}
955 
956 	resizeEntry->condition.NotifyAll();
957 	resizeEntry = NULL;
958 
959 	return B_OK;
960 }
961 
962 
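/*!	Low resource handler: walks all object caches, gives their reclaimers a
	chance to return objects, empties the depots and frees empty slabs, the
	more aggressively the more severe \a level is.
*/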
963 static void
964 object_cache_low_memory(void* dummy, uint32 resources, int32 level)
965 {
966 	if (level == B_NO_LOW_RESOURCE)
967 		return;
968 
969 	MutexLocker cacheListLocker(sObjectCacheListLock);
970 
971 	// Append the first cache to the end of the queue. We assume that it is
972 	// one of the caches that will never be deleted and thus we use it as a
973 	// marker.
974 	ObjectCache* firstCache = sObjectCaches.RemoveHead();
975 	sObjectCaches.Add(firstCache);
976 	cacheListLocker.Unlock();
977 
978 	ObjectCache* cache;
979 	do {
980 		cacheListLocker.Lock();
981 
982 		cache = sObjectCaches.RemoveHead();
983 		sObjectCaches.Add(cache);
984 
985 		MutexLocker maintenanceLocker(sMaintenanceLock);
986 		if (cache->maintenance_pending || cache->maintenance_in_progress) {
987 			// We don't want to mess with caches in maintenance.
988 			continue;
989 		}
990 
991 		cache->maintenance_pending = true;
992 		cache->maintenance_in_progress = true;
993 
994 		maintenanceLocker.Unlock();
995 		cacheListLocker.Unlock();
996 
997 		// We are calling the reclaimer without the object cache lock
998 		// to give the owner a chance to return objects to the slabs.
999 
1000 		if (cache->reclaimer)
1001 			cache->reclaimer(cache->cookie, level);
1002 
1003 		if ((cache->flags & CACHE_NO_DEPOT) == 0)
1004 			object_depot_make_empty(&cache->depot, 0);
1005 
1006 		MutexLocker cacheLocker(cache->lock);
1007 		size_t minimumAllowed;
1008 
1009 		switch (level) {
1010 			case B_LOW_RESOURCE_NOTE:
1011 				minimumAllowed = cache->pressure / 2 + 1;
1012 				cache->pressure -= cache->pressure / 8;
1013 				break;
1014 
1015 			case B_LOW_RESOURCE_WARNING:
1016 				cache->pressure /= 2;
1017 				minimumAllowed = 0;
1018 				break;
1019 
1020 			default:
1021 				cache->pressure = 0;
1022 				minimumAllowed = 0;
1023 				break;
1024 		}
1025 
1026 		while (cache->empty_count > minimumAllowed) {
1027 			// make sure we respect the cache's minimum object reserve
1028 			size_t objectsPerSlab = cache->empty.Head()->size;
1029 			size_t freeObjects = cache->total_objects - cache->used_count;
1030 			if (freeObjects < cache->min_object_reserve + objectsPerSlab)
1031 				break;
1032 
1033 			cache->ReturnSlab(cache->empty.RemoveHead(), 0);
1034 			cache->empty_count--;
1035 		}
1036 
1037 		cacheLocker.Unlock();
1038 
1039 		// Check whether in the meantime someone has really requested
1040 		// maintenance for the cache.
1041 		maintenanceLocker.Lock();
1042 
1043 		if (cache->maintenance_delete) {
1044 			delete_object_cache_internal(cache);
1045 			continue;
1046 		}
1047 
1048 		cache->maintenance_in_progress = false;
1049 
1050 		if (cache->maintenance_resize)
1051 			sMaintenanceQueue.Add(cache);
1052 		else
1053 			cache->maintenance_pending = false;
1054 	} while (cache != firstCache);
1055 }
1056 
1057 
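/*!	Body of the "object cache resizer" kernel thread: waits for entries on
	sMaintenanceQueue and carries out the pending resize and delete requests,
	performing memory manager maintenance while idle. Never returns.
*/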
1058 static status_t
1059 object_cache_maintainer(void*)
1060 {
1061 	while (true) {
1062 		MutexLocker locker(sMaintenanceLock);
1063 
1064 		// wait for the next request
1065 		while (sMaintenanceQueue.IsEmpty()) {
1066 			// perform memory manager maintenance, if needed
1067 			if (MemoryManager::MaintenanceNeeded()) {
1068 				locker.Unlock();
1069 				MemoryManager::PerformMaintenance();
1070 				locker.Lock();
1071 				continue;
1072 			}
1073 
1074 			ConditionVariableEntry entry;
1075 			sMaintenanceCondition.Add(&entry);
1076 			locker.Unlock();
1077 			entry.Wait();
1078 			locker.Lock();
1079 		}
1080 
1081 		ObjectCache* cache = sMaintenanceQueue.RemoveHead();
1082 
1083 		while (true) {
1084 			bool resizeRequested = cache->maintenance_resize;
1085 			bool deleteRequested = cache->maintenance_delete;
1086 
1087 			if (!resizeRequested && !deleteRequested) {
1088 				cache->maintenance_pending = false;
1089 				cache->maintenance_in_progress = false;
1090 				break;
1091 			}
1092 
1093 			cache->maintenance_resize = false;
1094 			cache->maintenance_in_progress = true;
1095 
1096 			locker.Unlock();
1097 
1098 			if (deleteRequested) {
1099 				delete_object_cache_internal(cache);
1100 				break;
1101 			}
1102 
1103 			// resize the cache, if necessary
1104 
1105 			MutexLocker cacheLocker(cache->lock);
1106 
1107 			if (resizeRequested) {
1108 				status_t error = object_cache_reserve_internal(cache,
1109 					cache->min_object_reserve, 0);
1110 				if (error != B_OK) {
1111 					dprintf("object cache resizer: Failed to resize object "
1112 						"cache %p!\n", cache);
1113 					break;
1114 				}
1115 			}
1116 
1117 			locker.Lock();
1118 		}
1119 	}
1120 
1121 	// never reached
1122 	return B_OK;
1123 }
1124 
1125 
1126 // #pragma mark - public API
1127 
1128 
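/*!	Creates an object cache for fixed-size objects; the simple variant of
	create_object_cache_etc() below. A minimal usage sketch (illustrative
	only; my_object is a hypothetical client type):

		object_cache* cache = create_object_cache("my objects",
			sizeof(my_object), 0, NULL, NULL, NULL);
		my_object* object = (my_object*)object_cache_alloc(cache, 0);
		// ... use the object ...
		object_cache_free(cache, object, 0);
		delete_object_cache(cache);
*/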
1129 object_cache*
1130 create_object_cache(const char* name, size_t object_size, size_t alignment,
1131 	void* cookie, object_cache_constructor constructor,
1132 	object_cache_destructor destructor)
1133 {
1134 	return create_object_cache_etc(name, object_size, alignment, 0, 0, 0, 0,
1135 		cookie, constructor, destructor, NULL);
1136 }
1137 
1138 
1139 object_cache*
1140 create_object_cache_etc(const char* name, size_t objectSize, size_t alignment,
1141 	size_t maximum, size_t magazineCapacity, size_t maxMagazineCount,
1142 	uint32 flags, void* cookie, object_cache_constructor constructor,
1143 	object_cache_destructor destructor, object_cache_reclaimer reclaimer)
1144 {
1145 	ObjectCache* cache;
1146 
1147 	if (objectSize == 0) {
1148 		cache = NULL;
1149 	} else if (objectSize <= 256) {
1150 		cache = SmallObjectCache::Create(name, objectSize, alignment, maximum,
1151 			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
1152 			destructor, reclaimer);
1153 	} else {
1154 		cache = HashedObjectCache::Create(name, objectSize, alignment, maximum,
1155 			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
1156 			destructor, reclaimer);
1157 	}
1158 
1159 	if (cache != NULL) {
1160 		MutexLocker _(sObjectCacheListLock);
1161 		sObjectCaches.Add(cache);
1162 	}
1163 
1164 	T(Create(name, objectSize, alignment, maximum, flags, cookie, cache));
1165 	return cache;
1166 }
1167 
1168 
1169 void
1170 delete_object_cache(object_cache* cache)
1171 {
1172 	T(Delete(cache));
1173 
1174 	{
1175 		MutexLocker _(sObjectCacheListLock);
1176 		sObjectCaches.Remove(cache);
1177 	}
1178 
1179 	MutexLocker cacheLocker(cache->lock);
1180 
1181 	{
1182 		MutexLocker maintenanceLocker(sMaintenanceLock);
1183 		if (cache->maintenance_in_progress) {
1184 			// The maintainer thread is working with the cache. Just mark it
1185 			// to be deleted.
1186 			cache->maintenance_delete = true;
1187 			return;
1188 		}
1189 
1190 		// unschedule maintenance
1191 		if (cache->maintenance_pending)
1192 			sMaintenanceQueue.Remove(cache);
1193 	}
1194 
1195 	// at this point no-one should have a reference to the cache anymore
1196 	cacheLocker.Unlock();
1197 
1198 	delete_object_cache_internal(cache);
1199 }
1200 
1201 
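/*!	Sets the minimum number of objects the cache keeps readily allocatable and
	asks the maintainer thread to grow the cache accordingly.
*/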
1202 status_t
1203 object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
1204 {
1205 	MutexLocker _(cache->lock);
1206 
1207 	if (cache->min_object_reserve == objectCount)
1208 		return B_OK;
1209 
1210 	cache->min_object_reserve = objectCount;
1211 
1212 	increase_object_reserve(cache);
1213 
1214 	return B_OK;
1215 }
1216 
1217 
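/*!	Allocates one object from \a cache. Unless CACHE_NO_DEPOT is set, the
	cache's depot is tried first; otherwise an object is taken from a partial
	or empty slab, creating new slabs as needed. Returns NULL on failure.
*/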
1218 void*
1219 object_cache_alloc(object_cache* cache, uint32 flags)
1220 {
1221 	if (!(cache->flags & CACHE_NO_DEPOT)) {
1222 		void* object = object_depot_obtain(&cache->depot);
1223 		if (object) {
1224 			add_alloc_tracing_entry(cache, flags, object);
1225 			return fill_allocated_block(object, cache->object_size);
1226 		}
1227 	}
1228 
1229 	MutexLocker locker(cache->lock);
1230 	slab* source = NULL;
1231 
1232 	while (true) {
1233 		source = cache->partial.Head();
1234 		if (source != NULL)
1235 			break;
1236 
1237 		source = cache->empty.RemoveHead();
1238 		if (source != NULL) {
1239 			cache->empty_count--;
1240 			cache->partial.Add(source);
1241 			break;
1242 		}
1243 
1244 		if (object_cache_reserve_internal(cache, 1, flags) != B_OK) {
1245 			T(Alloc(cache, flags, NULL));
1246 			return NULL;
1247 		}
1248 
1249 		cache->pressure++;
1250 	}
1251 
1252 	ParanoiaChecker _2(source);
1253 
1254 	object_link* link = _pop(source->free);
1255 	source->count--;
1256 	cache->used_count++;
1257 
1258 	if (cache->total_objects - cache->used_count < cache->min_object_reserve)
1259 		increase_object_reserve(cache);
1260 
1261 	REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next,
1262 		sizeof(void*));
1263 
1264 	TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.",
1265 		link_to_object(link, cache->object_size), link, source, source->count);
1266 
1267 	if (source->count == 0) {
1268 		cache->partial.Remove(source);
1269 		cache->full.Add(source);
1270 	}
1271 
1272 	void* object = link_to_object(link, cache->object_size);
1273 	locker.Unlock();
1274 
1275 	add_alloc_tracing_entry(cache, flags, object);
1276 	return fill_allocated_block(object, cache->object_size);
1277 }
1278 
1279 
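/*!	Returns \a object to \a cache. With PARANOID_KERNEL_FREE enabled a basic
	double free check is performed first. The object is handed back to the
	depot, or directly to its slab if CACHE_NO_DEPOT is set.
*/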
1280 void
1281 object_cache_free(object_cache* cache, void* object, uint32 flags)
1282 {
1283 	if (object == NULL)
1284 		return;
1285 
1286 	T(Free(cache, object));
1287 
1288 #if PARANOID_KERNEL_FREE
1289 	// TODO: allow forcing the check even if we don't find deadbeef
1290 	if (*(uint32*)object == 0xdeadbeef) {
1291 		if (!cache->AssertObjectNotFreed(object))
1292 			return;
1293 
1294 		if ((cache->flags & CACHE_NO_DEPOT) == 0) {
1295 			if (object_depot_contains_object(&cache->depot, object)) {
1296 				panic("object_cache: object %p is already freed", object);
1297 				return;
1298 			}
1299 		}
1300 	}
1301 
1302 	fill_freed_block(object, cache->object_size);
1303 #endif
1304 
1305 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
1306 	mutex_lock(&cache->lock);
1307 	cache->TrackingInfoFor(object)->Clear();
1308 	mutex_unlock(&cache->lock);
1309 #endif
1310 
1311 	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
1312 		object_depot_store(&cache->depot, object, flags);
1313 		return;
1314 	}
1315 
1316 	MutexLocker _(cache->lock);
1317 	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
1318 }
1319 
1320 
1321 status_t
1322 object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
1323 {
1324 	if (objectCount == 0)
1325 		return B_OK;
1326 
1327 	T(Reserve(cache, objectCount, flags));
1328 
1329 	MutexLocker _(cache->lock);
1330 	return object_cache_reserve_internal(cache, objectCount, flags);
1331 }
1332 
1333 
1334 void
1335 object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
1336 {
1337 	MutexLocker _(cache->lock);
1338 	*_allocatedMemory = cache->usage;
1339 }
1340 
1341 
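/*!	First stage of the slab allocator initialization, called early during
	kernel startup: sets up the MemoryManager, the global object cache list
	and the boot-time block allocator.
*/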
1342 void
1343 slab_init(kernel_args* args)
1344 {
1345 	MemoryManager::Init(args);
1346 
1347 	new (&sObjectCaches) ObjectCacheList();
1348 
1349 	block_allocator_init_boot();
1350 }
1351 
1352 
1353 void
1354 slab_init_post_area()
1355 {
1356 	MemoryManager::InitPostArea();
1357 
1358 	add_debugger_command("slabs", dump_slabs, "list all object caches");
1359 	add_debugger_command("slab_cache", dump_cache_info,
1360 		"dump information about a specific object cache");
1361 	add_debugger_command("slab_depot", dump_object_depot,
1362 		"dump contents of an object depot");
1363 	add_debugger_command("slab_magazine", dump_depot_magazine,
1364 		"dump contents of a depot magazine");
1365 #if SLAB_ALLOCATION_TRACKING_AVAILABLE
1366 	add_debugger_command_etc("allocations_per_caller",
1367 		&dump_allocations_per_caller,
1368 		"Dump current slab allocations summed up per caller",
1369 		"[ -c ] [ -d <caller> ] [ -o <object cache> ] [ -r ]\n"
1370 		"The current allocations will be summed up by caller (their count and\n"
1371 		"size) and printed in decreasing order by size or, if \"-c\" is\n"
1372 		"specified, by allocation count. If given, <object cache> specifies\n"
1373 		"the address of the object cache for which to print the allocations.\n"
1374 		"If \"-d\" is given, each allocation for caller <caller> is printed\n"
1375 		"including the respective stack trace.\n"
1376 		"If \"-r\" is given, the allocation infos are reset after gathering\n"
1377 		"the information, so the next command invocation will only show the\n"
1378 		"allocations made after the reset.\n", 0);
1379 	add_debugger_command_etc("allocation_infos",
1380 		&dump_allocation_infos,
1381 		"Dump current slab allocations",
1382 		"[ --stacktrace ] [ -o <object cache> | -s <slab> | -S <address> ] "
1383 		"[ -a <allocation> ] [ --team <team ID> ] [ --thread <thread ID> ]\n"
1384 		"The current allocations filtered by optional values will be printed.\n"
1385 		"If given, <object cache> specifies the address of the object cache\n"
1386 		"or <slab> specifies the address of a slab, for which to print the\n"
1387 		"allocations. Alternatively <address> specifies any address within\n"
1388 		"a slab allocation range.\n"
1389 		"The optional \"-a\" address filters for a specific allocation,\n"
1390 		"with \"--team\" and \"--thread\" allocations can be filtered by\n"
1391 		"specific teams and/or threads (these only work if a corresponding\n"
1392 		"tracing entry is still available).\n"
1393 		"If \"--stacktrace\" is given, then stack traces of the allocation\n"
1394 		"callers are printed, where available.\n", 0);
1395 #endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
1396 }
1397 
1398 
1399 void
1400 slab_init_post_sem()
1401 {
1402 	register_low_resource_handler(object_cache_low_memory, NULL,
1403 		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY
1404 			| B_KERNEL_RESOURCE_ADDRESS_SPACE, 5);
1405 
1406 	block_allocator_init_rest();
1407 }
1408 
1409 
1410 void
1411 slab_init_post_thread()
1412 {
1413 	new(&sMaintenanceQueue) MaintenanceQueue;
1414 	sMaintenanceCondition.Init(&sMaintenanceQueue, "object cache maintainer");
1415 
1416 	thread_id objectCacheResizer = spawn_kernel_thread(object_cache_maintainer,
1417 		"object cache resizer", B_URGENT_PRIORITY, NULL);
1418 	if (objectCacheResizer < 0) {
1419 		panic("slab_init_post_thread(): failed to spawn object cache resizer "
1420 			"thread\n");
1421 		return;
1422 	}
1423 
1424 	resume_thread(objectCacheResizer);
1425 }
1426 
1427 
1428 RANGE_MARKER_FUNCTION_END(Slab)
1429 
1430 
1431 #endif	// !USE_GUARDED_HEAP_FOR_OBJECT_CACHE
1432