/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2008-2010, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include <slab/Slab.h>

#include <algorithm>
#include <new>
#include <stdlib.h>
#include <string.h>

#include <KernelExport.h>

#include <condition_variable.h>
#include <elf.h>
#include <kernel.h>
#include <low_resource_manager.h>
#include <slab/ObjectDepot.h>
#include <smp.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "HashedObjectCache.h"
#include "MemoryManager.h"
#include "slab_debug.h"
#include "slab_private.h"
#include "SmallObjectCache.h"


#if !USE_GUARDED_HEAP_FOR_OBJECT_CACHE


typedef DoublyLinkedList<ObjectCache> ObjectCacheList;

typedef DoublyLinkedList<ObjectCache,
	DoublyLinkedListMemberGetLink<ObjectCache, &ObjectCache::maintenance_link> >
	MaintenanceQueue;

static ObjectCacheList sObjectCaches;
static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");

static mutex sMaintenanceLock
	= MUTEX_INITIALIZER("object cache resize requests");
static MaintenanceQueue sMaintenanceQueue;
static ConditionVariable sMaintenanceCondition;


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

struct caller_info {
	addr_t caller;
	size_t count;
	size_t size;
};

static const int32 kCallerInfoTableSize = 1024;
static caller_info sCallerInfoTable[kCallerInfoTableSize];
static int32 sCallerInfoCount = 0;

static caller_info* get_caller_info(addr_t caller);


RANGE_MARKER_FUNCTION_PROTOTYPES(slab_allocator)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabHashedObjectCache)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabMemoryManager)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectCache)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectDepot)
RANGE_MARKER_FUNCTION_PROTOTYPES(Slab)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabSmallObjectCache)


static const addr_t kSlabCodeAddressRanges[] = {
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(slab_allocator),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabHashedObjectCache),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabMemoryManager),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectCache),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectDepot),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(Slab),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabSmallObjectCache)
};

static const uint32 kSlabCodeAddressRangeCount
	= B_COUNT_OF(kSlabCodeAddressRanges) / 2;

#endif // SLAB_ALLOCATION_TRACKING_AVAILABLE


RANGE_MARKER_FUNCTION_BEGIN(Slab)

#if SLAB_OBJECT_CACHE_TRACING


namespace SlabObjectCacheTracing {

class ObjectCacheTraceEntry
	: public TRACE_ENTRY_SELECTOR(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE) {
public:
	ObjectCacheTraceEntry(ObjectCache* cache)
		:
		TraceEntryBase(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE, 0, true),
		fCache(cache)
	{
	}

protected:
	ObjectCache* fCache;
};


class Create : public ObjectCacheTraceEntry {
public:
	Create(const char* name, size_t objectSize, size_t alignment,
		size_t maxByteUsage, uint32 flags, void* cookie,
		ObjectCache* cache)
		:
		ObjectCacheTraceEntry(cache),
		fObjectSize(objectSize),
		fAlignment(alignment),
		fMaxByteUsage(maxByteUsage),
		fFlags(flags),
		fCookie(cookie)
	{
		fName = alloc_tracing_buffer_strcpy(name, 64, false);
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("object cache create: name: \"%s\", object size: "
			"%" B_PRIuSIZE ", alignment: %" B_PRIuSIZE ", max usage: "
			"%" B_PRIuSIZE ", flags: 0x%" B_PRIx32 ", cookie: %p -> cache: %p",
			fName, fObjectSize, fAlignment, fMaxByteUsage, fFlags,
			fCookie, fCache);
	}

private:
	const char* fName;
	size_t fObjectSize;
	size_t fAlignment;
	size_t fMaxByteUsage;
	uint32 fFlags;
	void* fCookie;
};


class Delete : public ObjectCacheTraceEntry {
public:
	Delete(ObjectCache* cache)
		:
		ObjectCacheTraceEntry(cache)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("object cache delete: %p", fCache);
	}
};


class Alloc : public ObjectCacheTraceEntry {
public:
	Alloc(ObjectCache* cache, uint32 flags, void* object)
		:
		ObjectCacheTraceEntry(cache),
		fFlags(flags),
		fObject(object)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("object cache alloc: cache: %p, flags: 0x%" B_PRIx32
			" -> object: %p", fCache, fFlags, fObject);
	}

private:
	uint32 fFlags;
	void* fObject;
};


class Free : public ObjectCacheTraceEntry {
public:
	Free(ObjectCache* cache, void* object)
		:
		ObjectCacheTraceEntry(cache),
		fObject(object)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("object cache free: cache: %p, object: %p", fCache,
			fObject);
	}

private:
	void* fObject;
};


class Reserve : public ObjectCacheTraceEntry {
public:
	Reserve(ObjectCache* cache, size_t count, uint32 flags)
		:
		ObjectCacheTraceEntry(cache),
		fCount(count),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("object cache reserve: cache: %p, count: %" B_PRIu32 ", "
			"flags: 0x%" B_PRIx32, fCache, fCount, fFlags);
	}

private:
	uint32 fCount;
	uint32 fFlags;
};


}	// namespace SlabObjectCacheTracing

#	define T(x) new(std::nothrow) SlabObjectCacheTracing::x

#else
#	define T(x)
#endif // SLAB_OBJECT_CACHE_TRACING


// #pragma mark -

static void
dump_slab(::slab* slab)
{
	kprintf(" %p %p %6" B_PRIuSIZE " %6" B_PRIuSIZE " %6" B_PRIuSIZE " %p\n",
		slab, slab->pages, slab->size, slab->count, slab->offset, slab->free);
}


static int
dump_slabs(int argc, char* argv[])
{
	kprintf("%*s %22s %8s %8s %8s %6s %8s %8s %8s\n",
		B_PRINTF_POINTER_WIDTH + 2, "address", "name", "objsize", "align",
		"usage", "empty", "usedobj", "total", "flags");

	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();

	while (it.HasNext()) {
		ObjectCache* cache = it.Next();

		kprintf("%p %22s %8lu %8" B_PRIuSIZE " %8lu %6lu %8lu %8lu %8" B_PRIx32
			"\n", cache, cache->name, cache->object_size, cache->alignment,
			cache->usage, cache->empty_count, cache->used_count,
			cache->total_objects, cache->flags);
	}

	return 0;
}


static int
dump_cache_info(int argc, char* argv[])
{
	if (argc < 2) {
		kprintf("usage: slab_cache [address]\n");
		return 0;
	}

	ObjectCache* cache = (ObjectCache*)parse_expression(argv[1]);

	kprintf("name: %s\n", cache->name);
	kprintf("lock: %p\n", &cache->lock);
	kprintf("object_size: %lu\n", cache->object_size);
	kprintf("alignment: %" B_PRIuSIZE "\n", cache->alignment);
	kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle);
	kprintf("total_objects: %lu\n", cache->total_objects);
	kprintf("used_count: %lu\n", cache->used_count);
	kprintf("empty_count: %lu\n", cache->empty_count);
	kprintf("pressure: %lu\n", cache->pressure);
	kprintf("slab_size: %lu\n", cache->slab_size);
	kprintf("usage: %lu\n", cache->usage);
	kprintf("maximum: %lu\n", cache->maximum);
	kprintf("flags: 0x%" B_PRIx32 "\n", cache->flags);
	kprintf("cookie: %p\n", cache->cookie);
	kprintf("resize entry don't wait: %p\n", cache->resize_entry_dont_wait);
	kprintf("resize entry can wait: %p\n", cache->resize_entry_can_wait);

	kprintf(" %-*s %-*s size used offset free\n",
		B_PRINTF_POINTER_WIDTH, "slab", B_PRINTF_POINTER_WIDTH, "chunk");

	SlabList::Iterator iterator = cache->empty.GetIterator();
	if (iterator.HasNext())
		kprintf("empty:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->partial.GetIterator();
	if (iterator.HasNext())
		kprintf("partial:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->full.GetIterator();
	if (iterator.HasNext())
		kprintf("full:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		kprintf("depot:\n");
		dump_object_depot(&cache->depot);
	}

	return 0;
}

static int
dump_object_info(int argc, char* argv[])
{
	if (argc < 2) {
		kprintf("usage: slab_object [address]\n");
		return 0;
	}

	void* object = (void*)parse_expression(argv[1]);
	ObjectCache* cache = MemoryManager::DebugObjectCacheForAddress(object);
	if (cache == NULL) {
		kprintf("%p does not seem to be in an object_cache\n", object);
		return 1;
	}

	kprintf("address %p\n", object);
	kprintf("\tslab_cache: %p (%s)\n", cache, cache->name);

	MutexTryLocker cacheLocker(cache->lock);
	if (cacheLocker.IsLocked()) {
		slab* slab = cache->ObjectSlab(object);
		const char* slabType = cache->empty.Contains(slab) ? "empty"
			: cache->partial.Contains(slab) ? "partial"
			: cache->full.Contains(slab) ? "full" : NULL;

		kprintf("\tobject is in %s slab: %p\n", slabType, slab);
	}

	return 0;
}


// #pragma mark - AllocationTrackingCallback


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

AllocationTrackingCallback::~AllocationTrackingCallback()
{
}

#endif // SLAB_ALLOCATION_TRACKING_AVAILABLE


// #pragma mark -


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

namespace {

class AllocationCollectorCallback : public AllocationTrackingCallback {
public:
	AllocationCollectorCallback(bool resetInfos)
		:
		fResetInfos(resetInfos)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		addr_t caller = 0;
		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();

		if (traceEntry != NULL && info->IsTraceEntryValid()) {
			caller = tracing_find_caller_in_stack_trace(
				traceEntry->StackTrace(), kSlabCodeAddressRanges,
				kSlabCodeAddressRangeCount);
		}

		caller_info* callerInfo = get_caller_info(caller);
		if (callerInfo == NULL) {
			kprintf("out of space for caller infos\n");
			return false;
		}

		callerInfo->count++;
		callerInfo->size += allocationSize;

		if (fResetInfos)
			info->Clear();

		return true;
	}

private:
	bool fResetInfos;
};


class AllocationInfoPrinterCallback : public AllocationTrackingCallback {
public:
	AllocationInfoPrinterCallback(bool printStackTrace, addr_t addressFilter,
		team_id teamFilter, thread_id threadFilter)
		:
		fPrintStackTrace(printStackTrace),
		fAddressFilter(addressFilter),
		fTeamFilter(teamFilter),
		fThreadFilter(threadFilter)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		if (fAddressFilter != 0 && (addr_t)allocation != fAddressFilter)
			return true;

		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
		if (traceEntry != NULL && !info->IsTraceEntryValid())
			traceEntry = NULL;

		if (traceEntry != NULL) {
			if (fTeamFilter != -1 && traceEntry->TeamID() != fTeamFilter)
				return true;
			if (fThreadFilter != -1 && traceEntry->ThreadID() != fThreadFilter)
				return true;
		} else {
			// we need the info if we have filters set
			if (fTeamFilter != -1 || fThreadFilter != -1)
				return true;
		}

		kprintf("allocation %p, size: %" B_PRIuSIZE, allocation,
			allocationSize);

		if (traceEntry != NULL) {
			kprintf(", team: %" B_PRId32 ", thread %" B_PRId32
				", time %" B_PRId64 "\n", traceEntry->TeamID(),
				traceEntry->ThreadID(), traceEntry->Time());

			if (fPrintStackTrace)
				tracing_print_stack_trace(traceEntry->StackTrace());
		} else
			kprintf("\n");

		return true;
	}

private:
	bool fPrintStackTrace;
	addr_t fAddressFilter;
	team_id fTeamFilter;
	thread_id fThreadFilter;
};


class AllocationDetailPrinterCallback : public AllocationTrackingCallback {
public:
	AllocationDetailPrinterCallback(addr_t caller)
		:
		fCaller(caller)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		addr_t caller = 0;
		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
		if (traceEntry != NULL && !info->IsTraceEntryValid())
			traceEntry = NULL;

		if (traceEntry != NULL) {
			caller = tracing_find_caller_in_stack_trace(
				traceEntry->StackTrace(), kSlabCodeAddressRanges,
				kSlabCodeAddressRangeCount);
		}

		if (caller != fCaller)
			return true;

		kprintf("allocation %p, size: %" B_PRIuSIZE "\n", allocation,
			allocationSize);
		if (traceEntry != NULL)
			tracing_print_stack_trace(traceEntry->StackTrace());

		return true;
	}

private:
	addr_t fCaller;
};

}	// unnamed namespace

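
// Returns the caller_info entry for \a caller, adding a new one as long as the
// table still has room; returns NULL once all kCallerInfoTableSize slots are
// in use.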
static caller_info*
get_caller_info(addr_t caller)
{
	// find the caller info
	for (int32 i = 0; i < sCallerInfoCount; i++) {
		if (caller == sCallerInfoTable[i].caller)
			return &sCallerInfoTable[i];
	}

	// not found, add a new entry, if there are free slots
	if (sCallerInfoCount >= kCallerInfoTableSize)
		return NULL;

	caller_info* info = &sCallerInfoTable[sCallerInfoCount++];
	info->caller = caller;
	info->count = 0;
	info->size = 0;

	return info;
}


static int
caller_info_compare_size(const void* _a, const void* _b)
{
	const caller_info* a = (const caller_info*)_a;
	const caller_info* b = (const caller_info*)_b;
	return (int)(b->size - a->size);
}


static int
caller_info_compare_count(const void* _a, const void* _b)
{
	const caller_info* a = (const caller_info*)_a;
	const caller_info* b = (const caller_info*)_b;
	return (int)(b->count - a->count);
}


#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

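// Invokes \a callback on the tracking info of every object in \a slab,
// stopping early if the callback returns false.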
static bool
analyze_allocation_callers(ObjectCache* cache, slab* slab,
	AllocationTrackingCallback& callback)
{
	for (uint32 i = 0; i < slab->size; i++) {
		if (!callback.ProcessTrackingInfo(&slab->tracking[i],
				cache->ObjectAtIndex(slab, i), cache->object_size)) {
			return false;
		}
	}

	return true;
}


static bool
analyze_allocation_callers(ObjectCache* cache, const SlabList& slabList,
	AllocationTrackingCallback& callback)
{
	for (SlabList::ConstIterator it = slabList.GetIterator();
			slab* slab = it.Next();) {
		if (!analyze_allocation_callers(cache, slab, callback))
			return false;
	}

	return true;
}


static bool
analyze_allocation_callers(ObjectCache* cache,
	AllocationTrackingCallback& callback)
{
	return analyze_allocation_callers(cache, cache->full, callback)
		&& analyze_allocation_callers(cache, cache->partial, callback);
}

#endif // SLAB_OBJECT_CACHE_ALLOCATION_TRACKING


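// Debugger command backing "allocation_infos" (see slab_init_post_area() for
// the usage text): prints the tracked allocations, optionally filtered by
// object cache, slab, address, team, or thread.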
static int
dump_allocation_infos(int argc, char **argv)
{
	ObjectCache* cache = NULL;
	slab* slab = NULL;
	addr_t addressFilter = 0;
	team_id teamFilter = -1;
	thread_id threadFilter = -1;
	bool printStackTraces = false;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "--stacktrace") == 0)
			printStackTraces = true;
		else if (strcmp(argv[i], "-a") == 0) {
			uint64 address;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &address, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			addressFilter = address;
		} else if (strcmp(argv[i], "-o") == 0) {
			uint64 cacheAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			cache = (ObjectCache*)(addr_t)cacheAddress;
		} else if (strcasecmp(argv[i], "-s") == 0) {
			uint64 slabAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &slabAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			void* slabPages = (void*)slabAddress;
			if (strcmp(argv[i - 1], "-s") == 0) {
				slab = (struct slab*)(addr_t)slabAddress;
				slabPages = slab->pages;
			}

			cache = MemoryManager::DebugObjectCacheForAddress(slabPages);
			if (cache == NULL) {
				kprintf("Couldn't find object cache for address %p.\n",
					slabPages);
				return 0;
			}

			if (slab == NULL) {
				slab = cache->ObjectSlab(slabPages);

				if (slab == NULL) {
					kprintf("Couldn't find slab for address %p.\n", slabPages);
					return 0;
				}
			}
		} else if (strcmp(argv[i], "--team") == 0) {
			uint64 team;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &team, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			teamFilter = team;
		} else if (strcmp(argv[i], "--thread") == 0) {
			uint64 thread;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &thread, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			threadFilter = thread;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	AllocationInfoPrinterCallback callback(printStackTraces, addressFilter,
		teamFilter, threadFilter);

	if (slab != NULL || cache != NULL) {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		if (slab != NULL) {
			if (!analyze_allocation_callers(cache, slab, callback))
				return 0;
		} else if (cache != NULL) {
			if (!analyze_allocation_callers(cache, callback))
				return 0;
		}
#else
		kprintf("Object cache allocation tracking not available. "
			"SLAB_OBJECT_CACHE_TRACING (%d) and "
			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
		return 0;
#endif
	} else {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
				it.HasNext();) {
			if (!analyze_allocation_callers(it.Next(), callback))
				return 0;
		}
#endif

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
		if (!MemoryManager::AnalyzeAllocationCallers(callback))
			return 0;
#endif
	}

	return 0;
}

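// Debugger command backing "allocations_per_caller": sums up the tracked
// allocations per calling function and prints them sorted by size or count.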
static int
dump_allocations_per_caller(int argc, char **argv)
{
	bool sortBySize = true;
	bool resetAllocationInfos = false;
	bool printDetails = false;
	ObjectCache* cache = NULL;
	addr_t caller = 0;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "-c") == 0) {
			sortBySize = false;
		} else if (strcmp(argv[i], "-d") == 0) {
			uint64 callerAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &callerAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			caller = callerAddress;
			printDetails = true;
		} else if (strcmp(argv[i], "-o") == 0) {
			uint64 cacheAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			cache = (ObjectCache*)(addr_t)cacheAddress;
		} else if (strcmp(argv[i], "-r") == 0) {
			resetAllocationInfos = true;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	sCallerInfoCount = 0;

	AllocationCollectorCallback collectorCallback(resetAllocationInfos);
	AllocationDetailPrinterCallback detailsCallback(caller);
	AllocationTrackingCallback& callback = printDetails
		? (AllocationTrackingCallback&)detailsCallback
		: (AllocationTrackingCallback&)collectorCallback;

	if (cache != NULL) {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		if (!analyze_allocation_callers(cache, callback))
			return 0;
#else
		kprintf("Object cache allocation tracking not available. "
			"SLAB_OBJECT_CACHE_TRACING (%d) and "
			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
		return 0;
#endif
	} else {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
				it.HasNext();) {
			if (!analyze_allocation_callers(it.Next(), callback))
				return 0;
		}
#endif

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
		if (!MemoryManager::AnalyzeAllocationCallers(callback))
			return 0;
#endif
	}

	if (printDetails)
		return 0;

	// sort the array
	qsort(sCallerInfoTable, sCallerInfoCount, sizeof(caller_info),
		sortBySize ? &caller_info_compare_size : &caller_info_compare_count);

	kprintf("%" B_PRId32 " different callers, sorted by %s...\n\n",
		sCallerInfoCount, sortBySize ? "size" : "count");

	size_t totalAllocationSize = 0;
	size_t totalAllocationCount = 0;

	kprintf(" count size caller\n");
	kprintf("----------------------------------\n");
	for (int32 i = 0; i < sCallerInfoCount; i++) {
		caller_info& info = sCallerInfoTable[i];
		kprintf("%10" B_PRIuSIZE " %10" B_PRIuSIZE " %p", info.count,
			info.size, (void*)info.caller);

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(info.caller, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			kprintf(" %s + %#" B_PRIxADDR " (%s)%s\n", symbol,
				info.caller - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			kprintf("\n");

		totalAllocationCount += info.count;
		totalAllocationSize += info.size;
	}

	kprintf("\ntotal allocations: %" B_PRIuSIZE ", %" B_PRIuSIZE " bytes\n",
		totalAllocationCount, totalAllocationSize);

	return 0;
}

#endif // SLAB_ALLOCATION_TRACKING_AVAILABLE


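// Records a tracing entry for an allocation from \a cache and, with allocation
// tracking enabled, attaches it to the object's tracking info (which requires
// holding the cache lock).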
void
add_alloc_tracing_entry(ObjectCache* cache, uint32 flags, void* object)
{
#if SLAB_OBJECT_CACHE_TRACING
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
	MutexLocker _(cache->lock);
	cache->TrackingInfoFor(object)->Init(T(Alloc(cache, flags, object)));
#else
	T(Alloc(cache, flags, object));
#endif
#endif
}


// #pragma mark -


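// Wakes up the maintenance thread, which will also run
// MemoryManager::PerformMaintenance() when needed.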
void
request_memory_manager_maintenance()
{
	MutexLocker locker(sMaintenanceLock);
	sMaintenanceCondition.NotifyAll();
}


// #pragma mark -


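// Actually destroys \a cache: empties the depot, returns the remaining empty
// slabs, and frees the cache object. Full or partial slabs at this point are
// a bug and cause a panic.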
static void
delete_object_cache_internal(object_cache* cache)
{
	if (!(cache->flags & CACHE_NO_DEPOT))
		object_depot_destroy(&cache->depot, 0);

	mutex_lock(&cache->lock);

	if (!cache->full.IsEmpty())
		panic("cache destroy: still has full slabs");

	if (!cache->partial.IsEmpty())
		panic("cache destroy: still has partial slabs");

	while (!cache->empty.IsEmpty())
		cache->ReturnSlab(cache->empty.RemoveHead(), 0);

	mutex_destroy(&cache->lock);
	cache->Delete();
}


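// Queues \a cache for the maintenance thread so that its minimum object
// reserve gets replenished asynchronously.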
static void
increase_object_reserve(ObjectCache* cache)
{
	MutexLocker locker(sMaintenanceLock);

	cache->maintenance_resize = true;

	if (!cache->maintenance_pending) {
		cache->maintenance_pending = true;
		sMaintenanceQueue.Add(cache);
		sMaintenanceCondition.NotifyAll();
	}
}


/*!	Makes sure that \a objectCount objects can be allocated.
*/
static status_t
object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
	uint32 flags)
{
	// If someone else is already adding slabs, we wait for that to be finished
	// first.
	thread_id thread = find_thread(NULL);
	while (true) {
		if (objectCount <= cache->total_objects - cache->used_count)
			return B_OK;

		ObjectCacheResizeEntry* resizeEntry = NULL;
		if (cache->resize_entry_dont_wait != NULL) {
			resizeEntry = cache->resize_entry_dont_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;
			// Note: We could still have reentered the function, i.e.
			// resize_entry_can_wait would be ours. That doesn't matter much,
			// though, since after the don't-wait thread has done its job
			// everyone will be happy.
		} else if (cache->resize_entry_can_wait != NULL) {
			resizeEntry = cache->resize_entry_can_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;

			if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0)
				break;
		} else
			break;

		resizeEntry->condition.Wait(&cache->lock);
	}

	// prepare the resize entry others can wait on
	ObjectCacheResizeEntry*& resizeEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;

	ObjectCacheResizeEntry myResizeEntry;
	resizeEntry = &myResizeEntry;
	resizeEntry->condition.Init(cache, "wait for slabs");
	resizeEntry->thread = thread;

	// add new slabs until there are as many free ones as requested
	while (objectCount > cache->total_objects - cache->used_count) {
		slab* newSlab = cache->CreateSlab(flags);
		if (newSlab == NULL) {
			resizeEntry->condition.NotifyAll();
			resizeEntry = NULL;
			return B_NO_MEMORY;
		}

		cache->usage += cache->slab_size;
		cache->total_objects += newSlab->size;

		cache->empty.Add(newSlab);
		cache->empty_count++;
	}

	resizeEntry->condition.NotifyAll();
	resizeEntry = NULL;

	return B_OK;
}


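// Low resource handler: iterates over all object caches, lets their reclaimers
// release objects, empties the depots, and returns empty slabs as far as the
// pressure level and the minimum object reserve allow.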
static void
object_cache_low_memory(void* dummy, uint32 resources, int32 level)
{
	if (level == B_NO_LOW_RESOURCE)
		return;

	MutexLocker cacheListLocker(sObjectCacheListLock);

	// Append the first cache to the end of the queue. We assume that it is
	// one of the caches that will never be deleted and thus we use it as a
	// marker.
	ObjectCache* firstCache = sObjectCaches.RemoveHead();
	sObjectCaches.Add(firstCache);
	cacheListLocker.Unlock();

	ObjectCache* cache;
	do {
		cacheListLocker.Lock();

		cache = sObjectCaches.RemoveHead();
		sObjectCaches.Add(cache);

		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_pending || cache->maintenance_in_progress) {
			// We don't want to mess with caches in maintenance.
			continue;
		}

		cache->maintenance_pending = true;
		cache->maintenance_in_progress = true;

		maintenanceLocker.Unlock();
		cacheListLocker.Unlock();

		// We are calling the reclaimer without the object cache lock
		// to give the owner a chance to return objects to the slabs.

		if (cache->reclaimer)
			cache->reclaimer(cache->cookie, level);

		if ((cache->flags & CACHE_NO_DEPOT) == 0)
			object_depot_make_empty(&cache->depot, 0);

		MutexLocker cacheLocker(cache->lock);
		size_t minimumAllowed;

		switch (level) {
			case B_LOW_RESOURCE_NOTE:
				minimumAllowed = cache->pressure / 2 + 1;
				cache->pressure -= cache->pressure / 8;
				break;

			case B_LOW_RESOURCE_WARNING:
				cache->pressure /= 2;
				minimumAllowed = 0;
				break;

			default:
				cache->pressure = 0;
				minimumAllowed = 0;
				break;
		}

		while (cache->empty_count > minimumAllowed) {
			// make sure we respect the cache's minimum object reserve
			size_t objectsPerSlab = cache->empty.Head()->size;
			size_t freeObjects = cache->total_objects - cache->used_count;
			if (freeObjects < cache->min_object_reserve + objectsPerSlab)
				break;

			cache->ReturnSlab(cache->empty.RemoveHead(), 0);
			cache->empty_count--;
		}

		cacheLocker.Unlock();

		// Check whether in the meantime someone has really requested
		// maintenance for the cache.
		maintenanceLocker.Lock();

		if (cache->maintenance_delete) {
			delete_object_cache_internal(cache);
			continue;
		}

		cache->maintenance_in_progress = false;

		if (cache->maintenance_resize)
			sMaintenanceQueue.Add(cache);
		else
			cache->maintenance_pending = false;
	} while (cache != firstCache);
}


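// Kernel thread ("object cache resizer") processing sMaintenanceQueue: it
// refills the minimum object reserve of queued caches, performs postponed
// cache deletions, and triggers memory manager maintenance when needed.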
static status_t
object_cache_maintainer(void*)
{
	while (true) {
		MutexLocker locker(sMaintenanceLock);

		// wait for the next request
		while (sMaintenanceQueue.IsEmpty()) {
			// perform memory manager maintenance, if needed
			if (MemoryManager::MaintenanceNeeded()) {
				locker.Unlock();
				MemoryManager::PerformMaintenance();
				locker.Lock();
				continue;
			}

			sMaintenanceCondition.Wait(locker.Get());
		}

		ObjectCache* cache = sMaintenanceQueue.RemoveHead();

		while (true) {
			bool resizeRequested = cache->maintenance_resize;
			bool deleteRequested = cache->maintenance_delete;

			if (!resizeRequested && !deleteRequested) {
				cache->maintenance_pending = false;
				cache->maintenance_in_progress = false;
				break;
			}

			cache->maintenance_resize = false;
			cache->maintenance_in_progress = true;

			locker.Unlock();

			if (deleteRequested) {
				delete_object_cache_internal(cache);
				break;
			}

			// resize the cache, if necessary

			MutexLocker cacheLocker(cache->lock);

			if (resizeRequested) {
				status_t error = object_cache_reserve_internal(cache,
					cache->min_object_reserve, 0);
				if (error != B_OK) {
					dprintf("object cache resizer: Failed to resize object "
						"cache %p!\n", cache);
					break;
				}
			}

			locker.Lock();
		}
	}

	// never can get here
	return B_OK;
}


// #pragma mark - public API

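/*	Illustrative usage sketch only (not part of the implementation; the type
	my_object and the variable names are hypothetical):

		static object_cache* sMyCache;

		sMyCache = create_object_cache("my objects", sizeof(my_object), 0,
			NULL, NULL, NULL);
		if (sMyCache == NULL)
			return B_NO_MEMORY;

		my_object* object = (my_object*)object_cache_alloc(sMyCache, 0);
		// ... use the object ...
		object_cache_free(sMyCache, object, 0);

		delete_object_cache(sMyCache);

	A maximum size, magazine parameters, CACHE_* flags, and constructor,
	destructor, and reclaimer hooks can be passed via create_object_cache_etc()
	instead.
*/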

object_cache*
create_object_cache(const char* name, size_t object_size, size_t alignment,
	void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor)
{
	return create_object_cache_etc(name, object_size, alignment, 0, 0, 0, 0,
		cookie, constructor, destructor, NULL);
}


object_cache*
create_object_cache_etc(const char* name, size_t objectSize, size_t alignment,
	size_t maximum, size_t magazineCapacity, size_t maxMagazineCount,
	uint32 flags, void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor, object_cache_reclaimer reclaimer)
{
	ObjectCache* cache;

	if (objectSize == 0) {
		cache = NULL;
	} else if (objectSize <= 256) {
		cache = SmallObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	} else {
		cache = HashedObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	}

	if (cache != NULL) {
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Add(cache);
	}

	T(Create(name, objectSize, alignment, maximum, flags, cookie, cache));
	return cache;
}


void
delete_object_cache(object_cache* cache)
{
	T(Delete(cache));

	{
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Remove(cache);
	}

	MutexLocker cacheLocker(cache->lock);

	{
		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_in_progress) {
			// The maintainer thread is working with the cache. Just mark it
			// to be deleted.
			cache->maintenance_delete = true;
			return;
		}

		// unschedule maintenance
		if (cache->maintenance_pending)
			sMaintenanceQueue.Remove(cache);
	}

	// at this point no-one should have a reference to the cache anymore
	cacheLocker.Unlock();

	delete_object_cache_internal(cache);
}


status_t
object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
{
	MutexLocker _(cache->lock);

	if (cache->min_object_reserve == objectCount)
		return B_OK;

	cache->min_object_reserve = objectCount;

	increase_object_reserve(cache);

	return B_OK;
}


void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
	if (!(cache->flags & CACHE_NO_DEPOT)) {
		void* object = object_depot_obtain(&cache->depot);
		if (object) {
			add_alloc_tracing_entry(cache, flags, object);
			return fill_allocated_block(object, cache->object_size);
		}
	}

	MutexLocker locker(cache->lock);
	slab* source = NULL;

	while (true) {
		source = cache->partial.Head();
		if (source != NULL)
			break;

		source = cache->empty.RemoveHead();
		if (source != NULL) {
			cache->empty_count--;
			cache->partial.Add(source);
			break;
		}

		if (object_cache_reserve_internal(cache, 1, flags) != B_OK) {
			T(Alloc(cache, flags, NULL));
			return NULL;
		}

		cache->pressure++;
	}

	ParanoiaChecker _2(source);

	object_link* link = _pop(source->free);
	source->count--;
	cache->used_count++;

	if (cache->total_objects - cache->used_count < cache->min_object_reserve)
		increase_object_reserve(cache);

	REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next,
		sizeof(void*));

	TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.",
		link_to_object(link, cache->object_size), link, source, source->count);

	if (source->count == 0) {
		cache->partial.Remove(source);
		cache->full.Add(source);
	}

	void* object = link_to_object(link, cache->object_size);
	locker.Unlock();

	add_alloc_tracing_entry(cache, flags, object);
	return fill_allocated_block(object, cache->object_size);
}


void
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
	if (object == NULL)
		return;

	T(Free(cache, object));

#if PARANOID_KERNEL_FREE
	// TODO: allow forcing the check even if we don't find deadbeef
	if (*(uint32*)object == 0xdeadbeef) {
		if (!cache->AssertObjectNotFreed(object))
			return;

		if ((cache->flags & CACHE_NO_DEPOT) == 0) {
			if (object_depot_contains_object(&cache->depot, object)) {
				panic("object_cache: object %p is already freed", object);
				return;
			}
		}
	}

	fill_freed_block(object, cache->object_size);
#endif

#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
	mutex_lock(&cache->lock);
	cache->TrackingInfoFor(object)->Clear();
	mutex_unlock(&cache->lock);
#endif

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		object_depot_store(&cache->depot, object, flags);
		return;
	}

	MutexLocker _(cache->lock);
	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
}


status_t
object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
{
	if (objectCount == 0)
		return B_OK;

	T(Reserve(cache, objectCount, flags));

	MutexLocker _(cache->lock);
	return object_cache_reserve_internal(cache, objectCount, flags);
}


void
object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
{
	MutexLocker _(cache->lock);
	*_allocatedMemory = cache->usage;
}


void
slab_init(kernel_args* args)
{
	MemoryManager::Init(args);

	new (&sObjectCaches) ObjectCacheList();

	block_allocator_init_boot();
}


void
slab_init_post_area()
{
	MemoryManager::InitPostArea();

	add_debugger_command("slabs", dump_slabs, "list all object caches");
	add_debugger_command("slab_cache", dump_cache_info,
		"dump information about a specific object cache");
	add_debugger_command("slab_depot", dump_object_depot,
		"dump contents of an object depot");
	add_debugger_command("slab_magazine", dump_depot_magazine,
		"dump contents of a depot magazine");
	add_debugger_command("slab_object", dump_object_info,
		"dump information about an object in an object_cache");
#if SLAB_ALLOCATION_TRACKING_AVAILABLE
	add_debugger_command_etc("allocations_per_caller",
		&dump_allocations_per_caller,
		"Dump current slab allocations summed up per caller",
		"[ -c ] [ -d <caller> ] [ -o <object cache> ] [ -r ]\n"
		"The current allocations will be summed up by caller (their count\n"
		"and size), printed in decreasing order by size or, if \"-c\" is\n"
		"specified, by allocation count. If given, <object cache> specifies\n"
		"the address of the object cache for which to print the allocations.\n"
		"If \"-d\" is given, each allocation for caller <caller> is printed\n"
		"including the respective stack trace.\n"
		"If \"-r\" is given, the allocation infos are reset after gathering\n"
		"the information, so the next command invocation will only show the\n"
		"allocations made after the reset.\n", 0);
	add_debugger_command_etc("allocation_infos",
		&dump_allocation_infos,
		"Dump current slab allocations",
		"[ --stacktrace ] [ -o <object cache> | -s <slab> | -S <address> ] "
		"[ -a <allocation> ] [ --team <team ID> ] [ --thread <thread ID> ]\n"
		"The current allocations filtered by optional values will be printed.\n"
		"If given, <object cache> specifies the address of the object cache\n"
		"or <slab> specifies the address of a slab for which to print the\n"
		"allocations. Alternatively <address> specifies any address within\n"
		"a slab allocation range.\n"
		"The optional \"-a\" address filters for a specific allocation, and\n"
		"with \"--team\" and \"--thread\" the allocations can be filtered by\n"
		"specific teams and/or threads (these only work if a corresponding\n"
		"tracing entry is still available).\n"
		"If \"--stacktrace\" is given, then stack traces of the allocation\n"
		"callers are printed, where available.\n", 0);
#endif // SLAB_ALLOCATION_TRACKING_AVAILABLE
}


void
slab_init_post_sem()
{
	register_low_resource_handler(object_cache_low_memory, NULL,
		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY
			| B_KERNEL_RESOURCE_ADDRESS_SPACE, 5);

	block_allocator_init_rest();
}


void
slab_init_post_thread()
{
	new(&sMaintenanceQueue) MaintenanceQueue;
	sMaintenanceCondition.Init(&sMaintenanceQueue, "object cache maintainer");

	thread_id objectCacheResizer = spawn_kernel_thread(object_cache_maintainer,
		"object cache resizer", B_URGENT_PRIORITY, NULL);
	if (objectCacheResizer < 0) {
		panic("slab_init_post_thread(): failed to spawn object cache resizer "
			"thread\n");
		return;
	}

	resume_thread(objectCacheResizer);
}


RANGE_MARKER_FUNCTION_END(Slab)


#endif // !USE_GUARDED_HEAP_FOR_OBJECT_CACHE