1 /*
2 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
3 * Distributed under the terms of the MIT License.
4 */
5
6
7 #include "MemoryManager.h"
8
9 #include <algorithm>
10
11 #include <debug.h>
12 #include <tracing.h>
13 #include <util/AutoLock.h>
14 #include <vm/vm.h>
15 #include <vm/vm_page.h>
16 #include <vm/vm_priv.h>
17 #include <vm/VMAddressSpace.h>
18 #include <vm/VMArea.h>
19 #include <vm/VMCache.h>
20 #include <vm/VMTranslationMap.h>
21
22 #include "kernel_debug_config.h"
23
24 #include "ObjectCache.h"
25
26
27 //#define TRACE_MEMORY_MANAGER
28 #ifdef TRACE_MEMORY_MANAGER
29 # define TRACE(x...) dprintf(x)
30 #else
31 # define TRACE(x...) do {} while (false)
32 #endif
33
34 #if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
35 # define PARANOID_CHECKS_ONLY(x) x
36 #else
37 # define PARANOID_CHECKS_ONLY(x)
38 #endif
39
40
41 static const char* const kSlabAreaName = "slab area";
42
43 static void* sAreaTableBuffer[1024];
44
45 mutex MemoryManager::sLock;
46 rw_lock MemoryManager::sAreaTableLock;
47 kernel_args* MemoryManager::sKernelArgs;
48 MemoryManager::AreaTable MemoryManager::sAreaTable;
49 MemoryManager::Area* MemoryManager::sFreeAreas;
50 int MemoryManager::sFreeAreaCount;
51 MemoryManager::MetaChunkList MemoryManager::sFreeCompleteMetaChunks;
52 MemoryManager::MetaChunkList MemoryManager::sFreeShortMetaChunks;
53 MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksSmall;
54 MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksMedium;
55 MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryCanWait;
56 MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryDontWait;
57 bool MemoryManager::sMaintenanceNeeded;
58
59
60 RANGE_MARKER_FUNCTION_BEGIN(SlabMemoryManager)
61
62
63 // #pragma mark - kernel tracing
64
65
66 #if SLAB_MEMORY_MANAGER_TRACING
67
68
69 //namespace SlabMemoryManagerCacheTracing {
70 struct MemoryManager::Tracing {
71
72 class MemoryManagerTraceEntry
73 : public TRACE_ENTRY_SELECTOR(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE) {
74 public:
75 MemoryManagerTraceEntry()
76 :
77 TraceEntryBase(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE, 0, true)
78 {
79 }
80 };
81
82
83 class Allocate : public MemoryManagerTraceEntry {
84 public:
85 Allocate(ObjectCache* cache, uint32 flags)
86 :
87 MemoryManagerTraceEntry(),
88 fCache(cache),
89 fFlags(flags)
90 {
91 Initialized();
92 }
93
94 virtual void AddDump(TraceOutput& out)
95 {
96 out.Print("slab memory manager alloc: cache: %p, flags: %#" B_PRIx32,
97 fCache, fFlags);
98 }
99
100 private:
101 ObjectCache* fCache;
102 uint32 fFlags;
103 };
104
105
106 class Free : public MemoryManagerTraceEntry {
107 public:
108 Free(void* address, uint32 flags)
109 :
110 MemoryManagerTraceEntry(),
111 fAddress(address),
112 fFlags(flags)
113 {
114 Initialized();
115 }
116
117 virtual void AddDump(TraceOutput& out)
118 {
119 out.Print("slab memory manager free: address: %p, flags: %#" B_PRIx32,
120 fAddress, fFlags);
121 }
122
123 private:
124 void* fAddress;
125 uint32 fFlags;
126 };
127
128
129 class AllocateRaw : public MemoryManagerTraceEntry {
130 public:
131 AllocateRaw(size_t size, uint32 flags)
132 :
133 MemoryManagerTraceEntry(),
134 fSize(size),
135 fFlags(flags)
136 {
137 Initialized();
138 }
139
140 virtual void AddDump(TraceOutput& out)
141 {
142 out.Print("slab memory manager alloc raw: size: %" B_PRIuSIZE
143 ", flags: %#" B_PRIx32, fSize, fFlags);
144 }
145
146 private:
147 size_t fSize;
148 uint32 fFlags;
149 };
150
151
152 class FreeRawOrReturnCache : public MemoryManagerTraceEntry {
153 public:
154 FreeRawOrReturnCache(void* address, uint32 flags)
155 :
156 MemoryManagerTraceEntry(),
157 fAddress(address),
158 fFlags(flags)
159 {
160 Initialized();
161 }
162
163 virtual void AddDump(TraceOutput& out)
164 {
165 out.Print("slab memory manager free raw/return: address: %p, flags: %#"
166 B_PRIx32, fAddress, fFlags);
167 }
168
169 private:
170 void* fAddress;
171 uint32 fFlags;
172 };
173
174
175 class AllocateArea : public MemoryManagerTraceEntry {
176 public:
177 AllocateArea(Area* area, uint32 flags)
178 :
179 MemoryManagerTraceEntry(),
180 fArea(area),
181 fFlags(flags)
182 {
183 Initialized();
184 }
185
186 virtual void AddDump(TraceOutput& out)
187 {
188 out.Print("slab memory manager alloc area: flags: %#" B_PRIx32
189 " -> %p", fFlags, fArea);
190 }
191
192 private:
193 Area* fArea;
194 uint32 fFlags;
195 };
196
197
198 class AddArea : public MemoryManagerTraceEntry {
199 public:
200 AddArea(Area* area)
201 :
202 MemoryManagerTraceEntry(),
203 fArea(area)
204 {
205 Initialized();
206 }
207
208 virtual void AddDump(TraceOutput& out)
209 {
210 out.Print("slab memory manager add area: %p", fArea);
211 }
212
213 private:
214 Area* fArea;
215 };
216
217
218 class FreeArea : public MemoryManagerTraceEntry {
219 public:
220 FreeArea(Area* area, bool areaRemoved, uint32 flags)
221 :
222 MemoryManagerTraceEntry(),
223 fArea(area),
224 fFlags(flags),
225 fRemoved(areaRemoved)
226 {
227 Initialized();
228 }
229
230 virtual void AddDump(TraceOutput& out)
231 {
232 out.Print("slab memory manager free area: %p%s, flags: %#" B_PRIx32,
233 fArea, fRemoved ? " (removed)" : "", fFlags);
234 }
235
236 private:
237 Area* fArea;
238 uint32 fFlags;
239 bool fRemoved;
240 };
241
242
243 class AllocateMetaChunk : public MemoryManagerTraceEntry {
244 public:
245 AllocateMetaChunk(MetaChunk* metaChunk)
246 :
247 MemoryManagerTraceEntry(),
248 fMetaChunk(metaChunk->chunkBase)
249 {
250 Initialized();
251 }
252
253 virtual void AddDump(TraceOutput& out)
254 {
255 out.Print("slab memory manager alloc meta chunk: %#" B_PRIxADDR,
256 fMetaChunk);
257 }
258
259 private:
260 addr_t fMetaChunk;
261 };
262
263
264 class FreeMetaChunk : public MemoryManagerTraceEntry {
265 public:
266 FreeMetaChunk(MetaChunk* metaChunk)
267 :
268 MemoryManagerTraceEntry(),
269 fMetaChunk(metaChunk->chunkBase)
270 {
271 Initialized();
272 }
273
274 virtual void AddDump(TraceOutput& out)
275 {
276 out.Print("slab memory manager free meta chunk: %#" B_PRIxADDR,
277 fMetaChunk);
278 }
279
280 private:
281 addr_t fMetaChunk;
282 };
283
284
285 class AllocateChunk : public MemoryManagerTraceEntry {
286 public:
287 AllocateChunk(size_t chunkSize, MetaChunk* metaChunk, Chunk* chunk)
288 :
289 MemoryManagerTraceEntry(),
290 fChunkSize(chunkSize),
291 fMetaChunk(metaChunk->chunkBase),
292 fChunk(chunk - metaChunk->chunks)
293 {
294 Initialized();
295 }
296
297 virtual void AddDump(TraceOutput& out)
298 {
299 out.Print("slab memory manager alloc chunk: size: %" B_PRIuSIZE
300 " -> meta chunk: %#" B_PRIxADDR ", chunk: %" B_PRIu32, fChunkSize,
301 fMetaChunk, fChunk);
302 }
303
304 private:
305 size_t fChunkSize;
306 addr_t fMetaChunk;
307 uint32 fChunk;
308 };
309
310
311 class AllocateChunks : public MemoryManagerTraceEntry {
312 public:
313 AllocateChunks(size_t chunkSize, uint32 chunkCount, MetaChunk* metaChunk,
314 Chunk* chunk)
315 :
316 MemoryManagerTraceEntry(),
317 fMetaChunk(metaChunk->chunkBase),
318 fChunkSize(chunkSize),
319 fChunkCount(chunkCount),
320 fChunk(chunk - metaChunk->chunks)
321 {
322 Initialized();
323 }
324
325 virtual void AddDump(TraceOutput& out)
326 {
327 out.Print("slab memory manager alloc chunks: size: %" B_PRIuSIZE
328 ", count %" B_PRIu32 " -> meta chunk: %#" B_PRIxADDR ", chunk: %"
329 B_PRIu32, fChunkSize, fChunkCount, fMetaChunk, fChunk);
330 }
331
332 private:
333 addr_t fMetaChunk;
334 size_t fChunkSize;
335 uint32 fChunkCount;
336 uint32 fChunk;
337 };
338
339
340 class FreeChunk : public MemoryManagerTraceEntry {
341 public:
342 FreeChunk(MetaChunk* metaChunk, Chunk* chunk)
343 :
344 MemoryManagerTraceEntry(),
345 fMetaChunk(metaChunk->chunkBase),
346 fChunk(chunk - metaChunk->chunks)
347 {
348 Initialized();
349 }
350
351 virtual void AddDump(TraceOutput& out)
352 {
353 out.Print("slab memory manager free chunk: meta chunk: %#" B_PRIxADDR
354 ", chunk: %" B_PRIu32, fMetaChunk, fChunk);
355 }
356
357 private:
358 addr_t fMetaChunk;
359 uint32 fChunk;
360 };
361
362
363 class Map : public MemoryManagerTraceEntry {
364 public:
365 Map(addr_t address, size_t size, uint32 flags)
366 :
367 MemoryManagerTraceEntry(),
368 fAddress(address),
369 fSize(size),
370 fFlags(flags)
371 {
372 Initialized();
373 }
374
375 virtual void AddDump(TraceOutput& out)
376 {
377 out.Print("slab memory manager map: %#" B_PRIxADDR ", size: %"
378 B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
379 }
380
381 private:
382 addr_t fAddress;
383 size_t fSize;
384 uint32 fFlags;
385 };
386
387
388 class Unmap : public MemoryManagerTraceEntry {
389 public:
390 Unmap(addr_t address, size_t size, uint32 flags)
391 :
392 MemoryManagerTraceEntry(),
393 fAddress(address),
394 fSize(size),
395 fFlags(flags)
396 {
397 Initialized();
398 }
399
400 virtual void AddDump(TraceOutput& out)
401 {
402 out.Print("slab memory manager unmap: %#" B_PRIxADDR ", size: %"
403 B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
404 }
405
406 private:
407 addr_t fAddress;
408 size_t fSize;
409 uint32 fFlags;
410 };
411
412
413 //} // namespace SlabMemoryManagerCacheTracing
414 }; // struct MemoryManager::Tracing
415
416
417 //# define T(x) new(std::nothrow) SlabMemoryManagerCacheTracing::x
418 # define T(x) new(std::nothrow) MemoryManager::Tracing::x
419
420 #else
421 # define T(x)
422 #endif // SLAB_MEMORY_MANAGER_TRACING
423
424
425 // #pragma mark - MemoryManager
426
427
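// Sets up the memory manager's locks, meta chunk free lists, and area hash
// table. The kernel_args pointer is kept so that areas can still be obtained
// via vm_allocate_early() until InitPostArea() is called.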
428 /*static*/ void
429 MemoryManager::Init(kernel_args* args)
430 {
431 mutex_init(&sLock, "slab memory manager");
432 rw_lock_init(&sAreaTableLock, "slab memory manager area table");
433 sKernelArgs = args;
434
435 new(&sFreeCompleteMetaChunks) MetaChunkList;
436 new(&sFreeShortMetaChunks) MetaChunkList;
437 new(&sPartialMetaChunksSmall) MetaChunkList;
438 new(&sPartialMetaChunksMedium) MetaChunkList;
439
440 new(&sAreaTable) AreaTable;
441 sAreaTable.Resize(sAreaTableBuffer, sizeof(sAreaTableBuffer), true);
442 // A bit hacky: The table now owns the memory. Since we never resize or
443 // free it, that's not a problem, though.
444
445 sFreeAreas = NULL;
446 sFreeAreaCount = 0;
447 sMaintenanceNeeded = false;
448
449 #if USE_DEBUG_HEAP_FOR_MALLOC || USE_GUARDED_HEAP_FOR_MALLOC
450 // Allocate one area immediately. Otherwise, we might try to allocate before
451 // post-area initialization but after page initialization, during which time
452 // we can't actually reserve pages.
453 MutexLocker locker(sLock);
454 Area* area = NULL;
455 _AllocateArea(0, area);
456 _AddArea(area);
457 #endif
458 }
459
460
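// Called once the VM is fully initialized: converts all early areas into real
// VM areas, unmaps their unused chunks, and registers the debugger commands.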
461 /*static*/ void
462 MemoryManager::InitPostArea()
463 {
464 sKernelArgs = NULL;
465
466 // Convert all areas to actual areas. This loop might look a bit weird, but
467 // is necessary since creating the actual area involves memory allocations,
468 // which in turn can change the situation.
469 bool done;
470 do {
471 done = true;
472
473 for (AreaTable::Iterator it = sAreaTable.GetIterator();
474 Area* area = it.Next();) {
475 if (area->vmArea == NULL) {
476 _ConvertEarlyArea(area);
477 done = false;
478 break;
479 }
480 }
481 } while (!done);
482
483 // unmap and free unused pages
484 if (sFreeAreas != NULL) {
485 // Just "leak" all but the first of the free areas -- the VM will
486 // automatically free all unclaimed memory.
487 sFreeAreas->next = NULL;
488 sFreeAreaCount = 1;
489
490 Area* area = sFreeAreas;
491 _ConvertEarlyArea(area);
492 _UnmapFreeChunksEarly(area);
493 }
494
495 for (AreaTable::Iterator it = sAreaTable.GetIterator();
496 Area* area = it.Next();) {
497 _UnmapFreeChunksEarly(area);
498 }
499
500 sMaintenanceNeeded = true;
501 // might not be necessary, but doesn't harm
502
503 add_debugger_command_etc("slab_area", &_DumpArea,
504 "Dump information on a given slab area",
505 "[ -c ] <area>\n"
506 "Dump information on a given slab area specified by its base "
507 "address.\n"
508 "If \"-c\" is given, the chunks of all meta chunks are printed as "
509 "well.\n", 0);
510 add_debugger_command_etc("slab_areas", &_DumpAreas,
511 "List all slab areas",
512 "\n"
513 "Lists all slab areas.\n", 0);
514 add_debugger_command_etc("slab_meta_chunk", &_DumpMetaChunk,
515 "Dump information on a given slab meta chunk",
516 "<meta chunk>\n"
517 "Dump information on a given slab meta chunk specified by its base "
518 "or object address.\n", 0);
519 add_debugger_command_etc("slab_meta_chunks", &_DumpMetaChunks,
520 "List all non-full slab meta chunks",
521 "[ -c ]\n"
522 "Lists all non-full slab meta chunks.\n"
523 "If \"-c\" is given, the chunks of all meta chunks are printed as "
524 "well.\n", 0);
525 add_debugger_command_etc("slab_raw_allocations", &_DumpRawAllocations,
526 "List all raw allocations in slab areas",
527 "\n"
528 "Lists all raw allocations in slab areas.\n", 0);
529 }
530
531
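// Allocates and maps a single chunk of cache->slab_size bytes for the given
// object cache. On success the chunk's address is returned in _pages and the
// chunk's reference field points back to the cache.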
532 /*static*/ status_t
533 MemoryManager::Allocate(ObjectCache* cache, uint32 flags, void*& _pages)
534 {
535 // TODO: Support CACHE_UNLOCKED_PAGES!
536
537 T(Allocate(cache, flags));
538
539 size_t chunkSize = cache->slab_size;
540
541 TRACE("MemoryManager::Allocate(%p, %#" B_PRIx32 "): chunkSize: %"
542 B_PRIuSIZE "\n", cache, flags, chunkSize);
543
544 MutexLocker locker(sLock);
545
546 // allocate a chunk
547 MetaChunk* metaChunk;
548 Chunk* chunk;
549 status_t error = _AllocateChunks(chunkSize, 1, flags, metaChunk, chunk);
550 if (error != B_OK)
551 return error;
552
553 // map the chunk
554 Area* area = metaChunk->GetArea();
555 addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
556
557 locker.Unlock();
558 error = _MapChunk(area->vmArea, chunkAddress, chunkSize, 0, flags);
559 locker.Lock();
560 if (error != B_OK) {
561 // something failed -- free the chunk
562 _FreeChunk(area, metaChunk, chunk, chunkAddress, true, flags);
563 return error;
564 }
565
566 chunk->reference = (addr_t)cache;
567 _pages = (void*)chunkAddress;
568
569 TRACE("MemoryManager::Allocate() done: %p (meta chunk: %d, chunk %d)\n",
570 _pages, int(metaChunk - area->metaChunks),
571 int(chunk - metaChunk->chunks));
572 return B_OK;
573 }
574
575
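// Returns a chunk previously allocated via Allocate() to its meta chunk,
// unmapping it in the process.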
576 /*static*/ void
577 MemoryManager::Free(void* pages, uint32 flags)
578 {
579 TRACE("MemoryManager::Free(%p, %#" B_PRIx32 ")\n", pages, flags);
580
581 T(Free(pages, flags));
582
583 // get the area and the meta chunk
584 Area* area = _AreaForAddress((addr_t)pages);
585 MetaChunk* metaChunk = &area->metaChunks[
586 ((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
587
588 ASSERT(metaChunk->chunkSize > 0);
589 ASSERT((addr_t)pages >= metaChunk->chunkBase);
590 ASSERT(((addr_t)pages % metaChunk->chunkSize) == 0);
591
592 // get the chunk
593 uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
594 Chunk* chunk = &metaChunk->chunks[chunkIndex];
595
596 ASSERT(chunk->next != NULL);
597 ASSERT(chunk->next < metaChunk->chunks
598 || chunk->next
599 >= metaChunk->chunks + SLAB_SMALL_CHUNKS_PER_META_CHUNK);
600
601 // and free it
602 MutexLocker locker(sLock);
603 _FreeChunk(area, metaChunk, chunk, (addr_t)pages, false, flags);
604 }
605
606
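// Allocates a raw (cache-less) memory block. Requests larger than a large
// chunk, or aligned-on-size requests, get their own VM area; everything else
// is served from a run of small or medium chunks whose first chunk's
// reference field records the address of the allocation's last byte.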
607 /*static*/ status_t
608 MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
609 {
610 #if SLAB_MEMORY_MANAGER_TRACING
611 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
612 AbstractTraceEntryWithStackTrace* traceEntry = T(AllocateRaw(size, flags));
613 size += sizeof(AllocationTrackingInfo);
614 #else
615 T(AllocateRaw(size, flags));
616 #endif
617 #endif
618
619 size = ROUNDUP(size, SLAB_CHUNK_SIZE_SMALL);
620
621 TRACE("MemoryManager::AllocateRaw(%" B_PRIuSIZE ", %#" B_PRIx32 ")\n", size,
622 flags);
623
624 if (size > SLAB_CHUNK_SIZE_LARGE || (flags & CACHE_ALIGN_ON_SIZE) != 0) {
625 // Requested size greater than a large chunk or an aligned allocation.
626 // Allocate as an area.
627 if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0)
628 return B_WOULD_BLOCK;
629
630 virtual_address_restrictions virtualRestrictions = {};
631 virtualRestrictions.address_specification
632 = (flags & CACHE_ALIGN_ON_SIZE) != 0
633 ? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS;
634 physical_address_restrictions physicalRestrictions = {};
635 area_id area = create_area_etc(VMAddressSpace::KernelID(),
636 "slab large raw allocation", size, B_FULL_LOCK,
637 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
638 ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
639 ? CREATE_AREA_DONT_WAIT : 0)
640 | CREATE_AREA_DONT_CLEAR, 0,
641 &virtualRestrictions, &physicalRestrictions, &_pages);
642
643 status_t result = area >= 0 ? B_OK : area;
644 if (result == B_OK) {
645 fill_allocated_block(_pages, size);
646 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
647 _AddTrackingInfo(_pages, size, traceEntry);
648 #endif
649 }
650
651 return result;
652 }
653
654 // determine chunk size (small or medium)
655 size_t chunkSize = SLAB_CHUNK_SIZE_SMALL;
656 uint32 chunkCount = size / SLAB_CHUNK_SIZE_SMALL;
657
658 if (size % SLAB_CHUNK_SIZE_MEDIUM == 0) {
659 chunkSize = SLAB_CHUNK_SIZE_MEDIUM;
660 chunkCount = size / SLAB_CHUNK_SIZE_MEDIUM;
661 }
662
663 MutexLocker locker(sLock);
664
665 // allocate the chunks
666 MetaChunk* metaChunk;
667 Chunk* chunk;
668 status_t error = _AllocateChunks(chunkSize, chunkCount, flags, metaChunk,
669 chunk);
670 if (error != B_OK)
671 return error;
672
673 // map the chunks
674 Area* area = metaChunk->GetArea();
675 addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
676
677 locker.Unlock();
678 error = _MapChunk(area->vmArea, chunkAddress, size, 0, flags);
679 locker.Lock();
680 if (error != B_OK) {
681 // something failed -- free the chunks
682 for (uint32 i = 0; i < chunkCount; i++)
683 _FreeChunk(area, metaChunk, chunk + i, chunkAddress, true, flags);
684 return error;
685 }
686
687 chunk->reference = (addr_t)chunkAddress + size - 1;
688 _pages = (void*)chunkAddress;
689
690 fill_allocated_block(_pages, size);
691 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
692 _AddTrackingInfo(_pages, size, traceEntry);
693 #endif
694
695 TRACE("MemoryManager::AllocateRaw() done: %p (meta chunk: %d, chunk %d)\n",
696 _pages, int(metaChunk - area->metaChunks),
697 int(chunk - metaChunk->chunks));
698 return B_OK;
699 }
700
701
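// Frees a raw allocation made by AllocateRaw(), or, if the block belongs to an
// object cache, returns that cache so the caller can free the object through
// it. Large allocations that received their own VM area are simply deleted.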
702 /*static*/ ObjectCache*
703 MemoryManager::FreeRawOrReturnCache(void* pages, uint32 flags)
704 {
705 TRACE("MemoryManager::FreeRawOrReturnCache(%p, %#" B_PRIx32 ")\n", pages,
706 flags);
707
708 T(FreeRawOrReturnCache(pages, flags));
709
710 if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
711 panic("cannot proceed without locking kernel space!");
712 return NULL;
713 }
714
715 // get the area
716 addr_t areaBase = _AreaBaseAddressForAddress((addr_t)pages);
717
718 ReadLocker readLocker(sAreaTableLock);
719 Area* area = sAreaTable.Lookup(areaBase);
720 readLocker.Unlock();
721
722 if (area == NULL) {
723 // Probably a large allocation. Look up the VM area.
724 VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
725 addressSpace->ReadLock();
726 VMArea* area = addressSpace->LookupArea((addr_t)pages);
727 addressSpace->ReadUnlock();
728
729 if (area != NULL && (addr_t)pages == area->Base())
730 delete_area(area->id);
731 else
732 panic("freeing unknown block %p from area %p", pages, area);
733
734 return NULL;
735 }
736
737 MetaChunk* metaChunk = &area->metaChunks[
738 ((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
739
740 // get the chunk
741 ASSERT(metaChunk->chunkSize > 0);
742 ASSERT((addr_t)pages >= metaChunk->chunkBase);
743 uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
744 Chunk* chunk = &metaChunk->chunks[chunkIndex];
745
746 addr_t reference = chunk->reference;
747 if ((reference & 1) == 0)
748 return (ObjectCache*)reference;
749
750 // Seems we have a raw chunk allocation.
751 ASSERT((addr_t)pages == _ChunkAddress(metaChunk, chunk));
752 ASSERT(reference > (addr_t)pages);
753 ASSERT(reference <= areaBase + SLAB_AREA_SIZE - 1);
754 size_t size = reference - (addr_t)pages + 1;
755 ASSERT((size % SLAB_CHUNK_SIZE_SMALL) == 0);
756
757 // unmap the chunks
758 _UnmapChunk(area->vmArea, (addr_t)pages, size, flags);
759
760 // and free them
761 MutexLocker locker(sLock);
762 uint32 chunkCount = size / metaChunk->chunkSize;
763 for (uint32 i = 0; i < chunkCount; i++)
764 _FreeChunk(area, metaChunk, chunk + i, (addr_t)pages, true, flags);
765
766 return NULL;
767 }
768
769
770 /*static*/ size_t
771 MemoryManager::AcceptableChunkSize(size_t size)
772 {
773 if (size <= SLAB_CHUNK_SIZE_SMALL)
774 return SLAB_CHUNK_SIZE_SMALL;
775 if (size <= SLAB_CHUNK_SIZE_MEDIUM)
776 return SLAB_CHUNK_SIZE_MEDIUM;
777 return SLAB_CHUNK_SIZE_LARGE;
778 }
779
780
781 /*static*/ ObjectCache*
782 MemoryManager::GetAllocationInfo(void* address, size_t& _size)
783 {
784 // get the area
785 ReadLocker readLocker(sAreaTableLock);
786 Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
787 readLocker.Unlock();
788
789 if (area == NULL) {
790 VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
791 addressSpace->ReadLock();
792 VMArea* area = addressSpace->LookupArea((addr_t)address);
793 if (area != NULL && (addr_t)address == area->Base())
794 _size = area->Size();
795 else
796 _size = 0;
797 addressSpace->ReadUnlock();
798
799 return NULL;
800 }
801
802 MetaChunk* metaChunk = &area->metaChunks[
803 ((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
804
805 // get the chunk
806 ASSERT(metaChunk->chunkSize > 0);
807 ASSERT((addr_t)address >= metaChunk->chunkBase);
808 uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
809
810 addr_t reference = metaChunk->chunks[chunkIndex].reference;
811 if ((reference & 1) == 0) {
812 ObjectCache* cache = (ObjectCache*)reference;
813 _size = cache->object_size;
814 return cache;
815 }
816
817 _size = reference - (addr_t)address + 1;
818 return NULL;
819 }
820
821
822 /*static*/ ObjectCache*
823 MemoryManager::CacheForAddress(void* address)
824 {
825 // get the area
826 ReadLocker readLocker(sAreaTableLock);
827 Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
828 readLocker.Unlock();
829
830 if (area == NULL)
831 return NULL;
832
833 MetaChunk* metaChunk = &area->metaChunks[
834 ((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
835
836 // get the chunk
837 ASSERT(metaChunk->chunkSize > 0);
838 ASSERT((addr_t)address >= metaChunk->chunkBase);
839 uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
840
841 addr_t reference = metaChunk->chunks[chunkIndex].reference;
842 return (reference & 1) == 0 ? (ObjectCache*)reference : NULL;
843 }
844
845
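// Performs the maintenance requested via _RequestMaintenance(): grows or
// shrinks the free area reserve until one or two free areas remain.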
846 /*static*/ void
847 MemoryManager::PerformMaintenance()
848 {
849 MutexLocker locker(sLock);
850
851 while (sMaintenanceNeeded) {
852 sMaintenanceNeeded = false;
853
854 // We want to keep one or two areas as a reserve. This way we have at
855 // least one area to use in situations when we aren't allowed to
856 // allocate one and also avoid ping-pong effects.
857 if (sFreeAreaCount > 0 && sFreeAreaCount <= 2)
858 return;
859
860 if (sFreeAreaCount == 0) {
861 // try to allocate one
862 Area* area;
863 if (_AllocateArea(0, area) != B_OK)
864 return;
865
866 _PushFreeArea(area);
867 if (sFreeAreaCount > 2)
868 sMaintenanceNeeded = true;
869 } else {
870 // free until we only have two free ones
871 while (sFreeAreaCount > 2)
872 _FreeArea(_PopFreeArea(), true, 0);
873
874 if (sFreeAreaCount == 0)
875 sMaintenanceNeeded = true;
876 }
877 }
878 }
879
880
881 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
882
883 /*static*/ bool
884 MemoryManager::AnalyzeAllocationCallers(AllocationTrackingCallback& callback)
885 {
886 for (AreaTable::Iterator it = sAreaTable.GetIterator();
887 Area* area = it.Next();) {
888 for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
889 MetaChunk* metaChunk = area->metaChunks + i;
890 if (metaChunk->chunkSize == 0)
891 continue;
892
893 for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
894 Chunk* chunk = metaChunk->chunks + k;
895
896 // skip free chunks
897 if (_IsChunkFree(metaChunk, chunk))
898 continue;
899
900 addr_t reference = chunk->reference;
901 if ((reference & 1) == 0 || reference == 1)
902 continue;
903
904 addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
905 size_t size = reference - chunkAddress + 1;
906
907 if (!callback.ProcessTrackingInfo(
908 _TrackingInfoFor((void*)chunkAddress, size),
909 (void*)chunkAddress, size)) {
910 return false;
911 }
912 }
913 }
914 }
915
916 return true;
917 }
918
919 #endif // SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
920
921
922 /*static*/ ObjectCache*
923 MemoryManager::DebugObjectCacheForAddress(void* address)
924 {
925 // get the area
926 addr_t areaBase = _AreaBaseAddressForAddress((addr_t)address);
927 Area* area = sAreaTable.Lookup(areaBase);
928
929 if (area == NULL)
930 return NULL;
931
932 MetaChunk* metaChunk = &area->metaChunks[
933 ((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
934
935 // get the chunk
936 if (metaChunk->chunkSize == 0)
937 return NULL;
938 if ((addr_t)address < metaChunk->chunkBase)
939 return NULL;
940
941 uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
942 Chunk* chunk = &metaChunk->chunks[chunkIndex];
943
944 addr_t reference = chunk->reference;
945 if ((reference & 1) == 0)
946 return (ObjectCache*)reference;
947
948 return NULL;
949 }
950
951
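// Allocates chunkCount contiguous chunks of the given size. If neither the
// existing meta chunks nor the free area reserve can satisfy the request, a
// new area is allocated; an AllocationEntry lets other threads wait for that
// allocation instead of starting their own.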
952 /*static*/ status_t
953 MemoryManager::_AllocateChunks(size_t chunkSize, uint32 chunkCount,
954 uint32 flags, MetaChunk*& _metaChunk, Chunk*& _chunk)
955 {
956 MetaChunkList* metaChunkList = NULL;
957 if (chunkSize == SLAB_CHUNK_SIZE_SMALL) {
958 metaChunkList = &sPartialMetaChunksSmall;
959 } else if (chunkSize == SLAB_CHUNK_SIZE_MEDIUM) {
960 metaChunkList = &sPartialMetaChunksMedium;
961 } else if (chunkSize != SLAB_CHUNK_SIZE_LARGE) {
962 panic("MemoryManager::_AllocateChunks(): Unsupported chunk size: %"
963 B_PRIuSIZE, chunkSize);
964 return B_BAD_VALUE;
965 }
966
967 if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk))
968 return B_OK;
969
970 if (sFreeAreas != NULL) {
971 _AddArea(_PopFreeArea());
972 _RequestMaintenance();
973
974 return _GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
975 _chunk) ? B_OK : B_NO_MEMORY;
976 }
977
978 if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
979 // We can't create an area under this restriction, and we must not wait
980 // for someone else to do so.
981 return B_WOULD_BLOCK;
982 }
983
984 // We need to allocate a new area. Wait if someone else is already trying
985 // to do the same.
986 while (true) {
987 AllocationEntry* allocationEntry = NULL;
988 if (sAllocationEntryDontWait != NULL) {
989 allocationEntry = sAllocationEntryDontWait;
990 } else if (sAllocationEntryCanWait != NULL
991 && (flags & CACHE_DONT_WAIT_FOR_MEMORY) == 0) {
992 allocationEntry = sAllocationEntryCanWait;
993 } else
994 break;
995
996 allocationEntry->condition.Wait(&sLock);
997
998 if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
999 _chunk)) {
1000 return B_OK;
1001 }
1002 }
1003
1004 // prepare the allocation entry others can wait on
1005 AllocationEntry*& allocationEntry
1006 = (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
1007 ? sAllocationEntryDontWait : sAllocationEntryCanWait;
1008
1009 AllocationEntry myResizeEntry;
1010 allocationEntry = &myResizeEntry;
1011 allocationEntry->condition.Init(metaChunkList, "wait for slab area");
1012 allocationEntry->thread = find_thread(NULL);
1013
1014 Area* area;
1015 status_t error = _AllocateArea(flags, area);
1016
1017 allocationEntry->condition.NotifyAll();
1018 allocationEntry = NULL;
1019
1020 if (error != B_OK)
1021 return error;
1022
1023 // Try again to get a meta chunk. Something might have been freed in the
1024 // meantime; if so, the newly allocated area isn't needed and can be freed.
1025 if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk)) {
1026 _FreeArea(area, true, flags);
1027 return B_OK;
1028 }
1029
1030 _AddArea(area);
1031 return _GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
1032 _chunk) ? B_OK : B_NO_MEMORY;
1033 }
1034
1035
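// Satisfies a multi-chunk allocation from the given partial meta chunk list or
// a free meta chunk, taking the chunks as one contiguous run starting at the
// meta chunk's first free chunk. Single-chunk requests are delegated to
// _GetChunk().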
1036 /*static*/ bool
1037 MemoryManager::_GetChunks(MetaChunkList* metaChunkList, size_t chunkSize,
1038 uint32 chunkCount, MetaChunk*& _metaChunk, Chunk*& _chunk)
1039 {
1040 // the common and less complicated special case
1041 if (chunkCount == 1)
1042 return _GetChunk(metaChunkList, chunkSize, _metaChunk, _chunk);
1043
1044 ASSERT(metaChunkList != NULL);
1045
1046 // Iterate through the partial meta chunk list and try to find a free
1047 // range that is large enough.
1048 MetaChunk* metaChunk = NULL;
1049 for (MetaChunkList::Iterator it = metaChunkList->GetIterator();
1050 (metaChunk = it.Next()) != NULL;) {
1051 if (metaChunk->firstFreeChunk + chunkCount - 1
1052 <= metaChunk->lastFreeChunk) {
1053 break;
1054 }
1055 }
1056
1057 if (metaChunk == NULL) {
1058 // try to get a free meta chunk
1059 if ((SLAB_CHUNK_SIZE_LARGE - SLAB_AREA_STRUCT_OFFSET - kAreaAdminSize)
1060 / chunkSize >= chunkCount) {
1061 metaChunk = sFreeShortMetaChunks.RemoveHead();
1062 }
1063 if (metaChunk == NULL)
1064 metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1065
1066 if (metaChunk == NULL)
1067 return false;
1068
1069 metaChunkList->Add(metaChunk);
1070 metaChunk->GetArea()->usedMetaChunkCount++;
1071 _PrepareMetaChunk(metaChunk, chunkSize);
1072
1073 T(AllocateMetaChunk(metaChunk));
1074 }
1075
1076 // pull the chunks out of the free list
1077 Chunk* firstChunk = metaChunk->chunks + metaChunk->firstFreeChunk;
1078 Chunk* lastChunk = firstChunk + (chunkCount - 1);
1079 Chunk** chunkPointer = &metaChunk->freeChunks;
1080 uint32 remainingChunks = chunkCount;
1081 while (remainingChunks > 0) {
1082 ASSERT_PRINT(chunkPointer, "remaining: %" B_PRIu32 "/%" B_PRIu32
1083 ", area: %p, meta chunk: %" B_PRIdSSIZE "\n", remainingChunks,
1084 chunkCount, metaChunk->GetArea(),
1085 metaChunk - metaChunk->GetArea()->metaChunks);
1086 Chunk* chunk = *chunkPointer;
1087 if (chunk >= firstChunk && chunk <= lastChunk) {
1088 *chunkPointer = chunk->next;
1089 chunk->reference = 1;
1090 remainingChunks--;
1091 } else
1092 chunkPointer = &chunk->next;
1093 }
1094
1095 // allocate the chunks
1096 metaChunk->usedChunkCount += chunkCount;
1097 if (metaChunk->usedChunkCount == metaChunk->chunkCount) {
1098 // meta chunk is full now -- remove it from its list
1099 if (metaChunkList != NULL)
1100 metaChunkList->Remove(metaChunk);
1101 }
1102
1103 // update the free range
1104 metaChunk->firstFreeChunk += chunkCount;
1105
1106 PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1107
1108 _chunk = firstChunk;
1109 _metaChunk = metaChunk;
1110
1111 T(AllocateChunks(chunkSize, chunkCount, metaChunk, firstChunk));
1112
1113 return true;
1114 }
1115
1116
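// Allocates a single chunk from the head of the given partial meta chunk list
// (if any), falling back to a free meta chunk otherwise.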
1117 /*static*/ bool
1118 MemoryManager::_GetChunk(MetaChunkList* metaChunkList, size_t chunkSize,
1119 MetaChunk*& _metaChunk, Chunk*& _chunk)
1120 {
1121 MetaChunk* metaChunk = metaChunkList != NULL
1122 ? metaChunkList->Head() : NULL;
1123 if (metaChunk == NULL) {
1124 // no partial meta chunk -- maybe there's a free one
1125 if (chunkSize == SLAB_CHUNK_SIZE_LARGE) {
1126 metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1127 } else {
1128 metaChunk = sFreeShortMetaChunks.RemoveHead();
1129 if (metaChunk == NULL)
1130 metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1131 if (metaChunk != NULL)
1132 metaChunkList->Add(metaChunk);
1133 }
1134
1135 if (metaChunk == NULL)
1136 return false;
1137
1138 metaChunk->GetArea()->usedMetaChunkCount++;
1139 _PrepareMetaChunk(metaChunk, chunkSize);
1140
1141 T(AllocateMetaChunk(metaChunk));
1142 }
1143
1144 // allocate the chunk
1145 if (++metaChunk->usedChunkCount == metaChunk->chunkCount) {
1146 // meta chunk is full now -- remove it from its list
1147 if (metaChunkList != NULL)
1148 metaChunkList->Remove(metaChunk);
1149 }
1150
1151 _chunk = _pop(metaChunk->freeChunks);
1152 _metaChunk = metaChunk;
1153
1154 _chunk->reference = 1;
1155
1156 // update the free range
1157 uint32 chunkIndex = _chunk - metaChunk->chunks;
1158 if (chunkIndex >= metaChunk->firstFreeChunk
1159 && chunkIndex <= metaChunk->lastFreeChunk) {
1160 if (chunkIndex - metaChunk->firstFreeChunk
1161 <= metaChunk->lastFreeChunk - chunkIndex) {
1162 metaChunk->firstFreeChunk = chunkIndex + 1;
1163 } else
1164 metaChunk->lastFreeChunk = chunkIndex - 1;
1165 }
1166
1167 PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1168
1169 T(AllocateChunk(chunkSize, metaChunk, _chunk));
1170
1171 return true;
1172 }
1173
1174
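// Returns a chunk to its meta chunk, unmapping it first unless
// alreadyUnmapped. A meta chunk that becomes empty goes back to the free meta
// chunk lists, and an area without any used meta chunks is freed via
// _FreeArea().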
1175 /*static*/ void
1176 MemoryManager::_FreeChunk(Area* area, MetaChunk* metaChunk, Chunk* chunk,
1177 addr_t chunkAddress, bool alreadyUnmapped, uint32 flags)
1178 {
1179 // unmap the chunk
1180 if (!alreadyUnmapped) {
1181 mutex_unlock(&sLock);
1182 _UnmapChunk(area->vmArea, chunkAddress, metaChunk->chunkSize, flags);
1183 mutex_lock(&sLock);
1184 }
1185
1186 T(FreeChunk(metaChunk, chunk));
1187
1188 _push(metaChunk->freeChunks, chunk);
1189
1190 uint32 chunkIndex = chunk - metaChunk->chunks;
1191
1192 // free the meta chunk, if it is unused now
1193 PARANOID_CHECKS_ONLY(bool areaDeleted = false;)
1194 ASSERT(metaChunk->usedChunkCount > 0);
1195 if (--metaChunk->usedChunkCount == 0) {
1196 T(FreeMetaChunk(metaChunk));
1197
1198 // remove from partial meta chunk list
1199 if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
1200 sPartialMetaChunksSmall.Remove(metaChunk);
1201 else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
1202 sPartialMetaChunksMedium.Remove(metaChunk);
1203
1204 // mark empty
1205 metaChunk->chunkSize = 0;
1206
1207 // add to free list
1208 if (metaChunk == area->metaChunks)
1209 sFreeShortMetaChunks.Add(metaChunk, false);
1210 else
1211 sFreeCompleteMetaChunks.Add(metaChunk, false);
1212
1213 // free the area, if it is unused now
1214 ASSERT(area->usedMetaChunkCount > 0);
1215 if (--area->usedMetaChunkCount == 0) {
1216 _FreeArea(area, false, flags);
1217 PARANOID_CHECKS_ONLY(areaDeleted = true;)
1218 }
1219 } else if (metaChunk->usedChunkCount == metaChunk->chunkCount - 1) {
1220 // the meta chunk was full before -- add it back to its partial chunk
1221 // list
1222 if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
1223 sPartialMetaChunksSmall.Add(metaChunk, false);
1224 else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
1225 sPartialMetaChunksMedium.Add(metaChunk, false);
1226
1227 metaChunk->firstFreeChunk = chunkIndex;
1228 metaChunk->lastFreeChunk = chunkIndex;
1229 } else {
1230 // extend the free range, if the chunk adjoins
1231 if (chunkIndex + 1 == metaChunk->firstFreeChunk) {
1232 uint32 firstFree = chunkIndex;
1233 for (; firstFree > 0; firstFree--) {
1234 Chunk* previousChunk = &metaChunk->chunks[firstFree - 1];
1235 if (!_IsChunkFree(metaChunk, previousChunk))
1236 break;
1237 }
1238 metaChunk->firstFreeChunk = firstFree;
1239 } else if (chunkIndex == (uint32)metaChunk->lastFreeChunk + 1) {
1240 uint32 lastFree = chunkIndex;
1241 for (; lastFree + 1 < metaChunk->chunkCount; lastFree++) {
1242 Chunk* nextChunk = &metaChunk->chunks[lastFree + 1];
1243 if (!_IsChunkFree(metaChunk, nextChunk))
1244 break;
1245 }
1246 metaChunk->lastFreeChunk = lastFree;
1247 }
1248 }
1249
1250 PARANOID_CHECKS_ONLY(
1251 if (!areaDeleted)
1252 _CheckMetaChunk(metaChunk);
1253 )
1254 }
1255
1256
1257 /*static*/ void
1258 MemoryManager::_PrepareMetaChunk(MetaChunk* metaChunk, size_t chunkSize)
1259 {
1260 Area* area = metaChunk->GetArea();
1261
1262 if (metaChunk == area->metaChunks) {
1263 // the first chunk is shorter
1264 size_t unusableSize = ROUNDUP(SLAB_AREA_STRUCT_OFFSET + kAreaAdminSize,
1265 chunkSize);
1266 metaChunk->chunkBase = area->BaseAddress() + unusableSize;
1267 metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE - unusableSize;
1268 }
1269
1270 metaChunk->chunkSize = chunkSize;
1271 metaChunk->chunkCount = metaChunk->totalSize / chunkSize;
1272 metaChunk->usedChunkCount = 0;
1273
1274 metaChunk->freeChunks = NULL;
1275 for (int32 i = metaChunk->chunkCount - 1; i >= 0; i--)
1276 _push(metaChunk->freeChunks, metaChunk->chunks + i);
1277
1278 metaChunk->firstFreeChunk = 0;
1279 metaChunk->lastFreeChunk = metaChunk->chunkCount - 1;
1280
1281 PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1282 }
1283
1284
1285 /*static*/ void
1286 MemoryManager::_AddArea(Area* area)
1287 {
1288 T(AddArea(area));
1289
1290 // add the area to the hash table
1291 WriteLocker writeLocker(sAreaTableLock);
1292 sAreaTable.InsertUnchecked(area);
1293 writeLocker.Unlock();
1294
1295 // add the area's meta chunks to the free lists
1296 sFreeShortMetaChunks.Add(&area->metaChunks[0]);
1297 for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++)
1298 sFreeCompleteMetaChunks.Add(&area->metaChunks[i]);
1299 }
1300
1301
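// Allocates a new SLAB_AREA_SIZE area and initializes its administrative
// structures. During early boot (sKernelArgs != NULL) the memory comes from
// vm_allocate_early(); later a null area is created and only the admin part
// is mapped. Temporarily drops sLock.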
1302 /*static*/ status_t
1303 MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
1304 {
1305 TRACE("MemoryManager::_AllocateArea(%#" B_PRIx32 ")\n", flags);
1306
1307 ASSERT((flags & CACHE_DONT_LOCK_KERNEL_SPACE) == 0);
1308
1309 mutex_unlock(&sLock);
1310
1311 size_t pagesNeededToMap = 0;
1312 void* areaBase;
1313 Area* area;
1314 VMArea* vmArea = NULL;
1315
1316 if (sKernelArgs == NULL) {
1317 // create an area
1318 uint32 areaCreationFlags = (flags & CACHE_PRIORITY_VIP) != 0
1319 ? CREATE_AREA_PRIORITY_VIP : 0;
1320 area_id areaID = vm_create_null_area(B_SYSTEM_TEAM, kSlabAreaName,
1321 &areaBase, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
1322 areaCreationFlags);
1323 if (areaID < 0) {
1324 mutex_lock(&sLock);
1325 return areaID;
1326 }
1327
1328 area = _AreaForAddress((addr_t)areaBase);
1329
1330 // map the memory for the administrative structure
1331 VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1332 VMTranslationMap* translationMap = addressSpace->TranslationMap();
1333
1334 pagesNeededToMap = translationMap->MaxPagesNeededToMap(
1335 (addr_t)area, (addr_t)areaBase + SLAB_AREA_SIZE - 1);
1336
1337 vmArea = VMAreas::Lookup(areaID);
1338 status_t error = _MapChunk(vmArea, (addr_t)area, kAreaAdminSize,
1339 pagesNeededToMap, flags);
1340 if (error != B_OK) {
1341 delete_area(areaID);
1342 mutex_lock(&sLock);
1343 return error;
1344 }
1345
1346 dprintf("slab memory manager: created area %p (%" B_PRId32 ")\n", area,
1347 areaID);
1348 } else {
1349 // no areas yet -- allocate raw memory
1350 areaBase = (void*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
1351 SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
1352 SLAB_AREA_SIZE);
1353 if (areaBase == NULL) {
1354 mutex_lock(&sLock);
1355 return B_NO_MEMORY;
1356 }
1357 area = _AreaForAddress((addr_t)areaBase);
1358
1359 TRACE("MemoryManager::_AllocateArea(): allocated early area %p\n",
1360 area);
1361 }
1362
1363 // init the area structure
1364 area->vmArea = vmArea;
1365 area->reserved_memory_for_mapping = pagesNeededToMap * B_PAGE_SIZE;
1366 area->usedMetaChunkCount = 0;
1367 area->fullyMapped = vmArea == NULL;
1368
1369 // init the meta chunks
1370 for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1371 MetaChunk* metaChunk = area->metaChunks + i;
1372 metaChunk->chunkSize = 0;
1373 metaChunk->chunkBase = (addr_t)areaBase + i * SLAB_CHUNK_SIZE_LARGE;
1374 metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE;
1375 // Note: chunkBase and totalSize aren't correct for the first
1376 // meta chunk. They will be set in _PrepareMetaChunk().
1377 metaChunk->chunkCount = 0;
1378 metaChunk->usedChunkCount = 0;
1379 metaChunk->freeChunks = NULL;
1380 }
1381
1382 mutex_lock(&sLock);
1383 _area = area;
1384
1385 T(AllocateArea(area, flags));
1386
1387 return B_OK;
1388 }
1389
1390
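// Puts an unused area back into the free area reserve, or, if the reserve is
// already stocked and deleting is currently possible, deletes its VM area and
// unreserves the memory that had been reserved for mapping it.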
1391 /*static*/ void
1392 MemoryManager::_FreeArea(Area* area, bool areaRemoved, uint32 flags)
1393 {
1394 TRACE("MemoryManager::_FreeArea(%p, %#" B_PRIx32 ")\n", area, flags);
1395
1396 T(FreeArea(area, areaRemoved, flags));
1397
1398 ASSERT(area->usedMetaChunkCount == 0);
1399
1400 if (!areaRemoved) {
1401 // remove the area's meta chunks from the free lists
1402 ASSERT(area->metaChunks[0].usedChunkCount == 0);
1403 sFreeShortMetaChunks.Remove(&area->metaChunks[0]);
1404
1405 for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1406 ASSERT(area->metaChunks[i].usedChunkCount == 0);
1407 sFreeCompleteMetaChunks.Remove(&area->metaChunks[i]);
1408 }
1409
1410 // remove the area from the hash table
1411 WriteLocker writeLocker(sAreaTableLock);
1412 sAreaTable.RemoveUnchecked(area);
1413 writeLocker.Unlock();
1414 }
1415
1416 // We want to keep one or two free areas as a reserve.
1417 if (sFreeAreaCount <= 1) {
1418 _PushFreeArea(area);
1419 return;
1420 }
1421
1422 if (area->vmArea == NULL || (flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
1423 // This is either early in the boot process or we aren't allowed to
1424 // delete the area now.
1425 _PushFreeArea(area);
1426 _RequestMaintenance();
1427 return;
1428 }
1429
1430 mutex_unlock(&sLock);
1431
1432 dprintf("slab memory manager: deleting area %p (%" B_PRId32 ")\n", area,
1433 area->vmArea->id);
1434
1435 size_t memoryToUnreserve = area->reserved_memory_for_mapping;
1436 delete_area(area->vmArea->id);
1437 vm_unreserve_memory(memoryToUnreserve);
1438
1439 mutex_lock(&sLock);
1440 }
1441
1442
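// Reserves memory and pages for the given range within vmArea and maps freshly
// allocated wired pages there. Early areas (vmArea == NULL) are fully mapped
// already, so nothing needs to be done for them.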
1443 /*static*/ status_t
1444 MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
1445 size_t reserveAdditionalMemory, uint32 flags)
1446 {
1447 TRACE("MemoryManager::_MapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
1448 ")\n", vmArea, address, size);
1449
1450 T(Map(address, size, flags));
1451
1452 if (vmArea == NULL) {
1453 // everything is mapped anyway
1454 return B_OK;
1455 }
1456
1457 VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1458 VMTranslationMap* translationMap = addressSpace->TranslationMap();
1459
1460 // reserve memory for the chunk
1461 int priority = (flags & CACHE_PRIORITY_VIP) != 0
1462 ? VM_PRIORITY_VIP : VM_PRIORITY_SYSTEM;
1463 size_t reservedMemory = size + reserveAdditionalMemory;
1464 status_t error = vm_try_reserve_memory(size, priority,
1465 (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? 0 : 1000000);
1466 if (error != B_OK)
1467 return error;
1468
1469 // reserve the pages we need now
1470 size_t reservedPages = size / B_PAGE_SIZE
1471 + translationMap->MaxPagesNeededToMap(address, address + size - 1);
1472 vm_page_reservation reservation;
1473 if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0) {
1474 if (!vm_page_try_reserve_pages(&reservation, reservedPages, priority)) {
1475 vm_unreserve_memory(reservedMemory);
1476 return B_WOULD_BLOCK;
1477 }
1478 } else
1479 vm_page_reserve_pages(&reservation, reservedPages, priority);
1480
1481 VMCache* cache = vm_area_get_locked_cache(vmArea);
1482
1483 // map the pages
1484 translationMap->Lock();
1485
1486 addr_t areaOffset = address - vmArea->Base();
1487 addr_t endAreaOffset = areaOffset + size;
1488 for (size_t offset = areaOffset; offset < endAreaOffset;
1489 offset += B_PAGE_SIZE) {
1490 vm_page* page = vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
1491 cache->InsertPage(page, offset);
1492
1493 page->IncrementWiredCount();
1494 atomic_add(&gMappedPagesCount, 1);
1495 DEBUG_PAGE_ACCESS_END(page);
1496
1497 translationMap->Map(vmArea->Base() + offset,
1498 page->physical_page_number * B_PAGE_SIZE,
1499 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
1500 vmArea->MemoryType(), &reservation);
1501 }
1502
1503 translationMap->Unlock();
1504
1505 cache->ReleaseRefAndUnlock();
1506
1507 vm_page_unreserve_pages(&reservation);
1508
1509 return B_OK;
1510 }
1511
1512
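// Unmaps the given range within vmArea, frees the backing pages and
// unreserves the corresponding memory.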
1513 /*static*/ status_t
1514 MemoryManager::_UnmapChunk(VMArea* vmArea, addr_t address, size_t size,
1515 uint32 flags)
1516 {
1517 T(Unmap(address, size, flags));
1518
1519 if (vmArea == NULL)
1520 return B_ERROR;
1521
1522 TRACE("MemoryManager::_UnmapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
1523 ")\n", vmArea, address, size);
1524
1525 VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1526 VMTranslationMap* translationMap = addressSpace->TranslationMap();
1527 VMCache* cache = vm_area_get_locked_cache(vmArea);
1528
1529 // unmap the pages
1530 translationMap->Lock();
1531 translationMap->Unmap(address, address + size - 1);
1532 atomic_add(&gMappedPagesCount, -(size / B_PAGE_SIZE));
1533 translationMap->Unlock();
1534
1535 // free the pages
1536 addr_t areaPageOffset = (address - vmArea->Base()) / B_PAGE_SIZE;
1537 addr_t areaPageEndOffset = areaPageOffset + size / B_PAGE_SIZE;
1538 VMCachePagesTree::Iterator it = cache->pages.GetIterator(
1539 areaPageOffset, true, true);
1540 while (vm_page* page = it.Next()) {
1541 if (page->cache_offset >= areaPageEndOffset)
1542 break;
1543
1544 DEBUG_PAGE_ACCESS_START(page);
1545
1546 page->DecrementWiredCount();
1547
1548 cache->RemovePage(page);
1549 // the iterator is remove-safe
1550 vm_page_free(cache, page);
1551 }
1552
1553 cache->ReleaseRefAndUnlock();
1554
1555 vm_unreserve_memory(size);
1556
1557 return B_OK;
1558 }
1559
1560
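// For a fully mapped early area, unmaps everything that isn't actually in use:
// the space before the Area structure, all free meta chunks, and the free
// chunks within used meta chunks.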
1561 /*static*/ void
1562 MemoryManager::_UnmapFreeChunksEarly(Area* area)
1563 {
1564 if (!area->fullyMapped)
1565 return;
1566
1567 TRACE("MemoryManager::_UnmapFreeChunksEarly(%p)\n", area);
1568
1569 // unmap the space before the Area structure
1570 #if SLAB_AREA_STRUCT_OFFSET > 0
1571 _UnmapChunk(area->vmArea, area->BaseAddress(), SLAB_AREA_STRUCT_OFFSET,
1572 0);
1573 #endif
1574
1575 for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1576 MetaChunk* metaChunk = area->metaChunks + i;
1577 if (metaChunk->chunkSize == 0) {
1578 // meta chunk is free -- unmap it completely
1579 if (i == 0) {
1580 _UnmapChunk(area->vmArea, (addr_t)area + kAreaAdminSize,
1581 SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize, 0);
1582 } else {
1583 _UnmapChunk(area->vmArea,
1584 area->BaseAddress() + i * SLAB_CHUNK_SIZE_LARGE,
1585 SLAB_CHUNK_SIZE_LARGE, 0);
1586 }
1587 } else {
1588 // unmap free chunks
1589 for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
1590 chunk = chunk->next) {
1591 _UnmapChunk(area->vmArea, _ChunkAddress(metaChunk, chunk),
1592 metaChunk->chunkSize, 0);
1593 }
1594
1595 // The first meta chunk might have space before its first chunk.
1596 if (i == 0) {
1597 addr_t unusedStart = (addr_t)area + kAreaAdminSize;
1598 if (unusedStart < metaChunk->chunkBase) {
1599 _UnmapChunk(area->vmArea, unusedStart,
1600 metaChunk->chunkBase - unusedStart, 0);
1601 }
1602 }
1603 }
1604 }
1605
1606 area->fullyMapped = false;
1607 }
1608
1609
1610 /*static*/ void
1611 MemoryManager::_ConvertEarlyArea(Area* area)
1612 {
1613 void* address = (void*)area->BaseAddress();
1614 area_id areaID = create_area(kSlabAreaName, &address, B_EXACT_ADDRESS,
1615 SLAB_AREA_SIZE, B_ALREADY_WIRED,
1616 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1617 if (areaID < 0)
1618 panic("out of memory");
1619
1620 area->vmArea = VMAreas::Lookup(areaID);
1621 }
1622
1623
1624 /*static*/ void
1625 MemoryManager::_RequestMaintenance()
1626 {
1627 if ((sFreeAreaCount > 0 && sFreeAreaCount <= 2) || sMaintenanceNeeded)
1628 return;
1629
1630 sMaintenanceNeeded = true;
1631 request_memory_manager_maintenance();
1632 }
1633
1634
1635 /*static*/ bool
1636 MemoryManager::_IsChunkInFreeList(const MetaChunk* metaChunk,
1637 const Chunk* chunk)
1638 {
1639 Chunk* freeChunk = metaChunk->freeChunks;
1640 while (freeChunk != NULL) {
1641 if (freeChunk == chunk)
1642 return true;
1643 freeChunk = freeChunk->next;
1644 }
1645
1646 return false;
1647 }
1648
1649
1650 #if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
1651
1652 /*static*/ void
1653 MemoryManager::_CheckMetaChunk(MetaChunk* metaChunk)
1654 {
1655 Area* area = metaChunk->GetArea();
1656 int32 metaChunkIndex = metaChunk - area->metaChunks;
1657 if (metaChunkIndex < 0 || metaChunkIndex >= SLAB_META_CHUNKS_PER_AREA) {
1658 panic("invalid meta chunk %p!", metaChunk);
1659 return;
1660 }
1661
1662 switch (metaChunk->chunkSize) {
1663 case 0:
1664 // unused
1665 return;
1666 case SLAB_CHUNK_SIZE_SMALL:
1667 case SLAB_CHUNK_SIZE_MEDIUM:
1668 case SLAB_CHUNK_SIZE_LARGE:
1669 break;
1670 default:
1671 panic("meta chunk %p has invalid chunk size: %" B_PRIuSIZE,
1672 metaChunk, metaChunk->chunkSize);
1673 return;
1674 }
1675
1676 if (metaChunk->totalSize > SLAB_CHUNK_SIZE_LARGE) {
1677 panic("meta chunk %p has invalid total size: %" B_PRIuSIZE,
1678 metaChunk, metaChunk->totalSize);
1679 return;
1680 }
1681
1682 addr_t expectedBase = area->BaseAddress()
1683 + metaChunkIndex * SLAB_CHUNK_SIZE_LARGE;
1684 if (metaChunk->chunkBase < expectedBase
1685 || metaChunk->chunkBase - expectedBase + metaChunk->totalSize
1686 > SLAB_CHUNK_SIZE_LARGE) {
1687 panic("meta chunk %p has invalid base address: %" B_PRIxADDR, metaChunk,
1688 metaChunk->chunkBase);
1689 return;
1690 }
1691
1692 if (metaChunk->chunkCount != metaChunk->totalSize / metaChunk->chunkSize) {
1693 panic("meta chunk %p has invalid chunk count: %u", metaChunk,
1694 metaChunk->chunkCount);
1695 return;
1696 }
1697
1698 if (metaChunk->usedChunkCount > metaChunk->chunkCount) {
1699 panic("meta chunk %p has invalid unused chunk count: %u", metaChunk,
1700 metaChunk->usedChunkCount);
1701 return;
1702 }
1703
1704 if (metaChunk->firstFreeChunk > metaChunk->chunkCount) {
1705 panic("meta chunk %p has invalid first free chunk: %u", metaChunk,
1706 metaChunk->firstFreeChunk);
1707 return;
1708 }
1709
1710 if (metaChunk->lastFreeChunk >= metaChunk->chunkCount) {
1711 panic("meta chunk %p has invalid last free chunk: %u", metaChunk,
1712 metaChunk->lastFreeChunk);
1713 return;
1714 }
1715
1716 // check free list for structural sanity
1717 uint32 freeChunks = 0;
1718 for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
1719 chunk = chunk->next) {
1720 if ((addr_t)chunk % sizeof(Chunk) != 0 || chunk < metaChunk->chunks
1721 || chunk >= metaChunk->chunks + metaChunk->chunkCount) {
1722 panic("meta chunk %p has invalid element in free list, chunk: %p",
1723 metaChunk, chunk);
1724 return;
1725 }
1726
1727 if (++freeChunks > metaChunk->chunkCount) {
1728 panic("meta chunk %p has cyclic free list", metaChunk);
1729 return;
1730 }
1731 }
1732
1733 if (freeChunks + metaChunk->usedChunkCount > metaChunk->chunkCount) {
1734 panic("meta chunk %p has mismatching free/used chunk counts: total: "
1735 "%u, used: %u, free: %" B_PRIu32, metaChunk, metaChunk->chunkCount,
1736 metaChunk->usedChunkCount, freeChunks);
1737 return;
1738 }
1739
1740 // count used chunks by looking at their reference/next field
1741 uint32 usedChunks = 0;
1742 for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
1743 if (!_IsChunkFree(metaChunk, metaChunk->chunks + i))
1744 usedChunks++;
1745 }
1746
1747 if (usedChunks != metaChunk->usedChunkCount) {
1748 panic("meta chunk %p has used chunks that appear free: total: "
1749 "%u, used: %u, appearing used: %" B_PRIu32, metaChunk,
1750 metaChunk->chunkCount, metaChunk->usedChunkCount, usedChunks);
1751 return;
1752 }
1753
1754 // check free range
1755 for (uint32 i = metaChunk->firstFreeChunk; i < metaChunk->lastFreeChunk;
1756 i++) {
1757 if (!_IsChunkFree(metaChunk, metaChunk->chunks + i)) {
1758 panic("meta chunk %p has used chunk in free range, chunk: %p (%"
1759 B_PRIu32 ", free range: %u - %u)", metaChunk,
1760 metaChunk->chunks + i, i, metaChunk->firstFreeChunk,
1761 metaChunk->lastFreeChunk);
1762 return;
1763 }
1764 }
1765 }
1766
1767 #endif // DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
1768
1769
1770 /*static*/ int
1771 MemoryManager::_DumpRawAllocations(int argc, char** argv)
1772 {
1773 kprintf("%-*s meta chunk chunk %-*s size (KB)\n",
1774 B_PRINTF_POINTER_WIDTH, "area", B_PRINTF_POINTER_WIDTH, "base");
1775
1776 size_t totalSize = 0;
1777
1778 for (AreaTable::Iterator it = sAreaTable.GetIterator();
1779 Area* area = it.Next();) {
1780 for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1781 MetaChunk* metaChunk = area->metaChunks + i;
1782 if (metaChunk->chunkSize == 0)
1783 continue;
1784 for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
1785 Chunk* chunk = metaChunk->chunks + k;
1786
1787 // skip free chunks
1788 if (_IsChunkFree(metaChunk, chunk))
1789 continue;
1790
1791 addr_t reference = chunk->reference;
1792 if ((reference & 1) == 0 || reference == 1)
1793 continue;
1794
1795 addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
1796 size_t size = reference - chunkAddress + 1;
1797 totalSize += size;
1798
1799 kprintf("%p %10" B_PRId32 " %5" B_PRIu32 " %p %9"
1800 B_PRIuSIZE "\n", area, i, k, (void*)chunkAddress,
1801 size / 1024);
1802 }
1803 }
1804 }
1805
1806 kprintf("total:%*s%9" B_PRIuSIZE "\n", (2 * B_PRINTF_POINTER_WIDTH) + 21,
1807 "", totalSize / 1024);
1808
1809 return 0;
1810 }
1811
1812
1813 /*static*/ void
1814 MemoryManager::_PrintMetaChunkTableHeader(bool printChunks)
1815 {
1816 if (printChunks)
1817 kprintf("chunk base cache object size cache name\n");
1818 else
1819 kprintf("chunk base\n");
1820 }
1821
1822 /*static*/ void
1823 MemoryManager::_DumpMetaChunk(MetaChunk* metaChunk, bool printChunks,
1824 bool printHeader)
1825 {
1826 if (printHeader)
1827 _PrintMetaChunkTableHeader(printChunks);
1828
1829 const char* type = "empty";
1830 if (metaChunk->chunkSize != 0) {
1831 switch (metaChunk->chunkSize) {
1832 case SLAB_CHUNK_SIZE_SMALL:
1833 type = "small";
1834 break;
1835 case SLAB_CHUNK_SIZE_MEDIUM:
1836 type = "medium";
1837 break;
1838 case SLAB_CHUNK_SIZE_LARGE:
1839 type = "large";
1840 break;
1841 }
1842 }
1843
1844 int metaChunkIndex = metaChunk - metaChunk->GetArea()->metaChunks;
1845 kprintf("%5d %p --- %6s meta chunk", metaChunkIndex,
1846 (void*)metaChunk->chunkBase, type);
1847 if (metaChunk->chunkSize != 0) {
1848 kprintf(": %4u/%4u used, %-4u-%4u free ------------\n",
1849 metaChunk->usedChunkCount, metaChunk->chunkCount,
1850 metaChunk->firstFreeChunk, metaChunk->lastFreeChunk);
1851 } else
1852 kprintf(" --------------------------------------------\n");
1853
1854 if (metaChunk->chunkSize == 0 || !printChunks)
1855 return;
1856
1857 for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
1858 Chunk* chunk = metaChunk->chunks + i;
1859
1860 // skip free chunks
1861 if (_IsChunkFree(metaChunk, chunk)) {
1862 if (!_IsChunkInFreeList(metaChunk, chunk)) {
1863 kprintf("%5" B_PRIu32 " %p appears free, but isn't in free "
1864 "list!\n", i, (void*)_ChunkAddress(metaChunk, chunk));
1865 }
1866
1867 continue;
1868 }
1869
1870 addr_t reference = chunk->reference;
1871 if ((reference & 1) == 0) {
1872 ObjectCache* cache = (ObjectCache*)reference;
1873 kprintf("%5" B_PRIu32 " %p %p %11" B_PRIuSIZE " %s\n", i,
1874 (void*)_ChunkAddress(metaChunk, chunk), cache,
1875 cache != NULL ? cache->object_size : 0,
1876 cache != NULL ? cache->name : "");
1877 } else if (reference != 1) {
1878 kprintf("%5" B_PRIu32 " %p raw allocation up to %p\n", i,
1879 (void*)_ChunkAddress(metaChunk, chunk), (void*)reference);
1880 }
1881 }
1882 }
1883
1884
1885 /*static*/ int
1886 MemoryManager::_DumpMetaChunk(int argc, char** argv)
1887 {
1888 if (argc != 2) {
1889 print_debugger_command_usage(argv[0]);
1890 return 0;
1891 }
1892
1893 uint64 address;
1894 if (!evaluate_debug_expression(argv[1], &address, false))
1895 return 0;
1896
1897 Area* area = _AreaForAddress(address);
1898
1899 MetaChunk* metaChunk;
1900 if ((addr_t)address >= (addr_t)area->metaChunks
1901 && (addr_t)address
1902 < (addr_t)(area->metaChunks + SLAB_META_CHUNKS_PER_AREA)) {
1903 metaChunk = (MetaChunk*)(addr_t)address;
1904 } else {
1905 metaChunk = area->metaChunks
1906 + (address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE;
1907 }
1908
1909 _DumpMetaChunk(metaChunk, true, true);
1910
1911 return 0;
1912 }
1913
1914
1915 /*static*/ void
1916 MemoryManager::_DumpMetaChunks(const char* name, MetaChunkList& metaChunkList,
1917 bool printChunks)
1918 {
1919 kprintf("%s:\n", name);
1920
1921 for (MetaChunkList::Iterator it = metaChunkList.GetIterator();
1922 MetaChunk* metaChunk = it.Next();) {
1923 _DumpMetaChunk(metaChunk, printChunks, false);
1924 }
1925 }
1926
1927
1928 /*static*/ int
1929 MemoryManager::_DumpMetaChunks(int argc, char** argv)
1930 {
1931 bool printChunks = argc > 1 && strcmp(argv[1], "-c") == 0;
1932
1933 _PrintMetaChunkTableHeader(printChunks);
1934 _DumpMetaChunks("free complete", sFreeCompleteMetaChunks, printChunks);
1935 _DumpMetaChunks("free short", sFreeShortMetaChunks, printChunks);
1936 _DumpMetaChunks("partial small", sPartialMetaChunksSmall, printChunks);
1937 _DumpMetaChunks("partial medium", sPartialMetaChunksMedium, printChunks);
1938
1939 return 0;
1940 }
1941
1942
1943 /*static*/ int
1944 MemoryManager::_DumpArea(int argc, char** argv)
1945 {
1946 bool printChunks = false;
1947
1948 int argi = 1;
1949 while (argi < argc) {
1950 if (argv[argi][0] != '-')
1951 break;
1952 const char* arg = argv[argi++];
1953 if (strcmp(arg, "-c") == 0) {
1954 printChunks = true;
1955 } else {
1956 print_debugger_command_usage(argv[0]);
1957 return 0;
1958 }
1959 }
1960
1961 if (argi + 1 != argc) {
1962 print_debugger_command_usage(argv[0]);
1963 return 0;
1964 }
1965
1966 uint64 address;
1967 if (!evaluate_debug_expression(argv[argi], &address, false))
1968 return 0;
1969
1970 Area* area = _AreaForAddress((addr_t)address);
1971
1972 for (uint32 k = 0; k < SLAB_META_CHUNKS_PER_AREA; k++) {
1973 MetaChunk* metaChunk = area->metaChunks + k;
1974 _DumpMetaChunk(metaChunk, printChunks, k == 0);
1975 }
1976
1977 return 0;
1978 }
1979
1980
1981 /*static*/ int
1982 MemoryManager::_DumpAreas(int argc, char** argv)
1983 {
1984 kprintf(" %*s %*s meta small medium large\n",
1985 B_PRINTF_POINTER_WIDTH, "base", B_PRINTF_POINTER_WIDTH, "area");
1986
1987 size_t totalTotalSmall = 0;
1988 size_t totalUsedSmall = 0;
1989 size_t totalTotalMedium = 0;
1990 size_t totalUsedMedium = 0;
1991 size_t totalUsedLarge = 0;
1992 uint32 areaCount = 0;
1993
1994 for (AreaTable::Iterator it = sAreaTable.GetIterator();
1995 Area* area = it.Next();) {
1996 areaCount++;
1997
1998 // sum up the free/used counts for the chunk sizes
1999 int totalSmall = 0;
2000 int usedSmall = 0;
2001 int totalMedium = 0;
2002 int usedMedium = 0;
2003 int usedLarge = 0;
2004
2005 for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
2006 MetaChunk* metaChunk = area->metaChunks + i;
2007 if (metaChunk->chunkSize == 0)
2008 continue;
2009
2010 switch (metaChunk->chunkSize) {
2011 case SLAB_CHUNK_SIZE_SMALL:
2012 totalSmall += metaChunk->chunkCount;
2013 usedSmall += metaChunk->usedChunkCount;
2014 break;
2015 case SLAB_CHUNK_SIZE_MEDIUM:
2016 totalMedium += metaChunk->chunkCount;
2017 usedMedium += metaChunk->usedChunkCount;
2018 break;
2019 case SLAB_CHUNK_SIZE_LARGE:
2020 usedLarge += metaChunk->usedChunkCount;
2021 break;
2022 }
2023 }
2024
2025 kprintf("%p %p %2u/%2u %4d/%4d %3d/%3d %5d\n",
2026 area, area->vmArea, area->usedMetaChunkCount,
2027 SLAB_META_CHUNKS_PER_AREA, usedSmall, totalSmall, usedMedium,
2028 totalMedium, usedLarge);
2029
2030 totalTotalSmall += totalSmall;
2031 totalUsedSmall += usedSmall;
2032 totalTotalMedium += totalMedium;
2033 totalUsedMedium += usedMedium;
2034 totalUsedLarge += usedLarge;
2035 }
2036
2037 kprintf("%d free area%s:\n", sFreeAreaCount,
2038 sFreeAreaCount == 1 ? "" : "s");
2039 for (Area* area = sFreeAreas; area != NULL; area = area->next) {
2040 areaCount++;
2041 kprintf("%p %p\n", area, area->vmArea);
2042 }
2043
2044 kprintf("total usage:\n");
2045 kprintf(" small: %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedSmall,
2046 totalTotalSmall);
2047 kprintf(" medium: %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedMedium,
2048 totalTotalMedium);
2049 kprintf(" large: %" B_PRIuSIZE "\n", totalUsedLarge);
2050 kprintf(" memory: %" B_PRIuSIZE "/%" B_PRIu32 " KB\n",
2051 (totalUsedSmall * SLAB_CHUNK_SIZE_SMALL
2052 + totalUsedMedium * SLAB_CHUNK_SIZE_MEDIUM
2053 + totalUsedLarge * SLAB_CHUNK_SIZE_LARGE) / 1024,
2054 areaCount * SLAB_AREA_SIZE / 1024);
2055 kprintf(" overhead: %" B_PRIuSIZE " KB\n",
2056 areaCount * kAreaAdminSize / 1024);
2057
2058 return 0;
2059 }
2060
2061
2062 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
2063
2064 void
2065 MemoryManager::_AddTrackingInfo(void* allocation, size_t size,
2066 AbstractTraceEntryWithStackTrace* traceEntry)
2067 {
2068 _TrackingInfoFor(allocation, size)->Init(traceEntry);
2069 }
2070
2071 #endif // SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
2072
2073
2074 RANGE_MARKER_FUNCTION_END(SlabMemoryManager)
2075