/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Distributed under the terms of the MIT License.
 */


#include "MemoryManager.h"

#include <algorithm>

#include <debug.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>
#include <vm/VMTranslationMap.h>

#include "kernel_debug_config.h"

#include "ObjectCache.h"


//#define TRACE_MEMORY_MANAGER
#ifdef TRACE_MEMORY_MANAGER
#	define TRACE(x...)	dprintf(x)
#else
#	define TRACE(x...)	do {} while (false)
#endif

#if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
#	define PARANOID_CHECKS_ONLY(x)	x
#else
#	define PARANOID_CHECKS_ONLY(x)
#endif


static const char* const kSlabAreaName = "slab area";

static void* sAreaTableBuffer[1024];

mutex MemoryManager::sLock;
rw_lock MemoryManager::sAreaTableLock;
kernel_args* MemoryManager::sKernelArgs;
MemoryManager::AreaTable MemoryManager::sAreaTable;
MemoryManager::Area* MemoryManager::sFreeAreas;
int MemoryManager::sFreeAreaCount;
MemoryManager::MetaChunkList MemoryManager::sFreeCompleteMetaChunks;
MemoryManager::MetaChunkList MemoryManager::sFreeShortMetaChunks;
MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksSmall;
MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksMedium;
MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryCanWait;
MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryDontWait;
bool MemoryManager::sMaintenanceNeeded;


RANGE_MARKER_FUNCTION_BEGIN(SlabMemoryManager)


// #pragma mark - kernel tracing


#if SLAB_MEMORY_MANAGER_TRACING


//namespace SlabMemoryManagerCacheTracing {
struct MemoryManager::Tracing {

class MemoryManagerTraceEntry
	: public TRACE_ENTRY_SELECTOR(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE) {
public:
	MemoryManagerTraceEntry()
		:
		TraceEntryBase(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE, 0, true)
	{
	}
};


class Allocate : public MemoryManagerTraceEntry {
public:
	Allocate(ObjectCache* cache, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fCache(cache),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc: cache: %p, flags: %#" B_PRIx32,
			fCache, fFlags);
	}

private:
	ObjectCache*	fCache;
	uint32			fFlags;
};


class Free : public MemoryManagerTraceEntry {
public:
	Free(void* address, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free: address: %p, flags: %#" B_PRIx32,
			fAddress, fFlags);
	}

private:
	void*	fAddress;
	uint32	fFlags;
};


class AllocateRaw : public MemoryManagerTraceEntry {
public:
	AllocateRaw(size_t size, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fSize(size),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc raw: size: %" B_PRIuSIZE
			", flags: %#" B_PRIx32, fSize, fFlags);
	}

private:
	size_t	fSize;
	uint32	fFlags;
};


class FreeRawOrReturnCache : public MemoryManagerTraceEntry {
public:
	FreeRawOrReturnCache(void* address, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free raw/return: address: %p, flags: %#"
			B_PRIx32, fAddress, fFlags);
	}

private:
	void*	fAddress;
	uint32	fFlags;
};


class AllocateArea : public MemoryManagerTraceEntry {
public:
	AllocateArea(Area* area, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fArea(area),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc area: flags: %#" B_PRIx32
			" -> %p", fFlags, fArea);
	}

private:
	Area*	fArea;
	uint32	fFlags;
};


class AddArea : public MemoryManagerTraceEntry {
public:
	AddArea(Area* area)
		:
		MemoryManagerTraceEntry(),
		fArea(area)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager add area: %p", fArea);
	}

private:
	Area*	fArea;
};


class FreeArea : public MemoryManagerTraceEntry {
public:
	FreeArea(Area* area, bool areaRemoved, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fArea(area),
		fFlags(flags),
		fRemoved(areaRemoved)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free area: %p%s, flags: %#" B_PRIx32,
			fArea, fRemoved ? " (removed)" : "", fFlags);
	}

private:
	Area*	fArea;
	uint32	fFlags;
	bool	fRemoved;
};


class AllocateMetaChunk : public MemoryManagerTraceEntry {
public:
	AllocateMetaChunk(MetaChunk* metaChunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc meta chunk: %#" B_PRIxADDR,
			fMetaChunk);
	}

private:
	addr_t	fMetaChunk;
};


class FreeMetaChunk : public MemoryManagerTraceEntry {
public:
	FreeMetaChunk(MetaChunk* metaChunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free meta chunk: %#" B_PRIxADDR,
			fMetaChunk);
	}

private:
	addr_t	fMetaChunk;
};


class AllocateChunk : public MemoryManagerTraceEntry {
public:
	AllocateChunk(size_t chunkSize, MetaChunk* metaChunk, Chunk* chunk)
		:
		MemoryManagerTraceEntry(),
		fChunkSize(chunkSize),
		fMetaChunk(metaChunk->chunkBase),
		fChunk(chunk - metaChunk->chunks)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc chunk: size: %" B_PRIuSIZE
			" -> meta chunk: %#" B_PRIxADDR ", chunk: %" B_PRIu32, fChunkSize,
			fMetaChunk, fChunk);
	}

private:
	size_t	fChunkSize;
	addr_t	fMetaChunk;
	uint32	fChunk;
};


class AllocateChunks : public MemoryManagerTraceEntry {
public:
	AllocateChunks(size_t chunkSize, uint32 chunkCount, MetaChunk* metaChunk,
		Chunk* chunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase),
		fChunkSize(chunkSize),
		fChunkCount(chunkCount),
		fChunk(chunk - metaChunk->chunks)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc chunks: size: %" B_PRIuSIZE
			", count %" B_PRIu32 " -> meta chunk: %#" B_PRIxADDR ", chunk: %"
			B_PRIu32, fChunkSize, fChunkCount, fMetaChunk, fChunk);
	}

private:
	addr_t	fMetaChunk;
	size_t	fChunkSize;
	uint32	fChunkCount;
	uint32	fChunk;
};


class FreeChunk : public MemoryManagerTraceEntry {
public:
	FreeChunk(MetaChunk* metaChunk, Chunk* chunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase),
		fChunk(chunk - metaChunk->chunks)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free chunk: meta chunk: %#" B_PRIxADDR
			", chunk: %" B_PRIu32, fMetaChunk, fChunk);
	}

private:
	addr_t	fMetaChunk;
	uint32	fChunk;
};


class Map : public MemoryManagerTraceEntry {
public:
	Map(addr_t address, size_t size, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fSize(size),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager map: %#" B_PRIxADDR ", size: %"
			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
	}

private:
	addr_t	fAddress;
	size_t	fSize;
	uint32	fFlags;
};


class Unmap : public MemoryManagerTraceEntry {
public:
	Unmap(addr_t address, size_t size, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fSize(size),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager unmap: %#" B_PRIxADDR ", size: %"
			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
	}

private:
	addr_t	fAddress;
	size_t	fSize;
	uint32	fFlags;
};


//}	// namespace SlabMemoryManagerCacheTracing
};	// struct MemoryManager::Tracing


//#	define T(x)	new(std::nothrow) SlabMemoryManagerCacheTracing::x
#	define T(x)	new(std::nothrow) MemoryManager::Tracing::x

#else
#	define T(x)
#endif	// SLAB_MEMORY_MANAGER_TRACING


// #pragma mark - MemoryManager


/*static*/ void
MemoryManager::Init(kernel_args* args)
{
	mutex_init(&sLock, "slab memory manager");
	rw_lock_init(&sAreaTableLock, "slab memory manager area table");
	sKernelArgs = args;

	new(&sFreeCompleteMetaChunks) MetaChunkList;
	new(&sFreeShortMetaChunks) MetaChunkList;
	new(&sPartialMetaChunksSmall) MetaChunkList;
	new(&sPartialMetaChunksMedium) MetaChunkList;

	new(&sAreaTable) AreaTable;
	sAreaTable.Resize(sAreaTableBuffer, sizeof(sAreaTableBuffer), true);
		// A bit hacky: The table now owns the memory. Since we never resize or
		// free it, that's not a problem, though.

	sFreeAreas = NULL;
	sFreeAreaCount = 0;
	sMaintenanceNeeded = false;
}


/*static*/ void
MemoryManager::InitPostArea()
{
	sKernelArgs = NULL;

	// Convert all areas to actual areas. This loop might look a bit weird, but
	// is necessary since creating the actual area involves memory allocations,
	// which in turn can change the situation.
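	// Hence the iteration is restarted from the beginning after every
	// successful conversion.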
	bool done;
	do {
		done = true;

		for (AreaTable::Iterator it = sAreaTable.GetIterator();
				Area* area = it.Next();) {
			if (area->vmArea == NULL) {
				_ConvertEarlyArea(area);
				done = false;
				break;
			}
		}
	} while (!done);

	// unmap and free unused pages
	if (sFreeAreas != NULL) {
		// Just "leak" all but the first of the free areas -- the VM will
		// automatically free all unclaimed memory.
		sFreeAreas->next = NULL;
		sFreeAreaCount = 1;

		Area* area = sFreeAreas;
		_ConvertEarlyArea(area);
		_UnmapFreeChunksEarly(area);
	}

	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		_UnmapFreeChunksEarly(area);
	}

	sMaintenanceNeeded = true;
		// might not be necessary, but doesn't harm

	add_debugger_command_etc("slab_area", &_DumpArea,
		"Dump information on a given slab area",
		"[ -c ] <area>\n"
		"Dump information on a given slab area specified by its base "
			"address.\n"
		"If \"-c\" is given, the chunks of all meta chunks are printed as "
			"well.\n", 0);
	add_debugger_command_etc("slab_areas", &_DumpAreas,
		"List all slab areas",
		"\n"
		"Lists all slab areas.\n", 0);
	add_debugger_command_etc("slab_meta_chunk", &_DumpMetaChunk,
		"Dump information on a given slab meta chunk",
		"<meta chunk>\n"
		"Dump information on a given slab meta chunk specified by its base "
			"or object address.\n", 0);
	add_debugger_command_etc("slab_meta_chunks", &_DumpMetaChunks,
		"List all non-full slab meta chunks",
		"[ -c ]\n"
		"Lists all non-full slab meta chunks.\n"
		"If \"-c\" is given, the chunks of all meta chunks are printed as "
			"well.\n", 0);
	add_debugger_command_etc("slab_raw_allocations", &_DumpRawAllocations,
		"List all raw allocations in slab areas",
		"\n"
		"Lists all raw allocations in slab areas.\n", 0);
}


/*static*/ status_t
MemoryManager::Allocate(ObjectCache* cache, uint32 flags, void*& _pages)
{
	// TODO: Support CACHE_UNLOCKED_PAGES!
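	// Allocation happens in three steps: carve a chunk of the cache's slab
	// size out of a meta chunk (under sLock), map its pages with the lock
	// temporarily dropped, and finally tag the chunk with the owning cache
	// via chunk->reference, so Free() and CacheForAddress() can find the
	// cache again.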

	T(Allocate(cache, flags));

	size_t chunkSize = cache->slab_size;

	TRACE("MemoryManager::Allocate(%p, %#" B_PRIx32 "): chunkSize: %"
		B_PRIuSIZE "\n", cache, flags, chunkSize);

	MutexLocker locker(sLock);

	// allocate a chunk
	MetaChunk* metaChunk;
	Chunk* chunk;
	status_t error = _AllocateChunks(chunkSize, 1, flags, metaChunk, chunk);
	if (error != B_OK)
		return error;

	// map the chunk
	Area* area = metaChunk->GetArea();
	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);

	locker.Unlock();
	error = _MapChunk(area->vmArea, chunkAddress, chunkSize, 0, flags);
	locker.Lock();
	if (error != B_OK) {
		// something failed -- free the chunk
		_FreeChunk(area, metaChunk, chunk, chunkAddress, true, flags);
		return error;
	}

	chunk->reference = (addr_t)cache;
	_pages = (void*)chunkAddress;

	TRACE("MemoryManager::Allocate() done: %p (meta chunk: %d, chunk %d)\n",
		_pages, int(metaChunk - area->metaChunks),
		int(chunk - metaChunk->chunks));
	return B_OK;
}


/*static*/ void
MemoryManager::Free(void* pages, uint32 flags)
{
	TRACE("MemoryManager::Free(%p, %#" B_PRIx32 ")\n", pages, flags);

	T(Free(pages, flags));

	// get the area and the meta chunk
	Area* area = _AreaForAddress((addr_t)pages);
	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	ASSERT(metaChunk->chunkSize > 0);
	ASSERT((addr_t)pages >= metaChunk->chunkBase);
	ASSERT(((addr_t)pages % metaChunk->chunkSize) == 0);

	// get the chunk
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
	Chunk* chunk = &metaChunk->chunks[chunkIndex];

	ASSERT(chunk->next != NULL);
	ASSERT(chunk->next < metaChunk->chunks
		|| chunk->next
			>= metaChunk->chunks + SLAB_SMALL_CHUNKS_PER_META_CHUNK);

	// and free it
	MutexLocker locker(sLock);
	_FreeChunk(area, metaChunk, chunk, (addr_t)pages, false, flags);
}


/*static*/ status_t
MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
{
#if SLAB_MEMORY_MANAGER_TRACING
#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
	AbstractTraceEntryWithStackTrace* traceEntry = T(AllocateRaw(size, flags));
	size += sizeof(AllocationTrackingInfo);
#else
	T(AllocateRaw(size, flags));
#endif
#endif

	size = ROUNDUP(size, SLAB_CHUNK_SIZE_SMALL);

	TRACE("MemoryManager::AllocateRaw(%" B_PRIuSIZE ", %#" B_PRIx32 ")\n", size,
		flags);

	if (size > SLAB_CHUNK_SIZE_LARGE || (flags & CACHE_ALIGN_ON_SIZE) != 0) {
		// Requested size greater than a large chunk or an aligned allocation.
		// Allocate as an area.
		if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0)
			return B_WOULD_BLOCK;

		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification
			= (flags & CACHE_ALIGN_ON_SIZE) != 0
				? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		area_id area = create_area_etc(VMAddressSpace::KernelID(),
			"slab large raw allocation", size, B_FULL_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
					? CREATE_AREA_DONT_WAIT : 0)
				| CREATE_AREA_DONT_CLEAR, 0,
			&virtualRestrictions, &physicalRestrictions, &_pages);

		status_t result = area >= 0 ? B_OK : area;
		if (result == B_OK) {
			fill_allocated_block(_pages, size);
#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
			_AddTrackingInfo(_pages, size, traceEntry);
#endif
		}

		return result;
	}

	// determine chunk size (small or medium)
	size_t chunkSize = SLAB_CHUNK_SIZE_SMALL;
	uint32 chunkCount = size / SLAB_CHUNK_SIZE_SMALL;

	if (size % SLAB_CHUNK_SIZE_MEDIUM == 0) {
		chunkSize = SLAB_CHUNK_SIZE_MEDIUM;
		chunkCount = size / SLAB_CHUNK_SIZE_MEDIUM;
	}

	MutexLocker locker(sLock);

	// allocate the chunks
	MetaChunk* metaChunk;
	Chunk* chunk;
	status_t error = _AllocateChunks(chunkSize, chunkCount, flags, metaChunk,
		chunk);
	if (error != B_OK)
		return error;

	// map the chunks
	Area* area = metaChunk->GetArea();
	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);

	locker.Unlock();
	error = _MapChunk(area->vmArea, chunkAddress, size, 0, flags);
	locker.Lock();
	if (error != B_OK) {
		// something failed -- free the chunks
		for (uint32 i = 0; i < chunkCount; i++)
			_FreeChunk(area, metaChunk, chunk + i, chunkAddress, true, flags);
		return error;
	}

	chunk->reference = (addr_t)chunkAddress + size - 1;
	_pages = (void*)chunkAddress;

	fill_allocated_block(_pages, size);
#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
	_AddTrackingInfo(_pages, size, traceEntry);
#endif

	TRACE("MemoryManager::AllocateRaw() done: %p (meta chunk: %d, chunk %d)\n",
		_pages, int(metaChunk - area->metaChunks),
		int(chunk - metaChunk->chunks));
	return B_OK;
}


/*static*/ ObjectCache*
MemoryManager::FreeRawOrReturnCache(void* pages, uint32 flags)
{
	TRACE("MemoryManager::FreeRawOrReturnCache(%p, %#" B_PRIx32 ")\n", pages,
		flags);

	T(FreeRawOrReturnCache(pages, flags));

	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
		panic("cannot proceed without locking kernel space!");
		return NULL;
	}

	// get the area
	addr_t areaBase = _AreaBaseAddressForAddress((addr_t)pages);

	ReadLocker readLocker(sAreaTableLock);
	Area* area = sAreaTable.Lookup(areaBase);
	readLocker.Unlock();

	if (area == NULL) {
		// Probably a large allocation. Look up the VM area.
		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
		addressSpace->ReadLock();
		VMArea* area = addressSpace->LookupArea((addr_t)pages);
		addressSpace->ReadUnlock();

		if (area != NULL && (addr_t)pages == area->Base())
			delete_area(area->id);
		else
			panic("freeing unknown block %p from area %p", pages, area);

		return NULL;
	}

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	ASSERT(metaChunk->chunkSize > 0);
	ASSERT((addr_t)pages >= metaChunk->chunkBase);
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
	Chunk* chunk = &metaChunk->chunks[chunkIndex];

	addr_t reference = chunk->reference;
	if ((reference & 1) == 0)
		return (ObjectCache*)reference;

	// Seems we have a raw chunk allocation.
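	// For chunks owned by an object cache, chunk->reference holds the
	// (pointer-aligned, hence even) ObjectCache pointer. For raw allocations
	// the first chunk's reference holds the address of the allocation's last
	// byte, which is odd and from which the allocation size is recovered
	// below.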
	ASSERT((addr_t)pages == _ChunkAddress(metaChunk, chunk));
	ASSERT(reference > (addr_t)pages);
	ASSERT(reference <= areaBase + SLAB_AREA_SIZE - 1);
	size_t size = reference - (addr_t)pages + 1;
	ASSERT((size % SLAB_CHUNK_SIZE_SMALL) == 0);

	// unmap the chunks
	_UnmapChunk(area->vmArea, (addr_t)pages, size, flags);

	// and free them
	MutexLocker locker(sLock);
	uint32 chunkCount = size / metaChunk->chunkSize;
	for (uint32 i = 0; i < chunkCount; i++)
		_FreeChunk(area, metaChunk, chunk + i, (addr_t)pages, true, flags);

	return NULL;
}


/*static*/ size_t
MemoryManager::AcceptableChunkSize(size_t size)
{
	if (size <= SLAB_CHUNK_SIZE_SMALL)
		return SLAB_CHUNK_SIZE_SMALL;
	if (size <= SLAB_CHUNK_SIZE_MEDIUM)
		return SLAB_CHUNK_SIZE_MEDIUM;
	return SLAB_CHUNK_SIZE_LARGE;
}


/*static*/ ObjectCache*
MemoryManager::GetAllocationInfo(void* address, size_t& _size)
{
	// get the area
	ReadLocker readLocker(sAreaTableLock);
	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
	readLocker.Unlock();

	if (area == NULL) {
		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
		addressSpace->ReadLock();
		VMArea* area = addressSpace->LookupArea((addr_t)address);
		if (area != NULL && (addr_t)address == area->Base())
			_size = area->Size();
		else
			_size = 0;
		addressSpace->ReadUnlock();

		return NULL;
	}

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	ASSERT(metaChunk->chunkSize > 0);
	ASSERT((addr_t)address >= metaChunk->chunkBase);
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);

	addr_t reference = metaChunk->chunks[chunkIndex].reference;
	if ((reference & 1) == 0) {
		ObjectCache* cache = (ObjectCache*)reference;
		_size = cache->object_size;
		return cache;
	}

	_size = reference - (addr_t)address + 1;
	return NULL;
}


/*static*/ ObjectCache*
MemoryManager::CacheForAddress(void* address)
{
	// get the area
	ReadLocker readLocker(sAreaTableLock);
	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
	readLocker.Unlock();

	if (area == NULL)
		return NULL;

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	ASSERT(metaChunk->chunkSize > 0);
	ASSERT((addr_t)address >= metaChunk->chunkBase);
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);

	addr_t reference = metaChunk->chunks[chunkIndex].reference;
	return (reference & 1) == 0 ? (ObjectCache*)reference : NULL;
}


/*static*/ void
MemoryManager::PerformMaintenance()
{
	MutexLocker locker(sLock);

	while (sMaintenanceNeeded) {
		sMaintenanceNeeded = false;

		// We want to keep one or two areas as a reserve. This way we have at
		// least one area to use in situations when we aren't allowed to
		// allocate one and also avoid ping-pong effects.
		if (sFreeAreaCount > 0 && sFreeAreaCount <= 2)
			return;

		if (sFreeAreaCount == 0) {
			// try to allocate one
			Area* area;
			if (_AllocateArea(0, area) != B_OK)
				return;

			_PushFreeArea(area);
			if (sFreeAreaCount > 2)
				sMaintenanceNeeded = true;
		} else {
			// free until we only have two free ones
			while (sFreeAreaCount > 2)
				_FreeArea(_PopFreeArea(), true, 0);

			if (sFreeAreaCount == 0)
				sMaintenanceNeeded = true;
		}
	}
}


#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING

/*static*/ bool
MemoryManager::AnalyzeAllocationCallers(AllocationTrackingCallback& callback)
{
	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			MetaChunk* metaChunk = area->metaChunks + i;
			if (metaChunk->chunkSize == 0)
				continue;

			for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
				Chunk* chunk = metaChunk->chunks + k;

				// skip free chunks
				if (_IsChunkFree(metaChunk, chunk))
					continue;

				addr_t reference = chunk->reference;
				if ((reference & 1) == 0 || reference == 1)
					continue;

				addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
				size_t size = reference - chunkAddress + 1;

				if (!callback.ProcessTrackingInfo(
						_TrackingInfoFor((void*)chunkAddress, size),
						(void*)chunkAddress, size)) {
					return false;
				}
			}
		}
	}

	return true;
}

#endif	// SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING


/*static*/ ObjectCache*
MemoryManager::DebugObjectCacheForAddress(void* address)
{
	// get the area
	addr_t areaBase = _AreaBaseAddressForAddress((addr_t)address);
	Area* area = sAreaTable.Lookup(areaBase);

	if (area == NULL)
		return NULL;

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	if (metaChunk->chunkSize == 0)
		return NULL;
	if ((addr_t)address < metaChunk->chunkBase)
		return NULL;

	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
	Chunk* chunk = &metaChunk->chunks[chunkIndex];

	addr_t reference = chunk->reference;
	if ((reference & 1) == 0)
		return (ObjectCache*)reference;

	return NULL;
}


/*static*/ status_t
MemoryManager::_AllocateChunks(size_t chunkSize, uint32 chunkCount,
	uint32 flags, MetaChunk*& _metaChunk, Chunk*& _chunk)
{
	MetaChunkList* metaChunkList = NULL;
	if (chunkSize == SLAB_CHUNK_SIZE_SMALL) {
		metaChunkList = &sPartialMetaChunksSmall;
	} else if (chunkSize == SLAB_CHUNK_SIZE_MEDIUM) {
		metaChunkList = &sPartialMetaChunksMedium;
	} else if (chunkSize != SLAB_CHUNK_SIZE_LARGE) {
		panic("MemoryManager::_AllocateChunks(): Unsupported chunk size: %"
			B_PRIuSIZE, chunkSize);
		return B_BAD_VALUE;
	}

	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk))
		return B_OK;

	if (sFreeAreas != NULL) {
		_AddArea(_PopFreeArea());
		_RequestMaintenance();

		return _GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
			_chunk) ? B_OK : B_NO_MEMORY;
	}

	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
		// We can't create an area with this limitation, and we must not wait
		// for someone else to do so.
		return B_WOULD_BLOCK;
	}

	// We need to allocate a new area. Wait if someone else is trying to do
	// the same.
	while (true) {
		AllocationEntry* allocationEntry = NULL;
		if (sAllocationEntryDontWait != NULL) {
			allocationEntry = sAllocationEntryDontWait;
		} else if (sAllocationEntryCanWait != NULL
				&& (flags & CACHE_DONT_WAIT_FOR_MEMORY) == 0) {
			allocationEntry = sAllocationEntryCanWait;
		} else
			break;

		ConditionVariableEntry entry;
		allocationEntry->condition.Add(&entry);

		mutex_unlock(&sLock);
		entry.Wait();
		mutex_lock(&sLock);

		if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
				_chunk)) {
			return B_OK;
		}
	}

	// prepare the allocation entry others can wait on
	AllocationEntry*& allocationEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? sAllocationEntryDontWait : sAllocationEntryCanWait;

	AllocationEntry myResizeEntry;
	allocationEntry = &myResizeEntry;
	allocationEntry->condition.Init(metaChunkList, "wait for slab area");
	allocationEntry->thread = find_thread(NULL);

	Area* area;
	status_t error = _AllocateArea(flags, area);

	allocationEntry->condition.NotifyAll();
	allocationEntry = NULL;

	if (error != B_OK)
		return error;

	// Try again to get a meta chunk. Something might have been freed in the
	// meantime. We can free the area in this case.
	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk)) {
		_FreeArea(area, true, flags);
		return B_OK;
	}

	_AddArea(area);
	return _GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
		_chunk) ? B_OK : B_NO_MEMORY;
}


/*static*/ bool
MemoryManager::_GetChunks(MetaChunkList* metaChunkList, size_t chunkSize,
	uint32 chunkCount, MetaChunk*& _metaChunk, Chunk*& _chunk)
{
	// the common and less complicated special case
	if (chunkCount == 1)
		return _GetChunk(metaChunkList, chunkSize, _metaChunk, _chunk);

	ASSERT(metaChunkList != NULL);

	// Iterate through the partial meta chunk list and try to find a free
	// range that is large enough.
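	// Multi-chunk allocations are only carved out of the contiguous
	// [firstFreeChunk, lastFreeChunk] range a meta chunk maintains; the free
	// list alone isn't sufficient, since its entries need not be adjacent.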
	MetaChunk* metaChunk = NULL;
	for (MetaChunkList::Iterator it = metaChunkList->GetIterator();
			(metaChunk = it.Next()) != NULL;) {
		if (metaChunk->firstFreeChunk + chunkCount - 1
				<= metaChunk->lastFreeChunk) {
			break;
		}
	}

	if (metaChunk == NULL) {
		// try to get a free meta chunk
		if ((SLAB_CHUNK_SIZE_LARGE - SLAB_AREA_STRUCT_OFFSET - kAreaAdminSize)
				/ chunkSize >= chunkCount) {
			metaChunk = sFreeShortMetaChunks.RemoveHead();
		}
		if (metaChunk == NULL)
			metaChunk = sFreeCompleteMetaChunks.RemoveHead();

		if (metaChunk == NULL)
			return false;

		metaChunkList->Add(metaChunk);
		metaChunk->GetArea()->usedMetaChunkCount++;
		_PrepareMetaChunk(metaChunk, chunkSize);

		T(AllocateMetaChunk(metaChunk));
	}

	// pull the chunks out of the free list
	Chunk* firstChunk = metaChunk->chunks + metaChunk->firstFreeChunk;
	Chunk* lastChunk = firstChunk + (chunkCount - 1);
	Chunk** chunkPointer = &metaChunk->freeChunks;
	uint32 remainingChunks = chunkCount;
	while (remainingChunks > 0) {
		ASSERT_PRINT(chunkPointer, "remaining: %" B_PRIu32 "/%" B_PRIu32
			", area: %p, meta chunk: %" B_PRIdSSIZE "\n", remainingChunks,
			chunkCount, metaChunk->GetArea(),
			metaChunk - metaChunk->GetArea()->metaChunks);
		Chunk* chunk = *chunkPointer;
		if (chunk >= firstChunk && chunk <= lastChunk) {
			*chunkPointer = chunk->next;
			chunk->reference = 1;
			remainingChunks--;
		} else
			chunkPointer = &chunk->next;
	}

	// allocate the chunks
	metaChunk->usedChunkCount += chunkCount;
	if (metaChunk->usedChunkCount == metaChunk->chunkCount) {
		// meta chunk is full now -- remove it from its list
		if (metaChunkList != NULL)
			metaChunkList->Remove(metaChunk);
	}

	// update the free range
	metaChunk->firstFreeChunk += chunkCount;

	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));

	_chunk = firstChunk;
	_metaChunk = metaChunk;

	T(AllocateChunks(chunkSize, chunkCount, metaChunk, firstChunk));

	return true;
}


/*static*/ bool
MemoryManager::_GetChunk(MetaChunkList* metaChunkList, size_t chunkSize,
	MetaChunk*& _metaChunk, Chunk*& _chunk)
{
	MetaChunk* metaChunk = metaChunkList != NULL
		? metaChunkList->Head() : NULL;
	if (metaChunk == NULL) {
		// no partial meta chunk -- maybe there's a free one
		if (chunkSize == SLAB_CHUNK_SIZE_LARGE) {
			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
		} else {
			metaChunk = sFreeShortMetaChunks.RemoveHead();
			if (metaChunk == NULL)
				metaChunk = sFreeCompleteMetaChunks.RemoveHead();
			if (metaChunk != NULL)
				metaChunkList->Add(metaChunk);
		}

		if (metaChunk == NULL)
			return false;

		metaChunk->GetArea()->usedMetaChunkCount++;
		_PrepareMetaChunk(metaChunk, chunkSize);

		T(AllocateMetaChunk(metaChunk));
	}

	// allocate the chunk
	if (++metaChunk->usedChunkCount == metaChunk->chunkCount) {
		// meta chunk is full now -- remove it from its list
		if (metaChunkList != NULL)
			metaChunkList->Remove(metaChunk);
	}

	_chunk = _pop(metaChunk->freeChunks);
	_metaChunk = metaChunk;

	_chunk->reference = 1;

	// update the free range
	uint32 chunkIndex = _chunk - metaChunk->chunks;
	if (chunkIndex >= metaChunk->firstFreeChunk
			&& chunkIndex <= metaChunk->lastFreeChunk) {
		if (chunkIndex - metaChunk->firstFreeChunk
				<= metaChunk->lastFreeChunk - chunkIndex) {
			metaChunk->firstFreeChunk = chunkIndex + 1;
		} else
			metaChunk->lastFreeChunk = chunkIndex - 1;
	}

	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));

	T(AllocateChunk(chunkSize, metaChunk, _chunk));

	return true;
}


/*static*/ void
MemoryManager::_FreeChunk(Area* area, MetaChunk* metaChunk, Chunk* chunk,
	addr_t chunkAddress, bool alreadyUnmapped, uint32 flags)
{
	// unmap the chunk
	if (!alreadyUnmapped) {
		mutex_unlock(&sLock);
		_UnmapChunk(area->vmArea, chunkAddress, metaChunk->chunkSize, flags);
		mutex_lock(&sLock);
	}

	T(FreeChunk(metaChunk, chunk));

	_push(metaChunk->freeChunks, chunk);

	uint32 chunkIndex = chunk - metaChunk->chunks;

	// free the meta chunk, if it is unused now
	PARANOID_CHECKS_ONLY(bool areaDeleted = false;)
	ASSERT(metaChunk->usedChunkCount > 0);
	if (--metaChunk->usedChunkCount == 0) {
		T(FreeMetaChunk(metaChunk));

		// remove from partial meta chunk list
		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
			sPartialMetaChunksSmall.Remove(metaChunk);
		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
			sPartialMetaChunksMedium.Remove(metaChunk);

		// mark empty
		metaChunk->chunkSize = 0;

		// add to free list
		if (metaChunk == area->metaChunks)
			sFreeShortMetaChunks.Add(metaChunk, false);
		else
			sFreeCompleteMetaChunks.Add(metaChunk, false);

		// free the area, if it is unused now
		ASSERT(area->usedMetaChunkCount > 0);
		if (--area->usedMetaChunkCount == 0) {
			_FreeArea(area, false, flags);
			PARANOID_CHECKS_ONLY(areaDeleted = true;)
		}
	} else if (metaChunk->usedChunkCount == metaChunk->chunkCount - 1) {
		// the meta chunk was full before -- add it back to its partial chunk
		// list
		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
			sPartialMetaChunksSmall.Add(metaChunk, false);
		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
			sPartialMetaChunksMedium.Add(metaChunk, false);

		metaChunk->firstFreeChunk = chunkIndex;
		metaChunk->lastFreeChunk = chunkIndex;
	} else {
		// extend the free range, if the chunk adjoins
		if (chunkIndex + 1 == metaChunk->firstFreeChunk) {
			uint32 firstFree = chunkIndex;
			for (; firstFree > 0; firstFree--) {
				Chunk* previousChunk = &metaChunk->chunks[firstFree - 1];
				if (!_IsChunkFree(metaChunk, previousChunk))
					break;
			}
			metaChunk->firstFreeChunk = firstFree;
		} else if (chunkIndex == (uint32)metaChunk->lastFreeChunk + 1) {
			uint32 lastFree = chunkIndex;
			for (; lastFree + 1 < metaChunk->chunkCount; lastFree++) {
				Chunk* nextChunk = &metaChunk->chunks[lastFree + 1];
				if (!_IsChunkFree(metaChunk, nextChunk))
					break;
			}
			metaChunk->lastFreeChunk = lastFree;
		}
	}

	PARANOID_CHECKS_ONLY(
		if (!areaDeleted)
			_CheckMetaChunk(metaChunk);
	)
}


/*static*/ void
MemoryManager::_PrepareMetaChunk(MetaChunk* metaChunk, size_t chunkSize)
{
	Area* area = metaChunk->GetArea();

	if (metaChunk == area->metaChunks) {
		// the first chunk is shorter
		size_t unusableSize = ROUNDUP(SLAB_AREA_STRUCT_OFFSET + kAreaAdminSize,
			chunkSize);
		metaChunk->chunkBase = area->BaseAddress() + unusableSize;
		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE - unusableSize;
	}

	metaChunk->chunkSize = chunkSize;
	metaChunk->chunkCount = metaChunk->totalSize / chunkSize;
	metaChunk->usedChunkCount = 0;

	metaChunk->freeChunks = NULL;
	for (int32 i = metaChunk->chunkCount - 1; i >= 0; i--)
		_push(metaChunk->freeChunks, metaChunk->chunks + i);

	metaChunk->firstFreeChunk = 0;
	metaChunk->lastFreeChunk = metaChunk->chunkCount - 1;

	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
}


/*static*/ void
MemoryManager::_AddArea(Area* area)
{
	T(AddArea(area));

	// add the area to the hash table
	WriteLocker writeLocker(sAreaTableLock);
	sAreaTable.InsertUnchecked(area);
	writeLocker.Unlock();

	// add the area's meta chunks to the free lists
	sFreeShortMetaChunks.Add(&area->metaChunks[0]);
	for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++)
		sFreeCompleteMetaChunks.Add(&area->metaChunks[i]);
}


/*static*/ status_t
MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
{
	TRACE("MemoryManager::_AllocateArea(%#" B_PRIx32 ")\n", flags);

	ASSERT((flags & CACHE_DONT_LOCK_KERNEL_SPACE) == 0);

	mutex_unlock(&sLock);

	size_t pagesNeededToMap = 0;
	void* areaBase;
	Area* area;
	VMArea* vmArea = NULL;

	if (sKernelArgs == NULL) {
		// create an area
		uint32 areaCreationFlags = (flags & CACHE_PRIORITY_VIP) != 0
			? CREATE_AREA_PRIORITY_VIP : 0;
		area_id areaID = vm_create_null_area(B_SYSTEM_TEAM, kSlabAreaName,
			&areaBase, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
			areaCreationFlags);
		if (areaID < 0) {
			mutex_lock(&sLock);
			return areaID;
		}

		area = _AreaForAddress((addr_t)areaBase);

		// map the memory for the administrative structure
		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
		VMTranslationMap* translationMap = addressSpace->TranslationMap();

		pagesNeededToMap = translationMap->MaxPagesNeededToMap(
			(addr_t)area, (addr_t)areaBase + SLAB_AREA_SIZE - 1);

		vmArea = VMAreaHash::Lookup(areaID);
		status_t error = _MapChunk(vmArea, (addr_t)area, kAreaAdminSize,
			pagesNeededToMap, flags);
		if (error != B_OK) {
			delete_area(areaID);
			mutex_lock(&sLock);
			return error;
		}

		dprintf("slab memory manager: created area %p (%" B_PRId32 ")\n", area,
			areaID);
	} else {
		// no areas yet -- allocate raw memory
		areaBase = (void*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
			SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			SLAB_AREA_SIZE);
		if (areaBase == NULL) {
			mutex_lock(&sLock);
			return B_NO_MEMORY;
		}
		area = _AreaForAddress((addr_t)areaBase);

		TRACE("MemoryManager::_AllocateArea(): allocated early area %p\n",
			area);
	}

	// init the area structure
	area->vmArea = vmArea;
	area->reserved_memory_for_mapping = pagesNeededToMap * B_PAGE_SIZE;
	area->usedMetaChunkCount = 0;
	area->fullyMapped = vmArea == NULL;

	// init the meta chunks
	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
		MetaChunk* metaChunk = area->metaChunks + i;
		metaChunk->chunkSize = 0;
		metaChunk->chunkBase = (addr_t)areaBase + i * SLAB_CHUNK_SIZE_LARGE;
		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE;
			// Note: chunkBase and totalSize aren't correct for the first
			// meta chunk. They will be set in _PrepareMetaChunk().
		metaChunk->chunkCount = 0;
		metaChunk->usedChunkCount = 0;
		metaChunk->freeChunks = NULL;
	}

	mutex_lock(&sLock);
	_area = area;

	T(AllocateArea(area, flags));

	return B_OK;
}


/*static*/ void
MemoryManager::_FreeArea(Area* area, bool areaRemoved, uint32 flags)
{
	TRACE("MemoryManager::_FreeArea(%p, %#" B_PRIx32 ")\n", area, flags);

	T(FreeArea(area, areaRemoved, flags));

	ASSERT(area->usedMetaChunkCount == 0);

	if (!areaRemoved) {
		// remove the area's meta chunks from the free lists
		ASSERT(area->metaChunks[0].usedChunkCount == 0);
		sFreeShortMetaChunks.Remove(&area->metaChunks[0]);

		for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			ASSERT(area->metaChunks[i].usedChunkCount == 0);
			sFreeCompleteMetaChunks.Remove(&area->metaChunks[i]);
		}

		// remove the area from the hash table
		WriteLocker writeLocker(sAreaTableLock);
		sAreaTable.RemoveUnchecked(area);
		writeLocker.Unlock();
	}

	// We want to keep one or two free areas as a reserve.
	if (sFreeAreaCount <= 1) {
		_PushFreeArea(area);
		return;
	}

	if (area->vmArea == NULL || (flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
		// This is either early in the boot process or we aren't allowed to
		// delete the area now.
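		// Keep the area in the free list and let the maintenance thread
		// delete any surplus later.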
		_PushFreeArea(area);
		_RequestMaintenance();
		return;
	}

	mutex_unlock(&sLock);

	dprintf("slab memory manager: deleting area %p (%" B_PRId32 ")\n", area,
		area->vmArea->id);

	size_t memoryToUnreserve = area->reserved_memory_for_mapping;
	delete_area(area->vmArea->id);
	vm_unreserve_memory(memoryToUnreserve);

	mutex_lock(&sLock);
}


/*static*/ status_t
MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
	size_t reserveAdditionalMemory, uint32 flags)
{
	TRACE("MemoryManager::_MapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
		")\n", vmArea, address, size);

	T(Map(address, size, flags));

	if (vmArea == NULL) {
		// everything is mapped anyway
		return B_OK;
	}

	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
	VMTranslationMap* translationMap = addressSpace->TranslationMap();

	// reserve memory for the chunk
	int priority = (flags & CACHE_PRIORITY_VIP) != 0
		? VM_PRIORITY_VIP : VM_PRIORITY_SYSTEM;
	size_t reservedMemory = size + reserveAdditionalMemory;
	status_t error = vm_try_reserve_memory(size, priority,
		(flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? 0 : 1000000);
	if (error != B_OK)
		return error;

	// reserve the pages we need now
	size_t reservedPages = size / B_PAGE_SIZE
		+ translationMap->MaxPagesNeededToMap(address, address + size - 1);
	vm_page_reservation reservation;
	if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0) {
		if (!vm_page_try_reserve_pages(&reservation, reservedPages, priority)) {
			vm_unreserve_memory(reservedMemory);
			return B_WOULD_BLOCK;
		}
	} else
		vm_page_reserve_pages(&reservation, reservedPages, priority);

	VMCache* cache = vm_area_get_locked_cache(vmArea);

	// map the pages
	translationMap->Lock();

	addr_t areaOffset = address - vmArea->Base();
	addr_t endAreaOffset = areaOffset + size;
	for (size_t offset = areaOffset; offset < endAreaOffset;
			offset += B_PAGE_SIZE) {
		vm_page* page = vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
		cache->InsertPage(page, offset);

		page->IncrementWiredCount();
		atomic_add(&gMappedPagesCount, 1);
		DEBUG_PAGE_ACCESS_END(page);

		translationMap->Map(vmArea->Base() + offset,
			page->physical_page_number * B_PAGE_SIZE,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			vmArea->MemoryType(), &reservation);
	}

	translationMap->Unlock();

	cache->ReleaseRefAndUnlock();

	vm_page_unreserve_pages(&reservation);

	return B_OK;
}


/*static*/ status_t
MemoryManager::_UnmapChunk(VMArea* vmArea, addr_t address, size_t size,
	uint32 flags)
{
	T(Unmap(address, size, flags));

	if (vmArea == NULL)
		return B_ERROR;

	TRACE("MemoryManager::_UnmapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
		")\n", vmArea, address, size);

	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
	VMTranslationMap* translationMap = addressSpace->TranslationMap();
	VMCache* cache = vm_area_get_locked_cache(vmArea);

	// unmap the pages
	translationMap->Lock();
	translationMap->Unmap(address, address + size - 1);
	atomic_add(&gMappedPagesCount, -(size / B_PAGE_SIZE));
	translationMap->Unlock();

	// free the pages
	addr_t areaPageOffset = (address - vmArea->Base()) / B_PAGE_SIZE;
	addr_t areaPageEndOffset = areaPageOffset + size / B_PAGE_SIZE;
	VMCachePagesTree::Iterator it = cache->pages.GetIterator(
		areaPageOffset, true, true);
	while (vm_page* page = it.Next()) {
		if (page->cache_offset >= areaPageEndOffset)
			break;

		DEBUG_PAGE_ACCESS_START(page);

		page->DecrementWiredCount();

		cache->RemovePage(page);
			// the iterator is remove-safe
		vm_page_free(cache, page);
	}

	cache->ReleaseRefAndUnlock();

	vm_unreserve_memory(size);

	return B_OK;
}


/*static*/ void
MemoryManager::_UnmapFreeChunksEarly(Area* area)
{
	if (!area->fullyMapped)
		return;

	TRACE("MemoryManager::_UnmapFreeChunksEarly(%p)\n", area);

	// unmap the space before the Area structure
#if SLAB_AREA_STRUCT_OFFSET > 0
	_UnmapChunk(area->vmArea, area->BaseAddress(), SLAB_AREA_STRUCT_OFFSET,
		0);
#endif

	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
		MetaChunk* metaChunk = area->metaChunks + i;
		if (metaChunk->chunkSize == 0) {
			// meta chunk is free -- unmap it completely
			if (i == 0) {
				_UnmapChunk(area->vmArea, (addr_t)area + kAreaAdminSize,
					SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize, 0);
			} else {
				_UnmapChunk(area->vmArea,
					area->BaseAddress() + i * SLAB_CHUNK_SIZE_LARGE,
					SLAB_CHUNK_SIZE_LARGE, 0);
			}
		} else {
			// unmap free chunks
			for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
					chunk = chunk->next) {
				_UnmapChunk(area->vmArea, _ChunkAddress(metaChunk, chunk),
					metaChunk->chunkSize, 0);
			}

			// The first meta chunk might have space before its first chunk.
			if (i == 0) {
				addr_t unusedStart = (addr_t)area + kAreaAdminSize;
				if (unusedStart < metaChunk->chunkBase) {
					_UnmapChunk(area->vmArea, unusedStart,
						metaChunk->chunkBase - unusedStart, 0);
				}
			}
		}
	}

	area->fullyMapped = false;
}


/*static*/ void
MemoryManager::_ConvertEarlyArea(Area* area)
{
	void* address = (void*)area->BaseAddress();
	area_id areaID = create_area(kSlabAreaName, &address, B_EXACT_ADDRESS,
		SLAB_AREA_SIZE, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (areaID < 0)
		panic("out of memory");

	area->vmArea = VMAreaHash::Lookup(areaID);
}


/*static*/ void
MemoryManager::_RequestMaintenance()
{
	if ((sFreeAreaCount > 0 && sFreeAreaCount <= 2) || sMaintenanceNeeded)
		return;

	sMaintenanceNeeded = true;
	request_memory_manager_maintenance();
}


/*static*/ bool
MemoryManager::_IsChunkInFreeList(const MetaChunk* metaChunk,
	const Chunk* chunk)
{
	Chunk* freeChunk = metaChunk->freeChunks;
	while (freeChunk != NULL) {
		if (freeChunk == chunk)
			return true;
		freeChunk = freeChunk->next;
	}

	return false;
}


#if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS

/*static*/ void
MemoryManager::_CheckMetaChunk(MetaChunk* metaChunk)
{
	Area* area = metaChunk->GetArea();
	int32 metaChunkIndex = metaChunk - area->metaChunks;
	if (metaChunkIndex < 0 || metaChunkIndex >= SLAB_META_CHUNKS_PER_AREA) {
		panic("invalid meta chunk %p!", metaChunk);
		return;
	}

	switch (metaChunk->chunkSize) {
		case 0:
			// unused
			return;
		case SLAB_CHUNK_SIZE_SMALL:
		case SLAB_CHUNK_SIZE_MEDIUM:
		case SLAB_CHUNK_SIZE_LARGE:
			break;
		default:
			panic("meta chunk %p has invalid chunk size: %" B_PRIuSIZE,
				metaChunk, metaChunk->chunkSize);
			return;
	}

	if (metaChunk->totalSize > SLAB_CHUNK_SIZE_LARGE) {
		panic("meta chunk %p has invalid total size: %" B_PRIuSIZE,
			metaChunk, metaChunk->totalSize);
		return;
	}

	addr_t expectedBase = area->BaseAddress()
		+ metaChunkIndex * SLAB_CHUNK_SIZE_LARGE;
	if (metaChunk->chunkBase < expectedBase
		|| metaChunk->chunkBase - expectedBase + metaChunk->totalSize
			> SLAB_CHUNK_SIZE_LARGE) {
		panic("meta chunk %p has invalid base address: %" B_PRIxADDR, metaChunk,
			metaChunk->chunkBase);
		return;
	}

	if (metaChunk->chunkCount != metaChunk->totalSize / metaChunk->chunkSize) {
		panic("meta chunk %p has invalid chunk count: %u", metaChunk,
			metaChunk->chunkCount);
		return;
	}

	if (metaChunk->usedChunkCount > metaChunk->chunkCount) {
		panic("meta chunk %p has invalid unused chunk count: %u", metaChunk,
			metaChunk->usedChunkCount);
		return;
	}

	if (metaChunk->firstFreeChunk > metaChunk->chunkCount) {
		panic("meta chunk %p has invalid first free chunk: %u", metaChunk,
			metaChunk->firstFreeChunk);
		return;
	}

	if (metaChunk->lastFreeChunk >= metaChunk->chunkCount) {
		panic("meta chunk %p has invalid last free chunk: %u", metaChunk,
			metaChunk->lastFreeChunk);
		return;
	}

	// check free list for structural sanity
	uint32 freeChunks = 0;
	for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
			chunk = chunk->next) {
		if ((addr_t)chunk % sizeof(Chunk) != 0 || chunk < metaChunk->chunks
			|| chunk >= metaChunk->chunks + metaChunk->chunkCount) {
			panic("meta chunk %p has invalid element in free list, chunk: %p",
				metaChunk, chunk);
			return;
		}

		if (++freeChunks > metaChunk->chunkCount) {
			panic("meta chunk %p has cyclic free list", metaChunk);
			return;
		}
	}

	if (freeChunks + metaChunk->usedChunkCount > metaChunk->chunkCount) {
		panic("meta chunk %p has mismatching free/used chunk counts: total: "
			"%u, used: %u, free: %" B_PRIu32, metaChunk, metaChunk->chunkCount,
			metaChunk->usedChunkCount, freeChunks);
		return;
	}

	// count used chunks by looking at their reference/next field
	uint32 usedChunks = 0;
	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i))
			usedChunks++;
	}

	if (usedChunks != metaChunk->usedChunkCount) {
		panic("meta chunk %p has used chunks that appear free: total: "
			"%u, used: %u, appearing used: %" B_PRIu32, metaChunk,
			metaChunk->chunkCount, metaChunk->usedChunkCount, usedChunks);
		return;
	}

	// check free range
	for (uint32 i = metaChunk->firstFreeChunk; i < metaChunk->lastFreeChunk;
			i++) {
		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i)) {
			panic("meta chunk %p has used chunk in free range, chunk: %p (%"
				B_PRIu32 ", free range: %u - %u)", metaChunk,
				metaChunk->chunks + i, i, metaChunk->firstFreeChunk,
				metaChunk->lastFreeChunk);
			return;
		}
	}
}

#endif	// DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS


/*static*/ int
MemoryManager::_DumpRawAllocations(int argc, char** argv)
{
	kprintf("%-*s meta chunk chunk %-*s size (KB)\n",
		B_PRINTF_POINTER_WIDTH, "area", B_PRINTF_POINTER_WIDTH, "base");

	size_t totalSize = 0;

	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			MetaChunk* metaChunk = area->metaChunks + i;
			if (metaChunk->chunkSize == 0)
				continue;
			for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
				Chunk* chunk = metaChunk->chunks + k;

				// skip free chunks
				if (_IsChunkFree(metaChunk, chunk))
					continue;

				addr_t reference = chunk->reference;
				if ((reference & 1) == 0 || reference == 1)
					continue;

				addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
				size_t size = reference - chunkAddress + 1;
				totalSize += size;

				kprintf("%p %10" B_PRId32 " %5" B_PRIu32 " %p %9"
					B_PRIuSIZE "\n", area, i, k, (void*)chunkAddress,
					size / 1024);
			}
		}
	}

	kprintf("total:%*s%9" B_PRIuSIZE "\n", (2 * B_PRINTF_POINTER_WIDTH) + 21,
		"", totalSize / 1024);

	return 0;
}


/*static*/ void
MemoryManager::_PrintMetaChunkTableHeader(bool printChunks)
{
	if (printChunks)
		kprintf("chunk base cache object size cache name\n");
	else
		kprintf("chunk base\n");
}

/*static*/ void
MemoryManager::_DumpMetaChunk(MetaChunk* metaChunk, bool printChunks,
	bool printHeader)
{
	if (printHeader)
		_PrintMetaChunkTableHeader(printChunks);

	const char* type = "empty";
	if (metaChunk->chunkSize != 0) {
		switch (metaChunk->chunkSize) {
			case SLAB_CHUNK_SIZE_SMALL:
				type = "small";
				break;
			case SLAB_CHUNK_SIZE_MEDIUM:
				type = "medium";
				break;
			case SLAB_CHUNK_SIZE_LARGE:
				type = "large";
				break;
		}
	}

	int metaChunkIndex = metaChunk - metaChunk->GetArea()->metaChunks;
	kprintf("%5d %p --- %6s meta chunk", metaChunkIndex,
		(void*)metaChunk->chunkBase, type);
	if (metaChunk->chunkSize != 0) {
		kprintf(": %4u/%4u used, %-4u-%4u free ------------\n",
			metaChunk->usedChunkCount, metaChunk->chunkCount,
			metaChunk->firstFreeChunk, metaChunk->lastFreeChunk);
	} else
		kprintf(" --------------------------------------------\n");

	if (metaChunk->chunkSize == 0 || !printChunks)
		return;

	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
		Chunk* chunk = metaChunk->chunks + i;

		// skip free chunks
		if (_IsChunkFree(metaChunk, chunk)) {
			if (!_IsChunkInFreeList(metaChunk, chunk)) {
				kprintf("%5" B_PRIu32 " %p appears free, but isn't in free "
					"list!\n", i, (void*)_ChunkAddress(metaChunk, chunk));
			}

			continue;
		}

		addr_t reference = chunk->reference;
		if ((reference & 1) == 0) {
			ObjectCache* cache = (ObjectCache*)reference;
			kprintf("%5" B_PRIu32 " %p %p %11" B_PRIuSIZE " %s\n", i,
				(void*)_ChunkAddress(metaChunk, chunk), cache,
				cache != NULL ? cache->object_size : 0,
				cache != NULL ? cache->name : "");
		} else if (reference != 1) {
			kprintf("%5" B_PRIu32 " %p raw allocation up to %p\n", i,
				(void*)_ChunkAddress(metaChunk, chunk), (void*)reference);
		}
	}
}


/*static*/ int
MemoryManager::_DumpMetaChunk(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	uint64 address;
	if (!evaluate_debug_expression(argv[1], &address, false))
		return 0;

	Area* area = _AreaForAddress(address);

	MetaChunk* metaChunk;
	if ((addr_t)address >= (addr_t)area->metaChunks
		&& (addr_t)address
			< (addr_t)(area->metaChunks + SLAB_META_CHUNKS_PER_AREA)) {
		metaChunk = (MetaChunk*)(addr_t)address;
	} else {
		metaChunk = area->metaChunks
			+ (address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE;
	}

	_DumpMetaChunk(metaChunk, true, true);

	return 0;
}


/*static*/ void
MemoryManager::_DumpMetaChunks(const char* name, MetaChunkList& metaChunkList,
	bool printChunks)
{
	kprintf("%s:\n", name);

	for (MetaChunkList::Iterator it = metaChunkList.GetIterator();
			MetaChunk* metaChunk = it.Next();) {
		_DumpMetaChunk(metaChunk, printChunks, false);
	}
}


/*static*/ int
MemoryManager::_DumpMetaChunks(int argc, char** argv)
{
	bool printChunks = argc > 1 && strcmp(argv[1], "-c") == 0;

	_PrintMetaChunkTableHeader(printChunks);
	_DumpMetaChunks("free complete", sFreeCompleteMetaChunks, printChunks);
	_DumpMetaChunks("free short", sFreeShortMetaChunks, printChunks);
	_DumpMetaChunks("partial small", sPartialMetaChunksSmall, printChunks);
	_DumpMetaChunks("partial medium", sPartialMetaChunksMedium, printChunks);

	return 0;
}


/*static*/ int
MemoryManager::_DumpArea(int argc, char** argv)
{
	bool printChunks = false;

	int argi = 1;
	while (argi < argc) {
		if (argv[argi][0] != '-')
			break;
		const char* arg = argv[argi++];
		if (strcmp(arg, "-c") == 0) {
			printChunks = true;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	if (argi + 1 != argc) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	uint64 address;
	if (!evaluate_debug_expression(argv[argi], &address, false))
		return 0;

	Area* area = _AreaForAddress((addr_t)address);

	for (uint32 k = 0; k < SLAB_META_CHUNKS_PER_AREA; k++) {
		MetaChunk* metaChunk = area->metaChunks + k;
		_DumpMetaChunk(metaChunk, printChunks, k == 0);
	}

	return 0;
}


/*static*/ int
MemoryManager::_DumpAreas(int argc, char** argv)
{
	kprintf(" %*s %*s meta small medium large\n",
		B_PRINTF_POINTER_WIDTH, "base", B_PRINTF_POINTER_WIDTH, "area");

	size_t totalTotalSmall = 0;
	size_t totalUsedSmall = 0;
	size_t totalTotalMedium = 0;
	size_t totalUsedMedium = 0;
	size_t totalUsedLarge = 0;
	uint32 areaCount = 0;

	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		areaCount++;

		// sum up the free/used counts for the chunk sizes
		int totalSmall = 0;
		int usedSmall = 0;
		int totalMedium = 0;
		int usedMedium = 0;
		int usedLarge = 0;

		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			MetaChunk* metaChunk = area->metaChunks + i;
			if (metaChunk->chunkSize == 0)
				continue;

			switch (metaChunk->chunkSize) {
				case SLAB_CHUNK_SIZE_SMALL:
					totalSmall += metaChunk->chunkCount;
					usedSmall += metaChunk->usedChunkCount;
					break;
				case SLAB_CHUNK_SIZE_MEDIUM:
					totalMedium += metaChunk->chunkCount;
					usedMedium += metaChunk->usedChunkCount;
					break;
				case SLAB_CHUNK_SIZE_LARGE:
					usedLarge += metaChunk->usedChunkCount;
					break;
			}
		}

		kprintf("%p %p %2u/%2u %4d/%4d %3d/%3d %5d\n",
			area, area->vmArea, area->usedMetaChunkCount,
			SLAB_META_CHUNKS_PER_AREA, usedSmall, totalSmall, usedMedium,
			totalMedium, usedLarge);

		totalTotalSmall += totalSmall;
		totalUsedSmall += usedSmall;
		totalTotalMedium += totalMedium;
		totalUsedMedium += usedMedium;
		totalUsedLarge += usedLarge;
	}

	kprintf("%d free area%s:\n", sFreeAreaCount,
		sFreeAreaCount == 1 ? "" : "s");
	for (Area* area = sFreeAreas; area != NULL; area = area->next) {
		areaCount++;
		kprintf("%p %p\n", area, area->vmArea);
	}

	kprintf("total usage:\n");
	kprintf(" small: %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedSmall,
		totalTotalSmall);
	kprintf(" medium: %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedMedium,
		totalTotalMedium);
	kprintf(" large: %" B_PRIuSIZE "\n", totalUsedLarge);
	kprintf(" memory: %" B_PRIuSIZE "/%" B_PRIu32 " KB\n",
		(totalUsedSmall * SLAB_CHUNK_SIZE_SMALL
			+ totalUsedMedium * SLAB_CHUNK_SIZE_MEDIUM
			+ totalUsedLarge * SLAB_CHUNK_SIZE_LARGE) / 1024,
		areaCount * SLAB_AREA_SIZE / 1024);
	kprintf(" overhead: %" B_PRIuSIZE " KB\n",
		areaCount * kAreaAdminSize / 1024);

	return 0;
}


#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING

void
MemoryManager::_AddTrackingInfo(void* allocation, size_t size,
	AbstractTraceEntryWithStackTrace* traceEntry)
{
	_TrackingInfoFor(allocation, size)->Init(traceEntry);
}

#endif	// SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING


RANGE_MARKER_FUNCTION_END(SlabMemoryManager)