1 /* 2 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>. 3 * Copyright 2008-2010, Axel Dörfler. All Rights Reserved. 4 * Copyright 2007, Hugo Santos. All Rights Reserved. 5 * 6 * Distributed under the terms of the MIT License. 7 */ 8 9 10 #include <slab/Slab.h> 11 12 #include <algorithm> 13 #include <new> 14 #include <stdlib.h> 15 #include <string.h> 16 17 #include <KernelExport.h> 18 19 #include <condition_variable.h> 20 #include <kernel.h> 21 #include <low_resource_manager.h> 22 #include <slab/ObjectDepot.h> 23 #include <smp.h> 24 #include <tracing.h> 25 #include <util/AutoLock.h> 26 #include <util/DoublyLinkedList.h> 27 #include <util/khash.h> 28 #include <vm/vm.h> 29 #include <vm/VMAddressSpace.h> 30 31 #include "HashedObjectCache.h" 32 #include "MemoryManager.h" 33 #include "slab_private.h" 34 #include "SmallObjectCache.h" 35 36 37 typedef DoublyLinkedList<ObjectCache> ObjectCacheList; 38 39 typedef DoublyLinkedList<ObjectCache, 40 DoublyLinkedListMemberGetLink<ObjectCache, &ObjectCache::maintenance_link> > 41 MaintenanceQueue; 42 43 static ObjectCacheList sObjectCaches; 44 static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list"); 45 46 static mutex sMaintenanceLock 47 = MUTEX_INITIALIZER("object cache resize requests"); 48 static MaintenanceQueue sMaintenanceQueue; 49 static ConditionVariable sMaintenanceCondition; 50 51 52 #if SLAB_OBJECT_CACHE_TRACING 53 54 55 namespace SlabObjectCacheTracing { 56 57 class ObjectCacheTraceEntry : public AbstractTraceEntry { 58 public: 59 ObjectCacheTraceEntry(ObjectCache* cache) 60 : 61 fCache(cache) 62 { 63 } 64 65 protected: 66 ObjectCache* fCache; 67 }; 68 69 70 class Create : public ObjectCacheTraceEntry { 71 public: 72 Create(const char* name, size_t objectSize, size_t alignment, 73 size_t maxByteUsage, uint32 flags, void* cookie, 74 ObjectCache* cache) 75 : 76 ObjectCacheTraceEntry(cache), 77 fObjectSize(objectSize), 78 fAlignment(alignment), 79 fMaxByteUsage(maxByteUsage), 80 fFlags(flags), 81 fCookie(cookie) 82 { 83 fName = alloc_tracing_buffer_strcpy(name, 64, false); 84 Initialized(); 85 } 86 87 virtual void AddDump(TraceOutput& out) 88 { 89 out.Print("object cache create: name: \"%s\", object size: %lu, " 90 "alignment: %lu, max usage: %lu, flags: 0x%lx, cookie: %p " 91 "-> cache: %p", fName, fObjectSize, fAlignment, fMaxByteUsage, 92 fFlags, fCookie, fCache); 93 } 94 95 private: 96 const char* fName; 97 size_t fObjectSize; 98 size_t fAlignment; 99 size_t fMaxByteUsage; 100 uint32 fFlags; 101 void* fCookie; 102 }; 103 104 105 class Delete : public ObjectCacheTraceEntry { 106 public: 107 Delete(ObjectCache* cache) 108 : 109 ObjectCacheTraceEntry(cache) 110 { 111 Initialized(); 112 } 113 114 virtual void AddDump(TraceOutput& out) 115 { 116 out.Print("object cache delete: %p", fCache); 117 } 118 }; 119 120 121 class Alloc : public ObjectCacheTraceEntry { 122 public: 123 Alloc(ObjectCache* cache, uint32 flags, void* object) 124 : 125 ObjectCacheTraceEntry(cache), 126 fFlags(flags), 127 fObject(object) 128 { 129 Initialized(); 130 } 131 132 virtual void AddDump(TraceOutput& out) 133 { 134 out.Print("object cache alloc: cache: %p, flags: 0x%lx -> " 135 "object: %p", fCache, fFlags, fObject); 136 } 137 138 private: 139 uint32 fFlags; 140 void* fObject; 141 }; 142 143 144 class Free : public ObjectCacheTraceEntry { 145 public: 146 Free(ObjectCache* cache, void* object) 147 : 148 ObjectCacheTraceEntry(cache), 149 fObject(object) 150 { 151 Initialized(); 152 } 153 154 virtual void AddDump(TraceOutput& out) 155 { 156 out.Print("object cache free: cache: %p, object: %p", fCache, 157 fObject); 158 } 159 160 private: 161 void* fObject; 162 }; 163 164 165 class Reserve : public ObjectCacheTraceEntry { 166 public: 167 Reserve(ObjectCache* cache, size_t count, uint32 flags) 168 : 169 ObjectCacheTraceEntry(cache), 170 fCount(count), 171 fFlags(flags) 172 { 173 Initialized(); 174 } 175 176 virtual void AddDump(TraceOutput& out) 177 { 178 out.Print("object cache reserve: cache: %p, count: %lu, " 179 "flags: 0x%lx", fCache, fCount, fFlags); 180 } 181 182 private: 183 uint32 fCount; 184 uint32 fFlags; 185 }; 186 187 188 } // namespace SlabObjectCacheTracing 189 190 # define T(x) new(std::nothrow) SlabObjectCacheTracing::x 191 192 #else 193 # define T(x) 194 #endif // SLAB_OBJECT_CACHE_TRACING 195 196 197 // #pragma mark - 198 199 200 static void 201 dump_slab(::slab* slab) 202 { 203 kprintf(" %p %p %6" B_PRIuSIZE " %6" B_PRIuSIZE " %6" B_PRIuSIZE " %p\n", 204 slab, slab->pages, slab->size, slab->count, slab->offset, slab->free); 205 } 206 207 208 static int 209 dump_slabs(int argc, char* argv[]) 210 { 211 kprintf("%10s %22s %8s %8s %6s %8s %8s %8s\n", "address", "name", 212 "objsize", "usage", "empty", "usedobj", "total", "flags"); 213 214 ObjectCacheList::Iterator it = sObjectCaches.GetIterator(); 215 216 while (it.HasNext()) { 217 ObjectCache* cache = it.Next(); 218 219 kprintf("%p %22s %8lu %8lu %6lu %8lu %8lu %8lx\n", cache, cache->name, 220 cache->object_size, cache->usage, cache->empty_count, 221 cache->used_count, cache->total_objects, cache->flags); 222 } 223 224 return 0; 225 } 226 227 228 static int 229 dump_cache_info(int argc, char* argv[]) 230 { 231 if (argc < 2) { 232 kprintf("usage: cache_info [address]\n"); 233 return 0; 234 } 235 236 ObjectCache* cache = (ObjectCache*)parse_expression(argv[1]); 237 238 kprintf("name: %s\n", cache->name); 239 kprintf("lock: %p\n", &cache->lock); 240 kprintf("object_size: %lu\n", cache->object_size); 241 kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle); 242 kprintf("total_objects: %lu\n", cache->total_objects); 243 kprintf("used_count: %lu\n", cache->used_count); 244 kprintf("empty_count: %lu\n", cache->empty_count); 245 kprintf("pressure: %lu\n", cache->pressure); 246 kprintf("slab_size: %lu\n", cache->slab_size); 247 kprintf("usage: %lu\n", cache->usage); 248 kprintf("maximum: %lu\n", cache->maximum); 249 kprintf("flags: 0x%lx\n", cache->flags); 250 kprintf("cookie: %p\n", cache->cookie); 251 kprintf("resize entry don't wait: %p\n", cache->resize_entry_dont_wait); 252 kprintf("resize entry can wait: %p\n", cache->resize_entry_can_wait); 253 254 kprintf(" slab chunk size used offset free\n"); 255 256 SlabList::Iterator iterator = cache->empty.GetIterator(); 257 if (iterator.HasNext()) 258 kprintf("empty:\n"); 259 while (::slab* slab = iterator.Next()) 260 dump_slab(slab); 261 262 iterator = cache->partial.GetIterator(); 263 if (iterator.HasNext()) 264 kprintf("partial:\n"); 265 while (::slab* slab = iterator.Next()) 266 dump_slab(slab); 267 268 iterator = cache->full.GetIterator(); 269 if (iterator.HasNext()) 270 kprintf("full:\n"); 271 while (::slab* slab = iterator.Next()) 272 dump_slab(slab); 273 274 if ((cache->flags & CACHE_NO_DEPOT) == 0) { 275 kprintf("depot:\n"); 276 dump_object_depot(&cache->depot); 277 } 278 279 return 0; 280 } 281 282 283 // #pragma mark - 284 285 286 void 287 request_memory_manager_maintenance() 288 { 289 MutexLocker locker(sMaintenanceLock); 290 sMaintenanceCondition.NotifyAll(); 291 } 292 293 294 // #pragma mark - 295 296 297 static void 298 delete_object_cache_internal(object_cache* cache) 299 { 300 if (!(cache->flags & CACHE_NO_DEPOT)) 301 object_depot_destroy(&cache->depot, 0); 302 303 mutex_lock(&cache->lock); 304 305 if (!cache->full.IsEmpty()) 306 panic("cache destroy: still has full slabs"); 307 308 if (!cache->partial.IsEmpty()) 309 panic("cache destroy: still has partial slabs"); 310 311 while (!cache->empty.IsEmpty()) 312 cache->ReturnSlab(cache->empty.RemoveHead(), 0); 313 314 mutex_destroy(&cache->lock); 315 cache->Delete(); 316 } 317 318 319 static void 320 increase_object_reserve(ObjectCache* cache) 321 { 322 MutexLocker locker(sMaintenanceLock); 323 324 cache->maintenance_resize = true; 325 326 if (!cache->maintenance_pending) { 327 cache->maintenance_pending = true; 328 sMaintenanceQueue.Add(cache); 329 sMaintenanceCondition.NotifyAll(); 330 } 331 } 332 333 334 /*! Makes sure that \a objectCount objects can be allocated. 335 */ 336 static status_t 337 object_cache_reserve_internal(ObjectCache* cache, size_t objectCount, 338 uint32 flags) 339 { 340 // If someone else is already adding slabs, we wait for that to be finished 341 // first. 342 thread_id thread = find_thread(NULL); 343 while (true) { 344 if (objectCount <= cache->total_objects - cache->used_count) 345 return B_OK; 346 347 ObjectCacheResizeEntry* resizeEntry = NULL; 348 if (cache->resize_entry_dont_wait != NULL) { 349 resizeEntry = cache->resize_entry_dont_wait; 350 if (thread == resizeEntry->thread) 351 return B_WOULD_BLOCK; 352 // Note: We could still have reentered the function, i.e. 353 // resize_entry_can_wait would be ours. That doesn't matter much, 354 // though, since after the don't-wait thread has done its job 355 // everyone will be happy. 356 } else if (cache->resize_entry_can_wait != NULL) { 357 resizeEntry = cache->resize_entry_can_wait; 358 if (thread == resizeEntry->thread) 359 return B_WOULD_BLOCK; 360 361 if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0) 362 break; 363 } else 364 break; 365 366 ConditionVariableEntry entry; 367 resizeEntry->condition.Add(&entry); 368 369 cache->Unlock(); 370 entry.Wait(); 371 cache->Lock(); 372 } 373 374 // prepare the resize entry others can wait on 375 ObjectCacheResizeEntry*& resizeEntry 376 = (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 377 ? cache->resize_entry_dont_wait : cache->resize_entry_can_wait; 378 379 ObjectCacheResizeEntry myResizeEntry; 380 resizeEntry = &myResizeEntry; 381 resizeEntry->condition.Init(cache, "wait for slabs"); 382 resizeEntry->thread = thread; 383 384 // add new slabs until there are as many free ones as requested 385 while (objectCount > cache->total_objects - cache->used_count) { 386 slab* newSlab = cache->CreateSlab(flags); 387 if (newSlab == NULL) { 388 resizeEntry->condition.NotifyAll(); 389 resizeEntry = NULL; 390 return B_NO_MEMORY; 391 } 392 393 cache->usage += cache->slab_size; 394 cache->total_objects += newSlab->size; 395 396 cache->empty.Add(newSlab); 397 cache->empty_count++; 398 } 399 400 resizeEntry->condition.NotifyAll(); 401 resizeEntry = NULL; 402 403 return B_OK; 404 } 405 406 407 static void 408 object_cache_low_memory(void* dummy, uint32 resources, int32 level) 409 { 410 if (level == B_NO_LOW_RESOURCE) 411 return; 412 413 MutexLocker cacheListLocker(sObjectCacheListLock); 414 415 // Append the first cache to the end of the queue. We assume that it is 416 // one of the caches that will never be deleted and thus we use it as a 417 // marker. 418 ObjectCache* firstCache = sObjectCaches.RemoveHead(); 419 sObjectCaches.Add(firstCache); 420 cacheListLocker.Unlock(); 421 422 ObjectCache* cache; 423 do { 424 cacheListLocker.Lock(); 425 426 cache = sObjectCaches.RemoveHead(); 427 sObjectCaches.Add(cache); 428 429 MutexLocker maintenanceLocker(sMaintenanceLock); 430 if (cache->maintenance_pending || cache->maintenance_in_progress) { 431 // We don't want to mess with caches in maintenance. 432 continue; 433 } 434 435 cache->maintenance_pending = true; 436 cache->maintenance_in_progress = true; 437 438 maintenanceLocker.Unlock(); 439 cacheListLocker.Unlock(); 440 441 // We are calling the reclaimer without the object cache lock 442 // to give the owner a chance to return objects to the slabs. 443 444 if (cache->reclaimer) 445 cache->reclaimer(cache->cookie, level); 446 447 if ((cache->flags & CACHE_NO_DEPOT) == 0) 448 object_depot_make_empty(&cache->depot, 0); 449 450 MutexLocker cacheLocker(cache->lock); 451 size_t minimumAllowed; 452 453 switch (level) { 454 case B_LOW_RESOURCE_NOTE: 455 minimumAllowed = cache->pressure / 2 + 1; 456 break; 457 458 case B_LOW_RESOURCE_WARNING: 459 cache->pressure /= 2; 460 minimumAllowed = 0; 461 break; 462 463 default: 464 cache->pressure = 0; 465 minimumAllowed = 0; 466 break; 467 } 468 469 while (cache->empty_count > minimumAllowed) { 470 // make sure we respect the cache's minimum object reserve 471 size_t objectsPerSlab = cache->empty.Head()->size; 472 size_t freeObjects = cache->total_objects - cache->used_count; 473 if (freeObjects < cache->min_object_reserve + objectsPerSlab) 474 break; 475 476 cache->ReturnSlab(cache->empty.RemoveHead(), 0); 477 cache->empty_count--; 478 } 479 480 cacheLocker.Unlock(); 481 482 // Check whether in the meantime someone has really requested 483 // maintenance for the cache. 484 maintenanceLocker.Lock(); 485 486 if (cache->maintenance_delete) { 487 delete_object_cache_internal(cache); 488 continue; 489 } 490 491 cache->maintenance_in_progress = false; 492 493 if (cache->maintenance_resize) 494 sMaintenanceQueue.Add(cache); 495 else 496 cache->maintenance_pending = false; 497 } while (cache != firstCache); 498 } 499 500 501 static status_t 502 object_cache_maintainer(void*) 503 { 504 while (true) { 505 MutexLocker locker(sMaintenanceLock); 506 507 // wait for the next request 508 while (sMaintenanceQueue.IsEmpty()) { 509 // perform memory manager maintenance, if needed 510 if (MemoryManager::MaintenanceNeeded()) { 511 locker.Unlock(); 512 MemoryManager::PerformMaintenance(); 513 locker.Lock(); 514 continue; 515 } 516 517 ConditionVariableEntry entry; 518 sMaintenanceCondition.Add(&entry); 519 locker.Unlock(); 520 entry.Wait(); 521 locker.Lock(); 522 } 523 524 ObjectCache* cache = sMaintenanceQueue.RemoveHead(); 525 526 while (true) { 527 bool resizeRequested = cache->maintenance_resize; 528 bool deleteRequested = cache->maintenance_delete; 529 530 if (!resizeRequested && !deleteRequested) { 531 cache->maintenance_pending = false; 532 cache->maintenance_in_progress = false; 533 break; 534 } 535 536 cache->maintenance_resize = false; 537 cache->maintenance_in_progress = true; 538 539 locker.Unlock(); 540 541 if (deleteRequested) { 542 delete_object_cache_internal(cache); 543 break; 544 } 545 546 // resize the cache, if necessary 547 548 MutexLocker cacheLocker(cache->lock); 549 550 if (resizeRequested) { 551 status_t error = object_cache_reserve_internal(cache, 552 cache->min_object_reserve, 0); 553 if (error != B_OK) { 554 dprintf("object cache resizer: Failed to resize object " 555 "cache %p!\n", cache); 556 break; 557 } 558 } 559 560 locker.Lock(); 561 } 562 } 563 564 // never can get here 565 return B_OK; 566 } 567 568 569 // #pragma mark - public API 570 571 572 object_cache* 573 create_object_cache(const char* name, size_t object_size, size_t alignment, 574 void* cookie, object_cache_constructor constructor, 575 object_cache_destructor destructor) 576 { 577 return create_object_cache_etc(name, object_size, alignment, 0, 0, 0, 0, 578 cookie, constructor, destructor, NULL); 579 } 580 581 582 object_cache* 583 create_object_cache_etc(const char* name, size_t objectSize, size_t alignment, 584 size_t maximum, size_t magazineCapacity, size_t maxMagazineCount, 585 uint32 flags, void* cookie, object_cache_constructor constructor, 586 object_cache_destructor destructor, object_cache_reclaimer reclaimer) 587 { 588 ObjectCache* cache; 589 590 if (objectSize == 0) { 591 cache = NULL; 592 } else if (objectSize <= 256) { 593 cache = SmallObjectCache::Create(name, objectSize, alignment, maximum, 594 magazineCapacity, maxMagazineCount, flags, cookie, constructor, 595 destructor, reclaimer); 596 } else { 597 cache = HashedObjectCache::Create(name, objectSize, alignment, maximum, 598 magazineCapacity, maxMagazineCount, flags, cookie, constructor, 599 destructor, reclaimer); 600 } 601 602 if (cache != NULL) { 603 MutexLocker _(sObjectCacheListLock); 604 sObjectCaches.Add(cache); 605 } 606 607 T(Create(name, objectSize, alignment, maximum, flags, cookie, cache)); 608 return cache; 609 } 610 611 612 void 613 delete_object_cache(object_cache* cache) 614 { 615 T(Delete(cache)); 616 617 { 618 MutexLocker _(sObjectCacheListLock); 619 sObjectCaches.Remove(cache); 620 } 621 622 MutexLocker cacheLocker(cache->lock); 623 624 { 625 MutexLocker maintenanceLocker(sMaintenanceLock); 626 if (cache->maintenance_in_progress) { 627 // The maintainer thread is working with the cache. Just mark it 628 // to be deleted. 629 cache->maintenance_delete = true; 630 return; 631 } 632 633 // unschedule maintenance 634 if (cache->maintenance_pending) 635 sMaintenanceQueue.Remove(cache); 636 } 637 638 // at this point no-one should have a reference to the cache anymore 639 cacheLocker.Unlock(); 640 641 delete_object_cache_internal(cache); 642 } 643 644 645 status_t 646 object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount) 647 { 648 MutexLocker _(cache->lock); 649 650 if (cache->min_object_reserve == objectCount) 651 return B_OK; 652 653 cache->min_object_reserve = objectCount; 654 655 increase_object_reserve(cache); 656 657 return B_OK; 658 } 659 660 661 void* 662 object_cache_alloc(object_cache* cache, uint32 flags) 663 { 664 if (!(cache->flags & CACHE_NO_DEPOT)) { 665 void* object = object_depot_obtain(&cache->depot); 666 if (object) { 667 T(Alloc(cache, flags, object)); 668 return object; 669 } 670 } 671 672 MutexLocker _(cache->lock); 673 slab* source = NULL; 674 675 while (true) { 676 source = cache->partial.Head(); 677 if (source != NULL) 678 break; 679 680 source = cache->empty.RemoveHead(); 681 if (source != NULL) { 682 cache->empty_count--; 683 cache->partial.Add(source); 684 break; 685 } 686 687 if (object_cache_reserve_internal(cache, 1, flags) != B_OK) { 688 T(Alloc(cache, flags, NULL)); 689 return NULL; 690 } 691 692 cache->pressure++; 693 } 694 695 ParanoiaChecker _2(source); 696 697 object_link* link = _pop(source->free); 698 source->count--; 699 cache->used_count++; 700 701 if (cache->total_objects - cache->used_count < cache->min_object_reserve) 702 increase_object_reserve(cache); 703 704 REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next, 705 sizeof(void*)); 706 707 TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.", 708 link_to_object(link, cache->object_size), link, source, source->count); 709 710 if (source->count == 0) { 711 cache->partial.Remove(source); 712 cache->full.Add(source); 713 } 714 715 void* object = link_to_object(link, cache->object_size); 716 T(Alloc(cache, flags, object)); 717 return object; 718 } 719 720 721 void 722 object_cache_free(object_cache* cache, void* object, uint32 flags) 723 { 724 if (object == NULL) 725 return; 726 727 T(Free(cache, object)); 728 729 if (!(cache->flags & CACHE_NO_DEPOT)) { 730 object_depot_store(&cache->depot, object, flags); 731 return; 732 } 733 734 MutexLocker _(cache->lock); 735 cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags); 736 } 737 738 739 status_t 740 object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags) 741 { 742 if (objectCount == 0) 743 return B_OK; 744 745 T(Reserve(cache, objectCount, flags)); 746 747 MutexLocker _(cache->lock); 748 return object_cache_reserve_internal(cache, objectCount, flags); 749 } 750 751 752 void 753 object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory) 754 { 755 MutexLocker _(cache->lock); 756 *_allocatedMemory = cache->usage; 757 } 758 759 760 void 761 slab_init(kernel_args* args) 762 { 763 MemoryManager::Init(args); 764 765 new (&sObjectCaches) ObjectCacheList(); 766 767 block_allocator_init_boot(); 768 } 769 770 771 void 772 slab_init_post_area() 773 { 774 MemoryManager::InitPostArea(); 775 776 add_debugger_command("slabs", dump_slabs, "list all object caches"); 777 add_debugger_command("slab_cache", dump_cache_info, 778 "dump information about a specific object cache"); 779 add_debugger_command("slab_depot", dump_object_depot, 780 "dump contents of an object depot"); 781 add_debugger_command("slab_magazine", dump_depot_magazine, 782 "dump contents of a depot magazine"); 783 } 784 785 786 void 787 slab_init_post_sem() 788 { 789 register_low_resource_handler(object_cache_low_memory, NULL, 790 B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY 791 | B_KERNEL_RESOURCE_ADDRESS_SPACE, 5); 792 793 block_allocator_init_rest(); 794 } 795 796 797 void 798 slab_init_post_thread() 799 { 800 new(&sMaintenanceQueue) MaintenanceQueue; 801 sMaintenanceCondition.Init(&sMaintenanceQueue, "object cache maintainer"); 802 803 thread_id objectCacheResizer = spawn_kernel_thread(object_cache_maintainer, 804 "object cache resizer", B_URGENT_PRIORITY, NULL); 805 if (objectCacheResizer < 0) { 806 panic("slab_init_post_thread(): failed to spawn object cache resizer " 807 "thread\n"); 808 return; 809 } 810 811 resume_thread(objectCacheResizer); 812 } 813