/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2008, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include <slab/Slab.h>

#include <algorithm>
#include <new>
#include <signal.h>
#include <stdlib.h>
#include <string.h>

#include <KernelExport.h>

#include <condition_variable.h>
#include <kernel.h>
#include <low_resource_manager.h>
#include <slab/ObjectDepot.h>
#include <smp.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <util/khash.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "HashedObjectCache.h"
#include "MemoryManager.h"
#include "slab_private.h"
#include "SmallObjectCache.h"


typedef DoublyLinkedList<ObjectCache> ObjectCacheList;

typedef DoublyLinkedList<ObjectCache,
	DoublyLinkedListMemberGetLink<ObjectCache, &ObjectCache::maintenance_link> >
		MaintenanceQueue;

static ObjectCacheList sObjectCaches;
static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");

static mutex sMaintenanceLock
	= MUTEX_INITIALIZER("object cache resize requests");
static MaintenanceQueue sMaintenanceQueue;
static ConditionVariable sMaintenanceCondition;


#if SLAB_OBJECT_CACHE_TRACING


namespace SlabObjectCacheTracing {

class ObjectCacheTraceEntry : public AbstractTraceEntry {
	public:
		ObjectCacheTraceEntry(ObjectCache* cache)
			:
			fCache(cache)
		{
		}

	protected:
		ObjectCache*	fCache;
};


class Create : public ObjectCacheTraceEntry {
	public:
		Create(const char* name, size_t objectSize, size_t alignment,
				size_t maxByteUsage, uint32 flags, void* cookie,
				ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache),
			fObjectSize(objectSize),
			fAlignment(alignment),
			fMaxByteUsage(maxByteUsage),
			fFlags(flags),
			fCookie(cookie)
		{
			fName = alloc_tracing_buffer_strcpy(name, 64, false);
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache create: name: \"%s\", object size: %lu, "
				"alignment: %lu, max usage: %lu, flags: 0x%lx, cookie: %p "
				"-> cache: %p", fName, fObjectSize, fAlignment, fMaxByteUsage,
				fFlags, fCookie, fCache);
		}

	private:
		const char*	fName;
		size_t		fObjectSize;
		size_t		fAlignment;
		size_t		fMaxByteUsage;
		uint32		fFlags;
		void*		fCookie;
};


class Delete : public ObjectCacheTraceEntry {
	public:
		Delete(ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache delete: %p", fCache);
		}
};


class Alloc : public ObjectCacheTraceEntry {
	public:
		Alloc(ObjectCache* cache, uint32 flags, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fFlags(flags),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache alloc: cache: %p, flags: 0x%lx -> "
				"object: %p", fCache, fFlags, fObject);
		}

	private:
		uint32		fFlags;
		void*		fObject;
};


class Free : public ObjectCacheTraceEntry {
	public:
		Free(ObjectCache* cache, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache free: cache: %p, object: %p", fCache,
				fObject);
		}

	private:
		void*	fObject;
};


class Reserve : public ObjectCacheTraceEntry {
	public:
		Reserve(ObjectCache* cache, size_t count, uint32 flags)
			:
			ObjectCacheTraceEntry(cache),
			fCount(count),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache reserve: cache: %p, count: %lu, "
				"flags: 0x%lx", fCache, fCount, fFlags);
		}

	private:
		uint32		fCount;
		uint32		fFlags;
};


}	// namespace SlabObjectCacheTracing

#	define T(x)	new(std::nothrow) SlabObjectCacheTracing::x

#else
#	define T(x)
#endif	// SLAB_OBJECT_CACHE_TRACING


// #pragma mark -


static int
dump_slabs(int argc, char* argv[])
{
	kprintf("%10s %22s %8s %8s %6s %8s %8s %8s\n", "address", "name",
		"objsize", "usage", "empty", "usedobj", "total", "flags");

	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();

	while (it.HasNext()) {
		ObjectCache* cache = it.Next();

		kprintf("%p %22s %8lu %8lu %6lu %8lu %8lu %8lx\n", cache, cache->name,
			cache->object_size, cache->usage, cache->empty_count,
			cache->used_count, cache->total_objects, cache->flags);
	}

	return 0;
}


static int
dump_cache_info(int argc, char* argv[])
{
	if (argc < 2) {
		kprintf("usage: slab_cache <address>\n");
		return 0;
	}

	ObjectCache* cache = (ObjectCache*)strtoul(argv[1], NULL, 16);

	kprintf("name: %s\n", cache->name);
	kprintf("lock: %p\n", &cache->lock);
	kprintf("object_size: %lu\n", cache->object_size);
	kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle);
	kprintf("used_count: %lu\n", cache->used_count);
	kprintf("empty_count: %lu\n", cache->empty_count);
	kprintf("pressure: %lu\n", cache->pressure);
	kprintf("slab_size: %lu\n", cache->slab_size);
	kprintf("usage: %lu\n", cache->usage);
	kprintf("maximum: %lu\n", cache->maximum);
	kprintf("flags: 0x%lx\n", cache->flags);
	kprintf("cookie: %p\n", cache->cookie);
	kprintf("resize entry don't wait: %p\n", cache->resize_entry_dont_wait);
	kprintf("resize entry can wait: %p\n", cache->resize_entry_can_wait);

	return 0;
}


// #pragma mark -


void
request_memory_manager_maintenance()
{
	MutexLocker locker(sMaintenanceLock);
	sMaintenanceCondition.NotifyAll();
}


// #pragma mark -


static void
delete_object_cache_internal(object_cache* cache)
{
	if (!(cache->flags & CACHE_NO_DEPOT))
		object_depot_destroy(&cache->depot, 0);

	mutex_lock(&cache->lock);

	if (!cache->full.IsEmpty())
		panic("cache destroy: still has full slabs");

	if (!cache->partial.IsEmpty())
		panic("cache destroy: still has partial slabs");

	while (!cache->empty.IsEmpty())
		cache->ReturnSlab(cache->empty.RemoveHead(), 0);

	mutex_destroy(&cache->lock);
	cache->Delete();
}


static void
increase_object_reserve(ObjectCache* cache)
{
	MutexLocker locker(sMaintenanceLock);

	cache->maintenance_resize = true;

	if (!cache->maintenance_pending) {
		cache->maintenance_pending = true;
		sMaintenanceQueue.Add(cache);
		sMaintenanceCondition.NotifyAll();
	}
}


/*!	Makes sure that \a objectCount objects can be allocated.
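	The cache's lock must be held by the caller; it may be released and
	reacquired while this function waits for a concurrent resize to finish
	or creates new slabs.
	\return \c B_OK when enough free objects are available, \c B_WOULD_BLOCK
		if the calling thread is itself already resizing this cache, or
		\c B_NO_MEMORY if creating a new slab failed.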
*/
static status_t
object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
	uint32 flags)
{
	// If someone else is already adding slabs, we wait for that to be finished
	// first.
	thread_id thread = find_thread(NULL);
	while (true) {
		if (objectCount <= cache->total_objects - cache->used_count)
			return B_OK;

		ObjectCacheResizeEntry* resizeEntry = NULL;
		if (cache->resize_entry_dont_wait != NULL) {
			resizeEntry = cache->resize_entry_dont_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;
			// Note: We could still have reentered the function, i.e.
			// resize_entry_can_wait would be ours. That doesn't matter much,
			// though, since after the don't-wait thread has done its job
			// everyone will be happy.
		} else if (cache->resize_entry_can_wait != NULL) {
			resizeEntry = cache->resize_entry_can_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;

			if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0)
				break;
		} else
			break;

		ConditionVariableEntry entry;
		resizeEntry->condition.Add(&entry);

		cache->Unlock();
		entry.Wait();
		cache->Lock();
	}

	// prepare the resize entry others can wait on
	ObjectCacheResizeEntry*& resizeEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;

	ObjectCacheResizeEntry myResizeEntry;
	resizeEntry = &myResizeEntry;
	resizeEntry->condition.Init(cache, "wait for slabs");
	resizeEntry->thread = thread;

	// add new slabs until there are as many free ones as requested
	while (objectCount > cache->total_objects - cache->used_count) {
		slab* newSlab = cache->CreateSlab(flags);
		if (newSlab == NULL) {
			resizeEntry->condition.NotifyAll();
			resizeEntry = NULL;
			return B_NO_MEMORY;
		}

		cache->empty.Add(newSlab);
		cache->empty_count++;
	}

	resizeEntry->condition.NotifyAll();
	resizeEntry = NULL;

	return B_OK;
}


static void
object_cache_low_memory(void* dummy, uint32 resources, int32 level)
{
	if (level == B_NO_LOW_RESOURCE)
		return;

	MutexLocker cacheListLocker(sObjectCacheListLock);

	// Append the first cache to the end of the queue. We assume that it is
	// one of the caches that will never be deleted and thus we use it as a
	// marker.
	ObjectCache* firstCache = sObjectCaches.RemoveHead();
	sObjectCaches.Add(firstCache);
	cacheListLocker.Unlock();

	ObjectCache* cache;
	do {
		cacheListLocker.Lock();

		cache = sObjectCaches.RemoveHead();
		sObjectCaches.Add(cache);

		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_pending || cache->maintenance_in_progress) {
			// We don't want to mess with caches in maintenance.
			continue;
		}

		cache->maintenance_pending = true;
		cache->maintenance_in_progress = true;

		maintenanceLocker.Unlock();
		cacheListLocker.Unlock();

		// We are calling the reclaimer without the object cache lock
		// to give the owner a chance to return objects to the slabs.
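		// Since we have set maintenance_in_progress above,
		// delete_object_cache() will merely mark the cache for deletion
		// instead of destroying it while we are still using it; we check
		// maintenance_delete further below.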

		if (cache->reclaimer)
			cache->reclaimer(cache->cookie, level);

		MutexLocker cacheLocker(cache->lock);
		size_t minimumAllowed;

		switch (level) {
			case B_LOW_RESOURCE_NOTE:
				minimumAllowed = cache->pressure / 2 + 1;
				break;

			case B_LOW_RESOURCE_WARNING:
				cache->pressure /= 2;
				minimumAllowed = 0;
				break;

			default:
				cache->pressure = 0;
				minimumAllowed = 0;
				break;
		}

		while (cache->empty_count > minimumAllowed) {
			// make sure we respect the cache's minimum object reserve
			size_t objectsPerSlab = cache->empty.Head()->size;
			size_t freeObjects = cache->total_objects - cache->used_count;
			if (freeObjects < cache->min_object_reserve + objectsPerSlab)
				break;

			cache->ReturnSlab(cache->empty.RemoveHead(), 0);
			cache->empty_count--;
		}

		cacheLocker.Unlock();

		// Check whether in the meantime someone has really requested
		// maintenance for the cache.
		maintenanceLocker.Lock();

		if (cache->maintenance_delete) {
			delete_object_cache_internal(cache);
			continue;
		}

		cache->maintenance_in_progress = false;

		if (cache->maintenance_resize)
			sMaintenanceQueue.Add(cache);
		else
			cache->maintenance_pending = false;
	} while (cache != firstCache);
}


static status_t
object_cache_maintainer(void*)
{
	while (true) {
		MutexLocker locker(sMaintenanceLock);

		// wait for the next request
		while (sMaintenanceQueue.IsEmpty()) {
			// perform memory manager maintenance, if needed
			if (MemoryManager::MaintenanceNeeded()) {
				locker.Unlock();
				MemoryManager::PerformMaintenance();
				locker.Lock();
				continue;
			}

			ConditionVariableEntry entry;
			sMaintenanceCondition.Add(&entry);
			locker.Unlock();
			entry.Wait();
			locker.Lock();
		}

		ObjectCache* cache = sMaintenanceQueue.RemoveHead();

		while (true) {
			bool resizeRequested = cache->maintenance_resize;
			bool deleteRequested = cache->maintenance_delete;

			if (!resizeRequested && !deleteRequested) {
				cache->maintenance_pending = false;
				cache->maintenance_in_progress = false;
				break;
			}

			cache->maintenance_resize = false;
			cache->maintenance_in_progress = true;

			locker.Unlock();

			if (deleteRequested) {
				delete_object_cache_internal(cache);
				break;
			}

			// resize the cache, if necessary

			MutexLocker cacheLocker(cache->lock);

			if (resizeRequested) {
				status_t error = object_cache_reserve_internal(cache,
					cache->min_object_reserve, 0);
				if (error != B_OK) {
					dprintf("object cache resizer: Failed to resize object "
						"cache %p!\n", cache);
					break;
				}
			}

			locker.Lock();
		}
	}

	// can never get here
	return B_OK;
}


// #pragma mark - public API


object_cache*
create_object_cache(const char* name, size_t object_size, size_t alignment,
	void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor)
{
	return create_object_cache_etc(name, object_size, alignment, 0, 0, cookie,
		constructor, destructor, NULL);
}


object_cache*
create_object_cache_etc(const char* name, size_t objectSize, size_t alignment,
	size_t maximum, uint32 flags, void* cookie,
	object_cache_constructor constructor, object_cache_destructor destructor,
	object_cache_reclaimer reclaimer)
{
	ObjectCache* cache;

	if (objectSize == 0) {
		cache = NULL;
	} else if (objectSize <= 256) {
		cache = SmallObjectCache::Create(name, objectSize, alignment, maximum,
			flags, cookie, constructor, destructor, reclaimer);
	} else {
		cache = HashedObjectCache::Create(name, objectSize, alignment,
			maximum, flags, cookie, constructor, destructor, reclaimer);
	}

	if (cache != NULL) {
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Add(cache);
	}

	T(Create(name, objectSize, alignment, maximum, flags, cookie, cache));
	return cache;
}


void
delete_object_cache(object_cache* cache)
{
	T(Delete(cache));

	{
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Remove(cache);
	}

	MutexLocker cacheLocker(cache->lock);

	{
		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_in_progress) {
			// The maintainer thread is working with the cache. Just mark it
			// to be deleted.
			cache->maintenance_delete = true;
			return;
		}

		// unschedule maintenance
		if (cache->maintenance_pending)
			sMaintenanceQueue.Remove(cache);
	}

	// at this point no-one should have a reference to the cache anymore
	cacheLocker.Unlock();

	delete_object_cache_internal(cache);
}


status_t
object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
{
	MutexLocker _(cache->lock);

	if (cache->min_object_reserve == objectCount)
		return B_OK;

	cache->min_object_reserve = objectCount;

	increase_object_reserve(cache);

	return B_OK;
}


void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
	if (!(cache->flags & CACHE_NO_DEPOT)) {
		void* object = object_depot_obtain(&cache->depot);
		if (object) {
			T(Alloc(cache, flags, object));
			return object;
		}
	}

	MutexLocker _(cache->lock);
	slab* source = NULL;

	while (true) {
		source = cache->partial.Head();
		if (source != NULL)
			break;

		source = cache->empty.RemoveHead();
		if (source != NULL) {
			cache->empty_count--;
			cache->partial.Add(source);
			break;
		}

		if (object_cache_reserve_internal(cache, 1, flags) != B_OK) {
			T(Alloc(cache, flags, NULL));
			return NULL;
		}

		cache->pressure++;
	}

	ParanoiaChecker _2(source);

	object_link* link = _pop(source->free);
	source->count--;
	cache->used_count++;

	if (cache->total_objects - cache->used_count < cache->min_object_reserve)
		increase_object_reserve(cache);

	REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next,
		sizeof(void*));

	TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.",
		link_to_object(link, cache->object_size), link, source, source->count);

	if (source->count == 0) {
		cache->partial.Remove(source);
		cache->full.Add(source);
	}

	void* object = link_to_object(link, cache->object_size);
	T(Alloc(cache, flags, object));
	return object;
}


void
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
	if (object == NULL)
		return;

	T(Free(cache, object));

	if (!(cache->flags & CACHE_NO_DEPOT)) {
		if (object_depot_store(&cache->depot, object, flags))
			return;
	}

	MutexLocker _(cache->lock);
	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
}


status_t
object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
{
	if (objectCount == 0)
		return B_OK;

	T(Reserve(cache, objectCount, flags));

	MutexLocker _(cache->lock);
	return object_cache_reserve_internal(cache, objectCount, flags);
}


void
object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
{
	MutexLocker _(cache->lock);
	*_allocatedMemory = cache->usage;
}


void
slab_init(kernel_args* args)
{
	MemoryManager::Init(args);

	new (&sObjectCaches) ObjectCacheList();

	block_allocator_init_boot();

	add_debugger_command("slabs", dump_slabs, "list all object caches");
	add_debugger_command("slab_cache", dump_cache_info,
		"dump information about a specific object cache");
}


void
slab_init_post_area()
{
	MemoryManager::InitPostArea();
}


void
slab_init_post_sem()
{
	register_low_resource_handler(object_cache_low_memory, NULL,
		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY
			| B_KERNEL_RESOURCE_ADDRESS_SPACE, 5);

	block_allocator_init_rest();
}


void
slab_init_post_thread()
{
	new(&sMaintenanceQueue) MaintenanceQueue;
	sMaintenanceCondition.Init(&sMaintenanceQueue, "object cache maintainer");

	thread_id objectCacheResizer = spawn_kernel_thread(object_cache_maintainer,
		"object cache resizer", B_URGENT_PRIORITY, NULL);
	if (objectCacheResizer < 0) {
		panic("slab_init_post_thread(): failed to spawn object cache resizer "
			"thread\n");
		return;
	}

	send_signal_etc(objectCacheResizer, SIGCONT, B_DO_NOT_RESCHEDULE);
}
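

// A minimal usage sketch for the public API above; "my_object" and the
// surrounding module functions are hypothetical, and error handling is kept
// to the bare minimum:
//
//	static object_cache* sMyCache;
//
//	status_t
//	my_module_init()
//	{
//		sMyCache = create_object_cache("my objects", sizeof(my_object), 0,
//			NULL, NULL, NULL);
//		return sMyCache != NULL ? B_OK : B_NO_MEMORY;
//	}
//
//	void
//	my_module_work()
//	{
//		my_object* object = (my_object*)object_cache_alloc(sMyCache, 0);
//		if (object == NULL)
//			return;
//		// ... use the object ...
//		object_cache_free(sMyCache, object, 0);
//	}
//
//	void
//	my_module_uninit()
//	{
//		delete_object_cache(sMyCache);
//	}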