/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <vm/VMCache.h>

#include <stddef.h>
#include <stdlib.h>

#include <algorithm>

#include <arch/cpu.h>
#include <condition_variable.h>
#include <heap.h>
#include <int.h>
#include <kernel.h>
#include <slab/Slab.h>
#include <smp.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>

// needed for the factory only
#include "VMAnonymousCache.h"
#include "VMAnonymousNoSwapCache.h"
#include "VMDeviceCache.h"
#include "VMNullCache.h"
#include "../cache/vnode_store.h"


//#define TRACE_VM_CACHE
#ifdef TRACE_VM_CACHE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#if DEBUG_CACHE_LIST
VMCache* gDebugCacheList;
#endif
static mutex sCacheListLock = MUTEX_INITIALIZER("global VMCache list");
	// The lock is also needed when the debug feature is disabled.

ObjectCache* gCacheRefObjectCache;
#if ENABLE_SWAP_SUPPORT
ObjectCache* gAnonymousCacheObjectCache;
#endif
ObjectCache* gAnonymousNoSwapCacheObjectCache;
ObjectCache* gVnodeCacheObjectCache;
ObjectCache* gDeviceCacheObjectCache;
ObjectCache* gNullCacheObjectCache;


struct VMCache::PageEventWaiter {
	Thread*				thread;
	PageEventWaiter*	next;
	vm_page*			page;
	uint32				events;
};


#if VM_CACHE_TRACING

namespace VMCacheTracing {

class VMCacheTraceEntry : public AbstractTraceEntry {
	public:
		VMCacheTraceEntry(VMCache* cache)
			:
			fCache(cache)
		{
#if VM_CACHE_TRACING_STACK_TRACE
			fStackTrace = capture_tracing_stack_trace(
				VM_CACHE_TRACING_STACK_TRACE, 0, true);
				// Don't capture userland stack trace to avoid potential
				// deadlocks.
#endif
		}

#if VM_CACHE_TRACING_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

		VMCache* Cache() const
		{
			return fCache;
		}

	protected:
		VMCache*	fCache;
#if VM_CACHE_TRACING_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};


class Create : public VMCacheTraceEntry {
	public:
		Create(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache create: -> cache: %p", fCache);
		}
};


class Delete : public VMCacheTraceEntry {
	public:
		Delete(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache delete: cache: %p", fCache);
		}
};


class SetMinimalCommitment : public VMCacheTraceEntry {
	public:
		SetMinimalCommitment(VMCache* cache, off_t commitment)
			:
			VMCacheTraceEntry(cache),
			fOldCommitment(cache->committed_size),
			fCommitment(commitment)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache set min commitment: cache: %p, "
				"commitment: %" B_PRIdOFF " -> %" B_PRIdOFF, fCache,
				fOldCommitment, fCommitment);
		}

	private:
		off_t	fOldCommitment;
		off_t	fCommitment;
};


class Resize : public VMCacheTraceEntry {
	public:
		Resize(VMCache* cache, off_t size)
			:
			VMCacheTraceEntry(cache),
			fOldSize(cache->virtual_end),
			fSize(size)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache resize: cache: %p, size: %" B_PRIdOFF " -> %"
				B_PRIdOFF, fCache, fOldSize, fSize);
		}

	private:
		off_t	fOldSize;
		off_t	fSize;
};


class Rebase : public VMCacheTraceEntry {
	public:
		Rebase(VMCache* cache, off_t base)
			:
			VMCacheTraceEntry(cache),
			fOldBase(cache->virtual_base),
			fBase(base)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache rebase: cache: %p, base: %lld -> %lld", fCache,
				fOldBase, fBase);
		}

	private:
		off_t	fOldBase;
		off_t	fBase;
};


class AddConsumer : public VMCacheTraceEntry {
	public:
		AddConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
				fConsumer);
		}

		VMCache* Consumer() const
		{
			return fConsumer;
		}

	private:
		VMCache*	fConsumer;
};


class RemoveConsumer : public VMCacheTraceEntry {
	public:
		RemoveConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class Merge : public VMCacheTraceEntry {
	public:
		Merge(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class InsertArea : public VMCacheTraceEntry {
	public:
		InsertArea(VMCache* cache, VMArea* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
				fArea);
		}

		VMArea* Area() const
		{
			return fArea;
		}

	private:
		VMArea*	fArea;
};


class RemoveArea : public VMCacheTraceEntry {
	public:
		RemoveArea(VMCache* cache, VMArea* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
				fArea);
		}

	private:
		VMArea*	fArea;
};

}	// namespace VMCacheTracing

#	define T(x) new(std::nothrow) VMCacheTracing::x;

#	if VM_CACHE_TRACING >= 2

namespace VMCacheTracing {

class InsertPage : public VMCacheTraceEntry {
	public:
		InsertPage(VMCache* cache, vm_page* page, off_t offset)
			:
			VMCacheTraceEntry(cache),
			fPage(page),
			fOffset(offset)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert page: cache: %p, page: %p, offset: %"
				B_PRIdOFF, fCache, fPage, fOffset);
		}

	private:
		vm_page*	fPage;
		off_t		fOffset;
};


class RemovePage : public VMCacheTraceEntry {
	public:
		RemovePage(VMCache* cache, vm_page* page)
			:
			VMCacheTraceEntry(cache),
			fPage(page)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
				fPage);
		}

	private:
		vm_page*	fPage;
};

}	// namespace VMCacheTracing

#	define T2(x) new(std::nothrow) VMCacheTracing::x;
#	else
#		define T2(x) ;
#	endif
#else
#	define T(x) ;
#	define T2(x) ;
#endif


// #pragma mark - debugger commands


#if VM_CACHE_TRACING


static void*
cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
{
	using namespace VMCacheTracing;

	// find the previous "insert area" entry for the given area
	TraceEntryIterator iterator = baseIterator;
	TraceEntry* entry = iterator.Current();
	while (entry != NULL) {
		if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
			if (insertAreaEntry->Area() == area)
				return insertAreaEntry->Cache();
		}

		entry = iterator.Previous();
	}

	return NULL;
}


static void*
cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
{
	using namespace VMCacheTracing;

	// find the previous "add consumer" or "create" entry for the given cache
	TraceEntryIterator iterator = baseIterator;
	TraceEntry* entry = iterator.Current();
	while (entry != NULL) {
		if (Create* createEntry = dynamic_cast<Create*>(entry)) {
			if (createEntry->Cache() == cache)
				return NULL;
		} else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
			if (addEntry->Consumer() == cache)
				return addEntry->Cache();
		}

		entry = iterator.Previous();
	}

	return NULL;
}


static int
command_cache_stack(int argc, char** argv)
{
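	// Arguments: an optional "area" keyword, followed by the cache (or area)
	// address and the index of the tracing entry at which to start the
	// backwards search.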
	if (argc < 3 || argc > 4) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	bool isArea = false;

	int argi = 1;
	if (argc == 4) {
		if (strcmp(argv[argi], "area") != 0) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}

		argi++;
		isArea = true;
	}

	uint64 addressValue;
	uint64 debugEntryIndex;
	if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
		|| !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
		return 0;
	}

	TraceEntryIterator baseIterator;
	if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
		kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
		return 0;
	}

	void* address = (void*)(addr_t)addressValue;

	kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
		isArea ? "area" : "cache", address, debugEntryIndex);
	if (isArea) {
		address = cache_stack_find_area_cache(baseIterator, address);
		if (address == NULL) {
			kprintf("  cache not found\n");
			return 0;
		}
	}

	while (address != NULL) {
		kprintf("  %p\n", address);
		address = cache_stack_find_consumer(baseIterator, address);
	}

	return 0;
}


#endif	// VM_CACHE_TRACING


// #pragma mark -


status_t
vm_cache_init(kernel_args* args)
{
	// Create object caches for the structures we allocate here.
	gCacheRefObjectCache = create_object_cache("cache refs", sizeof(VMCacheRef),
		0, NULL, NULL, NULL);
#if ENABLE_SWAP_SUPPORT
	gAnonymousCacheObjectCache = create_object_cache("anon caches",
		sizeof(VMAnonymousCache), 0, NULL, NULL, NULL);
#endif
	gAnonymousNoSwapCacheObjectCache = create_object_cache(
		"anon no-swap caches", sizeof(VMAnonymousNoSwapCache), 0, NULL, NULL,
		NULL);
	gVnodeCacheObjectCache = create_object_cache("vnode caches",
		sizeof(VMVnodeCache), 0, NULL, NULL, NULL);
	gDeviceCacheObjectCache = create_object_cache("device caches",
		sizeof(VMDeviceCache), 0, NULL, NULL, NULL);
	gNullCacheObjectCache = create_object_cache("null caches",
		sizeof(VMNullCache), 0, NULL, NULL, NULL);

	if (gCacheRefObjectCache == NULL
#if ENABLE_SWAP_SUPPORT
		|| gAnonymousCacheObjectCache == NULL
#endif
		|| gAnonymousNoSwapCacheObjectCache == NULL
		|| gVnodeCacheObjectCache == NULL
		|| gDeviceCacheObjectCache == NULL
		|| gNullCacheObjectCache == NULL) {
		panic("vm_cache_init(): Failed to create object caches!");
		return B_NO_MEMORY;
	}

	return B_OK;
}


void
vm_cache_init_post_heap()
{
#if VM_CACHE_TRACING
	add_debugger_command_etc("cache_stack", &command_cache_stack,
		"List the ancestors (sources) of a VMCache at the time given by "
			"tracing entry index",
		"[ \"area\" ] <address> <tracing entry index>\n"
		"All ancestors (sources) of a given VMCache at the time given by the\n"
		"tracing entry index are listed. If \"area\" is given the supplied\n"
		"address is an area instead of a cache address. The listing will\n"
		"start with the area's cache at that point.\n",
		0);
#endif	// VM_CACHE_TRACING
}


VMCache*
vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
{
	mutex_lock(&sCacheListLock);

	while (dontWait) {
		VMCacheRef* cacheRef = page->CacheRef();
		if (cacheRef == NULL) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		VMCache* cache = cacheRef->cache;
		if (!cache->TryLock()) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		if (cacheRef == page->CacheRef()) {
			mutex_unlock(&sCacheListLock);
			cache->AcquireRefLocked();
			return cache;
		}

		// the cache changed in the meantime
		cache->Unlock();
	}

	while (true) {
		VMCacheRef* cacheRef = page->CacheRef();
		if (cacheRef == NULL) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		VMCache* cache = cacheRef->cache;
		if (!cache->SwitchLock(&sCacheListLock)) {
			// cache has been deleted
			mutex_lock(&sCacheListLock);
			continue;
		}

		mutex_lock(&sCacheListLock);
		if (cache == page->Cache()) {
			mutex_unlock(&sCacheListLock);
			cache->AcquireRefLocked();
			return cache;
		}

		// the cache changed in the meantime
		cache->Unlock();
	}
}


// #pragma mark - VMCacheRef


VMCacheRef::VMCacheRef(VMCache* cache)
	:
	cache(cache),
	ref_count(1)
{
}


// #pragma mark - VMCache


bool
VMCache::_IsMergeable() const
{
	return areas == NULL && temporary && !consumers.IsEmpty()
		&& consumers.Head() == consumers.Tail();
}


VMCache::VMCache()
	:
	fCacheRef(NULL)
{
}


VMCache::~VMCache()
{
	object_cache_delete(gCacheRefObjectCache, fCacheRef);
}


status_t
VMCache::Init(uint32 cacheType, uint32 allocationFlags)
{
	mutex_init(&fLock, "VMCache");

	areas = NULL;
	fRefCount = 1;
	source = NULL;
	virtual_base = 0;
	virtual_end = 0;
	committed_size = 0;
	temporary = 0;
	page_count = 0;
	fWiredPagesCount = 0;
	type = cacheType;
	fPageEventWaiters = NULL;

#if DEBUG_CACHE_LIST
	debug_previous = NULL;
	debug_next = NULL;
		// initialize in case the following fails
#endif

	fCacheRef = new(gCacheRefObjectCache, allocationFlags) VMCacheRef(this);
	if (fCacheRef == NULL)
		return B_NO_MEMORY;

#if DEBUG_CACHE_LIST
	mutex_lock(&sCacheListLock);

	if (gDebugCacheList != NULL)
		gDebugCacheList->debug_previous = this;
	debug_next = gDebugCacheList;
	gDebugCacheList = this;

	mutex_unlock(&sCacheListLock);
#endif

	return B_OK;
}


void
VMCache::Delete()
{
	if (areas != NULL)
		panic("cache %p to be deleted still has areas", this);
	if (!consumers.IsEmpty())
		panic("cache %p to be deleted still has consumers", this);

	T(Delete(this));

	// free all of the pages in the cache
	while (vm_page* page = pages.Root()) {
		if (!page->mappings.IsEmpty() || page->WiredCount() != 0) {
			panic("remove page %p from cache %p: page still has mappings!\n"
				"@!page %p; cache %p", page, this, page, this);
		}

		// remove it
		pages.Remove(page);
		page->SetCacheRef(NULL);

		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
			page->physical_page_number));
		DEBUG_PAGE_ACCESS_START(page);
		vm_page_free(this, page);
	}

	// remove the ref to the source
	if (source)
		source->_RemoveConsumer(this);

	// We lock and unlock the sCacheListLock, even if the DEBUG_CACHE_LIST is
	// not enabled. This synchronization point is needed for
	// vm_cache_acquire_locked_page_cache().
	mutex_lock(&sCacheListLock);

#if DEBUG_CACHE_LIST
	if (debug_previous)
		debug_previous->debug_next = debug_next;
	if (debug_next)
		debug_next->debug_previous = debug_previous;
	if (this == gDebugCacheList)
		gDebugCacheList = debug_next;
#endif

	mutex_destroy(&fLock);

	mutex_unlock(&sCacheListLock);

	DeleteObject();
}


void
VMCache::Unlock(bool consumerLocked)
{
	while (fRefCount == 1 && _IsMergeable()) {
		VMCache* consumer = consumers.Head();
		if (consumerLocked) {
			_MergeWithOnlyConsumer();
		} else if (consumer->TryLock()) {
			_MergeWithOnlyConsumer();
			consumer->Unlock();
		} else {
			// Someone else has locked the consumer ATM. Unlock this cache and
			// wait for the consumer lock. Increment the cache's ref count
			// temporarily, so that no one else will try what we are doing or
			// delete the cache.
			fRefCount++;
			bool consumerLockedTemp = consumer->SwitchLock(&fLock);
			Lock();
			fRefCount--;

			if (consumerLockedTemp) {
				if (fRefCount == 1 && _IsMergeable()
						&& consumer == consumers.Head()) {
					// nothing has changed in the meantime -- merge
					_MergeWithOnlyConsumer();
				}

				consumer->Unlock();
			}
		}
	}

	if (fRefCount == 0) {
		// delete this cache
		Delete();
	} else
		mutex_unlock(&fLock);
}


vm_page*
VMCache::LookupPage(off_t offset)
{
	AssertLocked();

	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));

#if KDEBUG
	if (page != NULL && page->Cache() != this)
		panic("page %p not in cache %p\n", page, this);
#endif

	return page;
}


void
VMCache::InsertPage(vm_page* page, off_t offset)
{
	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %" B_PRIdOFF "\n",
		this, page, offset));
	AssertLocked();

	if (page->CacheRef() != NULL) {
		panic("insert page %p into cache %p: page cache is set to %p\n",
			page, this, page->Cache());
	}

	T2(InsertPage(this, page, offset));

	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
	page_count++;
	page->SetCacheRef(fCacheRef);

#if KDEBUG
	vm_page* otherPage = pages.Lookup(page->cache_offset);
	if (otherPage != NULL) {
		panic("VMCache::InsertPage(): there's already page %p with cache "
			"offset %" B_PRIuPHYSADDR " in cache %p; inserting page %p",
			otherPage, page->cache_offset, this, page);
	}
#endif	// KDEBUG

	pages.Insert(page);

	if (page->WiredCount() > 0)
		IncrementWiredPagesCount();
}


/*!	Removes the vm_page from this cache. Of course, the page must
	really be in this cache or evil things will happen.
	The cache lock must be held.
*/
void
VMCache::RemovePage(vm_page* page)
{
	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
	AssertLocked();

	if (page->Cache() != this) {
		panic("remove page %p from cache %p: page cache is set to %p\n", page,
			this, page->Cache());
	}

	T2(RemovePage(this, page));

	pages.Remove(page);
	page_count--;
	page->SetCacheRef(NULL);

	if (page->WiredCount() > 0)
		DecrementWiredPagesCount();
}


/*!	Moves the given page from its current cache and inserts it into this
	cache at the given offset.
	Both caches must be locked.
*/
void
VMCache::MovePage(vm_page* page, off_t offset)
{
	VMCache* oldCache = page->Cache();

	AssertLocked();
	oldCache->AssertLocked();

	// remove from old cache
	oldCache->pages.Remove(page);
	oldCache->page_count--;
	T2(RemovePage(oldCache, page));

	// change the offset
	page->cache_offset = offset >> PAGE_SHIFT;

	// insert here
	pages.Insert(page);
	page_count++;
	page->SetCacheRef(fCacheRef);

	if (page->WiredCount() > 0) {
		IncrementWiredPagesCount();
		oldCache->DecrementWiredPagesCount();
	}

	T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
}


/*!	Moves the given page from its current cache and inserts it into this
	cache at the same offset.
	Both caches must be locked.
*/
void
VMCache::MovePage(vm_page* page)
{
	MovePage(page, page->cache_offset << PAGE_SHIFT);
}


/*!	Moves all pages from the given cache to this one.
	Both caches must be locked. This cache must be empty.
*/
void
VMCache::MoveAllPages(VMCache* fromCache)
{
	AssertLocked();
	fromCache->AssertLocked();
	ASSERT(page_count == 0);

	std::swap(fromCache->pages, pages);
	page_count = fromCache->page_count;
	fromCache->page_count = 0;
	fWiredPagesCount = fromCache->fWiredPagesCount;
	fromCache->fWiredPagesCount = 0;

	// swap the VMCacheRefs
	mutex_lock(&sCacheListLock);
	std::swap(fCacheRef, fromCache->fCacheRef);
	fCacheRef->cache = this;
	fromCache->fCacheRef->cache = fromCache;
	mutex_unlock(&sCacheListLock);

#if VM_CACHE_TRACING >= 2
	for (VMCachePagesTree::Iterator it = pages.GetIterator();
			vm_page* page = it.Next();) {
		T2(RemovePage(fromCache, page));
		T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
	}
#endif
}


/*!	Waits until one or more events have happened for a given page which
	belongs to this cache.
	The cache must be locked. It will be unlocked by the method. \a relock
	specifies whether the method shall re-lock the cache before returning.
	\param page The page for which to wait.
	\param events The mask of events the caller is interested in.
	\param relock If \c true, the cache will be locked when returning,
		otherwise it won't be locked.
*/
void
VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
{
	PageEventWaiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = fPageEventWaiters;
	waiter.page = page;
	waiter.events = events;

	fPageEventWaiters = &waiter;

	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER,
		"cache page events");

	Unlock();
	thread_block();

	if (relock)
		Lock();
}


/*!	Makes this cache the source of the \a consumer cache,
	and adds the \a consumer to its list.
	This also grabs a reference to the source cache.
	Assumes you have the cache and the consumer's lock held.
*/
void
VMCache::AddConsumer(VMCache* consumer)
{
	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
	AssertLocked();
	consumer->AssertLocked();

	T(AddConsumer(this, consumer));

	consumer->source = this;
	consumers.Add(consumer);

	AcquireRefLocked();
	AcquireStoreRef();
}


/*!	Adds the \a area to this cache.
	Assumes you have the cache locked.
*/
status_t
VMCache::InsertAreaLocked(VMArea* area)
{
	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
	AssertLocked();

	T(InsertArea(this, area));

	area->cache_next = areas;
	if (area->cache_next)
		area->cache_next->cache_prev = area;
	area->cache_prev = NULL;
	areas = area;

	AcquireStoreRef();

	return B_OK;
}


status_t
VMCache::RemoveArea(VMArea* area)
{
	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));

	T(RemoveArea(this, area));

	// We release the store reference first, since otherwise we would reverse
	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
	// Also cf. _RemoveConsumer().
	ReleaseStoreRef();

	AutoLocker<VMCache> locker(this);

	if (area->cache_prev)
		area->cache_prev->cache_next = area->cache_next;
	if (area->cache_next)
		area->cache_next->cache_prev = area->cache_prev;
	if (areas == area)
		areas = area->cache_next;

	return B_OK;
}


/*!	Transfers the areas from \a fromCache to this cache. This cache must not
	have areas yet. Both caches must be locked.
*/
void
VMCache::TransferAreas(VMCache* fromCache)
{
	AssertLocked();
	fromCache->AssertLocked();
	ASSERT(areas == NULL);

	areas = fromCache->areas;
	fromCache->areas = NULL;

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		area->cache = this;
		AcquireRefLocked();
		fromCache->ReleaseRefLocked();

		T(RemoveArea(fromCache, area));
		T(InsertArea(this, area));
	}
}


uint32
VMCache::CountWritableAreas(VMArea* ignoreArea) const
{
	uint32 count = 0;

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		if (area != ignoreArea
			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
			count++;
		}
	}

	return count;
}


status_t
VMCache::WriteModified()
{
	TRACE(("VMCache::WriteModified(cache = %p)\n", this));

	if (temporary)
		return B_OK;

	Lock();
	status_t status = vm_page_write_modified_pages(this);
	Unlock();

	return status;
}


/*!	Commits the memory to the store if the \a commitment is larger than
	what's committed already.
	Assumes you have the cache's lock held.
*/
status_t
VMCache::SetMinimalCommitment(off_t commitment, int priority)
{
	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %" B_PRIdOFF
		")\n", this, commitment));
	AssertLocked();

	T(SetMinimalCommitment(this, commitment));

	status_t status = B_OK;

	// If we don't have enough committed space to cover through to the new end
	// of the area...
	if (committed_size < commitment) {
		// ToDo: should we check if the cache's virtual size is large
		//	enough for a commitment of that size?

		// try to commit more memory
		status = Commit(commitment, priority);
	}

	return status;
}


bool
VMCache::_FreePageRange(VMCachePagesTree::Iterator it,
	page_num_t* toPage = NULL)
{
	for (vm_page* page = it.Next();
		page != NULL && (toPage == NULL || page->cache_offset < *toPage);
		page = it.Next()) {

		if (page->busy) {
			if (page->busy_writing) {
				// We cannot wait for the page to become available
				// as we might cause a deadlock this way
				page->busy_writing = false;
					// this will notify the writer to free the page
				continue;
			}

			// wait for page to become unbusy
			WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
			return true;
		}

		// remove the page and put it into the free queue
		DEBUG_PAGE_ACCESS_START(page);
		vm_remove_all_page_mappings(page);
		ASSERT(page->WiredCount() == 0);
			// TODO: Find a real solution! If the page is wired
			// temporarily (e.g. by lock_memory()), we actually must not
			// unmap it!
		RemovePage(page);
			// Note: When iterating through an IteratableSplayTree
			// removing the current node is safe.

		vm_page_free(this, page);
	}

	return false;
}


/*!	This function updates the size field of the cache.
	If needed, it will free up all pages that don't belong to the cache
	anymore. The cache lock must be held when you call it.
	Since removed pages no longer belong to the cache, they are not written
	back before they are removed.

	Note, this function may temporarily release the cache lock in case it
	has to wait for busy pages.
*/
status_t
VMCache::Resize(off_t newSize, int priority)
{
	TRACE(("VMCache::Resize(cache %p, newSize %" B_PRIdOFF ") old size %"
		B_PRIdOFF "\n", this, newSize, this->virtual_end));
	this->AssertLocked();

	T(Resize(this, newSize));

	status_t status = Commit(newSize - virtual_base, priority);
	if (status != B_OK)
		return status;

	page_num_t oldPageCount = (page_num_t)((virtual_end + B_PAGE_SIZE - 1)
		>> PAGE_SHIFT);
	page_num_t newPageCount = (page_num_t)((newSize + B_PAGE_SIZE - 1)
		>> PAGE_SHIFT);

	if (newPageCount < oldPageCount) {
		// we need to remove all pages in the cache outside of the new virtual
		// size
		while (_FreePageRange(pages.GetIterator(newPageCount, true, true)))
			;
	}

	virtual_end = newSize;
	return B_OK;
}


/*!	This function updates the virtual_base field of the cache.
	If needed, it will free up all pages that don't belong to the cache
	anymore. The cache lock must be held when you call it.
	Since removed pages no longer belong to the cache, they are not written
	back before they are removed.

	Note, this function may temporarily release the cache lock in case it
	has to wait for busy pages.
*/
status_t
VMCache::Rebase(off_t newBase, int priority)
{
	TRACE(("VMCache::Rebase(cache %p, newBase %Ld) old base %Ld\n",
		this, newBase, this->virtual_base));
	this->AssertLocked();

	T(Rebase(this, newBase));

	status_t status = Commit(virtual_end - newBase, priority);
	if (status != B_OK)
		return status;

	page_num_t basePage = (page_num_t)(newBase >> PAGE_SHIFT);

	if (newBase > virtual_base) {
		// we need to remove all pages in the cache outside of the new virtual
		// base
		while (_FreePageRange(pages.GetIterator(), &basePage))
			;
	}

	virtual_base = newBase;
	return B_OK;
}


/*!	Moves pages in the given range from the source cache into this cache. Both
	caches must be locked.
*/
status_t
VMCache::Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset)
{
	page_num_t startPage = offset >> PAGE_SHIFT;
	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
	off_t offsetChange = newOffset - offset;

	VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true,
		true);
	for (vm_page* page = it.Next();
			page != NULL && page->cache_offset < endPage;
			page = it.Next()) {
		MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange);
	}

	return B_OK;
}


/*!	Discards pages in the given range. */
status_t
VMCache::Discard(off_t offset, off_t size)
{
	page_num_t startPage = offset >> PAGE_SHIFT;
	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
	while (_FreePageRange(pages.GetIterator(startPage, true, true), &endPage))
		;

	return B_OK;
}


/*!	You have to call this function with the VMCache lock held. */
status_t
VMCache::FlushAndRemoveAllPages()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	while (page_count > 0) {
		// write back modified pages
		status_t status = vm_page_write_modified_pages(this);
		if (status != B_OK)
			return status;

		// remove pages
		for (VMCachePagesTree::Iterator it = pages.GetIterator();
				vm_page* page = it.Next();) {
			if (page->busy) {
				// wait for page to become unbusy
				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);

				// restart from the start of the list
				it = pages.GetIterator();
				continue;
			}

			// skip modified pages -- they will be written back in the next
			// iteration
			if (page->State() == PAGE_STATE_MODIFIED)
				continue;

			// We can't remove mapped pages.
			if (page->IsMapped())
				return B_BUSY;

			DEBUG_PAGE_ACCESS_START(page);
			RemovePage(page);
			vm_page_free(this, page);
				// Note: When iterating through an IteratableSplayTree
				// removing the current node is safe.
		}
	}

	return B_OK;
}


status_t
VMCache::Commit(off_t size, int priority)
{
	committed_size = size;
	return B_OK;
}


/*!	Returns whether the cache's underlying backing store could deliver the
	page at the given offset.

	Basically it returns whether a Read() at \a offset would at least read a
	partial page (assuming that no unexpected errors occur or the situation
	changes in the meantime).
*/
bool
VMCache::HasPage(off_t offset)
{
	// In accordance with Fault() the default implementation doesn't have a
	// backing store and doesn't allow faults.
	return false;
}


status_t
VMCache::Read(off_t offset, const generic_io_vec *vecs, size_t count,
	uint32 flags, generic_size_t *_numBytes)
{
	return B_ERROR;
}


status_t
VMCache::Write(off_t offset, const generic_io_vec *vecs, size_t count,
	uint32 flags, generic_size_t *_numBytes)
{
	return B_ERROR;
}


status_t
VMCache::WriteAsync(off_t offset, const generic_io_vec* vecs, size_t count,
	generic_size_t numBytes, uint32 flags, AsyncIOCallback* callback)
{
	// Not supported, fall back to the synchronous hook.
	generic_size_t transferred = numBytes;
	status_t error = Write(offset, vecs, count, flags, &transferred);

	if (callback != NULL)
		callback->IOFinished(error, transferred != numBytes, transferred);

	return error;
}


/*!	\brief Returns whether the cache can write the page at the given offset.

	The cache must be locked when this function is invoked.

	@param offset The page offset.
	@return \c true, if the page can be written, \c false otherwise.
*/
bool
VMCache::CanWritePage(off_t offset)
{
	return false;
}


status_t
VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
{
	return B_BAD_ADDRESS;
}


void
VMCache::Merge(VMCache* source)
{
	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
			vm_page* page = it.Next();) {
		// Note: Removing the current node while iterating through an
		// IteratableSplayTree is safe.
		vm_page* consumerPage = LookupPage(
			(off_t)page->cache_offset << PAGE_SHIFT);
		if (consumerPage == NULL) {
			// the page is not yet in the consumer cache - move it upwards
			MovePage(page);
		}
	}
}


status_t
VMCache::AcquireUnreferencedStoreRef()
{
	return B_OK;
}


void
VMCache::AcquireStoreRef()
{
}


void
VMCache::ReleaseStoreRef()
{
}


/*!	Kernel debugger version of HasPage().
	Does not do any locking.
*/
bool
VMCache::DebugHasPage(off_t offset)
{
	// default that works for all subclasses that don't lock anyway
	return HasPage(offset);
}


/*!	Kernel debugger version of LookupPage().
	Does not do any locking.
*/
vm_page*
VMCache::DebugLookupPage(off_t offset)
{
	return pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
}


void
VMCache::Dump(bool showPages) const
{
	kprintf("CACHE %p:\n", this);
	kprintf("  ref_count:    %" B_PRId32 "\n", RefCount());
	kprintf("  source:       %p\n", source);
	kprintf("  type:         %s\n", vm_cache_type_to_string(type));
	kprintf("  virtual_base: 0x%" B_PRIx64 "\n", virtual_base);
	kprintf("  virtual_end:  0x%" B_PRIx64 "\n", virtual_end);
	kprintf("  temporary:    %" B_PRIu32 "\n", temporary);
	kprintf("  lock:         %p\n", &fLock);
#if KDEBUG
	kprintf("  lock.holder:  %" B_PRId32 "\n", fLock.holder);
#endif
	kprintf("  areas:\n");

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		kprintf("    area 0x%" B_PRIx32 ", %s\n", area->id, area->name);
		kprintf("\tbase_addr:  0x%lx, size: 0x%lx\n", area->Base(),
			area->Size());
		kprintf("\tprotection: 0x%" B_PRIx32 "\n", area->protection);
		kprintf("\towner:      0x%" B_PRIx32 "\n", area->address_space->ID());
	}

	kprintf("  consumers:\n");
	for (ConsumerList::ConstIterator it = consumers.GetIterator();
			VMCache* consumer = it.Next();) {
		kprintf("\t%p\n", consumer);
	}

	kprintf("  pages:\n");
	if (showPages) {
		for (VMCachePagesTree::ConstIterator it = pages.GetIterator();
				vm_page* page = it.Next();) {
			if (!vm_page_is_dummy(page)) {
				kprintf("\t%p ppn %#" B_PRIxPHYSADDR " offset %#" B_PRIxPHYSADDR
					" state %u (%s) wired_count %u\n", page,
					page->physical_page_number, page->cache_offset,
					page->State(), page_state_to_string(page->State()),
					page->WiredCount());
			} else {
				kprintf("\t%p DUMMY PAGE state %u (%s)\n",
					page, page->State(), page_state_to_string(page->State()));
			}
		}
	} else
		kprintf("\t%" B_PRIu32 " in cache\n", page_count);
}


/*!	Wakes up threads waiting for page events.
	\param page The page for which events occurred.
	\param events The mask of events that occurred.
*/
void
VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
{
	PageEventWaiter** it = &fPageEventWaiters;
	while (PageEventWaiter* waiter = *it) {
		if (waiter->page == page && (waiter->events & events) != 0) {
			// remove from list and unblock
			*it = waiter->next;
			thread_unblock(waiter->thread, B_OK);
		} else
			it = &waiter->next;
	}
}


/*!	Merges the given cache with its only consumer.
	The caller must hold both the cache's and the consumer's lock. The method
	releases neither lock.
*/
void
VMCache::_MergeWithOnlyConsumer()
{
	VMCache* consumer = consumers.RemoveHead();

	TRACE(("merge vm cache %p (ref == %" B_PRId32 ") with vm cache %p\n",
		this, this->fRefCount, consumer));

	T(Merge(this, consumer));

	// merge the cache
	consumer->Merge(this);

	// The remaining consumer has got a new source.
	if (source != NULL) {
		VMCache* newSource = source;

		newSource->Lock();

		newSource->consumers.Remove(this);
		newSource->consumers.Add(consumer);
		consumer->source = newSource;
		source = NULL;

		newSource->Unlock();
	} else
		consumer->source = NULL;

	// Release the reference the cache's consumer owned. The consumer takes
	// over the cache's ref to its source (if any) instead.
	ReleaseRefLocked();
}


/*!	Removes the \a consumer from this cache.
	It will also release the reference to the cache owned by the consumer.
	Assumes you have the consumer's cache lock held. This cache must not be
	locked.
*/
void
VMCache::_RemoveConsumer(VMCache* consumer)
{
	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
	consumer->AssertLocked();

	T(RemoveConsumer(this, consumer));

	// Remove the store ref before locking the cache. Otherwise we'd call into
	// the VFS while holding the cache lock, which would reverse the usual
	// locking order.
	ReleaseStoreRef();

	// remove the consumer from the cache, but keep its reference until later
	Lock();
	consumers.Remove(consumer);
	consumer->source = NULL;

	ReleaseRefAndUnlock();
}


// #pragma mark - VMCacheFactory
	// TODO: Move to own source file!


/*static*/ status_t
VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
	int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
	int priority)
{
	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
	if (priority >= VM_PRIORITY_VIP)
		allocationFlags |= HEAP_PRIORITY_VIP;

#if ENABLE_SWAP_SUPPORT
	if (swappable) {
		VMAnonymousCache* cache
			= new(gAnonymousCacheObjectCache, allocationFlags) VMAnonymousCache;
		if (cache == NULL)
			return B_NO_MEMORY;

		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
			numGuardPages, allocationFlags);
		if (error != B_OK) {
			cache->Delete();
			return error;
		}

		T(Create(cache));

		_cache = cache;
		return B_OK;
	}
#endif

	VMAnonymousNoSwapCache* cache
		= new(gAnonymousNoSwapCacheObjectCache, allocationFlags)
			VMAnonymousNoSwapCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
		numGuardPages, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
{
	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
		// Note: Vnode cache creation is never VIP.

	VMVnodeCache* cache
		= new(gVnodeCacheObjectCache, allocationFlags) VMVnodeCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(vnode, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
{
	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
		// Note: Device cache creation is never VIP.
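		// The flags above keep the allocation from blocking: it must neither
		// wait for memory to become available nor lock the kernel address
		// space.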

	VMDeviceCache* cache
		= new(gDeviceCacheObjectCache, allocationFlags) VMDeviceCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(baseAddress, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
{
	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
	if (priority >= VM_PRIORITY_VIP)
		allocationFlags |= HEAP_PRIORITY_VIP;

	VMNullCache* cache
		= new(gNullCacheObjectCache, allocationFlags) VMNullCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}
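

/*
 * Illustrative usage sketch (comment only, not compiled): a typical caller
 * obtains a cache from VMCacheFactory, which hands out the cache with one
 * reference held, and then operates on it with the cache lock held. The
 * variable names below are hypothetical.
 *
 *     VMCache* cache;
 *     if (VMCacheFactory::CreateNullCache(VM_PRIORITY_SYSTEM, cache) == B_OK) {
 *         cache->Lock();
 *         // ... insert pages, commit memory, etc. ...
 *         cache->ReleaseRefAndUnlock();
 *             // drops the creation reference; the cache is deleted once its
 *             // reference count reaches zero
 *     }
 */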