/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <vm/VMCache.h>

#include <stddef.h>
#include <stdlib.h>

#include <algorithm>

#include <arch/cpu.h>
#include <condition_variable.h>
#include <heap.h>
#include <int.h>
#include <kernel.h>
#include <slab/Slab.h>
#include <smp.h>
#include <thread.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>

// needed for the factory only
#include "VMAnonymousCache.h"
#include "VMAnonymousNoSwapCache.h"
#include "VMDeviceCache.h"
#include "VMNullCache.h"
#include "../cache/vnode_store.h"


//#define TRACE_VM_CACHE
#ifdef TRACE_VM_CACHE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#if DEBUG_CACHE_LIST
VMCache* gDebugCacheList;
#endif
static rw_lock sCacheListLock = RW_LOCK_INITIALIZER("global VMCache list");
	// The lock is also needed when the debug feature is disabled.

ObjectCache* gCacheRefObjectCache;
#if ENABLE_SWAP_SUPPORT
ObjectCache* gAnonymousCacheObjectCache;
#endif
ObjectCache* gAnonymousNoSwapCacheObjectCache;
ObjectCache* gVnodeCacheObjectCache;
ObjectCache* gDeviceCacheObjectCache;
ObjectCache* gNullCacheObjectCache;


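// Registered by WaitForPageEvents() on the waiting thread's stack and linked
// into fPageEventWaiters; _NotifyPageEvents() unlinks and wakes the waiter
// when a matching event occurs for the page.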
struct VMCache::PageEventWaiter {
	Thread*				thread;
	PageEventWaiter*	next;
	vm_page*			page;
	uint32				events;
};


#if VM_CACHE_TRACING

namespace VMCacheTracing {

class VMCacheTraceEntry : public AbstractTraceEntry {
	public:
		VMCacheTraceEntry(VMCache* cache)
			:
			fCache(cache)
		{
#if VM_CACHE_TRACING_STACK_TRACE
			fStackTrace = capture_tracing_stack_trace(
				VM_CACHE_TRACING_STACK_TRACE, 0, true);
				// Don't capture userland stack trace to avoid potential
				// deadlocks.
#endif
		}

#if VM_CACHE_TRACING_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

		VMCache* Cache() const
		{
			return fCache;
		}

	protected:
		VMCache*	fCache;
#if VM_CACHE_TRACING_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};


class Create : public VMCacheTraceEntry {
	public:
		Create(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache create: -> cache: %p", fCache);
		}
};


class Delete : public VMCacheTraceEntry {
	public:
		Delete(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache delete: cache: %p", fCache);
		}
};


class SetMinimalCommitment : public VMCacheTraceEntry {
	public:
		SetMinimalCommitment(VMCache* cache, off_t commitment)
			:
			VMCacheTraceEntry(cache),
			fOldCommitment(cache->committed_size),
			fCommitment(commitment)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache set min commitment: cache: %p, "
				"commitment: %" B_PRIdOFF " -> %" B_PRIdOFF, fCache,
				fOldCommitment, fCommitment);
		}

	private:
		off_t	fOldCommitment;
		off_t	fCommitment;
};


class Resize : public VMCacheTraceEntry {
	public:
		Resize(VMCache* cache, off_t size)
			:
			VMCacheTraceEntry(cache),
			fOldSize(cache->virtual_end),
			fSize(size)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache resize: cache: %p, size: %" B_PRIdOFF " -> %"
				B_PRIdOFF, fCache, fOldSize, fSize);
		}

	private:
		off_t	fOldSize;
		off_t	fSize;
};


class Rebase : public VMCacheTraceEntry {
	public:
		Rebase(VMCache* cache, off_t base)
			:
			VMCacheTraceEntry(cache),
			fOldBase(cache->virtual_base),
			fBase(base)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache rebase: cache: %p, base: %lld -> %lld", fCache,
				fOldBase, fBase);
		}

	private:
		off_t	fOldBase;
		off_t	fBase;
};


class AddConsumer : public VMCacheTraceEntry {
	public:
		AddConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
				fConsumer);
		}

		VMCache* Consumer() const
		{
			return fConsumer;
		}

	private:
		VMCache*	fConsumer;
};


class RemoveConsumer : public VMCacheTraceEntry {
	public:
		RemoveConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class Merge : public VMCacheTraceEntry {
	public:
		Merge(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class InsertArea : public VMCacheTraceEntry {
	public:
		InsertArea(VMCache* cache, VMArea* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
				fArea);
		}

		VMArea* Area() const
		{
			return fArea;
		}

	private:
		VMArea*	fArea;
};


class RemoveArea : public VMCacheTraceEntry {
	public:
		RemoveArea(VMCache* cache, VMArea* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
				fArea);
		}

	private:
		VMArea*	fArea;
};

}	// namespace VMCacheTracing

#	define T(x) new(std::nothrow) VMCacheTracing::x;

#	if VM_CACHE_TRACING >= 2

namespace VMCacheTracing {

class InsertPage : public VMCacheTraceEntry {
	public:
		InsertPage(VMCache* cache, vm_page* page, off_t offset)
			:
			VMCacheTraceEntry(cache),
			fPage(page),
			fOffset(offset)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert page: cache: %p, page: %p, offset: %"
				B_PRIdOFF, fCache, fPage, fOffset);
		}

	private:
		vm_page*	fPage;
		off_t		fOffset;
};


class RemovePage : public VMCacheTraceEntry {
	public:
		RemovePage(VMCache* cache, vm_page* page)
			:
			VMCacheTraceEntry(cache),
			fPage(page)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
				fPage);
		}

	private:
		vm_page*	fPage;
};

}	// namespace VMCacheTracing

#	define T2(x) new(std::nothrow) VMCacheTracing::x;
#	else
#	define T2(x) ;
#	endif
#else
#	define T(x) ;
#	define T2(x) ;
#endif


// #pragma mark - debugger commands


#if VM_CACHE_TRACING


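// Helpers for the "cache_stack" debugger command: they walk the tracing log
// backwards from a given entry to reconstruct a cache's chain of sources.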
static void*
cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
{
	using namespace VMCacheTracing;

	// find the previous "insert area" entry for the given area
	TraceEntryIterator iterator = baseIterator;
	TraceEntry* entry = iterator.Current();
	while (entry != NULL) {
		if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
			if (insertAreaEntry->Area() == area)
				return insertAreaEntry->Cache();
		}

		entry = iterator.Previous();
	}

	return NULL;
}


static void*
cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
{
	using namespace VMCacheTracing;

	// find the previous "add consumer" or "create" entry for the given cache
	TraceEntryIterator iterator = baseIterator;
	TraceEntry* entry = iterator.Current();
	while (entry != NULL) {
		if (Create* createEntry = dynamic_cast<Create*>(entry)) {
			if (createEntry->Cache() == cache)
				return NULL;
		} else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
			if (addEntry->Consumer() == cache)
				return addEntry->Cache();
		}

		entry = iterator.Previous();
	}

	return NULL;
}


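// Usage sketch (addresses and entry index are hypothetical):
//	cache_stack 0x82345678 12345        - list the sources of that cache
//	cache_stack area 0x81234567 12345   - start from the given area's cache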
static int
command_cache_stack(int argc, char** argv)
{
	if (argc < 3 || argc > 4) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	bool isArea = false;

	int argi = 1;
	if (argc == 4) {
		if (strcmp(argv[argi], "area") != 0) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}

		argi++;
		isArea = true;
	}

	uint64 addressValue;
	uint64 debugEntryIndex;
	if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
		|| !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
		return 0;
	}

	TraceEntryIterator baseIterator;
	if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
		kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
		return 0;
	}

	void* address = (void*)(addr_t)addressValue;

	kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
		isArea ? "area" : "cache", address, debugEntryIndex);
	if (isArea) {
		address = cache_stack_find_area_cache(baseIterator, address);
		if (address == NULL) {
			kprintf(" cache not found\n");
			return 0;
		}
	}

	while (address != NULL) {
		kprintf(" %p\n", address);
		address = cache_stack_find_consumer(baseIterator, address);
	}

	return 0;
}


#endif	// VM_CACHE_TRACING


// #pragma mark -


status_t
vm_cache_init(kernel_args* args)
{
	// Create object caches for the structures we allocate here.
	gCacheRefObjectCache = create_object_cache("cache refs", sizeof(VMCacheRef),
		0, NULL, NULL, NULL);
#if ENABLE_SWAP_SUPPORT
	gAnonymousCacheObjectCache = create_object_cache("anon caches",
		sizeof(VMAnonymousCache), 0, NULL, NULL, NULL);
#endif
	gAnonymousNoSwapCacheObjectCache = create_object_cache(
		"anon no-swap caches", sizeof(VMAnonymousNoSwapCache), 0, NULL, NULL,
		NULL);
	gVnodeCacheObjectCache = create_object_cache("vnode caches",
		sizeof(VMVnodeCache), 0, NULL, NULL, NULL);
	gDeviceCacheObjectCache = create_object_cache("device caches",
		sizeof(VMDeviceCache), 0, NULL, NULL, NULL);
	gNullCacheObjectCache = create_object_cache("null caches",
		sizeof(VMNullCache), 0, NULL, NULL, NULL);

	if (gCacheRefObjectCache == NULL
#if ENABLE_SWAP_SUPPORT
		|| gAnonymousCacheObjectCache == NULL
#endif
		|| gAnonymousNoSwapCacheObjectCache == NULL
		|| gVnodeCacheObjectCache == NULL
		|| gDeviceCacheObjectCache == NULL
		|| gNullCacheObjectCache == NULL) {
		panic("vm_cache_init(): Failed to create object caches!");
		return B_NO_MEMORY;
	}

	return B_OK;
}


void
vm_cache_init_post_heap()
{
#if VM_CACHE_TRACING
	add_debugger_command_etc("cache_stack", &command_cache_stack,
		"List the ancestors (sources) of a VMCache at the time given by "
			"tracing entry index",
		"[ \"area\" ] <address> <tracing entry index>\n"
		"All ancestors (sources) of a given VMCache at the time given by the\n"
		"tracing entry index are listed. If \"area\" is given the supplied\n"
		"address is an area instead of a cache address. The listing will\n"
		"start with the area's cache at that point.\n",
		0);
#endif	// VM_CACHE_TRACING
}


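/*!	Acquires a reference to the cache the given page belongs to and returns it
	locked. Returns NULL if the page currently belongs to no cache, or, if
	\a dontWait is \c true, if the cache could not be locked immediately.
	Retries if the page is moved to another cache while waiting for the lock.
*/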
VMCache*
vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
{
	rw_lock_read_lock(&sCacheListLock);

	while (true) {
		VMCacheRef* cacheRef = page->CacheRef();
		if (cacheRef == NULL) {
			rw_lock_read_unlock(&sCacheListLock);
			return NULL;
		}

		VMCache* cache = cacheRef->cache;
		if (dontWait) {
			if (!cache->TryLock()) {
				rw_lock_read_unlock(&sCacheListLock);
				return NULL;
			}
		} else {
			if (!cache->SwitchFromReadLock(&sCacheListLock)) {
				// cache has been deleted
				rw_lock_read_lock(&sCacheListLock);
				continue;
			}
			rw_lock_read_lock(&sCacheListLock);
		}

		if (cache == page->Cache()) {
			rw_lock_read_unlock(&sCacheListLock);
			cache->AcquireRefLocked();
			return cache;
		}

		// the cache changed in the meantime
		cache->Unlock();
	}
}


// #pragma mark - VMCacheRef


VMCacheRef::VMCacheRef(VMCache* cache)
	:
	cache(cache),
	ref_count(1)
{
}


// #pragma mark - VMCache


bool
VMCache::_IsMergeable() const
{
	return areas == NULL && temporary && !unmergeable
		&& !consumers.IsEmpty() && consumers.Head() == consumers.Tail();
}


VMCache::VMCache()
	:
	fCacheRef(NULL)
{
}


VMCache::~VMCache()
{
	object_cache_delete(gCacheRefObjectCache, fCacheRef);
}


status_t
VMCache::Init(uint32 cacheType, uint32 allocationFlags)
{
	mutex_init(&fLock, "VMCache");

	areas = NULL;
	fRefCount = 1;
	source = NULL;
	virtual_base = 0;
	virtual_end = 0;
	committed_size = 0;
	temporary = 0;
	unmergeable = 0;
	page_count = 0;
	fWiredPagesCount = 0;
	type = cacheType;
	fPageEventWaiters = NULL;

#if DEBUG_CACHE_LIST
	debug_previous = NULL;
	debug_next = NULL;
		// initialize in case the following fails
#endif

	fCacheRef = new(gCacheRefObjectCache, allocationFlags) VMCacheRef(this);
	if (fCacheRef == NULL)
		return B_NO_MEMORY;

#if DEBUG_CACHE_LIST
	rw_lock_write_lock(&sCacheListLock);

	if (gDebugCacheList != NULL)
		gDebugCacheList->debug_previous = this;
	debug_next = gDebugCacheList;
	gDebugCacheList = this;

	rw_lock_write_unlock(&sCacheListLock);
#endif

	return B_OK;
}


void
VMCache::Delete()
{
	if (areas != NULL)
		panic("cache %p to be deleted still has areas", this);
	if (!consumers.IsEmpty())
		panic("cache %p to be deleted still has consumers", this);

	T(Delete(this));

	// free all of the pages in the cache
	while (vm_page* page = pages.Root()) {
		if (!page->mappings.IsEmpty() || page->WiredCount() != 0) {
			panic("remove page %p from cache %p: page still has mappings!\n"
				"@!page %p; cache %p", page, this, page, this);
		}

		// remove it
		pages.Remove(page);
		page->SetCacheRef(NULL);

		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
			page->physical_page_number));
		DEBUG_PAGE_ACCESS_START(page);
		vm_page_free(this, page);
	}

	// remove the ref to the source
	if (source)
		source->_RemoveConsumer(this);

	// We lock and unlock the sCacheListLock, even if DEBUG_CACHE_LIST is not
	// enabled. This synchronization point is needed for
	// vm_cache_acquire_locked_page_cache().
	rw_lock_write_lock(&sCacheListLock);

#if DEBUG_CACHE_LIST
	if (debug_previous)
		debug_previous->debug_next = debug_next;
	if (debug_next)
		debug_next->debug_previous = debug_previous;
	if (this == gDebugCacheList)
		gDebugCacheList = debug_next;
#endif

	mutex_destroy(&fLock);

	rw_lock_write_unlock(&sCacheListLock);

	DeleteObject();
}


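/*!	Unlocks the cache. If the cache has become mergeable (cf. _IsMergeable()),
	it is first merged into its only consumer. If the reference count has
	dropped to zero, the cache is deleted. \a consumerLocked tells whether the
	caller already holds the consumer's lock.
*/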
void
VMCache::Unlock(bool consumerLocked)
{
	while (fRefCount == 1 && _IsMergeable()) {
		VMCache* consumer = consumers.Head();
		if (consumerLocked) {
			_MergeWithOnlyConsumer();
		} else if (consumer->TryLock()) {
			_MergeWithOnlyConsumer();
			consumer->Unlock();
		} else {
			// Someone else has locked the consumer ATM. Unlock this cache and
			// wait for the consumer lock. Increment the cache's ref count
			// temporarily, so that no one else will try what we are doing or
			// delete the cache.
			fRefCount++;
			bool consumerLockedTemp = consumer->SwitchLock(&fLock);
			Lock();
			fRefCount--;

			if (consumerLockedTemp) {
				if (fRefCount == 1 && _IsMergeable()
						&& consumer == consumers.Head()) {
					// nothing has changed in the meantime -- merge
					_MergeWithOnlyConsumer();
				}

				consumer->Unlock();
			}
		}
	}

	if (fRefCount == 0) {
		// delete this cache
		Delete();
	} else
		mutex_unlock(&fLock);
}


vm_page*
VMCache::LookupPage(off_t offset)
{
	AssertLocked();

	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));

#if KDEBUG
	if (page != NULL && page->Cache() != this)
		panic("page %p not in cache %p\n", page, this);
#endif

	return page;
}


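/*!	Inserts the page into this cache at the given byte offset.
	The page must not yet belong to a cache.
	The cache lock must be held.
*/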
void
VMCache::InsertPage(vm_page* page, off_t offset)
{
	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %" B_PRIdOFF "\n",
		this, page, offset));
	AssertLocked();

	if (page->CacheRef() != NULL) {
		panic("insert page %p into cache %p: page cache is set to %p\n",
			page, this, page->Cache());
	}

	T2(InsertPage(this, page, offset));

	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
	page_count++;
	page->SetCacheRef(fCacheRef);

#if KDEBUG
	vm_page* otherPage = pages.Lookup(page->cache_offset);
	if (otherPage != NULL) {
		panic("VMCache::InsertPage(): there's already page %p with cache "
			"offset %" B_PRIuPHYSADDR " in cache %p; inserting page %p",
			otherPage, page->cache_offset, this, page);
	}
#endif	// KDEBUG

	pages.Insert(page);

	if (page->WiredCount() > 0)
		IncrementWiredPagesCount();
}


/*!	Removes the vm_page from this cache. Of course, the page must
	really be in this cache or evil things will happen.
	The cache lock must be held.
*/
void
VMCache::RemovePage(vm_page* page)
{
	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
	AssertLocked();

	if (page->Cache() != this) {
		panic("remove page %p from cache %p: page cache is set to %p\n", page,
			this, page->Cache());
	}

	T2(RemovePage(this, page));

	pages.Remove(page);
	page_count--;
	page->SetCacheRef(NULL);

	if (page->WiredCount() > 0)
		DecrementWiredPagesCount();
}


/*!	Moves the given page from its current cache and inserts it into this cache
	at the given offset.
	Both caches must be locked.
*/
void
VMCache::MovePage(vm_page* page, off_t offset)
{
	VMCache* oldCache = page->Cache();

	AssertLocked();
	oldCache->AssertLocked();

	// remove from old cache
	oldCache->pages.Remove(page);
	oldCache->page_count--;
	T2(RemovePage(oldCache, page));

	// change the offset
	page->cache_offset = offset >> PAGE_SHIFT;

	// insert here
	pages.Insert(page);
	page_count++;
	page->SetCacheRef(fCacheRef);

	if (page->WiredCount() > 0) {
		IncrementWiredPagesCount();
		oldCache->DecrementWiredPagesCount();
	}

	T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
}


/*!	Moves the given page from its current cache and inserts it into this cache.
	Both caches must be locked.
*/
void
VMCache::MovePage(vm_page* page)
{
	MovePage(page, page->cache_offset << PAGE_SHIFT);
}


/*!	Moves all pages from the given cache to this one.
	Both caches must be locked. This cache must be empty.
*/
void
VMCache::MoveAllPages(VMCache* fromCache)
{
	AssertLocked();
	fromCache->AssertLocked();
	ASSERT(page_count == 0);

	std::swap(fromCache->pages, pages);
	page_count = fromCache->page_count;
	fromCache->page_count = 0;
	fWiredPagesCount = fromCache->fWiredPagesCount;
	fromCache->fWiredPagesCount = 0;

	// swap the VMCacheRefs
	rw_lock_write_lock(&sCacheListLock);
	std::swap(fCacheRef, fromCache->fCacheRef);
	fCacheRef->cache = this;
	fromCache->fCacheRef->cache = fromCache;
	rw_lock_write_unlock(&sCacheListLock);

#if VM_CACHE_TRACING >= 2
	for (VMCachePagesTree::Iterator it = pages.GetIterator();
			vm_page* page = it.Next();) {
		T2(RemovePage(fromCache, page));
		T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
	}
#endif
}


/*!	Waits until one or more events happened for a given page which belongs to
	this cache.
	The cache must be locked. It will be unlocked by the method. \a relock
	specifies whether the method shall re-lock the cache before returning.
	\param page The page for which to wait.
	\param events The mask of events the caller is interested in.
	\param relock If \c true, the cache will be locked when returning,
		otherwise it won't be locked.
*/
void
VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
{
	PageEventWaiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = fPageEventWaiters;
	waiter.page = page;
	waiter.events = events;

	fPageEventWaiters = &waiter;

	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER_OBJECT, page);

	Unlock();
	thread_block();

	if (relock)
		Lock();
}


/*!	Makes this cache the source of the \a consumer cache,
	and adds the \a consumer to its list.
	This also grabs a reference to the source cache.
	Assumes you have the cache and the consumer's lock held.
*/
void
VMCache::AddConsumer(VMCache* consumer)
{
	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
	AssertLocked();
	consumer->AssertLocked();

	T(AddConsumer(this, consumer));

	consumer->source = this;
	consumers.Add(consumer);

	AcquireRefLocked();
	AcquireStoreRef();
}


/*!	Adds the \a area to this cache.
	Assumes you have the cache locked.
*/
status_t
VMCache::InsertAreaLocked(VMArea* area)
{
	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
	AssertLocked();

	T(InsertArea(this, area));

	area->cache_next = areas;
	if (area->cache_next)
		area->cache_next->cache_prev = area;
	area->cache_prev = NULL;
	areas = area;

	AcquireStoreRef();

	return B_OK;
}


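/*!	Removes the \a area from this cache.
	The caller must not hold the cache lock; it is acquired internally. The
	store reference is released beforehand to preserve the usual locking order.
*/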
status_t
VMCache::RemoveArea(VMArea* area)
{
	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));

	T(RemoveArea(this, area));

	// We release the store reference first, since otherwise we would reverse
	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
	// Also cf. _RemoveConsumer().
	ReleaseStoreRef();

	AutoLocker<VMCache> locker(this);

	if (area->cache_prev)
		area->cache_prev->cache_next = area->cache_next;
	if (area->cache_next)
		area->cache_next->cache_prev = area->cache_prev;
	if (areas == area)
		areas = area->cache_next;

	return B_OK;
}


/*!	Transfers the areas from \a fromCache to this cache. This cache must not
	have areas yet. Both caches must be locked.
*/
void
VMCache::TransferAreas(VMCache* fromCache)
{
	AssertLocked();
	fromCache->AssertLocked();
	ASSERT(areas == NULL);

	areas = fromCache->areas;
	fromCache->areas = NULL;

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		area->cache = this;
		AcquireRefLocked();
		fromCache->ReleaseRefLocked();

		T(RemoveArea(fromCache, area));
		T(InsertArea(this, area));
	}
}


uint32
VMCache::CountWritableAreas(VMArea* ignoreArea) const
{
	uint32 count = 0;

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		if (area != ignoreArea
			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
			count++;
		}
	}

	return count;
}


status_t
VMCache::WriteModified()
{
	TRACE(("VMCache::WriteModified(cache = %p)\n", this));

	if (temporary)
		return B_OK;

	Lock();
	status_t status = vm_page_write_modified_pages(this);
	Unlock();

	return status;
}


/*!	Commits the memory to the store if the \a commitment is larger than
	what's committed already.
	Assumes you have the cache's lock held.
*/
status_t
VMCache::SetMinimalCommitment(off_t commitment, int priority)
{
	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %" B_PRIdOFF
		")\n", this, commitment));
	T(SetMinimalCommitment(this, commitment));

	status_t status = B_OK;

	// If we don't have enough committed space to cover through to the new end
	// of the area...
	if (committed_size < commitment) {
		ASSERT(commitment <= ROUNDUP(virtual_end - virtual_base, B_PAGE_SIZE));

		// try to commit more memory
		status = Commit(commitment, priority);
	}

	return status;
}


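/*!	Frees the pages the given iterator returns, up to (but not including)
	\a toPage, if specified. Pages that are busy being written are flagged so
	the writer frees them; for any other busy page the method waits and
	returns \c true, in which case the caller must restart with a fresh
	iterator. Returns \c false once the whole range has been processed.
*/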
bool
VMCache::_FreePageRange(VMCachePagesTree::Iterator it,
	page_num_t* toPage = NULL)
{
	for (vm_page* page = it.Next();
			page != NULL && (toPage == NULL || page->cache_offset < *toPage);
			page = it.Next()) {

		if (page->busy) {
			if (page->busy_writing) {
				// We cannot wait for the page to become available
				// as we might cause a deadlock this way
				page->busy_writing = false;
					// this will notify the writer to free the page
				continue;
			}

			// wait for page to become unbusy
			WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
			return true;
		}

		// remove the page and put it into the free queue
		DEBUG_PAGE_ACCESS_START(page);
		vm_remove_all_page_mappings(page);
		ASSERT(page->WiredCount() == 0);
			// TODO: Find a real solution! If the page is wired
			// temporarily (e.g. by lock_memory()), we actually must not
			// unmap it!
		RemovePage(page);
			// Note: When iterating through an IteratableSplayTree
			// removing the current node is safe.

		vm_page_free(this, page);
	}

	return false;
}


/*!	This function updates the size field of the cache.
	If needed, it will free up all pages that don't belong to the cache anymore.
	The cache lock must be held when you call it.
	Since removed pages don't belong to the cache any longer, they are not
	written back before they are removed.

	Note, this function may temporarily release the cache lock in case it
	has to wait for busy pages.
*/
status_t
VMCache::Resize(off_t newSize, int priority)
{
	TRACE(("VMCache::Resize(cache %p, newSize %" B_PRIdOFF ") old size %"
		B_PRIdOFF "\n", this, newSize, this->virtual_end));
	T(Resize(this, newSize));

	AssertLocked();

	page_num_t oldPageCount = (page_num_t)((virtual_end + B_PAGE_SIZE - 1)
		>> PAGE_SHIFT);
	page_num_t newPageCount = (page_num_t)((newSize + B_PAGE_SIZE - 1)
		>> PAGE_SHIFT);

	if (newPageCount < oldPageCount) {
		// we need to remove all pages in the cache outside of the new virtual
		// size
		while (_FreePageRange(pages.GetIterator(newPageCount, true, true)))
			;
	}

	status_t status = Commit(newSize - virtual_base, priority);
	if (status != B_OK)
		return status;

	virtual_end = newSize;
	return B_OK;
}


/*!	This function updates the virtual_base field of the cache.
	If needed, it will free up all pages that don't belong to the cache anymore.
	The cache lock must be held when you call it.
	Since removed pages don't belong to the cache any longer, they are not
	written back before they are removed.

	Note, this function may temporarily release the cache lock in case it
	has to wait for busy pages.
*/
status_t
VMCache::Rebase(off_t newBase, int priority)
{
	TRACE(("VMCache::Rebase(cache %p, newBase %lld) old base %lld\n",
		this, newBase, this->virtual_base));
	this->AssertLocked();

	T(Rebase(this, newBase));

	status_t status = Commit(virtual_end - newBase, priority);
	if (status != B_OK)
		return status;

	page_num_t basePage = (page_num_t)(newBase >> PAGE_SHIFT);

	if (newBase > virtual_base) {
		// we need to remove all pages in the cache outside of the new virtual
		// base
		while (_FreePageRange(pages.GetIterator(), &basePage))
			;
	}

	virtual_base = newBase;
	return B_OK;
}


/*!	Moves pages in the given range from the source cache into this cache. Both
	caches must be locked.
*/
status_t
VMCache::Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset)
{
	page_num_t startPage = offset >> PAGE_SHIFT;
	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
	off_t offsetChange = newOffset - offset;

	VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true,
		true);
	for (vm_page* page = it.Next();
			page != NULL && page->cache_offset < endPage;
			page = it.Next()) {
		MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange);
	}

	return B_OK;
}


/*!	Discards pages in the given range. */
status_t
VMCache::Discard(off_t offset, off_t size)
{
	page_num_t startPage = offset >> PAGE_SHIFT;
	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
	while (_FreePageRange(pages.GetIterator(startPage, true, true), &endPage))
		;

	return B_OK;
}


/*!	You have to call this function with the VMCache lock held. */
status_t
VMCache::FlushAndRemoveAllPages()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	while (page_count > 0) {
		// write back modified pages
		status_t status = vm_page_write_modified_pages(this);
		if (status != B_OK)
			return status;

		// remove pages
		for (VMCachePagesTree::Iterator it = pages.GetIterator();
				vm_page* page = it.Next();) {
			if (page->busy) {
				// wait for page to become unbusy
				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);

				// restart from the start of the list
				it = pages.GetIterator();
				continue;
			}

			// skip modified pages -- they will be written back in the next
			// iteration
			if (page->State() == PAGE_STATE_MODIFIED)
				continue;

			// We can't remove mapped pages.
			if (page->IsMapped())
				return B_BUSY;

			DEBUG_PAGE_ACCESS_START(page);
			RemovePage(page);
			vm_page_free(this, page);
				// Note: When iterating through an IteratableSplayTree
				// removing the current node is safe.
		}
	}

	return B_OK;
}


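/*!	Commits memory for the cache.
	The default implementation only records the new commitment; subclasses
	with a real backing store (e.g. the anonymous caches) override this to
	actually reserve memory or swap space.
*/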
status_t
VMCache::Commit(off_t size, int priority)
{
	committed_size = size;
	return B_OK;
}


/*!	Returns whether the cache's underlying backing store could deliver the
	page at the given offset.

	Basically it returns whether a Read() at \a offset would at least read a
	partial page (assuming that no unexpected errors occur or the situation
	changes in the meantime).
*/
bool
VMCache::HasPage(off_t offset)
{
	// In accordance with Fault() the default implementation doesn't have a
	// backing store and doesn't allow faults.
	return false;
}


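// The default Read()/Write() implementations fail, since the base class has
// no backing store; subclasses that do (such as the vnode cache) override
// them.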
status_t
VMCache::Read(off_t offset, const generic_io_vec *vecs, size_t count,
	uint32 flags, generic_size_t *_numBytes)
{
	return B_ERROR;
}


status_t
VMCache::Write(off_t offset, const generic_io_vec *vecs, size_t count,
	uint32 flags, generic_size_t *_numBytes)
{
	return B_ERROR;
}


status_t
VMCache::WriteAsync(off_t offset, const generic_io_vec* vecs, size_t count,
	generic_size_t numBytes, uint32 flags, AsyncIOCallback* callback)
{
	// Not supported, fall back to the synchronous hook.
	generic_size_t transferred = numBytes;
	status_t error = Write(offset, vecs, count, flags, &transferred);

	if (callback != NULL)
		callback->IOFinished(error, transferred != numBytes, transferred);

	return error;
}


/*!	\brief Returns whether the cache can write the page at the given offset.

	The cache must be locked when this function is invoked.

	@param offset The page offset.
	@return \c true, if the page can be written, \c false otherwise.
*/
bool
VMCache::CanWritePage(off_t offset)
{
	return false;
}


status_t
VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
{
	return B_BAD_ADDRESS;
}


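/*!	Merges the pages of \a source into this cache (its consumer). Only pages
	not yet present in this cache are moved; the remaining pages stay in
	\a source. Both caches must be locked.
*/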
void
VMCache::Merge(VMCache* source)
{
	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
			vm_page* page = it.Next();) {
		// Note: Removing the current node while iterating through an
		// IteratableSplayTree is safe.
		vm_page* consumerPage = LookupPage(
			(off_t)page->cache_offset << PAGE_SHIFT);
		if (consumerPage == NULL) {
			// the page is not yet in the consumer cache - move it upwards
			MovePage(page);
		}
	}
}


status_t
VMCache::AcquireUnreferencedStoreRef()
{
	return B_OK;
}


void
VMCache::AcquireStoreRef()
{
}


void
VMCache::ReleaseStoreRef()
{
}


/*!	Kernel debugger version of HasPage().
	Does not do any locking.
*/
bool
VMCache::DebugHasPage(off_t offset)
{
	// default that works for all subclasses that don't lock anyway
	return HasPage(offset);
}


/*!	Kernel debugger version of LookupPage().
	Does not do any locking.
*/
vm_page*
VMCache::DebugLookupPage(off_t offset)
{
	return pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
}


void
VMCache::Dump(bool showPages) const
{
	kprintf("CACHE %p:\n", this);
	kprintf(" ref_count: %" B_PRId32 "\n", RefCount());
	kprintf(" source: %p\n", source);
	kprintf(" type: %s\n", vm_cache_type_to_string(type));
	kprintf(" virtual_base: 0x%" B_PRIx64 "\n", virtual_base);
	kprintf(" virtual_end: 0x%" B_PRIx64 "\n", virtual_end);
	kprintf(" temporary: %" B_PRIu32 "\n", uint32(temporary));
	kprintf(" lock: %p\n", &fLock);
#if KDEBUG
	kprintf(" lock.holder: %" B_PRId32 "\n", fLock.holder);
#endif
	kprintf(" areas:\n");

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		kprintf(" area 0x%" B_PRIx32 ", %s\n", area->id, area->name);
		kprintf("\tbase_addr: 0x%lx, size: 0x%lx\n", area->Base(),
			area->Size());
		kprintf("\tprotection: 0x%" B_PRIx32 "\n", area->protection);
		kprintf("\towner: 0x%" B_PRIx32 "\n", area->address_space->ID());
	}

	kprintf(" consumers:\n");
	for (ConsumerList::ConstIterator it = consumers.GetIterator();
			VMCache* consumer = it.Next();) {
		kprintf("\t%p\n", consumer);
	}

	kprintf(" pages:\n");
	if (showPages) {
		for (VMCachePagesTree::ConstIterator it = pages.GetIterator();
				vm_page* page = it.Next();) {
			if (!vm_page_is_dummy(page)) {
				kprintf("\t%p ppn %#" B_PRIxPHYSADDR " offset %#" B_PRIxPHYSADDR
					" state %u (%s) wired_count %u\n", page,
					page->physical_page_number, page->cache_offset,
					page->State(), page_state_to_string(page->State()),
					page->WiredCount());
			} else {
				kprintf("\t%p DUMMY PAGE state %u (%s)\n",
					page, page->State(), page_state_to_string(page->State()));
			}
		}
	} else
		kprintf("\t%" B_PRIu32 " in cache\n", page_count);
}


/*!	Wakes up threads waiting for page events.
	\param page The page for which events occurred.
	\param events The mask of events that occurred.
*/
void
VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
{
	PageEventWaiter** it = &fPageEventWaiters;
	while (PageEventWaiter* waiter = *it) {
		if (waiter->page == page && (waiter->events & events) != 0) {
			// remove from list and unblock
			*it = waiter->next;
			thread_unblock(waiter->thread, B_OK);
		} else
			it = &waiter->next;
	}
}


/*!	Merges the given cache with its only consumer.
	The caller must hold both the cache's and the consumer's lock. The method
	releases neither lock.
*/
void
VMCache::_MergeWithOnlyConsumer()
{
	VMCache* consumer = consumers.RemoveHead();

	TRACE(("merge vm cache %p (ref == %" B_PRId32 ") with vm cache %p\n",
		this, this->fRefCount, consumer));

	T(Merge(this, consumer));

	// merge the cache
	consumer->Merge(this);

	// The remaining consumer has got a new source.
	if (source != NULL) {
		VMCache* newSource = source;

		newSource->Lock();

		newSource->consumers.Remove(this);
		newSource->consumers.Add(consumer);
		consumer->source = newSource;
		source = NULL;

		newSource->Unlock();
	} else
		consumer->source = NULL;

	// Release the reference the cache's consumer owned. The consumer takes
	// over the cache's ref to its source (if any) instead.
	ReleaseRefLocked();
}


/*!	Removes the \a consumer from this cache.
	It will also release the reference to the cache owned by the consumer.
	Assumes you have the consumer's cache lock held. This cache must not be
	locked.
*/
void
VMCache::_RemoveConsumer(VMCache* consumer)
{
	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
	consumer->AssertLocked();

	T(RemoveConsumer(this, consumer));

	// Remove the store ref before locking the cache. Otherwise we'd call into
	// the VFS while holding the cache lock, which would reverse the usual
	// locking order.
	ReleaseStoreRef();

	// remove the consumer from the cache, but keep its reference until later
	Lock();
	consumers.Remove(consumer);
	consumer->source = NULL;

	ReleaseRefAndUnlock();
}


// #pragma mark - VMCacheFactory
	// TODO: Move to own source file!


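/*!	Creates an anonymous (RAM-backed) cache.
	With swap support enabled and \a swappable set, a swap-backed
	VMAnonymousCache is created; otherwise a VMAnonymousNoSwapCache is used.
*/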
/*static*/ status_t
VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
	int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
	int priority)
{
	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
	if (priority >= VM_PRIORITY_VIP)
		allocationFlags |= HEAP_PRIORITY_VIP;

#if ENABLE_SWAP_SUPPORT
	if (swappable) {
		VMAnonymousCache* cache
			= new(gAnonymousCacheObjectCache, allocationFlags) VMAnonymousCache;
		if (cache == NULL)
			return B_NO_MEMORY;

		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
			numGuardPages, allocationFlags);
		if (error != B_OK) {
			cache->Delete();
			return error;
		}

		T(Create(cache));

		_cache = cache;
		return B_OK;
	}
#endif

	VMAnonymousNoSwapCache* cache
		= new(gAnonymousNoSwapCacheObjectCache, allocationFlags)
			VMAnonymousNoSwapCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
		numGuardPages, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
{
	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
		// Note: Vnode cache creation is never VIP.

	VMVnodeCache* cache
		= new(gVnodeCacheObjectCache, allocationFlags) VMVnodeCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(vnode, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
{
	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
		// Note: Device cache creation is never VIP.

	VMDeviceCache* cache
		= new(gDeviceCacheObjectCache, allocationFlags) VMDeviceCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(baseAddress, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
{
	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
	if (priority >= VM_PRIORITY_VIP)
		allocationFlags |= HEAP_PRIORITY_VIP;

	VMNullCache* cache
		= new(gNullCacheObjectCache, allocationFlags) VMNullCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}