1 /*
2 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4 * Distributed under the terms of the MIT License.
5 *
6 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7 * Distributed under the terms of the NewOS License.
8 */
9
10
11 #include <vm/VMCache.h>
12
13 #include <stddef.h>
14 #include <stdlib.h>
15
16 #include <algorithm>
17
18 #include <arch/cpu.h>
19 #include <condition_variable.h>
20 #include <heap.h>
21 #include <int.h>
22 #include <kernel.h>
23 #include <slab/Slab.h>
24 #include <smp.h>
25 #include <thread.h>
26 #include <tracing.h>
27 #include <util/AutoLock.h>
28 #include <vfs.h>
29 #include <vm/vm.h>
30 #include <vm/vm_page.h>
31 #include <vm/vm_priv.h>
32 #include <vm/vm_types.h>
33 #include <vm/VMAddressSpace.h>
34 #include <vm/VMArea.h>
35
36 // needed for the factory only
37 #include "VMAnonymousCache.h"
38 #include "VMAnonymousNoSwapCache.h"
39 #include "VMDeviceCache.h"
40 #include "VMNullCache.h"
41 #include "../cache/vnode_store.h"
42
43
44 //#define TRACE_VM_CACHE
45 #ifdef TRACE_VM_CACHE
46 # define TRACE(x) dprintf x
47 #else
48 # define TRACE(x) ;
49 #endif
50
51
52 #if DEBUG_CACHE_LIST
53 VMCache* gDebugCacheList;
54 #endif
55 static rw_lock sCacheListLock = RW_LOCK_INITIALIZER("global VMCache list");
56 // The lock is also needed when the debug feature is disabled.
57
58 ObjectCache* gCacheRefObjectCache;
59 #if ENABLE_SWAP_SUPPORT
60 ObjectCache* gAnonymousCacheObjectCache;
61 #endif
62 ObjectCache* gAnonymousNoSwapCacheObjectCache;
63 ObjectCache* gVnodeCacheObjectCache;
64 ObjectCache* gDeviceCacheObjectCache;
65 ObjectCache* gNullCacheObjectCache;
66
67
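// Bookkeeping for a thread blocked in WaitForPageEvents(): waiters form a
// singly linked list headed by fPageEventWaiters and are woken up again by
// _NotifyPageEvents() once a matching event occurs on the page.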
68 struct VMCache::PageEventWaiter {
69 Thread* thread;
70 PageEventWaiter* next;
71 vm_page* page;
72 uint32 events;
73 };
74
75
76 #if VM_CACHE_TRACING
77
78 namespace VMCacheTracing {
79
80 class VMCacheTraceEntry : public AbstractTraceEntry {
81 public:
VMCacheTraceEntry(VMCache* cache)
83 :
84 fCache(cache)
85 {
86 #if VM_CACHE_TRACING_STACK_TRACE
87 fStackTrace = capture_tracing_stack_trace(
88 VM_CACHE_TRACING_STACK_TRACE, 0, true);
89 // Don't capture userland stack trace to avoid potential
90 // deadlocks.
91 #endif
92 }
93
94 #if VM_CACHE_TRACING_STACK_TRACE
virtual void DumpStackTrace(TraceOutput& out)
96 {
97 out.PrintStackTrace(fStackTrace);
98 }
99 #endif
100
VMCache* Cache() const
102 {
103 return fCache;
104 }
105
106 protected:
107 VMCache* fCache;
108 #if VM_CACHE_TRACING_STACK_TRACE
109 tracing_stack_trace* fStackTrace;
110 #endif
111 };
112
113
114 class Create : public VMCacheTraceEntry {
115 public:
Create(VMCache* cache)
117 :
118 VMCacheTraceEntry(cache)
119 {
120 Initialized();
121 }
122
virtual void AddDump(TraceOutput& out)
124 {
125 out.Print("vm cache create: -> cache: %p", fCache);
126 }
127 };
128
129
130 class Delete : public VMCacheTraceEntry {
131 public:
Delete(VMCache* cache)
133 :
134 VMCacheTraceEntry(cache)
135 {
136 Initialized();
137 }
138
virtual void AddDump(TraceOutput& out)
140 {
141 out.Print("vm cache delete: cache: %p", fCache);
142 }
143 };
144
145
146 class SetMinimalCommitment : public VMCacheTraceEntry {
147 public:
SetMinimalCommitment(VMCache* cache, off_t commitment)
149 :
150 VMCacheTraceEntry(cache),
151 fOldCommitment(cache->committed_size),
152 fCommitment(commitment)
153 {
154 Initialized();
155 }
156
virtual void AddDump(TraceOutput& out)
158 {
159 out.Print("vm cache set min commitment: cache: %p, "
160 "commitment: %" B_PRIdOFF " -> %" B_PRIdOFF, fCache,
161 fOldCommitment, fCommitment);
162 }
163
164 private:
165 off_t fOldCommitment;
166 off_t fCommitment;
167 };
168
169
170 class Resize : public VMCacheTraceEntry {
171 public:
Resize(VMCache* cache, off_t size)
173 :
174 VMCacheTraceEntry(cache),
175 fOldSize(cache->virtual_end),
176 fSize(size)
177 {
178 Initialized();
179 }
180
virtual void AddDump(TraceOutput& out)
182 {
183 out.Print("vm cache resize: cache: %p, size: %" B_PRIdOFF " -> %"
184 B_PRIdOFF, fCache, fOldSize, fSize);
185 }
186
187 private:
188 off_t fOldSize;
189 off_t fSize;
190 };
191
192
193 class Rebase : public VMCacheTraceEntry {
194 public:
Rebase(VMCache* cache, off_t base)
196 :
197 VMCacheTraceEntry(cache),
198 fOldBase(cache->virtual_base),
199 fBase(base)
200 {
201 Initialized();
202 }
203
virtual void AddDump(TraceOutput& out)
205 {
206 out.Print("vm cache rebase: cache: %p, base: %lld -> %lld", fCache,
207 fOldBase, fBase);
208 }
209
210 private:
211 off_t fOldBase;
212 off_t fBase;
213 };
214
215
216 class AddConsumer : public VMCacheTraceEntry {
217 public:
AddConsumer(VMCache* cache, VMCache* consumer)
219 :
220 VMCacheTraceEntry(cache),
221 fConsumer(consumer)
222 {
223 Initialized();
224 }
225
virtual void AddDump(TraceOutput& out)
227 {
228 out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
229 fConsumer);
230 }
231
VMCache* Consumer() const
233 {
234 return fConsumer;
235 }
236
237 private:
238 VMCache* fConsumer;
239 };
240
241
242 class RemoveConsumer : public VMCacheTraceEntry {
243 public:
RemoveConsumer(VMCache* cache, VMCache* consumer)
245 :
246 VMCacheTraceEntry(cache),
247 fConsumer(consumer)
248 {
249 Initialized();
250 }
251
virtual void AddDump(TraceOutput& out)
253 {
254 out.Print("vm cache remove consumer: cache: %p, consumer: %p",
255 fCache, fConsumer);
256 }
257
258 private:
259 VMCache* fConsumer;
260 };
261
262
263 class Merge : public VMCacheTraceEntry {
264 public:
Merge(VMCache* cache, VMCache* consumer)
266 :
267 VMCacheTraceEntry(cache),
268 fConsumer(consumer)
269 {
270 Initialized();
271 }
272
virtual void AddDump(TraceOutput& out)
274 {
275 out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
276 fCache, fConsumer);
277 }
278
279 private:
280 VMCache* fConsumer;
281 };
282
283
284 class InsertArea : public VMCacheTraceEntry {
285 public:
InsertArea(VMCache* cache, VMArea* area)
287 :
288 VMCacheTraceEntry(cache),
289 fArea(area)
290 {
291 Initialized();
292 }
293
virtual void AddDump(TraceOutput& out)
295 {
296 out.Print("vm cache insert area: cache: %p, area: %p", fCache,
297 fArea);
298 }
299
VMArea* Area() const
301 {
302 return fArea;
303 }
304
305 private:
306 VMArea* fArea;
307 };
308
309
310 class RemoveArea : public VMCacheTraceEntry {
311 public:
RemoveArea(VMCache* cache, VMArea* area)
313 :
314 VMCacheTraceEntry(cache),
315 fArea(area)
316 {
317 Initialized();
318 }
319
virtual void AddDump(TraceOutput& out)
321 {
322 out.Print("vm cache remove area: cache: %p, area: %p", fCache,
323 fArea);
324 }
325
326 private:
327 VMArea* fArea;
328 };
329
330 } // namespace VMCacheTracing
331
332 # define T(x) new(std::nothrow) VMCacheTracing::x;
333
334 # if VM_CACHE_TRACING >= 2
335
336 namespace VMCacheTracing {
337
338 class InsertPage : public VMCacheTraceEntry {
339 public:
InsertPage(VMCache* cache, vm_page* page, off_t offset)
341 :
342 VMCacheTraceEntry(cache),
343 fPage(page),
344 fOffset(offset)
345 {
346 Initialized();
347 }
348
virtual void AddDump(TraceOutput& out)
350 {
351 out.Print("vm cache insert page: cache: %p, page: %p, offset: %"
352 B_PRIdOFF, fCache, fPage, fOffset);
353 }
354
355 private:
356 vm_page* fPage;
357 off_t fOffset;
358 };
359
360
361 class RemovePage : public VMCacheTraceEntry {
362 public:
RemovePage(VMCache* cache, vm_page* page)
364 :
365 VMCacheTraceEntry(cache),
366 fPage(page)
367 {
368 Initialized();
369 }
370
virtual void AddDump(TraceOutput& out)
372 {
373 out.Print("vm cache remove page: cache: %p, page: %p", fCache,
374 fPage);
375 }
376
377 private:
378 vm_page* fPage;
379 };
380
381 } // namespace VMCacheTracing
382
383 # define T2(x) new(std::nothrow) VMCacheTracing::x;
384 # else
385 # define T2(x) ;
386 # endif
387 #else
388 # define T(x) ;
389 # define T2(x) ;
390 #endif
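
// T() records a VMCacheTracing entry for cache-level operations when
// VM_CACHE_TRACING is enabled; T2() additionally covers the per-page
// operations and is only active at tracing level 2 or higher.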
391
392
393 // #pragma mark - debugger commands
394
395
396 #if VM_CACHE_TRACING
397
398
399 static void*
cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
401 {
402 using namespace VMCacheTracing;
403
404 // find the previous "insert area" entry for the given area
405 TraceEntryIterator iterator = baseIterator;
406 TraceEntry* entry = iterator.Current();
407 while (entry != NULL) {
408 if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
409 if (insertAreaEntry->Area() == area)
410 return insertAreaEntry->Cache();
411 }
412
413 entry = iterator.Previous();
414 }
415
416 return NULL;
417 }
418
419
420 static void*
cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
422 {
423 using namespace VMCacheTracing;
424
425 // find the previous "add consumer" or "create" entry for the given cache
426 TraceEntryIterator iterator = baseIterator;
427 TraceEntry* entry = iterator.Current();
428 while (entry != NULL) {
429 if (Create* createEntry = dynamic_cast<Create*>(entry)) {
430 if (createEntry->Cache() == cache)
431 return NULL;
432 } else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
433 if (addEntry->Consumer() == cache)
434 return addEntry->Cache();
435 }
436
437 entry = iterator.Previous();
438 }
439
440 return NULL;
441 }
442
443
444 static int
command_cache_stack(int argc, char** argv)
446 {
447 if (argc < 3 || argc > 4) {
448 print_debugger_command_usage(argv[0]);
449 return 0;
450 }
451
452 bool isArea = false;
453
454 int argi = 1;
455 if (argc == 4) {
456 if (strcmp(argv[argi], "area") != 0) {
457 print_debugger_command_usage(argv[0]);
458 return 0;
459 }
460
461 argi++;
462 isArea = true;
463 }
464
465 uint64 addressValue;
466 uint64 debugEntryIndex;
467 if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
468 || !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
469 return 0;
470 }
471
472 TraceEntryIterator baseIterator;
473 if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
474 kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
475 return 0;
476 }
477
478 void* address = (void*)(addr_t)addressValue;
479
480 kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
481 isArea ? "area" : "cache", address, debugEntryIndex);
482 if (isArea) {
483 address = cache_stack_find_area_cache(baseIterator, address);
484 if (address == NULL) {
485 kprintf(" cache not found\n");
486 return 0;
487 }
488 }
489
490 while (address != NULL) {
491 kprintf(" %p\n", address);
492 address = cache_stack_find_consumer(baseIterator, address);
493 }
494
495 return 0;
496 }
497
498
499 #endif // VM_CACHE_TRACING
500
501
502 // #pragma mark -
503
504
505 status_t
vm_cache_init(kernel_args* args)
507 {
508 // Create object caches for the structures we allocate here.
509 gCacheRefObjectCache = create_object_cache("cache refs", sizeof(VMCacheRef),
510 0, NULL, NULL, NULL);
511 #if ENABLE_SWAP_SUPPORT
512 gAnonymousCacheObjectCache = create_object_cache("anon caches",
513 sizeof(VMAnonymousCache), 0, NULL, NULL, NULL);
514 #endif
515 gAnonymousNoSwapCacheObjectCache = create_object_cache(
516 "anon no-swap caches", sizeof(VMAnonymousNoSwapCache), 0, NULL, NULL,
517 NULL);
518 gVnodeCacheObjectCache = create_object_cache("vnode caches",
519 sizeof(VMVnodeCache), 0, NULL, NULL, NULL);
520 gDeviceCacheObjectCache = create_object_cache("device caches",
521 sizeof(VMDeviceCache), 0, NULL, NULL, NULL);
522 gNullCacheObjectCache = create_object_cache("null caches",
523 sizeof(VMNullCache), 0, NULL, NULL, NULL);
524
525 if (gCacheRefObjectCache == NULL
526 #if ENABLE_SWAP_SUPPORT
527 || gAnonymousCacheObjectCache == NULL
528 #endif
529 || gAnonymousNoSwapCacheObjectCache == NULL
530 || gVnodeCacheObjectCache == NULL
531 || gDeviceCacheObjectCache == NULL
532 || gNullCacheObjectCache == NULL) {
533 panic("vm_cache_init(): Failed to create object caches!");
534 return B_NO_MEMORY;
535 }
536
537 return B_OK;
538 }
539
540
541 void
vm_cache_init_post_heap()
543 {
544 #if VM_CACHE_TRACING
545 add_debugger_command_etc("cache_stack", &command_cache_stack,
546 "List the ancestors (sources) of a VMCache at the time given by "
547 "tracing entry index",
548 "[ \"area\" ] <address> <tracing entry index>\n"
549 "All ancestors (sources) of a given VMCache at the time given by the\n"
550 "tracing entry index are listed. If \"area\" is given the supplied\n"
551 "address is an area instead of a cache address. The listing will\n"
552 "start with the area's cache at that point.\n",
553 0);
554 #endif // VM_CACHE_TRACING
555 }
556
557
558 VMCache*
vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
560 {
561 rw_lock_read_lock(&sCacheListLock);
562
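// A page's cache can change (or be deleted) while we wait for its lock
// below, so after acquiring the lock we re-check that the page still
// belongs to that cache and retry otherwise.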
563 while (true) {
564 VMCacheRef* cacheRef = page->CacheRef();
565 if (cacheRef == NULL) {
566 rw_lock_read_unlock(&sCacheListLock);
567 return NULL;
568 }
569
570 VMCache* cache = cacheRef->cache;
571 if (dontWait) {
572 if (!cache->TryLock()) {
573 rw_lock_read_unlock(&sCacheListLock);
574 return NULL;
575 }
576 } else {
577 if (!cache->SwitchFromReadLock(&sCacheListLock)) {
578 // cache has been deleted
579 rw_lock_read_lock(&sCacheListLock);
580 continue;
581 }
582 rw_lock_read_lock(&sCacheListLock);
583 }
584
585 if (cache == page->Cache()) {
586 rw_lock_read_unlock(&sCacheListLock);
587 cache->AcquireRefLocked();
588 return cache;
589 }
590
591 // the cache changed in the meantime
592 cache->Unlock();
593 }
594 }
595
596
597 // #pragma mark - VMCache
598
599
VMCacheRef::VMCacheRef(VMCache* cache)
601 :
602 cache(cache),
603 ref_count(1)
604 {
605 }
606
607
608 // #pragma mark - VMCache
609
610
611 bool
VMCache::_IsMergeable() const
613 {
614 return areas == NULL && temporary && !unmergeable
615 && !consumers.IsEmpty() && consumers.Head() == consumers.Tail();
616 }
617
618
VMCache::VMCache()
620 :
621 fCacheRef(NULL)
622 {
623 }
624
625
VMCache::~VMCache()
627 {
628 object_cache_delete(gCacheRefObjectCache, fCacheRef);
629 }
630
631
632 status_t
VMCache::Init(uint32 cacheType, uint32 allocationFlags)
634 {
635 mutex_init(&fLock, "VMCache");
636
637 areas = NULL;
638 fRefCount = 1;
639 source = NULL;
640 virtual_base = 0;
641 virtual_end = 0;
642 committed_size = 0;
643 temporary = 0;
644 unmergeable = 0;
645 page_count = 0;
646 fWiredPagesCount = 0;
647 type = cacheType;
648 fPageEventWaiters = NULL;
649
650 #if DEBUG_CACHE_LIST
651 debug_previous = NULL;
652 debug_next = NULL;
653 // initialize in case the following fails
654 #endif
655
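// The VMCacheRef is a separate allocation that pages refer to instead of
// pointing at the cache directly; presumably this is what lets
// MoveAllPages() retarget all pages of a cache by merely swapping the refs.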
656 fCacheRef = new(gCacheRefObjectCache, allocationFlags) VMCacheRef(this);
657 if (fCacheRef == NULL)
658 return B_NO_MEMORY;
659
660 #if DEBUG_CACHE_LIST
661 rw_lock_write_lock(&sCacheListLock);
662
663 if (gDebugCacheList != NULL)
664 gDebugCacheList->debug_previous = this;
665 debug_next = gDebugCacheList;
666 gDebugCacheList = this;
667
668 rw_lock_write_unlock(&sCacheListLock);
669 #endif
670
671 return B_OK;
672 }
673
674
675 void
VMCache::Delete()
677 {
678 if (areas != NULL)
679 panic("cache %p to be deleted still has areas", this);
680 if (!consumers.IsEmpty())
681 panic("cache %p to be deleted still has consumers", this);
682
683 T(Delete(this));
684
685 // free all of the pages in the cache
686 while (vm_page* page = pages.Root()) {
687 if (!page->mappings.IsEmpty() || page->WiredCount() != 0) {
688 panic("remove page %p from cache %p: page still has mappings!\n"
689 "@!page %p; cache %p", page, this, page, this);
690 }
691
692 // remove it
693 pages.Remove(page);
694 page->SetCacheRef(NULL);
695
696 TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
697 page->physical_page_number));
698 DEBUG_PAGE_ACCESS_START(page);
699 vm_page_free(this, page);
700 }
701
702 // remove the ref to the source
703 if (source)
704 source->_RemoveConsumer(this);
705
// We lock and unlock the sCacheListLock here even if DEBUG_CACHE_LIST is
// not enabled. This synchronization point is needed for
// vm_cache_acquire_locked_page_cache().
709 rw_lock_write_lock(&sCacheListLock);
710
711 #if DEBUG_CACHE_LIST
712 if (debug_previous)
713 debug_previous->debug_next = debug_next;
714 if (debug_next)
715 debug_next->debug_previous = debug_previous;
716 if (this == gDebugCacheList)
717 gDebugCacheList = debug_next;
718 #endif
719
720 mutex_destroy(&fLock);
721
722 rw_lock_write_unlock(&sCacheListLock);
723
724 DeleteObject();
725 }
726
727
728 void
VMCache::Unlock(bool consumerLocked)
730 {
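// While only a single reference to this cache remains and it is mergeable
// (temporary, no areas, exactly one consumer), fold it into that consumer
// before giving up the lock.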
731 while (fRefCount == 1 && _IsMergeable()) {
732 VMCache* consumer = consumers.Head();
733 if (consumerLocked) {
734 _MergeWithOnlyConsumer();
735 } else if (consumer->TryLock()) {
736 _MergeWithOnlyConsumer();
737 consumer->Unlock();
738 } else {
739 // Someone else has locked the consumer ATM. Unlock this cache and
740 // wait for the consumer lock. Increment the cache's ref count
741 // temporarily, so that no one else will try what we are doing or
742 // delete the cache.
743 fRefCount++;
744 bool consumerLockedTemp = consumer->SwitchLock(&fLock);
745 Lock();
746 fRefCount--;
747
748 if (consumerLockedTemp) {
749 if (fRefCount == 1 && _IsMergeable()
750 && consumer == consumers.Head()) {
751 // nothing has changed in the meantime -- merge
752 _MergeWithOnlyConsumer();
753 }
754
755 consumer->Unlock();
756 }
757 }
758 }
759
760 if (fRefCount == 0) {
761 // delete this cache
762 Delete();
763 } else
764 mutex_unlock(&fLock);
765 }
766
767
768 vm_page*
VMCache::LookupPage(off_t offset)
770 {
771 AssertLocked();
772
773 vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
774
775 #if KDEBUG
776 if (page != NULL && page->Cache() != this)
777 panic("page %p not in cache %p\n", page, this);
778 #endif
779
780 return page;
781 }
782
783
784 void
VMCache::InsertPage(vm_page* page, off_t offset)
786 {
787 TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %" B_PRIdOFF "\n",
788 this, page, offset));
789 AssertLocked();
790
791 if (page->CacheRef() != NULL) {
792 panic("insert page %p into cache %p: page cache is set to %p\n",
793 page, this, page->Cache());
794 }
795
796 T2(InsertPage(this, page, offset));
797
798 page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
799 page_count++;
800 page->SetCacheRef(fCacheRef);
801
802 #if KDEBUG
803 vm_page* otherPage = pages.Lookup(page->cache_offset);
804 if (otherPage != NULL) {
805 panic("VMCache::InsertPage(): there's already page %p with cache "
806 "offset %" B_PRIuPHYSADDR " in cache %p; inserting page %p",
807 otherPage, page->cache_offset, this, page);
808 }
809 #endif // KDEBUG
810
811 pages.Insert(page);
812
813 if (page->WiredCount() > 0)
814 IncrementWiredPagesCount();
815 }
816
817
818 /*! Removes the vm_page from this cache. Of course, the page must
819 really be in this cache or evil things will happen.
820 The cache lock must be held.
821 */
822 void
VMCache::RemovePage(vm_page* page)
824 {
825 TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
826 AssertLocked();
827
828 if (page->Cache() != this) {
829 panic("remove page %p from cache %p: page cache is set to %p\n", page,
830 this, page->Cache());
831 }
832
833 T2(RemovePage(this, page));
834
835 pages.Remove(page);
836 page_count--;
837 page->SetCacheRef(NULL);
838
839 if (page->WiredCount() > 0)
840 DecrementWiredPagesCount();
841 }
842
843
/*! Moves the given page from its current cache and inserts it into this cache
at the given offset.
Both caches must be locked.
*/
848 void
VMCache::MovePage(vm_page* page, off_t offset)
850 {
851 VMCache* oldCache = page->Cache();
852
853 AssertLocked();
854 oldCache->AssertLocked();
855
856 // remove from old cache
857 oldCache->pages.Remove(page);
858 oldCache->page_count--;
859 T2(RemovePage(oldCache, page));
860
861 // change the offset
862 page->cache_offset = offset >> PAGE_SHIFT;
863
864 // insert here
865 pages.Insert(page);
866 page_count++;
867 page->SetCacheRef(fCacheRef);
868
869 if (page->WiredCount() > 0) {
870 IncrementWiredPagesCount();
871 oldCache->DecrementWiredPagesCount();
872 }
873
874 T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
875 }
876
/*! Moves the given page from its current cache and inserts it into this cache,
keeping its current offset.
Both caches must be locked.
*/
880 void
VMCache::MovePage(vm_page* page)
882 {
883 MovePage(page, page->cache_offset << PAGE_SHIFT);
884 }
885
886
887 /*! Moves all pages from the given cache to this one.
888 Both caches must be locked. This cache must be empty.
889 */
890 void
VMCache::MoveAllPages(VMCache* fromCache)
892 {
893 AssertLocked();
894 fromCache->AssertLocked();
895 ASSERT(page_count == 0);
896
897 std::swap(fromCache->pages, pages);
898 page_count = fromCache->page_count;
899 fromCache->page_count = 0;
900 fWiredPagesCount = fromCache->fWiredPagesCount;
901 fromCache->fWiredPagesCount = 0;
902
903 // swap the VMCacheRefs
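// The moved pages still point at their old VMCacheRef; swapping the refs
// and retargeting them below lets those pointers resolve to this cache
// without touching every page. This has to happen under sCacheListLock so
// that vm_cache_acquire_locked_page_cache() sees a consistent state.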
904 rw_lock_write_lock(&sCacheListLock);
905 std::swap(fCacheRef, fromCache->fCacheRef);
906 fCacheRef->cache = this;
907 fromCache->fCacheRef->cache = fromCache;
908 rw_lock_write_unlock(&sCacheListLock);
909
910 #if VM_CACHE_TRACING >= 2
911 for (VMCachePagesTree::Iterator it = pages.GetIterator();
912 vm_page* page = it.Next();) {
913 T2(RemovePage(fromCache, page));
914 T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
915 }
916 #endif
917 }
918
919
920 /*! Waits until one or more events happened for a given page which belongs to
921 this cache.
922 The cache must be locked. It will be unlocked by the method. \a relock
923 specifies whether the method shall re-lock the cache before returning.
924 \param page The page for which to wait.
925 \param events The mask of events the caller is interested in.
926 \param relock If \c true, the cache will be locked when returning,
927 otherwise it won't be locked.
928 */
929 void
VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
931 {
932 PageEventWaiter waiter;
933 waiter.thread = thread_get_current_thread();
934 waiter.next = fPageEventWaiters;
935 waiter.page = page;
936 waiter.events = events;
937
938 fPageEventWaiters = &waiter;
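// The waiter lives on our stack; _NotifyPageEvents() unlinks it from the
// list before unblocking this thread.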
939
940 thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER_OBJECT, page);
941
942 Unlock();
943 thread_block();
944
945 if (relock)
946 Lock();
947 }
948
949
/*! Makes this cache the source of the \a consumer cache,
and adds the \a consumer to its list.
This also grabs a reference to the source cache.
Assumes you have the cache's and the consumer's lock held.
*/
955 void
VMCache::AddConsumer(VMCache* consumer)
957 {
958 TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
959 AssertLocked();
960 consumer->AssertLocked();
961
962 T(AddConsumer(this, consumer));
963
964 consumer->source = this;
965 consumers.Add(consumer);
966
967 AcquireRefLocked();
968 AcquireStoreRef();
969 }
970
971
/*! Adds the \a area to this cache.
Assumes you have the cache locked.
*/
975 status_t
VMCache::InsertAreaLocked(VMArea* area)
977 {
978 TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
979 AssertLocked();
980
981 T(InsertArea(this, area));
982
983 area->cache_next = areas;
984 if (area->cache_next)
985 area->cache_next->cache_prev = area;
986 area->cache_prev = NULL;
987 areas = area;
988
989 AcquireStoreRef();
990
991 return B_OK;
992 }
993
994
995 status_t
VMCache::RemoveArea(VMArea* area)
997 {
998 TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));
999
1000 T(RemoveArea(this, area));
1001
1002 // We release the store reference first, since otherwise we would reverse
1003 // the locking order or even deadlock ourselves (... -> free_vnode() -> ...
1004 // -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
1005 // Also cf. _RemoveConsumer().
1006 ReleaseStoreRef();
1007
1008 AutoLocker<VMCache> locker(this);
1009
1010 if (area->cache_prev)
1011 area->cache_prev->cache_next = area->cache_next;
1012 if (area->cache_next)
1013 area->cache_next->cache_prev = area->cache_prev;
1014 if (areas == area)
1015 areas = area->cache_next;
1016
1017 return B_OK;
1018 }
1019
1020
1021 /*! Transfers the areas from \a fromCache to this cache. This cache must not
1022 have areas yet. Both caches must be locked.
1023 */
1024 void
VMCache::TransferAreas(VMCache* fromCache)
1026 {
1027 AssertLocked();
1028 fromCache->AssertLocked();
1029 ASSERT(areas == NULL);
1030
1031 areas = fromCache->areas;
1032 fromCache->areas = NULL;
1033
1034 for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1035 area->cache = this;
1036 AcquireRefLocked();
1037 fromCache->ReleaseRefLocked();
1038
1039 T(RemoveArea(fromCache, area));
1040 T(InsertArea(this, area));
1041 }
1042 }
1043
1044
1045 uint32
VMCache::CountWritableAreas(VMArea* ignoreArea) const
1047 {
1048 uint32 count = 0;
1049
1050 for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1051 if (area != ignoreArea
1052 && (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
1053 count++;
1054 }
1055 }
1056
1057 return count;
1058 }
1059
1060
1061 status_t
VMCache::WriteModified()
1063 {
1064 TRACE(("VMCache::WriteModified(cache = %p)\n", this));
1065
1066 if (temporary)
1067 return B_OK;
1068
1069 Lock();
1070 status_t status = vm_page_write_modified_pages(this);
1071 Unlock();
1072
1073 return status;
1074 }
1075
1076
1077 /*! Commits the memory to the store if the \a commitment is larger than
1078 what's committed already.
1079 Assumes you have the cache's lock held.
1080 */
1081 status_t
VMCache::SetMinimalCommitment(off_t commitment, int priority)
1083 {
1084 TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %" B_PRIdOFF
1085 ")\n", this, commitment));
1086 T(SetMinimalCommitment(this, commitment));
1087
1088 status_t status = B_OK;
1089
1090 // If we don't have enough committed space to cover through to the new end
1091 // of the area...
1092 if (committed_size < commitment) {
1093 #if KDEBUG
1094 const off_t size = ROUNDUP(virtual_end - virtual_base, B_PAGE_SIZE);
1095 ASSERT_PRINT(commitment <= size, "cache %p, commitment %" B_PRIdOFF ", size %" B_PRIdOFF,
1096 this, commitment, size);
1097 #endif
1098
1099 // try to commit more memory
1100 status = Commit(commitment, priority);
1101 }
1102
1103 return status;
1104 }
1105
1106
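/*! Frees the pages returned by \a it, optionally stopping at \a toPage.
Returns \c true if a busy page had to be waited for (which temporarily
releases the cache lock), telling the caller to restart the iteration;
\c false once the whole range has been processed.
*/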
1107 bool
VMCache::_FreePageRange(VMCachePagesTree::Iterator it,
page_num_t* toPage = NULL)
1110 {
1111 for (vm_page* page = it.Next();
1112 page != NULL && (toPage == NULL || page->cache_offset < *toPage);
1113 page = it.Next()) {
1114
1115 if (page->busy) {
1116 if (page->busy_writing) {
1117 // We cannot wait for the page to become available
1118 // as we might cause a deadlock this way
1119 page->busy_writing = false;
1120 // this will notify the writer to free the page
1121 continue;
1122 }
1123
1124 // wait for page to become unbusy
1125 WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1126 return true;
1127 }
1128
1129 // remove the page and put it into the free queue
1130 DEBUG_PAGE_ACCESS_START(page);
1131 vm_remove_all_page_mappings(page);
1132 ASSERT(page->WiredCount() == 0);
1133 // TODO: Find a real solution! If the page is wired
1134 // temporarily (e.g. by lock_memory()), we actually must not
1135 // unmap it!
1136 RemovePage(page);
// Note: When iterating through an IteratableSplayTree,
// removing the current node is safe.
1139
1140 vm_page_free(this, page);
1141 }
1142
1143 return false;
1144 }
1145
1146
/*! This function updates the virtual_end field of the cache.
If needed, it will free up all pages that no longer belong to the cache.
The cache lock must be held when you call it.
Since removed pages no longer belong to the cache, they are not
written back before being removed.

Note, this function may temporarily release the cache lock in case it
has to wait for busy pages.
*/
1156 status_t
VMCache::Resize(off_t newSize, int priority)
1158 {
1159 TRACE(("VMCache::Resize(cache %p, newSize %" B_PRIdOFF ") old size %"
1160 B_PRIdOFF "\n", this, newSize, this->virtual_end));
1161 T(Resize(this, newSize));
1162
1163 AssertLocked();
1164
1165 page_num_t oldPageCount = (page_num_t)((virtual_end + B_PAGE_SIZE - 1)
1166 >> PAGE_SHIFT);
1167 page_num_t newPageCount = (page_num_t)((newSize + B_PAGE_SIZE - 1)
1168 >> PAGE_SHIFT);
1169
1170 if (newPageCount < oldPageCount) {
1171 // we need to remove all pages in the cache outside of the new virtual
1172 // size
1173 while (_FreePageRange(pages.GetIterator(newPageCount, true, true)))
1174 ;
1175 }
1176
1177 status_t status = Commit(newSize - virtual_base, priority);
1178 if (status != B_OK)
1179 return status;
1180
1181 virtual_end = newSize;
1182 return B_OK;
1183 }
1184
/*! This function updates the virtual_base field of the cache.
If needed, it will free up all pages that no longer belong to the cache.
The cache lock must be held when you call it.
Since removed pages no longer belong to the cache, they are not
written back before being removed.

Note, this function may temporarily release the cache lock in case it
has to wait for busy pages.
*/
1194 status_t
VMCache::Rebase(off_t newBase, int priority)
1196 {
1197 TRACE(("VMCache::Rebase(cache %p, newBase %lld) old base %lld\n",
1198 this, newBase, this->virtual_base));
1199 this->AssertLocked();
1200
1201 T(Rebase(this, newBase));
1202
1203 status_t status = Commit(virtual_end - newBase, priority);
1204 if (status != B_OK)
1205 return status;
1206
1207 page_num_t basePage = (page_num_t)(newBase >> PAGE_SHIFT);
1208
1209 if (newBase > virtual_base) {
1210 // we need to remove all pages in the cache outside of the new virtual
1211 // base
1212 while (_FreePageRange(pages.GetIterator(), &basePage))
1213 ;
1214 }
1215
1216 virtual_base = newBase;
1217 return B_OK;
1218 }
1219
1220
1221 /*! Moves pages in the given range from the source cache into this cache. Both
1222 caches must be locked.
1223 */
1224 status_t
VMCache::Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset)
1226 {
1227 page_num_t startPage = offset >> PAGE_SHIFT;
1228 page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
1229 off_t offsetChange = newOffset - offset;
1230
1231 VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true,
1232 true);
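// MovePage() removes the page from the source cache's tree; removing the
// current node while iterating an IteratableSplayTree is safe.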
1233 for (vm_page* page = it.Next();
1234 page != NULL && page->cache_offset < endPage;
1235 page = it.Next()) {
1236 MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange);
1237 }
1238
1239 return B_OK;
1240 }
1241
1242
1243 /*! Discards pages in the given range. */
1244 status_t
VMCache::Discard(off_t offset, off_t size)
1246 {
1247 page_num_t startPage = offset >> PAGE_SHIFT;
1248 page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
1249 while (_FreePageRange(pages.GetIterator(startPage, true, true), &endPage))
1250 ;
1251
1252 return B_OK;
1253 }
1254
1255
1256 /*! You have to call this function with the VMCache lock held. */
1257 status_t
VMCache::FlushAndRemoveAllPages()
1259 {
1260 ASSERT_LOCKED_MUTEX(&fLock);
1261
1262 while (page_count > 0) {
1263 // write back modified pages
1264 status_t status = vm_page_write_modified_pages(this);
1265 if (status != B_OK)
1266 return status;
1267
1268 // remove pages
1269 for (VMCachePagesTree::Iterator it = pages.GetIterator();
1270 vm_page* page = it.Next();) {
1271 if (page->busy) {
1272 // wait for page to become unbusy
1273 WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1274
1275 // restart from the start of the list
1276 it = pages.GetIterator();
1277 continue;
1278 }
1279
1280 // skip modified pages -- they will be written back in the next
1281 // iteration
1282 if (page->State() == PAGE_STATE_MODIFIED)
1283 continue;
1284
1285 // We can't remove mapped pages.
1286 if (page->IsMapped())
1287 return B_BUSY;
1288
1289 DEBUG_PAGE_ACCESS_START(page);
1290 RemovePage(page);
1291 vm_page_free(this, page);
// Note: When iterating through an IteratableSplayTree,
// removing the current node is safe.
1294 }
1295 }
1296
1297 return B_OK;
1298 }
1299
1300
1301 status_t
VMCache::Commit(off_t size, int priority)
1303 {
1304 ASSERT_UNREACHABLE();
1305 return B_NOT_SUPPORTED;
1306 }
1307
1308
1309 /*! Returns whether the cache's underlying backing store could deliver the
1310 page at the given offset.
1311
1312 Basically it returns whether a Read() at \a offset would at least read a
1313 partial page (assuming that no unexpected errors occur or the situation
1314 changes in the meantime).
1315 */
1316 bool
VMCache::HasPage(off_t offset)
1318 {
1319 // In accordance with Fault() the default implementation doesn't have a
1320 // backing store and doesn't allow faults.
1321 return false;
1322 }
1323
1324
1325 status_t
VMCache::Read(off_t offset, const generic_io_vec *vecs, size_t count,
uint32 flags, generic_size_t *_numBytes)
1328 {
1329 return B_ERROR;
1330 }
1331
1332
1333 status_t
VMCache::Write(off_t offset, const generic_io_vec *vecs, size_t count,
uint32 flags, generic_size_t *_numBytes)
1336 {
1337 return B_ERROR;
1338 }
1339
1340
1341 status_t
VMCache::WriteAsync(off_t offset, const generic_io_vec* vecs, size_t count,
generic_size_t numBytes, uint32 flags, AsyncIOCallback* callback)
1344 {
1345 // Not supported, fall back to the synchronous hook.
1346 generic_size_t transferred = numBytes;
1347 status_t error = Write(offset, vecs, count, flags, &transferred);
1348
1349 if (callback != NULL)
1350 callback->IOFinished(error, transferred != numBytes, transferred);
1351
1352 return error;
1353 }
1354
1355
1356 /*! \brief Returns whether the cache can write the page at the given offset.
1357
1358 The cache must be locked when this function is invoked.
1359
1360 @param offset The page offset.
1361 @return \c true, if the page can be written, \c false otherwise.
1362 */
1363 bool
VMCache::CanWritePage(off_t offset)
1365 {
1366 return false;
1367 }
1368
1369
1370 status_t
VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
1372 {
1373 return B_BAD_ADDRESS;
1374 }
1375
1376
1377 void
VMCache::Merge(VMCache* source)
1379 {
1380 for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
1381 vm_page* page = it.Next();) {
// Note: Removing the current node while iterating through an
// IteratableSplayTree is safe.
1384 vm_page* consumerPage = LookupPage(
1385 (off_t)page->cache_offset << PAGE_SHIFT);
1386 if (consumerPage == NULL) {
1387 // the page is not yet in the consumer cache - move it upwards
1388 MovePage(page);
1389 }
1390 }
1391 }
1392
1393
1394 status_t
VMCache::AcquireUnreferencedStoreRef()
1396 {
1397 return B_OK;
1398 }
1399
1400
1401 void
VMCache::AcquireStoreRef()
1403 {
1404 }
1405
1406
1407 void
VMCache::ReleaseStoreRef()
1409 {
1410 }
1411
1412
1413 /*! Kernel debugger version of HasPage().
1414 Does not do any locking.
1415 */
1416 bool
VMCache::DebugHasPage(off_t offset)
1418 {
1419 // default that works for all subclasses that don't lock anyway
1420 return HasPage(offset);
1421 }
1422
1423
1424 /*! Kernel debugger version of LookupPage().
1425 Does not do any locking.
1426 */
1427 vm_page*
VMCache::DebugLookupPage(off_t offset)
1429 {
1430 return pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
1431 }
1432
1433
1434 void
VMCache::Dump(bool showPages) const
1436 {
1437 kprintf("CACHE %p:\n", this);
1438 kprintf(" ref_count: %" B_PRId32 "\n", RefCount());
1439 kprintf(" source: %p\n", source);
1440 kprintf(" type: %s\n", vm_cache_type_to_string(type));
1441 kprintf(" virtual_base: 0x%" B_PRIx64 "\n", virtual_base);
1442 kprintf(" virtual_end: 0x%" B_PRIx64 "\n", virtual_end);
1443 kprintf(" temporary: %" B_PRIu32 "\n", uint32(temporary));
1444 kprintf(" lock: %p\n", &fLock);
1445 #if KDEBUG
1446 kprintf(" lock.holder: %" B_PRId32 "\n", fLock.holder);
1447 #endif
1448 kprintf(" areas:\n");
1449
1450 for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1451 kprintf(" area 0x%" B_PRIx32 ", %s\n", area->id, area->name);
1452 kprintf("\tbase_addr: 0x%lx, size: 0x%lx\n", area->Base(),
1453 area->Size());
1454 kprintf("\tprotection: 0x%" B_PRIx32 "\n", area->protection);
1455 kprintf("\towner: 0x%" B_PRIx32 "\n", area->address_space->ID());
1456 }
1457
1458 kprintf(" consumers:\n");
1459 for (ConsumerList::ConstIterator it = consumers.GetIterator();
1460 VMCache* consumer = it.Next();) {
1461 kprintf("\t%p\n", consumer);
1462 }
1463
1464 kprintf(" pages:\n");
1465 if (showPages) {
1466 for (VMCachePagesTree::ConstIterator it = pages.GetIterator();
1467 vm_page* page = it.Next();) {
1468 if (!vm_page_is_dummy(page)) {
1469 kprintf("\t%p ppn %#" B_PRIxPHYSADDR " offset %#" B_PRIxPHYSADDR
1470 " state %u (%s) wired_count %u\n", page,
1471 page->physical_page_number, page->cache_offset,
1472 page->State(), page_state_to_string(page->State()),
1473 page->WiredCount());
1474 } else {
1475 kprintf("\t%p DUMMY PAGE state %u (%s)\n",
1476 page, page->State(), page_state_to_string(page->State()));
1477 }
1478 }
1479 } else
1480 kprintf("\t%" B_PRIu32 " in cache\n", page_count);
1481 }
1482
1483
1484 /*! Wakes up threads waiting for page events.
1485 \param page The page for which events occurred.
1486 \param events The mask of events that occurred.
1487 */
1488 void
VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
1490 {
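// Walk the singly linked waiter list via a pointer-to-pointer so that
// matching waiters can be unlinked in place while iterating.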
1491 PageEventWaiter** it = &fPageEventWaiters;
1492 while (PageEventWaiter* waiter = *it) {
1493 if (waiter->page == page && (waiter->events & events) != 0) {
1494 // remove from list and unblock
1495 *it = waiter->next;
1496 thread_unblock(waiter->thread, B_OK);
1497 } else
1498 it = &waiter->next;
1499 }
1500 }
1501
1502
/*! Merges this cache with its only consumer.
The caller must hold both the cache's and the consumer's lock. The method
releases neither lock.
*/
1507 void
VMCache::_MergeWithOnlyConsumer()
1509 {
1510 VMCache* consumer = consumers.RemoveHead();
1511
1512 TRACE(("merge vm cache %p (ref == %" B_PRId32 ") with vm cache %p\n",
1513 this, this->fRefCount, consumer));
1514
1515 T(Merge(this, consumer));
1516
1517 // merge the cache
1518 consumer->Merge(this);
1519
1520 // The remaining consumer has got a new source.
1521 if (source != NULL) {
1522 VMCache* newSource = source;
1523
1524 newSource->Lock();
1525
1526 newSource->consumers.Remove(this);
1527 newSource->consumers.Add(consumer);
1528 consumer->source = newSource;
1529 source = NULL;
1530
1531 newSource->Unlock();
1532 } else
1533 consumer->source = NULL;
1534
1535 // Release the reference the cache's consumer owned. The consumer takes
1536 // over the cache's ref to its source (if any) instead.
1537 ReleaseRefLocked();
1538 }
1539
1540
1541 /*! Removes the \a consumer from this cache.
1542 It will also release the reference to the cache owned by the consumer.
1543 Assumes you have the consumer's cache lock held. This cache must not be
1544 locked.
1545 */
1546 void
VMCache::_RemoveConsumer(VMCache* consumer)
1548 {
1549 TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
1550 consumer->AssertLocked();
1551
1552 T(RemoveConsumer(this, consumer));
1553
1554 // Remove the store ref before locking the cache. Otherwise we'd call into
1555 // the VFS while holding the cache lock, which would reverse the usual
1556 // locking order.
1557 ReleaseStoreRef();
1558
1559 // remove the consumer from the cache, but keep its reference until later
1560 Lock();
1561 consumers.Remove(consumer);
1562 consumer->source = NULL;
1563
1564 ReleaseRefAndUnlock();
1565 }
1566
1567
1568 // #pragma mark - VMCacheFactory
1569 // TODO: Move to own source file!
1570
1571
1572 /*static*/ status_t
VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
int priority)
1576 {
1577 uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1578 | HEAP_DONT_LOCK_KERNEL_SPACE;
1579 if (priority >= VM_PRIORITY_VIP)
1580 allocationFlags |= HEAP_PRIORITY_VIP;
1581
1582 #if ENABLE_SWAP_SUPPORT
1583 if (swappable) {
1584 VMAnonymousCache* cache
1585 = new(gAnonymousCacheObjectCache, allocationFlags) VMAnonymousCache;
1586 if (cache == NULL)
1587 return B_NO_MEMORY;
1588
1589 status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1590 numGuardPages, allocationFlags);
1591 if (error != B_OK) {
1592 cache->Delete();
1593 return error;
1594 }
1595
1596 T(Create(cache));
1597
1598 _cache = cache;
1599 return B_OK;
1600 }
1601 #endif
1602
1603 VMAnonymousNoSwapCache* cache
1604 = new(gAnonymousNoSwapCacheObjectCache, allocationFlags)
1605 VMAnonymousNoSwapCache;
1606 if (cache == NULL)
1607 return B_NO_MEMORY;
1608
1609 status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1610 numGuardPages, allocationFlags);
1611 if (error != B_OK) {
1612 cache->Delete();
1613 return error;
1614 }
1615
1616 T(Create(cache));
1617
1618 _cache = cache;
1619 return B_OK;
1620 }
1621
1622
1623 /*static*/ status_t
VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
1625 {
1626 const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1627 | HEAP_DONT_LOCK_KERNEL_SPACE;
1628 // Note: Vnode cache creation is never VIP.
1629
1630 VMVnodeCache* cache
1631 = new(gVnodeCacheObjectCache, allocationFlags) VMVnodeCache;
1632 if (cache == NULL)
1633 return B_NO_MEMORY;
1634
1635 status_t error = cache->Init(vnode, allocationFlags);
1636 if (error != B_OK) {
1637 cache->Delete();
1638 return error;
1639 }
1640
1641 T(Create(cache));
1642
1643 _cache = cache;
1644 return B_OK;
1645 }
1646
1647
1648 /*static*/ status_t
VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
1650 {
1651 const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1652 | HEAP_DONT_LOCK_KERNEL_SPACE;
1653 // Note: Device cache creation is never VIP.
1654
1655 VMDeviceCache* cache
1656 = new(gDeviceCacheObjectCache, allocationFlags) VMDeviceCache;
1657 if (cache == NULL)
1658 return B_NO_MEMORY;
1659
1660 status_t error = cache->Init(baseAddress, allocationFlags);
1661 if (error != B_OK) {
1662 cache->Delete();
1663 return error;
1664 }
1665
1666 T(Create(cache));
1667
1668 _cache = cache;
1669 return B_OK;
1670 }
1671
1672
1673 /*static*/ status_t
VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
1675 {
1676 uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1677 | HEAP_DONT_LOCK_KERNEL_SPACE;
1678 if (priority >= VM_PRIORITY_VIP)
1679 allocationFlags |= HEAP_PRIORITY_VIP;
1680
1681 VMNullCache* cache
1682 = new(gNullCacheObjectCache, allocationFlags) VMNullCache;
1683 if (cache == NULL)
1684 return B_NO_MEMORY;
1685
1686 status_t error = cache->Init(allocationFlags);
1687 if (error != B_OK) {
1688 cache->Delete();
1689 return error;
1690 }
1691
1692 T(Create(cache));
1693
1694 _cache = cache;
1695 return B_OK;
1696 }
1697