xref: /haiku/src/system/kernel/vm/VMCache.cpp (revision a0fd8467d97cfa23c536bc5ff205c7a2aa648852)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include <vm/VMCache.h>
12 
13 #include <stddef.h>
14 #include <stdlib.h>
15 
16 #include <algorithm>
17 
18 #include <arch/cpu.h>
19 #include <condition_variable.h>
20 #include <heap.h>
21 #include <int.h>
22 #include <kernel.h>
23 #include <slab/Slab.h>
24 #include <smp.h>
25 #include <thread.h>
26 #include <tracing.h>
27 #include <util/AutoLock.h>
28 #include <vfs.h>
29 #include <vm/vm.h>
30 #include <vm/vm_page.h>
31 #include <vm/vm_priv.h>
32 #include <vm/vm_types.h>
33 #include <vm/VMAddressSpace.h>
34 #include <vm/VMArea.h>
35 
36 // needed for the factory only
37 #include "VMAnonymousCache.h"
38 #include "VMAnonymousNoSwapCache.h"
39 #include "VMDeviceCache.h"
40 #include "VMNullCache.h"
41 #include "../cache/vnode_store.h"
42 
43 
44 //#define TRACE_VM_CACHE
45 #ifdef TRACE_VM_CACHE
46 #	define TRACE(x) dprintf x
47 #else
48 #	define TRACE(x) ;
49 #endif
50 
51 
52 #if DEBUG_CACHE_LIST
53 VMCache* gDebugCacheList;
54 #endif
55 static rw_lock sCacheListLock = RW_LOCK_INITIALIZER("global VMCache list");
56 	// The lock is also needed when the debug feature is disabled.
57 
58 ObjectCache* gCacheRefObjectCache;
59 #if ENABLE_SWAP_SUPPORT
60 ObjectCache* gAnonymousCacheObjectCache;
61 #endif
62 ObjectCache* gAnonymousNoSwapCacheObjectCache;
63 ObjectCache* gVnodeCacheObjectCache;
64 ObjectCache* gDeviceCacheObjectCache;
65 ObjectCache* gNullCacheObjectCache;
66 
67 
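// A thread waiting for certain events on a specific page of this cache.
// The waiters form a singly linked list rooted in fPageEventWaiters and are
// woken up by _NotifyPageEvents().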
68 struct VMCache::PageEventWaiter {
69 	Thread*				thread;
70 	PageEventWaiter*	next;
71 	vm_page*			page;
72 	uint32				events;
73 };
74 
75 
76 #if VM_CACHE_TRACING
77 
78 namespace VMCacheTracing {
79 
80 class VMCacheTraceEntry : public AbstractTraceEntry {
81 	public:
82 		VMCacheTraceEntry(VMCache* cache)
83 			:
84 			fCache(cache)
85 		{
86 #if VM_CACHE_TRACING_STACK_TRACE
87 			fStackTrace = capture_tracing_stack_trace(
88 				VM_CACHE_TRACING_STACK_TRACE, 0, true);
89 				// Don't capture userland stack trace to avoid potential
90 				// deadlocks.
91 #endif
92 		}
93 
94 #if VM_CACHE_TRACING_STACK_TRACE
95 		virtual void DumpStackTrace(TraceOutput& out)
96 		{
97 			out.PrintStackTrace(fStackTrace);
98 		}
99 #endif
100 
101 		VMCache* Cache() const
102 		{
103 			return fCache;
104 		}
105 
106 	protected:
107 		VMCache*	fCache;
108 #if VM_CACHE_TRACING_STACK_TRACE
109 		tracing_stack_trace* fStackTrace;
110 #endif
111 };
112 
113 
114 class Create : public VMCacheTraceEntry {
115 	public:
116 		Create(VMCache* cache)
117 			:
118 			VMCacheTraceEntry(cache)
119 		{
120 			Initialized();
121 		}
122 
123 		virtual void AddDump(TraceOutput& out)
124 		{
125 			out.Print("vm cache create: -> cache: %p", fCache);
126 		}
127 };
128 
129 
130 class Delete : public VMCacheTraceEntry {
131 	public:
132 		Delete(VMCache* cache)
133 			:
134 			VMCacheTraceEntry(cache)
135 		{
136 			Initialized();
137 		}
138 
139 		virtual void AddDump(TraceOutput& out)
140 		{
141 			out.Print("vm cache delete: cache: %p", fCache);
142 		}
143 };
144 
145 
146 class SetMinimalCommitment : public VMCacheTraceEntry {
147 	public:
148 		SetMinimalCommitment(VMCache* cache, off_t commitment)
149 			:
150 			VMCacheTraceEntry(cache),
151 			fOldCommitment(cache->committed_size),
152 			fCommitment(commitment)
153 		{
154 			Initialized();
155 		}
156 
157 		virtual void AddDump(TraceOutput& out)
158 		{
159 			out.Print("vm cache set min commitment: cache: %p, "
160 				"commitment: %" B_PRIdOFF " -> %" B_PRIdOFF, fCache,
161 				fOldCommitment, fCommitment);
162 		}
163 
164 	private:
165 		off_t	fOldCommitment;
166 		off_t	fCommitment;
167 };
168 
169 
170 class Resize : public VMCacheTraceEntry {
171 	public:
172 		Resize(VMCache* cache, off_t size)
173 			:
174 			VMCacheTraceEntry(cache),
175 			fOldSize(cache->virtual_end),
176 			fSize(size)
177 		{
178 			Initialized();
179 		}
180 
181 		virtual void AddDump(TraceOutput& out)
182 		{
183 			out.Print("vm cache resize: cache: %p, size: %" B_PRIdOFF " -> %"
184 				B_PRIdOFF, fCache, fOldSize, fSize);
185 		}
186 
187 	private:
188 		off_t	fOldSize;
189 		off_t	fSize;
190 };
191 
192 
193 class Rebase : public VMCacheTraceEntry {
194 	public:
195 		Rebase(VMCache* cache, off_t base)
196 			:
197 			VMCacheTraceEntry(cache),
198 			fOldBase(cache->virtual_base),
199 			fBase(base)
200 		{
201 			Initialized();
202 		}
203 
204 		virtual void AddDump(TraceOutput& out)
205 		{
206 			out.Print("vm cache rebase: cache: %p, base: %lld -> %lld", fCache,
207 				fOldBase, fBase);
208 		}
209 
210 	private:
211 		off_t	fOldBase;
212 		off_t	fBase;
213 };
214 
215 
216 class AddConsumer : public VMCacheTraceEntry {
217 	public:
218 		AddConsumer(VMCache* cache, VMCache* consumer)
219 			:
220 			VMCacheTraceEntry(cache),
221 			fConsumer(consumer)
222 		{
223 			Initialized();
224 		}
225 
226 		virtual void AddDump(TraceOutput& out)
227 		{
228 			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
229 				fConsumer);
230 		}
231 
232 		VMCache* Consumer() const
233 		{
234 			return fConsumer;
235 		}
236 
237 	private:
238 		VMCache*	fConsumer;
239 };
240 
241 
242 class RemoveConsumer : public VMCacheTraceEntry {
243 	public:
244 		RemoveConsumer(VMCache* cache, VMCache* consumer)
245 			:
246 			VMCacheTraceEntry(cache),
247 			fConsumer(consumer)
248 		{
249 			Initialized();
250 		}
251 
252 		virtual void AddDump(TraceOutput& out)
253 		{
254 			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
255 				fCache, fConsumer);
256 		}
257 
258 	private:
259 		VMCache*	fConsumer;
260 };
261 
262 
263 class Merge : public VMCacheTraceEntry {
264 	public:
265 		Merge(VMCache* cache, VMCache* consumer)
266 			:
267 			VMCacheTraceEntry(cache),
268 			fConsumer(consumer)
269 		{
270 			Initialized();
271 		}
272 
273 		virtual void AddDump(TraceOutput& out)
274 		{
275 			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
276 				fCache, fConsumer);
277 		}
278 
279 	private:
280 		VMCache*	fConsumer;
281 };
282 
283 
284 class InsertArea : public VMCacheTraceEntry {
285 	public:
286 		InsertArea(VMCache* cache, VMArea* area)
287 			:
288 			VMCacheTraceEntry(cache),
289 			fArea(area)
290 		{
291 			Initialized();
292 		}
293 
294 		virtual void AddDump(TraceOutput& out)
295 		{
296 			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
297 				fArea);
298 		}
299 
300 		VMArea*	Area() const
301 		{
302 			return fArea;
303 		}
304 
305 	private:
306 		VMArea*	fArea;
307 };
308 
309 
310 class RemoveArea : public VMCacheTraceEntry {
311 	public:
312 		RemoveArea(VMCache* cache, VMArea* area)
313 			:
314 			VMCacheTraceEntry(cache),
315 			fArea(area)
316 		{
317 			Initialized();
318 		}
319 
320 		virtual void AddDump(TraceOutput& out)
321 		{
322 			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
323 				fArea);
324 		}
325 
326 	private:
327 		VMArea*	fArea;
328 };
329 
330 }	// namespace VMCacheTracing
331 
332 #	define T(x) new(std::nothrow) VMCacheTracing::x;
333 
334 #	if VM_CACHE_TRACING >= 2
335 
336 namespace VMCacheTracing {
337 
338 class InsertPage : public VMCacheTraceEntry {
339 	public:
340 		InsertPage(VMCache* cache, vm_page* page, off_t offset)
341 			:
342 			VMCacheTraceEntry(cache),
343 			fPage(page),
344 			fOffset(offset)
345 		{
346 			Initialized();
347 		}
348 
349 		virtual void AddDump(TraceOutput& out)
350 		{
351 			out.Print("vm cache insert page: cache: %p, page: %p, offset: %"
352 				B_PRIdOFF, fCache, fPage, fOffset);
353 		}
354 
355 	private:
356 		vm_page*	fPage;
357 		off_t		fOffset;
358 };
359 
360 
361 class RemovePage : public VMCacheTraceEntry {
362 	public:
363 		RemovePage(VMCache* cache, vm_page* page)
364 			:
365 			VMCacheTraceEntry(cache),
366 			fPage(page)
367 		{
368 			Initialized();
369 		}
370 
371 		virtual void AddDump(TraceOutput& out)
372 		{
373 			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
374 				fPage);
375 		}
376 
377 	private:
378 		vm_page*	fPage;
379 };
380 
381 }	// namespace VMCacheTracing
382 
383 #		define T2(x) new(std::nothrow) VMCacheTracing::x;
384 #	else
385 #		define T2(x) ;
386 #	endif
387 #else
388 #	define T(x) ;
389 #	define T2(x) ;
390 #endif
391 
392 
393 //	#pragma mark - debugger commands
394 
395 
396 #if VM_CACHE_TRACING
397 
398 
399 static void*
400 cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
401 {
402 	using namespace VMCacheTracing;
403 
404 	// find the previous "insert area" entry for the given area
405 	TraceEntryIterator iterator = baseIterator;
406 	TraceEntry* entry = iterator.Current();
407 	while (entry != NULL) {
408 		if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
409 			if (insertAreaEntry->Area() == area)
410 				return insertAreaEntry->Cache();
411 		}
412 
413 		entry = iterator.Previous();
414 	}
415 
416 	return NULL;
417 }
418 
419 
420 static void*
421 cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
422 {
423 	using namespace VMCacheTracing;
424 
425 	// find the previous "add consumer" or "create" entry for the given cache
426 	TraceEntryIterator iterator = baseIterator;
427 	TraceEntry* entry = iterator.Current();
428 	while (entry != NULL) {
429 		if (Create* createEntry = dynamic_cast<Create*>(entry)) {
430 			if (createEntry->Cache() == cache)
431 				return NULL;
432 		} else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
433 			if (addEntry->Consumer() == cache)
434 				return addEntry->Cache();
435 		}
436 
437 		entry = iterator.Previous();
438 	}
439 
440 	return NULL;
441 }
442 
443 
444 static int
445 command_cache_stack(int argc, char** argv)
446 {
447 	if (argc < 3 || argc > 4) {
448 		print_debugger_command_usage(argv[0]);
449 		return 0;
450 	}
451 
452 	bool isArea = false;
453 
454 	int argi = 1;
455 	if (argc == 4) {
456 		if (strcmp(argv[argi], "area") != 0) {
457 			print_debugger_command_usage(argv[0]);
458 			return 0;
459 		}
460 
461 		argi++;
462 		isArea = true;
463 	}
464 
465 	uint64 addressValue;
466 	uint64 debugEntryIndex;
467 	if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
468 		|| !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
469 		return 0;
470 	}
471 
472 	TraceEntryIterator baseIterator;
473 	if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
474 		kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
475 		return 0;
476 	}
477 
478 	void* address = (void*)(addr_t)addressValue;
479 
480 	kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
481 		isArea ? "area" : "cache", address, debugEntryIndex);
482 	if (isArea) {
483 		address = cache_stack_find_area_cache(baseIterator, address);
484 		if (address == NULL) {
485 			kprintf("  cache not found\n");
486 			return 0;
487 		}
488 	}
489 
490 	while (address != NULL) {
491 		kprintf("  %p\n", address);
492 		address = cache_stack_find_consumer(baseIterator, address);
493 	}
494 
495 	return 0;
496 }
497 
498 
499 #endif	// VM_CACHE_TRACING
500 
501 
502 //	#pragma mark -
503 
504 
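/*!	Creates the object caches from which VMCacheRef and the individual
	VMCache subclasses are allocated. Panics and returns \c B_NO_MEMORY if
	any of them cannot be created.
*/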
505 status_t
506 vm_cache_init(kernel_args* args)
507 {
508 	// Create object caches for the structures we allocate here.
509 	gCacheRefObjectCache = create_object_cache("cache refs", sizeof(VMCacheRef),
510 		0, NULL, NULL, NULL);
511 #if ENABLE_SWAP_SUPPORT
512 	gAnonymousCacheObjectCache = create_object_cache("anon caches",
513 		sizeof(VMAnonymousCache), 0, NULL, NULL, NULL);
514 #endif
515 	gAnonymousNoSwapCacheObjectCache = create_object_cache(
516 		"anon no-swap caches", sizeof(VMAnonymousNoSwapCache), 0, NULL, NULL,
517 		NULL);
518 	gVnodeCacheObjectCache = create_object_cache("vnode caches",
519 		sizeof(VMVnodeCache), 0, NULL, NULL, NULL);
520 	gDeviceCacheObjectCache = create_object_cache("device caches",
521 		sizeof(VMDeviceCache), 0, NULL, NULL, NULL);
522 	gNullCacheObjectCache = create_object_cache("null caches",
523 		sizeof(VMNullCache), 0, NULL, NULL, NULL);
524 
525 	if (gCacheRefObjectCache == NULL
526 #if ENABLE_SWAP_SUPPORT
527 		|| gAnonymousCacheObjectCache == NULL
528 #endif
529 		|| gAnonymousNoSwapCacheObjectCache == NULL
530 		|| gVnodeCacheObjectCache == NULL
531 		|| gDeviceCacheObjectCache == NULL
532 		|| gNullCacheObjectCache == NULL) {
533 		panic("vm_cache_init(): Failed to create object caches!");
534 		return B_NO_MEMORY;
535 	}
536 
537 	return B_OK;
538 }
539 
540 
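/*!	Registers the "cache_stack" debugger command; only has an effect when
	VM_CACHE_TRACING is enabled.
*/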
541 void
542 vm_cache_init_post_heap()
543 {
544 #if VM_CACHE_TRACING
545 	add_debugger_command_etc("cache_stack", &command_cache_stack,
546 		"List the ancestors (sources) of a VMCache at the time given by "
547 			"tracing entry index",
548 		"[ \"area\" ] <address> <tracing entry index>\n"
549 		"All ancestors (sources) of a given VMCache at the time given by the\n"
550 		"tracing entry index are listed. If \"area\" is given the supplied\n"
551 		"address is an area instead of a cache address. The listing will\n"
552 		"start with the area's cache at that point.\n",
553 		0);
554 #endif	// VM_CACHE_TRACING
555 }
556 
557 
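/*!	Returns the cache the given page belongs to, locked and with an
	additional reference acquired, or \c NULL if the page does not belong to
	a cache. If \a dontWait is \c true and the cache lock cannot be taken
	immediately, \c NULL is returned as well. The retry loop handles the page
	being moved to another cache while we wait for the lock.
*/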
558 VMCache*
559 vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
560 {
561 	rw_lock_read_lock(&sCacheListLock);
562 
563 	while (true) {
564 		VMCacheRef* cacheRef = page->CacheRef();
565 		if (cacheRef == NULL) {
566 			rw_lock_read_unlock(&sCacheListLock);
567 			return NULL;
568 		}
569 
570 		VMCache* cache = cacheRef->cache;
571 		if (dontWait) {
572 			if (!cache->TryLock()) {
573 				rw_lock_read_unlock(&sCacheListLock);
574 				return NULL;
575 			}
576 		} else {
577 			if (!cache->SwitchFromReadLock(&sCacheListLock)) {
578 				// cache has been deleted
579 				rw_lock_read_lock(&sCacheListLock);
580 				continue;
581 			}
582 			rw_lock_read_lock(&sCacheListLock);
583 		}
584 
585 		if (cache == page->Cache()) {
586 			rw_lock_read_unlock(&sCacheListLock);
587 			cache->AcquireRefLocked();
588 			return cache;
589 		}
590 
591 		// the cache changed in the meantime
592 		cache->Unlock();
593 	}
594 }
595 
596 
597 // #pragma mark - VMCacheRef
598 
599 
600 VMCacheRef::VMCacheRef(VMCache* cache)
601 	:
602 	cache(cache),
603 	ref_count(1)
604 {
605 }
606 
607 
608 // #pragma mark - VMCache
609 
610 
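/*!	Returns whether this cache may be merged into its only consumer: it must
	be a temporary cache without areas, not marked unmergeable, and have
	exactly one consumer.
*/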
611 bool
612 VMCache::_IsMergeable() const
613 {
614 	return areas == NULL && temporary && !unmergeable
615 		&& !consumers.IsEmpty() && consumers.Head() == consumers.Tail();
616 }
617 
618 
619 VMCache::VMCache()
620 	:
621 	fCacheRef(NULL)
622 {
623 }
624 
625 
626 VMCache::~VMCache()
627 {
628 	object_cache_delete(gCacheRefObjectCache, fCacheRef);
629 }
630 
631 
632 status_t
633 VMCache::Init(uint32 cacheType, uint32 allocationFlags)
634 {
635 	mutex_init(&fLock, "VMCache");
636 
637 	areas = NULL;
638 	fRefCount = 1;
639 	source = NULL;
640 	virtual_base = 0;
641 	virtual_end = 0;
642 	committed_size = 0;
643 	temporary = 0;
644 	unmergeable = 0;
645 	page_count = 0;
646 	fWiredPagesCount = 0;
647 	type = cacheType;
648 	fPageEventWaiters = NULL;
649 
650 #if DEBUG_CACHE_LIST
651 	debug_previous = NULL;
652 	debug_next = NULL;
653 		// initialize in case the following fails
654 #endif
655 
656 	fCacheRef = new(gCacheRefObjectCache, allocationFlags) VMCacheRef(this);
657 	if (fCacheRef == NULL)
658 		return B_NO_MEMORY;
659 
660 #if DEBUG_CACHE_LIST
661 	rw_lock_write_lock(&sCacheListLock);
662 
663 	if (gDebugCacheList != NULL)
664 		gDebugCacheList->debug_previous = this;
665 	debug_next = gDebugCacheList;
666 	gDebugCacheList = this;
667 
668 	rw_lock_write_unlock(&sCacheListLock);
669 #endif
670 
671 	return B_OK;
672 }
673 
674 
675 void
676 VMCache::Delete()
677 {
678 	if (areas != NULL)
679 		panic("cache %p to be deleted still has areas", this);
680 	if (!consumers.IsEmpty())
681 		panic("cache %p to be deleted still has consumers", this);
682 
683 	T(Delete(this));
684 
685 	// free all of the pages in the cache
686 	while (vm_page* page = pages.Root()) {
687 		if (!page->mappings.IsEmpty() || page->WiredCount() != 0) {
688 			panic("remove page %p from cache %p: page still has mappings!\n"
689 				"@!page %p; cache %p", page, this, page, this);
690 		}
691 
692 		// remove it
693 		pages.Remove(page);
694 		page->SetCacheRef(NULL);
695 
696 		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
697 			page->physical_page_number));
698 		DEBUG_PAGE_ACCESS_START(page);
699 		vm_page_free(this, page);
700 	}
701 
702 	// remove the ref to the source
703 	if (source)
704 		source->_RemoveConsumer(this);
705 
706 	// We lock and unlock the sCacheListLock, even if DEBUG_CACHE_LIST is
707 	// not enabled. This synchronization point is needed for
708 	// vm_cache_acquire_locked_page_cache().
709 	rw_lock_write_lock(&sCacheListLock);
710 
711 #if DEBUG_CACHE_LIST
712 	if (debug_previous)
713 		debug_previous->debug_next = debug_next;
714 	if (debug_next)
715 		debug_next->debug_previous = debug_previous;
716 	if (this == gDebugCacheList)
717 		gDebugCacheList = debug_next;
718 #endif
719 
720 	mutex_destroy(&fLock);
721 
722 	rw_lock_write_unlock(&sCacheListLock);
723 
724 	DeleteObject();
725 }
726 
727 
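/*!	Unlocks the cache. While the cache holds only a single reference and is
	mergeable, it is first merged into its only consumer; if the reference
	count has dropped to zero, the cache is deleted instead of merely being
	unlocked.
	\param consumerLocked Whether the caller already holds the consumer's
		lock.
*/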
728 void
729 VMCache::Unlock(bool consumerLocked)
730 {
731 	while (fRefCount == 1 && _IsMergeable()) {
732 		VMCache* consumer = consumers.Head();
733 		if (consumerLocked) {
734 			_MergeWithOnlyConsumer();
735 		} else if (consumer->TryLock()) {
736 			_MergeWithOnlyConsumer();
737 			consumer->Unlock();
738 		} else {
739 			// Someone else has locked the consumer ATM. Unlock this cache and
740 			// wait for the consumer lock. Increment the cache's ref count
741 			// temporarily, so that no one else will try what we are doing or
742 			// delete the cache.
743 			fRefCount++;
744 			bool consumerLockedTemp = consumer->SwitchLock(&fLock);
745 			Lock();
746 			fRefCount--;
747 
748 			if (consumerLockedTemp) {
749 				if (fRefCount == 1 && _IsMergeable()
750 						&& consumer == consumers.Head()) {
751 					// nothing has changed in the meantime -- merge
752 					_MergeWithOnlyConsumer();
753 				}
754 
755 				consumer->Unlock();
756 			}
757 		}
758 	}
759 
760 	if (fRefCount == 0) {
761 		// delete this cache
762 		Delete();
763 	} else
764 		mutex_unlock(&fLock);
765 }
766 
767 
768 vm_page*
769 VMCache::LookupPage(off_t offset)
770 {
771 	AssertLocked();
772 
773 	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
774 
775 #if KDEBUG
776 	if (page != NULL && page->Cache() != this)
777 		panic("page %p not in cache %p\n", page, this);
778 #endif
779 
780 	return page;
781 }
782 
783 
784 void
785 VMCache::InsertPage(vm_page* page, off_t offset)
786 {
787 	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %" B_PRIdOFF "\n",
788 		this, page, offset));
789 	AssertLocked();
790 
791 	if (page->CacheRef() != NULL) {
792 		panic("insert page %p into cache %p: page cache is set to %p\n",
793 			page, this, page->Cache());
794 	}
795 
796 	T2(InsertPage(this, page, offset));
797 
798 	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
799 	page_count++;
800 	page->SetCacheRef(fCacheRef);
801 
802 #if KDEBUG
803 	vm_page* otherPage = pages.Lookup(page->cache_offset);
804 	if (otherPage != NULL) {
805 		panic("VMCache::InsertPage(): there's already page %p with cache "
806 			"offset %" B_PRIuPHYSADDR " in cache %p; inserting page %p",
807 			otherPage, page->cache_offset, this, page);
808 	}
809 #endif	// KDEBUG
810 
811 	pages.Insert(page);
812 
813 	if (page->WiredCount() > 0)
814 		IncrementWiredPagesCount();
815 }
816 
817 
818 /*!	Removes the vm_page from this cache. Of course, the page must
819 	really be in this cache or evil things will happen.
820 	The cache lock must be held.
821 */
822 void
823 VMCache::RemovePage(vm_page* page)
824 {
825 	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
826 	AssertLocked();
827 
828 	if (page->Cache() != this) {
829 		panic("remove page %p from cache %p: page cache is set to %p\n", page,
830 			this, page->Cache());
831 	}
832 
833 	T2(RemovePage(this, page));
834 
835 	pages.Remove(page);
836 	page_count--;
837 	page->SetCacheRef(NULL);
838 
839 	if (page->WiredCount() > 0)
840 		DecrementWiredPagesCount();
841 }
842 
843 
844 /*!	Moves the given page from its current cache and inserts it into this
845 	cache at the given offset.
846 	Both caches must be locked.
847 */
848 void
849 VMCache::MovePage(vm_page* page, off_t offset)
850 {
851 	VMCache* oldCache = page->Cache();
852 
853 	AssertLocked();
854 	oldCache->AssertLocked();
855 
856 	// remove from old cache
857 	oldCache->pages.Remove(page);
858 	oldCache->page_count--;
859 	T2(RemovePage(oldCache, page));
860 
861 	// change the offset
862 	page->cache_offset = offset >> PAGE_SHIFT;
863 
864 	// insert here
865 	pages.Insert(page);
866 	page_count++;
867 	page->SetCacheRef(fCacheRef);
868 
869 	if (page->WiredCount() > 0) {
870 		IncrementWiredPagesCount();
871 		oldCache->DecrementWiredPagesCount();
872 	}
873 
874 	T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
875 }
876 
877 /*!	Moves the given page from its current cache and inserts it into this
878 	cache, keeping its current offset. Both caches must be locked.
879 */
880 void
881 VMCache::MovePage(vm_page* page)
882 {
883 	MovePage(page, page->cache_offset << PAGE_SHIFT);
884 }
885 
886 
887 /*!	Moves all pages from the given cache to this one.
888 	Both caches must be locked. This cache must be empty.
889 */
890 void
891 VMCache::MoveAllPages(VMCache* fromCache)
892 {
893 	AssertLocked();
894 	fromCache->AssertLocked();
895 	ASSERT(page_count == 0);
896 
897 	std::swap(fromCache->pages, pages);
898 	page_count = fromCache->page_count;
899 	fromCache->page_count = 0;
900 	fWiredPagesCount = fromCache->fWiredPagesCount;
901 	fromCache->fWiredPagesCount = 0;
902 
903 	// swap the VMCacheRefs
904 	rw_lock_write_lock(&sCacheListLock);
905 	std::swap(fCacheRef, fromCache->fCacheRef);
906 	fCacheRef->cache = this;
907 	fromCache->fCacheRef->cache = fromCache;
908 	rw_lock_write_unlock(&sCacheListLock);
909 
910 #if VM_CACHE_TRACING >= 2
911 	for (VMCachePagesTree::Iterator it = pages.GetIterator();
912 			vm_page* page = it.Next();) {
913 		T2(RemovePage(fromCache, page));
914 		T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
915 	}
916 #endif
917 }
918 
919 
920 /*!	Waits until one or more of the given events have happened for a page
921 	that belongs to this cache.
922 	The cache must be locked. It will be unlocked by the method. \a relock
923 	specifies whether the method shall re-lock the cache before returning.
924 	\param page The page for which to wait.
925 	\param events The mask of events the caller is interested in.
926 	\param relock If \c true, the cache will be locked when returning,
927 		otherwise it won't be locked.
928 */
929 void
930 VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
931 {
932 	PageEventWaiter waiter;
933 	waiter.thread = thread_get_current_thread();
934 	waiter.next = fPageEventWaiters;
935 	waiter.page = page;
936 	waiter.events = events;
937 
938 	fPageEventWaiters = &waiter;
939 
940 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER_OBJECT, page);
941 
942 	Unlock();
943 	thread_block();
944 
945 	if (relock)
946 		Lock();
947 }
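// Typical caller pattern, as used by _FreePageRange() and
// FlushAndRemoveAllPages() below: when a busy page is encountered while
// holding the cache lock, wait for it to become unbusy and then restart the
// iteration, since the cache was unlocked in the meantime:
//
//	if (page->busy) {
//		WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
//		// any iterator into the page tree is stale now -- start over
//	}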
948 
949 
950 /*!	Makes this cache the source of the \a consumer cache,
951 	and adds the \a consumer to its list.
952 	This also grabs a reference to the source cache.
953 	Assumes you have both this cache's and the consumer's locks held.
954 */
955 void
956 VMCache::AddConsumer(VMCache* consumer)
957 {
958 	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
959 	AssertLocked();
960 	consumer->AssertLocked();
961 
962 	T(AddConsumer(this, consumer));
963 
964 	consumer->source = this;
965 	consumers.Add(consumer);
966 
967 	AcquireRefLocked();
968 	AcquireStoreRef();
969 }
970 
971 
972 /*!	Adds the \a area to this cache.
973 	Assumes you have the cache locked.
974 */
975 status_t
976 VMCache::InsertAreaLocked(VMArea* area)
977 {
978 	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
979 	AssertLocked();
980 
981 	T(InsertArea(this, area));
982 
983 	area->cache_next = areas;
984 	if (area->cache_next)
985 		area->cache_next->cache_prev = area;
986 	area->cache_prev = NULL;
987 	areas = area;
988 
989 	AcquireStoreRef();
990 
991 	return B_OK;
992 }
993 
994 
995 status_t
996 VMCache::RemoveArea(VMArea* area)
997 {
998 	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));
999 
1000 	T(RemoveArea(this, area));
1001 
1002 	// We release the store reference first, since otherwise we would reverse
1003 	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
1004 	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
1005 	// Also cf. _RemoveConsumer().
1006 	ReleaseStoreRef();
1007 
1008 	AutoLocker<VMCache> locker(this);
1009 
1010 	if (area->cache_prev)
1011 		area->cache_prev->cache_next = area->cache_next;
1012 	if (area->cache_next)
1013 		area->cache_next->cache_prev = area->cache_prev;
1014 	if (areas == area)
1015 		areas = area->cache_next;
1016 
1017 	return B_OK;
1018 }
1019 
1020 
1021 /*!	Transfers the areas from \a fromCache to this cache. This cache must not
1022 	have areas yet. Both caches must be locked.
1023 */
1024 void
1025 VMCache::TransferAreas(VMCache* fromCache)
1026 {
1027 	AssertLocked();
1028 	fromCache->AssertLocked();
1029 	ASSERT(areas == NULL);
1030 
1031 	areas = fromCache->areas;
1032 	fromCache->areas = NULL;
1033 
1034 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1035 		area->cache = this;
1036 		AcquireRefLocked();
1037 		fromCache->ReleaseRefLocked();
1038 
1039 		T(RemoveArea(fromCache, area));
1040 		T(InsertArea(this, area));
1041 	}
1042 }
1043 
1044 
1045 uint32
1046 VMCache::CountWritableAreas(VMArea* ignoreArea) const
1047 {
1048 	uint32 count = 0;
1049 
1050 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1051 		if (area != ignoreArea
1052 			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
1053 			count++;
1054 		}
1055 	}
1056 
1057 	return count;
1058 }
1059 
1060 
1061 status_t
1062 VMCache::WriteModified()
1063 {
1064 	TRACE(("VMCache::WriteModified(cache = %p)\n", this));
1065 
1066 	if (temporary)
1067 		return B_OK;
1068 
1069 	Lock();
1070 	status_t status = vm_page_write_modified_pages(this);
1071 	Unlock();
1072 
1073 	return status;
1074 }
1075 
1076 
1077 /*!	Commits the memory to the store if the \a commitment is larger than
1078 	what's committed already.
1079 	Assumes you have the cache's lock held.
1080 */
1081 status_t
1082 VMCache::SetMinimalCommitment(off_t commitment, int priority)
1083 {
1084 	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %" B_PRIdOFF
1085 		")\n", this, commitment));
1086 	AssertLocked();
1087 
1088 	T(SetMinimalCommitment(this, commitment));
1089 
1090 	status_t status = B_OK;
1091 
1092 	// If we don't have enough committed space to cover through to the new end
1093 	// of the area...
1094 	if (committed_size < commitment) {
1095 		ASSERT(commitment <= ROUNDUP(virtual_end - virtual_base, B_PAGE_SIZE));
1096 
1097 		// try to commit more memory
1098 		status = Commit(commitment, priority);
1099 	}
1100 
1101 	return status;
1102 }
1103 
1104 
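/*!	Frees all pages the given iterator yields, up to (but not including)
	\a toPage if specified. Pages that are busy being written are merely told
	to be freed by the writer (by clearing \c busy_writing) and skipped.
	Returns \c true if a busy page had to be waited for -- the cache lock was
	temporarily released in that case and the caller should restart -- and
	\c false once the whole range has been processed.
*/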
1105 bool
1106 VMCache::_FreePageRange(VMCachePagesTree::Iterator it,
1107 	page_num_t* toPage = NULL)
1108 {
1109 	for (vm_page* page = it.Next();
1110 		page != NULL && (toPage == NULL || page->cache_offset < *toPage);
1111 		page = it.Next()) {
1112 
1113 		if (page->busy) {
1114 			if (page->busy_writing) {
1115 				// We cannot wait for the page to become available
1116 				// as we might cause a deadlock this way
1117 				page->busy_writing = false;
1118 					// this will notify the writer to free the page
1119 				continue;
1120 			}
1121 
1122 			// wait for page to become unbusy
1123 			WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1124 			return true;
1125 		}
1126 
1127 		// remove the page and put it into the free queue
1128 		DEBUG_PAGE_ACCESS_START(page);
1129 		vm_remove_all_page_mappings(page);
1130 		ASSERT(page->WiredCount() == 0);
1131 			// TODO: Find a real solution! If the page is wired
1132 			// temporarily (e.g. by lock_memory()), we actually must not
1133 			// unmap it!
1134 		RemovePage(page);
1135 			// Note: When iterating through an IteratableSplayTree
1136 			// removing the current node is safe.
1137 
1138 		vm_page_free(this, page);
1139 	}
1140 
1141 	return false;
1142 }
1143 
1144 
1145 /*!	This function updates the size field of the cache.
1146 	If needed, it will free up all pages that don't belong to the cache anymore.
1147 	The cache lock must be held when you call it.
1148 	Since removed pages no longer belong to the cache, they are not written
1149 	back before being removed.
1150 
1151 	Note, this function may temporarily release the cache lock in case it
1152 	has to wait for busy pages.
1153 */
1154 status_t
1155 VMCache::Resize(off_t newSize, int priority)
1156 {
1157 	TRACE(("VMCache::Resize(cache %p, newSize %" B_PRIdOFF ") old size %"
1158 		B_PRIdOFF "\n", this, newSize, this->virtual_end));
1159 	T(Resize(this, newSize));
1160 
1161 	AssertLocked();
1162 
1163 	page_num_t oldPageCount = (page_num_t)((virtual_end + B_PAGE_SIZE - 1)
1164 		>> PAGE_SHIFT);
1165 	page_num_t newPageCount = (page_num_t)((newSize + B_PAGE_SIZE - 1)
1166 		>> PAGE_SHIFT);
1167 
1168 	if (newPageCount < oldPageCount) {
1169 		// we need to remove all pages in the cache outside of the new virtual
1170 		// size
1171 		while (_FreePageRange(pages.GetIterator(newPageCount, true, true)))
1172 			;
1173 	}
1174 
1175 	status_t status = Commit(newSize - virtual_base, priority);
1176 	if (status != B_OK)
1177 		return status;
1178 
1179 	virtual_end = newSize;
1180 	return B_OK;
1181 }
1182 
1183 /*!	This function updates the virtual_base field of the cache.
1184 	If needed, it will free up all pages that don't belong to the cache anymore.
1185 	The cache lock must be held when you call it.
1186 	Since removed pages no longer belong to the cache, they are not written
1187 	back before being removed.
1188 
1189 	Note, this function may temporarily release the cache lock in case it
1190 	has to wait for busy pages.
1191 */
1192 status_t
1193 VMCache::Rebase(off_t newBase, int priority)
1194 {
1195 	TRACE(("VMCache::Rebase(cache %p, newBase %lld) old base %lld\n",
1196 		this, newBase, this->virtual_base));
1197 	this->AssertLocked();
1198 
1199 	T(Rebase(this, newBase));
1200 
1201 	status_t status = Commit(virtual_end - newBase, priority);
1202 	if (status != B_OK)
1203 		return status;
1204 
1205 	page_num_t basePage = (page_num_t)(newBase >> PAGE_SHIFT);
1206 
1207 	if (newBase > virtual_base) {
1208 		// we need to remove all pages in the cache outside of the new virtual
1209 		// base
1210 		while (_FreePageRange(pages.GetIterator(), &basePage))
1211 			;
1212 	}
1213 
1214 	virtual_base = newBase;
1215 	return B_OK;
1216 }
1217 
1218 
1219 /*!	Moves pages in the given range from the source cache into this cache. Both
1220 	caches must be locked.
1221 */
1222 status_t
1223 VMCache::Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset)
1224 {
1225 	page_num_t startPage = offset >> PAGE_SHIFT;
1226 	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
1227 	off_t offsetChange = newOffset - offset;
1228 
1229 	VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true,
1230 		true);
1231 	for (vm_page* page = it.Next();
1232 				page != NULL && page->cache_offset < endPage;
1233 				page = it.Next()) {
1234 		MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange);
1235 	}
1236 
1237 	return B_OK;
1238 }
1239 
1240 
1241 /*! Discards pages in the given range. */
1242 status_t
1243 VMCache::Discard(off_t offset, off_t size)
1244 {
1245 	page_num_t startPage = offset >> PAGE_SHIFT;
1246 	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
1247 	while (_FreePageRange(pages.GetIterator(startPage, true, true), &endPage))
1248 		;
1249 
1250 	return B_OK;
1251 }
1252 
1253 
1254 /*!	You have to call this function with the VMCache lock held. */
1255 status_t
1256 VMCache::FlushAndRemoveAllPages()
1257 {
1258 	ASSERT_LOCKED_MUTEX(&fLock);
1259 
1260 	while (page_count > 0) {
1261 		// write back modified pages
1262 		status_t status = vm_page_write_modified_pages(this);
1263 		if (status != B_OK)
1264 			return status;
1265 
1266 		// remove pages
1267 		for (VMCachePagesTree::Iterator it = pages.GetIterator();
1268 				vm_page* page = it.Next();) {
1269 			if (page->busy) {
1270 				// wait for page to become unbusy
1271 				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1272 
1273 				// restart from the start of the list
1274 				it = pages.GetIterator();
1275 				continue;
1276 			}
1277 
1278 			// skip modified pages -- they will be written back in the next
1279 			// iteration
1280 			if (page->State() == PAGE_STATE_MODIFIED)
1281 				continue;
1282 
1283 			// We can't remove mapped pages.
1284 			if (page->IsMapped())
1285 				return B_BUSY;
1286 
1287 			DEBUG_PAGE_ACCESS_START(page);
1288 			RemovePage(page);
1289 			vm_page_free(this, page);
1290 				// Note: When iterating through an IteratableSplayTree
1291 				// removing the current node is safe.
1292 		}
1293 	}
1294 
1295 	return B_OK;
1296 }
1297 
1298 
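/*!	Default implementation for caches without a backing store: merely records
	the requested commitment and reports success. Subclasses backed by actual
	storage override this to really reserve the memory.
*/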
1299 status_t
1300 VMCache::Commit(off_t size, int priority)
1301 {
1302 	committed_size = size;
1303 	return B_OK;
1304 }
1305 
1306 
1307 /*!	Returns whether the cache's underlying backing store could deliver the
1308 	page at the given offset.
1309 
1310 	Basically it returns whether a Read() at \a offset would at least read a
1311 	partial page (assuming that no unexpected errors occur or the situation
1312 	changes in the meantime).
1313 */
1314 bool
1315 VMCache::HasPage(off_t offset)
1316 {
1317 	// In accordance with Fault() the default implementation doesn't have a
1318 	// backing store and doesn't allow faults.
1319 	return false;
1320 }
1321 
1322 
1323 status_t
1324 VMCache::Read(off_t offset, const generic_io_vec *vecs, size_t count,
1325 	uint32 flags, generic_size_t *_numBytes)
1326 {
1327 	return B_ERROR;
1328 }
1329 
1330 
1331 status_t
1332 VMCache::Write(off_t offset, const generic_io_vec *vecs, size_t count,
1333 	uint32 flags, generic_size_t *_numBytes)
1334 {
1335 	return B_ERROR;
1336 }
1337 
1338 
1339 status_t
1340 VMCache::WriteAsync(off_t offset, const generic_io_vec* vecs, size_t count,
1341 	generic_size_t numBytes, uint32 flags, AsyncIOCallback* callback)
1342 {
1343 	// Not supported, fall back to the synchronous hook.
1344 	generic_size_t transferred = numBytes;
1345 	status_t error = Write(offset, vecs, count, flags, &transferred);
1346 
1347 	if (callback != NULL)
1348 		callback->IOFinished(error, transferred != numBytes, transferred);
1349 
1350 	return error;
1351 }
1352 
1353 
1354 /*!	\brief Returns whether the cache can write the page at the given offset.
1355 
1356 	The cache must be locked when this function is invoked.
1357 
1358 	@param offset The page offset.
1359 	@return \c true, if the page can be written, \c false otherwise.
1360 */
1361 bool
1362 VMCache::CanWritePage(off_t offset)
1363 {
1364 	return false;
1365 }
1366 
1367 
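/*!	Default implementation: there is no backing store, so a fault on this
	cache cannot be satisfied and \c B_BAD_ADDRESS is returned.
*/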
1368 status_t
1369 VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
1370 {
1371 	return B_BAD_ADDRESS;
1372 }
1373 
1374 
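/*!	Default implementation: moves every page of \a source that is not yet
	present in this cache into this cache, keeping its offset. Both caches
	must be locked.
*/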
1375 void
1376 VMCache::Merge(VMCache* source)
1377 {
1378 	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
1379 			vm_page* page = it.Next();) {
1380 		// Note: Removing the current node while iterating through an
1381 		// IteratableSplayTree is safe.
1382 		vm_page* consumerPage = LookupPage(
1383 			(off_t)page->cache_offset << PAGE_SHIFT);
1384 		if (consumerPage == NULL) {
1385 			// the page is not yet in the consumer cache - move it upwards
1386 			MovePage(page);
1387 		}
1388 	}
1389 }
1390 
1391 
1392 status_t
1393 VMCache::AcquireUnreferencedStoreRef()
1394 {
1395 	return B_OK;
1396 }
1397 
1398 
1399 void
1400 VMCache::AcquireStoreRef()
1401 {
1402 }
1403 
1404 
1405 void
1406 VMCache::ReleaseStoreRef()
1407 {
1408 }
1409 
1410 
1411 /*!	Kernel debugger version of HasPage().
1412 	Does not do any locking.
1413 */
1414 bool
1415 VMCache::DebugHasPage(off_t offset)
1416 {
1417 	// default that works for all subclasses that don't lock anyway
1418 	return HasPage(offset);
1419 }
1420 
1421 
1422 /*!	Kernel debugger version of LookupPage().
1423 	Does not do any locking.
1424 */
1425 vm_page*
1426 VMCache::DebugLookupPage(off_t offset)
1427 {
1428 	return pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
1429 }
1430 
1431 
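/*!	Kernel debugger helper: dumps the cache's state. If \a showPages is
	\c true each page is listed individually, otherwise only the total page
	count is printed.
*/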
1432 void
1433 VMCache::Dump(bool showPages) const
1434 {
1435 	kprintf("CACHE %p:\n", this);
1436 	kprintf("  ref_count:    %" B_PRId32 "\n", RefCount());
1437 	kprintf("  source:       %p\n", source);
1438 	kprintf("  type:         %s\n", vm_cache_type_to_string(type));
1439 	kprintf("  virtual_base: 0x%" B_PRIx64 "\n", virtual_base);
1440 	kprintf("  virtual_end:  0x%" B_PRIx64 "\n", virtual_end);
1441 	kprintf("  temporary:    %" B_PRIu32 "\n", uint32(temporary));
1442 	kprintf("  lock:         %p\n", &fLock);
1443 #if KDEBUG
1444 	kprintf("  lock.holder:  %" B_PRId32 "\n", fLock.holder);
1445 #endif
1446 	kprintf("  areas:\n");
1447 
1448 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1449 		kprintf("    area 0x%" B_PRIx32 ", %s\n", area->id, area->name);
1450 		kprintf("\tbase_addr:  0x%lx, size: 0x%lx\n", area->Base(),
1451 			area->Size());
1452 		kprintf("\tprotection: 0x%" B_PRIx32 "\n", area->protection);
1453 		kprintf("\towner:      0x%" B_PRIx32 "\n", area->address_space->ID());
1454 	}
1455 
1456 	kprintf("  consumers:\n");
1457 	for (ConsumerList::ConstIterator it = consumers.GetIterator();
1458 		 	VMCache* consumer = it.Next();) {
1459 		kprintf("\t%p\n", consumer);
1460 	}
1461 
1462 	kprintf("  pages:\n");
1463 	if (showPages) {
1464 		for (VMCachePagesTree::ConstIterator it = pages.GetIterator();
1465 				vm_page* page = it.Next();) {
1466 			if (!vm_page_is_dummy(page)) {
1467 				kprintf("\t%p ppn %#" B_PRIxPHYSADDR " offset %#" B_PRIxPHYSADDR
1468 					" state %u (%s) wired_count %u\n", page,
1469 					page->physical_page_number, page->cache_offset,
1470 					page->State(), page_state_to_string(page->State()),
1471 					page->WiredCount());
1472 			} else {
1473 				kprintf("\t%p DUMMY PAGE state %u (%s)\n",
1474 					page, page->State(), page_state_to_string(page->State()));
1475 			}
1476 		}
1477 	} else
1478 		kprintf("\t%" B_PRIu32 " in cache\n", page_count);
1479 }
1480 
1481 
1482 /*!	Wakes up threads waiting for page events.
1483 	\param page The page for which events occurred.
1484 	\param events The mask of events that occurred.
1485 */
1486 void
1487 VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
1488 {
1489 	PageEventWaiter** it = &fPageEventWaiters;
1490 	while (PageEventWaiter* waiter = *it) {
1491 		if (waiter->page == page && (waiter->events & events) != 0) {
1492 			// remove from list and unblock
1493 			*it = waiter->next;
1494 			thread_unblock(waiter->thread, B_OK);
1495 		} else
1496 			it = &waiter->next;
1497 	}
1498 }
1499 
1500 
1501 /*!	Merges the given cache with its only consumer.
1502 	The caller must hold both the cache's and the consumer's lock. The method
1503 	releases neither lock.
1504 */
1505 void
1506 VMCache::_MergeWithOnlyConsumer()
1507 {
1508 	VMCache* consumer = consumers.RemoveHead();
1509 
1510 	TRACE(("merge vm cache %p (ref == %" B_PRId32 ") with vm cache %p\n",
1511 		this, this->fRefCount, consumer));
1512 
1513 	T(Merge(this, consumer));
1514 
1515 	// merge the cache
1516 	consumer->Merge(this);
1517 
1518 	// The remaining consumer has got a new source.
1519 	if (source != NULL) {
1520 		VMCache* newSource = source;
1521 
1522 		newSource->Lock();
1523 
1524 		newSource->consumers.Remove(this);
1525 		newSource->consumers.Add(consumer);
1526 		consumer->source = newSource;
1527 		source = NULL;
1528 
1529 		newSource->Unlock();
1530 	} else
1531 		consumer->source = NULL;
1532 
1533 	// Release the reference the cache's consumer owned. The consumer takes
1534 	// over the cache's ref to its source (if any) instead.
1535 	ReleaseRefLocked();
1536 }
1537 
1538 
1539 /*!	Removes the \a consumer from this cache.
1540 	It will also release the reference to the cache owned by the consumer.
1541 	Assumes you have the consumer's cache lock held. This cache must not be
1542 	locked.
1543 */
1544 void
1545 VMCache::_RemoveConsumer(VMCache* consumer)
1546 {
1547 	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
1548 	consumer->AssertLocked();
1549 
1550 	T(RemoveConsumer(this, consumer));
1551 
1552 	// Remove the store ref before locking the cache. Otherwise we'd call into
1553 	// the VFS while holding the cache lock, which would reverse the usual
1554 	// locking order.
1555 	ReleaseStoreRef();
1556 
1557 	// remove the consumer from the cache, but keep its reference until later
1558 	Lock();
1559 	consumers.Remove(consumer);
1560 	consumer->source = NULL;
1561 
1562 	ReleaseRefAndUnlock();
1563 }
1564 
1565 
1566 // #pragma mark - VMCacheFactory
1567 	// TODO: Move to own source file!
1568 
1569 
1570 /*static*/ status_t
1571 VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
1572 	int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
1573 	int priority)
1574 {
1575 	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1576 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1577 	if (priority >= VM_PRIORITY_VIP)
1578 		allocationFlags |= HEAP_PRIORITY_VIP;
1579 
1580 #if ENABLE_SWAP_SUPPORT
1581 	if (swappable) {
1582 		VMAnonymousCache* cache
1583 			= new(gAnonymousCacheObjectCache, allocationFlags) VMAnonymousCache;
1584 		if (cache == NULL)
1585 			return B_NO_MEMORY;
1586 
1587 		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1588 			numGuardPages, allocationFlags);
1589 		if (error != B_OK) {
1590 			cache->Delete();
1591 			return error;
1592 		}
1593 
1594 		T(Create(cache));
1595 
1596 		_cache = cache;
1597 		return B_OK;
1598 	}
1599 #endif
1600 
1601 	VMAnonymousNoSwapCache* cache
1602 		= new(gAnonymousNoSwapCacheObjectCache, allocationFlags)
1603 			VMAnonymousNoSwapCache;
1604 	if (cache == NULL)
1605 		return B_NO_MEMORY;
1606 
1607 	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1608 		numGuardPages, allocationFlags);
1609 	if (error != B_OK) {
1610 		cache->Delete();
1611 		return error;
1612 	}
1613 
1614 	T(Create(cache));
1615 
1616 	_cache = cache;
1617 	return B_OK;
1618 }
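// Illustrative caller sketch (hypothetical, not taken from this file): a
// freshly created cache is returned unlocked and already carries one
// reference, so a typical caller locks it before use:
//
//	VMCache* cache;
//	if (VMCacheFactory::CreateAnonymousCache(cache, true, 0, 0, false,
//			VM_PRIORITY_SYSTEM) == B_OK) {
//		cache->Lock();
//		// ... insert pages, attach an area, etc.
//		cache->Unlock();
//	}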
1619 
1620 
1621 /*static*/ status_t
1622 VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
1623 {
1624 	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1625 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1626 		// Note: Vnode cache creation is never VIP.
1627 
1628 	VMVnodeCache* cache
1629 		= new(gVnodeCacheObjectCache, allocationFlags) VMVnodeCache;
1630 	if (cache == NULL)
1631 		return B_NO_MEMORY;
1632 
1633 	status_t error = cache->Init(vnode, allocationFlags);
1634 	if (error != B_OK) {
1635 		cache->Delete();
1636 		return error;
1637 	}
1638 
1639 	T(Create(cache));
1640 
1641 	_cache = cache;
1642 	return B_OK;
1643 }
1644 
1645 
1646 /*static*/ status_t
1647 VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
1648 {
1649 	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1650 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1651 		// Note: Device cache creation is never VIP.
1652 
1653 	VMDeviceCache* cache
1654 		= new(gDeviceCacheObjectCache, allocationFlags) VMDeviceCache;
1655 	if (cache == NULL)
1656 		return B_NO_MEMORY;
1657 
1658 	status_t error = cache->Init(baseAddress, allocationFlags);
1659 	if (error != B_OK) {
1660 		cache->Delete();
1661 		return error;
1662 	}
1663 
1664 	T(Create(cache));
1665 
1666 	_cache = cache;
1667 	return B_OK;
1668 }
1669 
1670 
1671 /*static*/ status_t
1672 VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
1673 {
1674 	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1675 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1676 	if (priority >= VM_PRIORITY_VIP)
1677 		allocationFlags |= HEAP_PRIORITY_VIP;
1678 
1679 	VMNullCache* cache
1680 		= new(gNullCacheObjectCache, allocationFlags) VMNullCache;
1681 	if (cache == NULL)
1682 		return B_NO_MEMORY;
1683 
1684 	status_t error = cache->Init(allocationFlags);
1685 	if (error != B_OK) {
1686 		cache->Delete();
1687 		return error;
1688 	}
1689 
1690 	T(Create(cache));
1691 
1692 	_cache = cache;
1693 	return B_OK;
1694 }
1695