xref: /haiku/src/system/kernel/vm/VMCache.cpp (revision 1a3518cf757c2da8006753f83962da5935bbc82b)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include <vm/VMCache.h>
12 
13 #include <stddef.h>
14 #include <stdlib.h>
15 
16 #include <algorithm>
17 
18 #include <arch/cpu.h>
19 #include <condition_variable.h>
20 #include <heap.h>
21 #include <int.h>
22 #include <kernel.h>
23 #include <slab/Slab.h>
24 #include <smp.h>
25 #include <tracing.h>
26 #include <util/AutoLock.h>
27 #include <vfs.h>
28 #include <vm/vm.h>
29 #include <vm/vm_page.h>
30 #include <vm/vm_priv.h>
31 #include <vm/vm_types.h>
32 #include <vm/VMAddressSpace.h>
33 #include <vm/VMArea.h>
34 
35 // needed for the factory only
36 #include "VMAnonymousCache.h"
37 #include "VMAnonymousNoSwapCache.h"
38 #include "VMDeviceCache.h"
39 #include "VMNullCache.h"
40 #include "../cache/vnode_store.h"
41 
42 
43 //#define TRACE_VM_CACHE
44 #ifdef TRACE_VM_CACHE
45 #	define TRACE(x) dprintf x
46 #else
47 #	define TRACE(x) ;
48 #endif
49 
50 
51 #if DEBUG_CACHE_LIST
52 VMCache* gDebugCacheList;
53 #endif
54 static mutex sCacheListLock = MUTEX_INITIALIZER("global VMCache list");
55 	// The lock is also needed when the debug feature is disabled.
56 
57 ObjectCache* gCacheRefObjectCache;
58 #if ENABLE_SWAP_SUPPORT
59 ObjectCache* gAnonymousCacheObjectCache;
60 #endif
61 ObjectCache* gAnonymousNoSwapCacheObjectCache;
62 ObjectCache* gVnodeCacheObjectCache;
63 ObjectCache* gDeviceCacheObjectCache;
64 ObjectCache* gNullCacheObjectCache;
65 
66 
67 struct VMCache::PageEventWaiter {
68 	Thread*				thread;
69 	PageEventWaiter*	next;
70 	vm_page*			page;
71 	uint32				events;
72 };
73 
74 
75 #if VM_CACHE_TRACING
76 
77 namespace VMCacheTracing {
78 
79 class VMCacheTraceEntry : public AbstractTraceEntry {
80 	public:
81 		VMCacheTraceEntry(VMCache* cache)
82 			:
83 			fCache(cache)
84 		{
85 #if VM_CACHE_TRACING_STACK_TRACE
86 			fStackTrace = capture_tracing_stack_trace(
87 				VM_CACHE_TRACING_STACK_TRACE, 0, true);
88 				// Don't capture userland stack trace to avoid potential
89 				// deadlocks.
90 #endif
91 		}
92 
93 #if VM_CACHE_TRACING_STACK_TRACE
94 		virtual void DumpStackTrace(TraceOutput& out)
95 		{
96 			out.PrintStackTrace(fStackTrace);
97 		}
98 #endif
99 
100 		VMCache* Cache() const
101 		{
102 			return fCache;
103 		}
104 
105 	protected:
106 		VMCache*	fCache;
107 #if VM_CACHE_TRACING_STACK_TRACE
108 		tracing_stack_trace* fStackTrace;
109 #endif
110 };
111 
112 
113 class Create : public VMCacheTraceEntry {
114 	public:
115 		Create(VMCache* cache)
116 			:
117 			VMCacheTraceEntry(cache)
118 		{
119 			Initialized();
120 		}
121 
122 		virtual void AddDump(TraceOutput& out)
123 		{
124 			out.Print("vm cache create: -> cache: %p", fCache);
125 		}
126 };
127 
128 
129 class Delete : public VMCacheTraceEntry {
130 	public:
131 		Delete(VMCache* cache)
132 			:
133 			VMCacheTraceEntry(cache)
134 		{
135 			Initialized();
136 		}
137 
138 		virtual void AddDump(TraceOutput& out)
139 		{
140 			out.Print("vm cache delete: cache: %p", fCache);
141 		}
142 };
143 
144 
145 class SetMinimalCommitment : public VMCacheTraceEntry {
146 	public:
147 		SetMinimalCommitment(VMCache* cache, off_t commitment)
148 			:
149 			VMCacheTraceEntry(cache),
150 			fOldCommitment(cache->committed_size),
151 			fCommitment(commitment)
152 		{
153 			Initialized();
154 		}
155 
156 		virtual void AddDump(TraceOutput& out)
157 		{
158 			out.Print("vm cache set min commitment: cache: %p, "
159 				"commitment: %" B_PRIdOFF " -> %" B_PRIdOFF, fCache,
160 				fOldCommitment, fCommitment);
161 		}
162 
163 	private:
164 		off_t	fOldCommitment;
165 		off_t	fCommitment;
166 };
167 
168 
169 class Resize : public VMCacheTraceEntry {
170 	public:
171 		Resize(VMCache* cache, off_t size)
172 			:
173 			VMCacheTraceEntry(cache),
174 			fOldSize(cache->virtual_end),
175 			fSize(size)
176 		{
177 			Initialized();
178 		}
179 
180 		virtual void AddDump(TraceOutput& out)
181 		{
182 			out.Print("vm cache resize: cache: %p, size: %" B_PRIdOFF " -> %"
183 				B_PRIdOFF, fCache, fOldSize, fSize);
184 		}
185 
186 	private:
187 		off_t	fOldSize;
188 		off_t	fSize;
189 };
190 
191 
192 class Rebase : public VMCacheTraceEntry {
193 	public:
194 		Rebase(VMCache* cache, off_t base)
195 			:
196 			VMCacheTraceEntry(cache),
197 			fOldBase(cache->virtual_base),
198 			fBase(base)
199 		{
200 			Initialized();
201 		}
202 
203 		virtual void AddDump(TraceOutput& out)
204 		{
205 			out.Print("vm cache rebase: cache: %p, base: %" B_PRIdOFF " -> %"
206 				B_PRIdOFF, fCache, fOldBase, fBase);
207 		}
208 
209 	private:
210 		off_t	fOldBase;
211 		off_t	fBase;
212 };
213 
214 
215 class AddConsumer : public VMCacheTraceEntry {
216 	public:
217 		AddConsumer(VMCache* cache, VMCache* consumer)
218 			:
219 			VMCacheTraceEntry(cache),
220 			fConsumer(consumer)
221 		{
222 			Initialized();
223 		}
224 
225 		virtual void AddDump(TraceOutput& out)
226 		{
227 			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
228 				fConsumer);
229 		}
230 
231 		VMCache* Consumer() const
232 		{
233 			return fConsumer;
234 		}
235 
236 	private:
237 		VMCache*	fConsumer;
238 };
239 
240 
241 class RemoveConsumer : public VMCacheTraceEntry {
242 	public:
243 		RemoveConsumer(VMCache* cache, VMCache* consumer)
244 			:
245 			VMCacheTraceEntry(cache),
246 			fConsumer(consumer)
247 		{
248 			Initialized();
249 		}
250 
251 		virtual void AddDump(TraceOutput& out)
252 		{
253 			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
254 				fCache, fConsumer);
255 		}
256 
257 	private:
258 		VMCache*	fConsumer;
259 };
260 
261 
262 class Merge : public VMCacheTraceEntry {
263 	public:
264 		Merge(VMCache* cache, VMCache* consumer)
265 			:
266 			VMCacheTraceEntry(cache),
267 			fConsumer(consumer)
268 		{
269 			Initialized();
270 		}
271 
272 		virtual void AddDump(TraceOutput& out)
273 		{
274 			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
275 				fCache, fConsumer);
276 		}
277 
278 	private:
279 		VMCache*	fConsumer;
280 };
281 
282 
283 class InsertArea : public VMCacheTraceEntry {
284 	public:
285 		InsertArea(VMCache* cache, VMArea* area)
286 			:
287 			VMCacheTraceEntry(cache),
288 			fArea(area)
289 		{
290 			Initialized();
291 		}
292 
293 		virtual void AddDump(TraceOutput& out)
294 		{
295 			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
296 				fArea);
297 		}
298 
299 		VMArea*	Area() const
300 		{
301 			return fArea;
302 		}
303 
304 	private:
305 		VMArea*	fArea;
306 };
307 
308 
309 class RemoveArea : public VMCacheTraceEntry {
310 	public:
311 		RemoveArea(VMCache* cache, VMArea* area)
312 			:
313 			VMCacheTraceEntry(cache),
314 			fArea(area)
315 		{
316 			Initialized();
317 		}
318 
319 		virtual void AddDump(TraceOutput& out)
320 		{
321 			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
322 				fArea);
323 		}
324 
325 	private:
326 		VMArea*	fArea;
327 };
328 
329 }	// namespace VMCacheTracing
330 
331 #	define T(x) new(std::nothrow) VMCacheTracing::x;
332 
333 #	if VM_CACHE_TRACING >= 2
334 
335 namespace VMCacheTracing {
336 
337 class InsertPage : public VMCacheTraceEntry {
338 	public:
339 		InsertPage(VMCache* cache, vm_page* page, off_t offset)
340 			:
341 			VMCacheTraceEntry(cache),
342 			fPage(page),
343 			fOffset(offset)
344 		{
345 			Initialized();
346 		}
347 
348 		virtual void AddDump(TraceOutput& out)
349 		{
350 			out.Print("vm cache insert page: cache: %p, page: %p, offset: %"
351 				B_PRIdOFF, fCache, fPage, fOffset);
352 		}
353 
354 	private:
355 		vm_page*	fPage;
356 		off_t		fOffset;
357 };
358 
359 
360 class RemovePage : public VMCacheTraceEntry {
361 	public:
362 		RemovePage(VMCache* cache, vm_page* page)
363 			:
364 			VMCacheTraceEntry(cache),
365 			fPage(page)
366 		{
367 			Initialized();
368 		}
369 
370 		virtual void AddDump(TraceOutput& out)
371 		{
372 			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
373 				fPage);
374 		}
375 
376 	private:
377 		vm_page*	fPage;
378 };
379 
380 }	// namespace VMCacheTracing
381 
382 #		define T2(x) new(std::nothrow) VMCacheTracing::x;
383 #	else
384 #		define T2(x) ;
385 #	endif
386 #else
387 #	define T(x) ;
388 #	define T2(x) ;
389 #endif
390 
391 
392 //	#pragma mark - debugger commands
393 
394 
395 #if VM_CACHE_TRACING
396 
397 
398 static void*
399 cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
400 {
401 	using namespace VMCacheTracing;
402 
403 	// find the previous "insert area" entry for the given area
404 	TraceEntryIterator iterator = baseIterator;
405 	TraceEntry* entry = iterator.Current();
406 	while (entry != NULL) {
407 		if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
408 			if (insertAreaEntry->Area() == area)
409 				return insertAreaEntry->Cache();
410 		}
411 
412 		entry = iterator.Previous();
413 	}
414 
415 	return NULL;
416 }
417 
418 
419 static void*
420 cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
421 {
422 	using namespace VMCacheTracing;
423 
424 	// find the previous "add consumer" or "create" entry for the given cache
425 	TraceEntryIterator iterator = baseIterator;
426 	TraceEntry* entry = iterator.Current();
427 	while (entry != NULL) {
428 		if (Create* createEntry = dynamic_cast<Create*>(entry)) {
429 			if (createEntry->Cache() == cache)
430 				return NULL;
431 		} else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
432 			if (addEntry->Consumer() == cache)
433 				return addEntry->Cache();
434 		}
435 
436 		entry = iterator.Previous();
437 	}
438 
439 	return NULL;
440 }
441 
442 
443 static int
444 command_cache_stack(int argc, char** argv)
445 {
446 	if (argc < 3 || argc > 4) {
447 		print_debugger_command_usage(argv[0]);
448 		return 0;
449 	}
450 
451 	bool isArea = false;
452 
453 	int argi = 1;
454 	if (argc == 4) {
455 		if (strcmp(argv[argi], "area") != 0) {
456 			print_debugger_command_usage(argv[0]);
457 			return 0;
458 		}
459 
460 		argi++;
461 		isArea = true;
462 	}
463 
464 	uint64 addressValue;
465 	uint64 debugEntryIndex;
466 	if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
467 		|| !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
468 		return 0;
469 	}
470 
471 	TraceEntryIterator baseIterator;
472 	if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
473 		kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
474 		return 0;
475 	}
476 
477 	void* address = (void*)(addr_t)addressValue;
478 
479 	kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
480 		isArea ? "area" : "cache", address, debugEntryIndex);
481 	if (isArea) {
482 		address = cache_stack_find_area_cache(baseIterator, address);
483 		if (address == NULL) {
484 			kprintf("  cache not found\n");
485 			return 0;
486 		}
487 	}
488 
489 	while (address != NULL) {
490 		kprintf("  %p\n", address);
491 		address = cache_stack_find_consumer(baseIterator, address);
492 	}
493 
494 	return 0;
495 }
496 
497 
498 #endif	// VM_CACHE_TRACING
499 
500 
501 //	#pragma mark -
502 
503 
504 status_t
505 vm_cache_init(kernel_args* args)
506 {
507 	// Create object caches for the structures we allocate here.
508 	gCacheRefObjectCache = create_object_cache("cache refs", sizeof(VMCacheRef),
509 		0, NULL, NULL, NULL);
510 #if ENABLE_SWAP_SUPPORT
511 	gAnonymousCacheObjectCache = create_object_cache("anon caches",
512 		sizeof(VMAnonymousCache), 0, NULL, NULL, NULL);
513 #endif
514 	gAnonymousNoSwapCacheObjectCache = create_object_cache(
515 		"anon no-swap caches", sizeof(VMAnonymousNoSwapCache), 0, NULL, NULL,
516 		NULL);
517 	gVnodeCacheObjectCache = create_object_cache("vnode caches",
518 		sizeof(VMVnodeCache), 0, NULL, NULL, NULL);
519 	gDeviceCacheObjectCache = create_object_cache("device caches",
520 		sizeof(VMDeviceCache), 0, NULL, NULL, NULL);
521 	gNullCacheObjectCache = create_object_cache("null caches",
522 		sizeof(VMNullCache), 0, NULL, NULL, NULL);
523 
524 	if (gCacheRefObjectCache == NULL
525 #if ENABLE_SWAP_SUPPORT
526 		|| gAnonymousCacheObjectCache == NULL
527 #endif
528 		|| gAnonymousNoSwapCacheObjectCache == NULL
529 		|| gVnodeCacheObjectCache == NULL
530 		|| gDeviceCacheObjectCache == NULL
531 		|| gNullCacheObjectCache == NULL) {
532 		panic("vm_cache_init(): Failed to create object caches!");
533 		return B_NO_MEMORY;
534 	}
535 
536 	return B_OK;
537 }
538 
539 
540 void
541 vm_cache_init_post_heap()
542 {
543 #if VM_CACHE_TRACING
544 	add_debugger_command_etc("cache_stack", &command_cache_stack,
545 		"List the ancestors (sources) of a VMCache at the time given by "
546 			"tracing entry index",
547 		"[ \"area\" ] <address> <tracing entry index>\n"
548 		"All ancestors (sources) of a given VMCache at the time given by the\n"
549 		"tracing entry index are listed. If \"area\" is given the supplied\n"
550 		"address is an area instead of a cache address. The listing will\n"
551 		"start with the area's cache at that point.\n",
552 		0);
553 #endif	// VM_CACHE_TRACING
554 }
555 
556 
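/*!	Acquires a reference to, and locks, the cache that currently contains
	\a page. Returns \c NULL if the page has no cache, or, when \a dontWait
	is \c true, if the cache lock cannot be taken without blocking.
*/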
557 VMCache*
558 vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
559 {
560 	mutex_lock(&sCacheListLock);
561 
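	// Non-blocking path: only try-lock the cache; if that fails, give up
	// and return NULL instead of waiting. The loop retries in case the page
	// was moved to another cache between looking it up and locking it.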
562 	while (dontWait) {
563 		VMCacheRef* cacheRef = page->CacheRef();
564 		if (cacheRef == NULL) {
565 			mutex_unlock(&sCacheListLock);
566 			return NULL;
567 		}
568 
569 		VMCache* cache = cacheRef->cache;
570 		if (!cache->TryLock()) {
571 			mutex_unlock(&sCacheListLock);
572 			return NULL;
573 		}
574 
575 		if (cacheRef == page->CacheRef()) {
576 			mutex_unlock(&sCacheListLock);
577 			cache->AcquireRefLocked();
578 			return cache;
579 		}
580 
581 		// the cache changed in the meantime
582 		cache->Unlock();
583 	}
584 
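	// Blocking path: SwitchLock() releases sCacheListLock and waits for the
	// cache lock. Afterwards we must re-check that the page still belongs to
	// the locked cache, as it may have been moved (or the cache deleted) in
	// the meantime.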
585 	while (true) {
586 		VMCacheRef* cacheRef = page->CacheRef();
587 		if (cacheRef == NULL) {
588 			mutex_unlock(&sCacheListLock);
589 			return NULL;
590 		}
591 
592 		VMCache* cache = cacheRef->cache;
593 		if (!cache->SwitchLock(&sCacheListLock)) {
594 			// cache has been deleted
595 			mutex_lock(&sCacheListLock);
596 			continue;
597 		}
598 
599 		mutex_lock(&sCacheListLock);
600 		if (cache == page->Cache()) {
601 			mutex_unlock(&sCacheListLock);
602 			cache->AcquireRefLocked();
603 			return cache;
604 		}
605 
606 		// the cache changed in the meantime
607 		cache->Unlock();
608 	}
609 }
610 
611 
612 // #pragma mark - VMCacheRef
613 
614 
615 VMCacheRef::VMCacheRef(VMCache* cache)
616 	:
617 	cache(cache),
618 	ref_count(1)
619 {
620 }
621 
622 
623 // #pragma mark - VMCache
624 
625 
626 bool
627 VMCache::_IsMergeable() const
628 {
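	// A cache can be merged into its consumer only if no area references it,
	// it is marked temporary, and it has exactly one consumer.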
629 	return areas == NULL && temporary && !consumers.IsEmpty()
630 		&& consumers.Head() == consumers.Tail();
631 }
632 
633 
634 VMCache::VMCache()
635 	:
636 	fCacheRef(NULL)
637 {
638 }
639 
640 
641 VMCache::~VMCache()
642 {
643 	object_cache_delete(gCacheRefObjectCache, fCacheRef);
644 }
645 
646 
647 status_t
648 VMCache::Init(uint32 cacheType, uint32 allocationFlags)
649 {
650 	mutex_init(&fLock, "VMCache");
651 
652 	areas = NULL;
653 	fRefCount = 1;
654 	source = NULL;
655 	virtual_base = 0;
656 	virtual_end = 0;
657 	committed_size = 0;
658 	temporary = 0;
659 	page_count = 0;
660 	fWiredPagesCount = 0;
661 	type = cacheType;
662 	fPageEventWaiters = NULL;
663 
664 #if DEBUG_CACHE_LIST
665 	debug_previous = NULL;
666 	debug_next = NULL;
667 		// initialize in case the following fails
668 #endif
669 
670 	fCacheRef = new(gCacheRefObjectCache, allocationFlags) VMCacheRef(this);
671 	if (fCacheRef == NULL)
672 		return B_NO_MEMORY;
673 
674 #if DEBUG_CACHE_LIST
675 	mutex_lock(&sCacheListLock);
676 
677 	if (gDebugCacheList != NULL)
678 		gDebugCacheList->debug_previous = this;
679 	debug_next = gDebugCacheList;
680 	gDebugCacheList = this;
681 
682 	mutex_unlock(&sCacheListLock);
683 #endif
684 
685 	return B_OK;
686 }
687 
688 
689 void
690 VMCache::Delete()
691 {
692 	if (areas != NULL)
693 		panic("cache %p to be deleted still has areas", this);
694 	if (!consumers.IsEmpty())
695 		panic("cache %p to be deleted still has consumers", this);
696 
697 	T(Delete(this));
698 
699 	// free all of the pages in the cache
700 	while (vm_page* page = pages.Root()) {
701 		if (!page->mappings.IsEmpty() || page->WiredCount() != 0) {
702 			panic("remove page %p from cache %p: page still has mappings!\n"
703 				"@!page %p; cache %p", page, this, page, this);
704 		}
705 
706 		// remove it
707 		pages.Remove(page);
708 		page->SetCacheRef(NULL);
709 
710 		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
711 			page->physical_page_number));
712 		DEBUG_PAGE_ACCESS_START(page);
713 		vm_page_free(this, page);
714 	}
715 
716 	// remove the ref to the source
717 	if (source)
718 		source->_RemoveConsumer(this);
719 
720 	// We lock and unlock sCacheListLock even if DEBUG_CACHE_LIST is not
721 	// enabled. This synchronization point is needed for
722 	// vm_cache_acquire_locked_page_cache().
723 	mutex_lock(&sCacheListLock);
724 
725 #if DEBUG_CACHE_LIST
726 	if (debug_previous)
727 		debug_previous->debug_next = debug_next;
728 	if (debug_next)
729 		debug_next->debug_previous = debug_previous;
730 	if (this == gDebugCacheList)
731 		gDebugCacheList = debug_next;
732 #endif
733 
734 	mutex_destroy(&fLock);
735 
736 	mutex_unlock(&sCacheListLock);
737 
738 	DeleteObject();
739 }
740 
741 
742 void
743 VMCache::Unlock(bool consumerLocked)
744 {
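	// If we hold the only reference to a mergeable cache, merge it into its
	// single consumer before releasing the lock (and possibly deleting it).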
745 	while (fRefCount == 1 && _IsMergeable()) {
746 		VMCache* consumer = consumers.Head();
747 		if (consumerLocked) {
748 			_MergeWithOnlyConsumer();
749 		} else if (consumer->TryLock()) {
750 			_MergeWithOnlyConsumer();
751 			consumer->Unlock();
752 		} else {
753 			// Someone else has locked the consumer ATM. Unlock this cache and
754 			// wait for the consumer lock. Increment the cache's ref count
755 			// temporarily, so that no one else will try what we are doing or
756 			// delete the cache.
757 			fRefCount++;
758 			bool consumerLockedTemp = consumer->SwitchLock(&fLock);
759 			Lock();
760 			fRefCount--;
761 
762 			if (consumerLockedTemp) {
763 				if (fRefCount == 1 && _IsMergeable()
764 						&& consumer == consumers.Head()) {
765 					// nothing has changed in the meantime -- merge
766 					_MergeWithOnlyConsumer();
767 				}
768 
769 				consumer->Unlock();
770 			}
771 		}
772 	}
773 
774 	if (fRefCount == 0) {
775 		// delete this cache
776 		Delete();
777 	} else
778 		mutex_unlock(&fLock);
779 }
780 
781 
782 vm_page*
783 VMCache::LookupPage(off_t offset)
784 {
785 	AssertLocked();
786 
787 	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
788 
789 #if KDEBUG
790 	if (page != NULL && page->Cache() != this)
791 		panic("page %p not in cache %p\n", page, this);
792 #endif
793 
794 	return page;
795 }
796 
797 
798 void
799 VMCache::InsertPage(vm_page* page, off_t offset)
800 {
801 	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %" B_PRIdOFF "\n",
802 		this, page, offset));
803 	AssertLocked();
804 
805 	if (page->CacheRef() != NULL) {
806 		panic("insert page %p into cache %p: page cache is set to %p\n",
807 			page, this, page->Cache());
808 	}
809 
810 	T2(InsertPage(this, page, offset));
811 
812 	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
813 	page_count++;
814 	page->SetCacheRef(fCacheRef);
815 
816 #if KDEBUG
817 	vm_page* otherPage = pages.Lookup(page->cache_offset);
818 	if (otherPage != NULL) {
819 		panic("VMCache::InsertPage(): there's already page %p with cache "
820 			"offset %" B_PRIuPHYSADDR " in cache %p; inserting page %p",
821 			otherPage, page->cache_offset, this, page);
822 	}
823 #endif	// KDEBUG
824 
825 	pages.Insert(page);
826 
827 	if (page->WiredCount() > 0)
828 		IncrementWiredPagesCount();
829 }
830 
831 
832 /*!	Removes the vm_page from this cache. Of course, the page must
833 	really be in this cache or evil things will happen.
834 	The cache lock must be held.
835 */
836 void
837 VMCache::RemovePage(vm_page* page)
838 {
839 	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
840 	AssertLocked();
841 
842 	if (page->Cache() != this) {
843 		panic("remove page %p from cache %p: page cache is set to %p\n", page,
844 			this, page->Cache());
845 	}
846 
847 	T2(RemovePage(this, page));
848 
849 	pages.Remove(page);
850 	page_count--;
851 	page->SetCacheRef(NULL);
852 
853 	if (page->WiredCount() > 0)
854 		DecrementWiredPagesCount();
855 }
856 
857 
858 /*!	Moves the given page from its current cache and inserts it into this
859 	cache at the given offset.
860 	Both caches must be locked.
861 */
862 void
863 VMCache::MovePage(vm_page* page, off_t offset)
864 {
865 	VMCache* oldCache = page->Cache();
866 
867 	AssertLocked();
868 	oldCache->AssertLocked();
869 
870 	// remove from old cache
871 	oldCache->pages.Remove(page);
872 	oldCache->page_count--;
873 	T2(RemovePage(oldCache, page));
874 
875 	// change the offset
876 	page->cache_offset = offset >> PAGE_SHIFT;
877 
878 	// insert here
879 	pages.Insert(page);
880 	page_count++;
881 	page->SetCacheRef(fCacheRef);
882 
883 	if (page->WiredCount() > 0) {
884 		IncrementWiredPagesCount();
885 		oldCache->DecrementWiredPagesCount();
886 	}
887 
888 	T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
889 }
890 
891 /*!	Moves the given page from its current cache and inserts it into this cache.
892 	Both caches must be locked.
893 */
894 void
895 VMCache::MovePage(vm_page* page)
896 {
897 	MovePage(page, page->cache_offset << PAGE_SHIFT);
898 }
899 
900 
901 /*!	Moves all pages from the given cache to this one.
902 	Both caches must be locked. This cache must be empty.
903 */
904 void
905 VMCache::MoveAllPages(VMCache* fromCache)
906 {
907 	AssertLocked();
908 	fromCache->AssertLocked();
909 	ASSERT(page_count == 0);
910 
911 	std::swap(fromCache->pages, pages);
912 	page_count = fromCache->page_count;
913 	fromCache->page_count = 0;
914 	fWiredPagesCount = fromCache->fWiredPagesCount;
915 	fromCache->fWiredPagesCount = 0;
916 
917 	// swap the VMCacheRefs
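	// The moved pages still point to the old VMCacheRef object; swapping the
	// ref objects and fixing their back pointers retargets all of them at
	// once, without touching every page.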
918 	mutex_lock(&sCacheListLock);
919 	std::swap(fCacheRef, fromCache->fCacheRef);
920 	fCacheRef->cache = this;
921 	fromCache->fCacheRef->cache = fromCache;
922 	mutex_unlock(&sCacheListLock);
923 
924 #if VM_CACHE_TRACING >= 2
925 	for (VMCachePagesTree::Iterator it = pages.GetIterator();
926 			vm_page* page = it.Next();) {
927 		T2(RemovePage(fromCache, page));
928 		T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
929 	}
930 #endif
931 }
932 
933 
934 /*!	Waits until one or more of the given events have happened for a page
935 	that belongs to this cache.
936 	The cache must be locked. It will be unlocked by the method. \a relock
937 	specifies whether the method shall re-lock the cache before returning.
938 	\param page The page for which to wait.
939 	\param events The mask of events the caller is interested in.
940 	\param relock If \c true, the cache will be locked when returning,
941 		otherwise it won't be locked.
942 */
943 void
944 VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
945 {
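	// The waiter lives on this thread's stack. That is safe because
	// _NotifyPageEvents() unlinks it (with the cache locked) before
	// unblocking us, so it is no longer referenced once we return.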
946 	PageEventWaiter waiter;
947 	waiter.thread = thread_get_current_thread();
948 	waiter.next = fPageEventWaiters;
949 	waiter.page = page;
950 	waiter.events = events;
951 
952 	fPageEventWaiters = &waiter;
953 
954 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER,
955 		"cache page events");
956 
957 	Unlock();
958 	thread_block();
959 
960 	if (relock)
961 		Lock();
962 }
963 
964 
965 /*!	Makes this cache the source of the \a consumer cache,
966 	and adds the \a consumer to its list.
967 	This also grabs a reference to the source cache.
968 	Assumes you have the cache and the consumer's lock held.
969 */
970 void
971 VMCache::AddConsumer(VMCache* consumer)
972 {
973 	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
974 	AssertLocked();
975 	consumer->AssertLocked();
976 
977 	T(AddConsumer(this, consumer));
978 
979 	consumer->source = this;
980 	consumers.Add(consumer);
981 
982 	AcquireRefLocked();
983 	AcquireStoreRef();
984 }
985 
986 
987 /*!	Adds the \a area to this cache.
988 	Assumes you have locked the cache.
989 */
990 status_t
991 VMCache::InsertAreaLocked(VMArea* area)
992 {
993 	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
994 	AssertLocked();
995 
996 	T(InsertArea(this, area));
997 
998 	area->cache_next = areas;
999 	if (area->cache_next)
1000 		area->cache_next->cache_prev = area;
1001 	area->cache_prev = NULL;
1002 	areas = area;
1003 
1004 	AcquireStoreRef();
1005 
1006 	return B_OK;
1007 }
1008 
1009 
1010 status_t
1011 VMCache::RemoveArea(VMArea* area)
1012 {
1013 	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));
1014 
1015 	T(RemoveArea(this, area));
1016 
1017 	// We release the store reference first, since otherwise we would reverse
1018 	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
1019 	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
1020 	// Also cf. _RemoveConsumer().
1021 	ReleaseStoreRef();
1022 
1023 	AutoLocker<VMCache> locker(this);
1024 
1025 	if (area->cache_prev)
1026 		area->cache_prev->cache_next = area->cache_next;
1027 	if (area->cache_next)
1028 		area->cache_next->cache_prev = area->cache_prev;
1029 	if (areas == area)
1030 		areas = area->cache_next;
1031 
1032 	return B_OK;
1033 }
1034 
1035 
1036 /*!	Transfers the areas from \a fromCache to this cache. This cache must not
1037 	have areas yet. Both caches must be locked.
1038 */
1039 void
1040 VMCache::TransferAreas(VMCache* fromCache)
1041 {
1042 	AssertLocked();
1043 	fromCache->AssertLocked();
1044 	ASSERT(areas == NULL);
1045 
1046 	areas = fromCache->areas;
1047 	fromCache->areas = NULL;
1048 
1049 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1050 		area->cache = this;
1051 		AcquireRefLocked();
1052 		fromCache->ReleaseRefLocked();
1053 
1054 		T(RemoveArea(fromCache, area));
1055 		T(InsertArea(this, area));
1056 	}
1057 }
1058 
1059 
1060 uint32
1061 VMCache::CountWritableAreas(VMArea* ignoreArea) const
1062 {
1063 	uint32 count = 0;
1064 
1065 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1066 		if (area != ignoreArea
1067 			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
1068 			count++;
1069 		}
1070 	}
1071 
1072 	return count;
1073 }
1074 
1075 
1076 status_t
1077 VMCache::WriteModified()
1078 {
1079 	TRACE(("VMCache::WriteModified(cache = %p)\n", this));
1080 
1081 	if (temporary)
1082 		return B_OK;
1083 
1084 	Lock();
1085 	status_t status = vm_page_write_modified_pages(this);
1086 	Unlock();
1087 
1088 	return status;
1089 }
1090 
1091 
1092 /*!	Commits the memory to the store if the \a commitment is larger than
1093 	what's committed already.
1094 	Assumes you have the cache's lock held.
1095 */
1096 status_t
1097 VMCache::SetMinimalCommitment(off_t commitment, int priority)
1098 {
1099 	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %" B_PRIdOFF
1100 		")\n", this, commitment));
1101 	AssertLocked();
1102 
1103 	T(SetMinimalCommitment(this, commitment));
1104 
1105 	status_t status = B_OK;
1106 
1107 	// If we don't have enough committed space to cover through to the new end
1108 	// of the area...
1109 	if (committed_size < commitment) {
1110 		// ToDo: should we check if the cache's virtual size is large
1111 		//	enough for a commitment of that size?
1112 
1113 		// try to commit more memory
1114 		status = Commit(commitment, priority);
1115 	}
1116 
1117 	return status;
1118 }
1119 
1120 
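/*!	Frees the pages returned by \a it, up to (but not including) \a toPage
	if given. Returns \c true if the cache lock had to be released in order
	to wait for a busy page, in which case the caller must restart the
	iteration, and \c false once the whole range has been processed.
*/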
1121 bool
1122 VMCache::_FreePageRange(VMCachePagesTree::Iterator it,
1123 	page_num_t* toPage = NULL)
1124 {
1125 	for (vm_page* page = it.Next();
1126 		page != NULL && (toPage == NULL || page->cache_offset < *toPage);
1127 		page = it.Next()) {
1128 
1129 		if (page->busy) {
1130 			if (page->busy_writing) {
1131 				// We cannot wait for the page to become available,
1132 				// as we might cause a deadlock this way.
1133 				page->busy_writing = false;
1134 					// this will notify the writer to free the page
1135 				continue;
1136 			}
1137 
1138 			// wait for page to become unbusy
1139 			WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1140 			return true;
1141 		}
1142 
1143 		// remove the page and put it into the free queue
1144 		DEBUG_PAGE_ACCESS_START(page);
1145 		vm_remove_all_page_mappings(page);
1146 		ASSERT(page->WiredCount() == 0);
1147 			// TODO: Find a real solution! If the page is wired
1148 			// temporarily (e.g. by lock_memory()), we actually must not
1149 			// unmap it!
1150 		RemovePage(page);
1151 			// Note: When iterating through an IteratableSplayTree
1152 			// removing the current node is safe.
1153 
1154 		vm_page_free(this, page);
1155 	}
1156 
1157 	return false;
1158 }
1159 
1160 
1161 /*!	This function updates the size field of the cache.
1162 	If needed, it will free up all pages that don't belong to the cache anymore.
1163 	The cache lock must be held when you call it.
1164 	Since removed pages don't belong to the cache any longer, they are not
1165 	written back before being removed.
1166 
1167 	Note, this function may temporarily release the cache lock in case it
1168 	has to wait for busy pages.
1169 */
1170 status_t
1171 VMCache::Resize(off_t newSize, int priority)
1172 {
1173 	TRACE(("VMCache::Resize(cache %p, newSize %" B_PRIdOFF ") old size %"
1174 		B_PRIdOFF "\n", this, newSize, this->virtual_end));
1175 	this->AssertLocked();
1176 
1177 	T(Resize(this, newSize));
1178 
1179 	status_t status = Commit(newSize - virtual_base, priority);
1180 	if (status != B_OK)
1181 		return status;
1182 
1183 	page_num_t oldPageCount = (page_num_t)((virtual_end + B_PAGE_SIZE - 1)
1184 		>> PAGE_SHIFT);
1185 	page_num_t newPageCount = (page_num_t)((newSize + B_PAGE_SIZE - 1)
1186 		>> PAGE_SHIFT);
1187 
1188 	if (newPageCount < oldPageCount) {
1189 		// we need to remove all pages in the cache outside of the new virtual
1190 		// size
1191 		while (_FreePageRange(pages.GetIterator(newPageCount, true, true)))
1192 			;
1193 	}
1194 
1195 	virtual_end = newSize;
1196 	return B_OK;
1197 }
1198 
1199 /*!	This function updates the virtual_base field of the cache.
1200 	If needed, it will free up all pages that don't belong to the cache anymore.
1201 	The cache lock must be held when you call it.
1202 	Since removed pages don't belong to the cache any longer, they are not
1203 	written back before being removed.
1204 
1205 	Note, this function may temporarily release the cache lock in case it
1206 	has to wait for busy pages.
1207 */
1208 status_t
1209 VMCache::Rebase(off_t newBase, int priority)
1210 {
1211 	TRACE(("VMCache::Rebase(cache %p, newBase %" B_PRIdOFF ") old base %"
1212 		B_PRIdOFF "\n", this, newBase, this->virtual_base));
1213 	this->AssertLocked();
1214 
1215 	T(Rebase(this, newBase));
1216 
1217 	status_t status = Commit(virtual_end - newBase, priority);
1218 	if (status != B_OK)
1219 		return status;
1220 
1221 	page_num_t basePage = (page_num_t)(newBase >> PAGE_SHIFT);
1222 
1223 	if (newBase > virtual_base) {
1224 		// we need to remove all pages in the cache outside of the new virtual
1225 		// base
1226 		while (_FreePageRange(pages.GetIterator(), &basePage))
1227 			;
1228 	}
1229 
1230 	virtual_base = newBase;
1231 	return B_OK;
1232 }
1233 
1234 
1235 /*!	Moves pages in the given range from the source cache into this cache. Both
1236 	caches must be locked.
1237 */
1238 status_t
1239 VMCache::Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset)
1240 {
1241 	page_num_t startPage = offset >> PAGE_SHIFT;
1242 	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
1243 	off_t offsetChange = newOffset - offset;
1244 
1245 	VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true,
1246 		true);
1247 	for (vm_page* page = it.Next();
1248 				page != NULL && page->cache_offset < endPage;
1249 				page = it.Next()) {
1250 		MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange);
1251 	}
1252 
1253 	return B_OK;
1254 }
1255 
1256 
1257 /*!	You have to call this function with the VMCache lock held. */
1258 status_t
1259 VMCache::FlushAndRemoveAllPages()
1260 {
1261 	ASSERT_LOCKED_MUTEX(&fLock);
1262 
1263 	while (page_count > 0) {
1264 		// write back modified pages
1265 		status_t status = vm_page_write_modified_pages(this);
1266 		if (status != B_OK)
1267 			return status;
1268 
1269 		// remove pages
1270 		for (VMCachePagesTree::Iterator it = pages.GetIterator();
1271 				vm_page* page = it.Next();) {
1272 			if (page->busy) {
1273 				// wait for page to become unbusy
1274 				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1275 
1276 				// restart from the start of the list
1277 				it = pages.GetIterator();
1278 				continue;
1279 			}
1280 
1281 			// skip modified pages -- they will be written back in the next
1282 			// iteration
1283 			if (page->State() == PAGE_STATE_MODIFIED)
1284 				continue;
1285 
1286 			// We can't remove mapped pages.
1287 			if (page->IsMapped())
1288 				return B_BUSY;
1289 
1290 			DEBUG_PAGE_ACCESS_START(page);
1291 			RemovePage(page);
1292 			vm_page_free(this, page);
1293 				// Note: When iterating through an IteratableSplayTree
1294 				// removing the current node is safe.
1295 		}
1296 	}
1297 
1298 	return B_OK;
1299 }
1300 
1301 
1302 status_t
1303 VMCache::Commit(off_t size, int priority)
1304 {
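	// The base class has no backing store, so there is nothing to commit;
	// just record the new commitment. Subclasses with a real store override
	// this.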
1305 	committed_size = size;
1306 	return B_OK;
1307 }
1308 
1309 
1310 /*!	Returns whether the cache's underlying backing store could deliver the
1311 	page at the given offset.
1312 
1313 	Basically it returns whether a Read() at \a offset would at least read a
1314 	partial page (assuming that no unexpected errors occur or the situation
1315 	changes in the meantime).
1316 */
1317 bool
1318 VMCache::HasPage(off_t offset)
1319 {
1320 	// In accordance with Fault() the default implementation doesn't have a
1321 	// backing store and doesn't allow faults.
1322 	return false;
1323 }
1324 
1325 
1326 status_t
1327 VMCache::Read(off_t offset, const generic_io_vec *vecs, size_t count,
1328 	uint32 flags, generic_size_t *_numBytes)
1329 {
1330 	return B_ERROR;
1331 }
1332 
1333 
1334 status_t
1335 VMCache::Write(off_t offset, const generic_io_vec *vecs, size_t count,
1336 	uint32 flags, generic_size_t *_numBytes)
1337 {
1338 	return B_ERROR;
1339 }
1340 
1341 
1342 status_t
1343 VMCache::WriteAsync(off_t offset, const generic_io_vec* vecs, size_t count,
1344 	generic_size_t numBytes, uint32 flags, AsyncIOCallback* callback)
1345 {
1346 	// Not supported, fall back to the synchronous hook.
1347 	generic_size_t transferred = numBytes;
1348 	status_t error = Write(offset, vecs, count, flags, &transferred);
1349 
1350 	if (callback != NULL)
1351 		callback->IOFinished(error, transferred != numBytes, transferred);
1352 
1353 	return error;
1354 }
1355 
1356 
1357 /*!	\brief Returns whether the cache can write the page at the given offset.
1358 
1359 	The cache must be locked when this function is invoked.
1360 
1361 	@param offset The page offset.
1362 	@return \c true, if the page can be written, \c false otherwise.
1363 */
1364 bool
1365 VMCache::CanWritePage(off_t offset)
1366 {
1367 	return false;
1368 }
1369 
1370 
1371 status_t
1372 VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
1373 {
1374 	return B_BAD_ADDRESS;
1375 }
1376 
1377 
1378 void
1379 VMCache::Merge(VMCache* source)
1380 {
1381 	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
1382 			vm_page* page = it.Next();) {
1383 		// Note: Removing the current node while iterating through an
1384 		// IteratableSplayTree is safe.
1385 		vm_page* consumerPage = LookupPage(
1386 			(off_t)page->cache_offset << PAGE_SHIFT);
1387 		if (consumerPage == NULL) {
1388 			// the page is not yet in the consumer cache - move it upwards
1389 			MovePage(page);
1390 		}
1391 	}
1392 }
1393 
1394 
1395 status_t
1396 VMCache::AcquireUnreferencedStoreRef()
1397 {
1398 	return B_OK;
1399 }
1400 
1401 
1402 void
1403 VMCache::AcquireStoreRef()
1404 {
1405 }
1406 
1407 
1408 void
1409 VMCache::ReleaseStoreRef()
1410 {
1411 }
1412 
1413 
1414 /*!	Kernel debugger version of HasPage().
1415 	Does not do any locking.
1416 */
1417 bool
1418 VMCache::DebugHasPage(off_t offset)
1419 {
1420 	// default that works for all subclasses that don't lock anyway
1421 	return HasPage(offset);
1422 }
1423 
1424 
1425 /*!	Kernel debugger version of LookupPage().
1426 	Does not do any locking.
1427 */
1428 vm_page*
1429 VMCache::DebugLookupPage(off_t offset)
1430 {
1431 	return pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
1432 }
1433 
1434 
1435 void
1436 VMCache::Dump(bool showPages) const
1437 {
1438 	kprintf("CACHE %p:\n", this);
1439 	kprintf("  ref_count:    %" B_PRId32 "\n", RefCount());
1440 	kprintf("  source:       %p\n", source);
1441 	kprintf("  type:         %s\n", vm_cache_type_to_string(type));
1442 	kprintf("  virtual_base: 0x%" B_PRIx64 "\n", virtual_base);
1443 	kprintf("  virtual_end:  0x%" B_PRIx64 "\n", virtual_end);
1444 	kprintf("  temporary:    %" B_PRIu32 "\n", temporary);
1445 	kprintf("  lock:         %p\n", &fLock);
1446 #if KDEBUG
1447 	kprintf("  lock.holder:  %" B_PRId32 "\n", fLock.holder);
1448 #endif
1449 	kprintf("  areas:\n");
1450 
1451 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1452 		kprintf("    area 0x%" B_PRIx32 ", %s\n", area->id, area->name);
1453 		kprintf("\tbase_addr:  0x%lx, size: 0x%lx\n", area->Base(),
1454 			area->Size());
1455 		kprintf("\tprotection: 0x%" B_PRIx32 "\n", area->protection);
1456 		kprintf("\towner:      0x%" B_PRIx32 "\n", area->address_space->ID());
1457 	}
1458 
1459 	kprintf("  consumers:\n");
1460 	for (ConsumerList::ConstIterator it = consumers.GetIterator();
1461 		 	VMCache* consumer = it.Next();) {
1462 		kprintf("\t%p\n", consumer);
1463 	}
1464 
1465 	kprintf("  pages:\n");
1466 	if (showPages) {
1467 		for (VMCachePagesTree::ConstIterator it = pages.GetIterator();
1468 				vm_page* page = it.Next();) {
1469 			if (!vm_page_is_dummy(page)) {
1470 				kprintf("\t%p ppn %#" B_PRIxPHYSADDR " offset %#" B_PRIxPHYSADDR
1471 					" state %u (%s) wired_count %u\n", page,
1472 					page->physical_page_number, page->cache_offset,
1473 					page->State(), page_state_to_string(page->State()),
1474 					page->WiredCount());
1475 			} else {
1476 				kprintf("\t%p DUMMY PAGE state %u (%s)\n",
1477 					page, page->State(), page_state_to_string(page->State()));
1478 			}
1479 		}
1480 	} else
1481 		kprintf("\t%" B_PRIu32 " in cache\n", page_count);
1482 }
1483 
1484 
1485 /*!	Wakes up threads waiting for page events.
1486 	\param page The page for which events occurred.
1487 	\param events The mask of events that occurred.
1488 */
1489 void
1490 VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
1491 {
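	// Walk the singly linked waiter list via a pointer to the previous link,
	// so that matching waiters can be unlinked in place while iterating.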
1492 	PageEventWaiter** it = &fPageEventWaiters;
1493 	while (PageEventWaiter* waiter = *it) {
1494 		if (waiter->page == page && (waiter->events & events) != 0) {
1495 			// remove from list and unblock
1496 			*it = waiter->next;
1497 			thread_unblock(waiter->thread, B_OK);
1498 		} else
1499 			it = &waiter->next;
1500 	}
1501 }
1502 
1503 
1504 /*!	Merges the given cache with its only consumer.
1505 	The caller must hold both the cache's and the consumer's lock. The method
1506 	releases neither lock.
1507 */
1508 void
1509 VMCache::_MergeWithOnlyConsumer()
1510 {
1511 	VMCache* consumer = consumers.RemoveHead();
1512 
1513 	TRACE(("merge vm cache %p (ref == %" B_PRId32 ") with vm cache %p\n",
1514 		this, this->fRefCount, consumer));
1515 
1516 	T(Merge(this, consumer));
1517 
1518 	// merge the cache
1519 	consumer->Merge(this);
1520 
1521 	// The remaining consumer has got a new source.
1522 	if (source != NULL) {
1523 		VMCache* newSource = source;
1524 
1525 		newSource->Lock();
1526 
1527 		newSource->consumers.Remove(this);
1528 		newSource->consumers.Add(consumer);
1529 		consumer->source = newSource;
1530 		source = NULL;
1531 
1532 		newSource->Unlock();
1533 	} else
1534 		consumer->source = NULL;
1535 
1536 	// Release the reference the cache's consumer owned. The consumer takes
1537 	// over the cache's ref to its source (if any) instead.
1538 	ReleaseRefLocked();
1539 }
1540 
1541 
1542 /*!	Removes the \a consumer from this cache.
1543 	It will also release the reference to the cache owned by the consumer.
1544 	Assumes you have the consumer's cache lock held. This cache must not be
1545 	locked.
1546 */
1547 void
1548 VMCache::_RemoveConsumer(VMCache* consumer)
1549 {
1550 	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
1551 	consumer->AssertLocked();
1552 
1553 	T(RemoveConsumer(this, consumer));
1554 
1555 	// Remove the store ref before locking the cache. Otherwise we'd call into
1556 	// the VFS while holding the cache lock, which would reverse the usual
1557 	// locking order.
1558 	ReleaseStoreRef();
1559 
1560 	// remove the consumer from the cache, but keep its reference until later
1561 	Lock();
1562 	consumers.Remove(consumer);
1563 	consumer->source = NULL;
1564 
1565 	ReleaseRefAndUnlock();
1566 }
1567 
1568 
1569 // #pragma mark - VMCacheFactory
1570 	// TODO: Move to own source file!
1571 
1572 
1573 /*static*/ status_t
1574 VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
1575 	int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
1576 	int priority)
1577 {
1578 	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1579 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1580 	if (priority >= VM_PRIORITY_VIP)
1581 		allocationFlags |= HEAP_PRIORITY_VIP;
1582 
1583 #if ENABLE_SWAP_SUPPORT
1584 	if (swappable) {
1585 		VMAnonymousCache* cache
1586 			= new(gAnonymousCacheObjectCache, allocationFlags) VMAnonymousCache;
1587 		if (cache == NULL)
1588 			return B_NO_MEMORY;
1589 
1590 		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1591 			numGuardPages, allocationFlags);
1592 		if (error != B_OK) {
1593 			cache->Delete();
1594 			return error;
1595 		}
1596 
1597 		T(Create(cache));
1598 
1599 		_cache = cache;
1600 		return B_OK;
1601 	}
1602 #endif
1603 
1604 	VMAnonymousNoSwapCache* cache
1605 		= new(gAnonymousNoSwapCacheObjectCache, allocationFlags)
1606 			VMAnonymousNoSwapCache;
1607 	if (cache == NULL)
1608 		return B_NO_MEMORY;
1609 
1610 	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1611 		numGuardPages, allocationFlags);
1612 	if (error != B_OK) {
1613 		cache->Delete();
1614 		return error;
1615 	}
1616 
1617 	T(Create(cache));
1618 
1619 	_cache = cache;
1620 	return B_OK;
1621 }
1622 
1623 
1624 /*static*/ status_t
1625 VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
1626 {
1627 	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1628 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1629 		// Note: Vnode cache creation is never VIP.
1630 
1631 	VMVnodeCache* cache
1632 		= new(gVnodeCacheObjectCache, allocationFlags) VMVnodeCache;
1633 	if (cache == NULL)
1634 		return B_NO_MEMORY;
1635 
1636 	status_t error = cache->Init(vnode, allocationFlags);
1637 	if (error != B_OK) {
1638 		cache->Delete();
1639 		return error;
1640 	}
1641 
1642 	T(Create(cache));
1643 
1644 	_cache = cache;
1645 	return B_OK;
1646 }
1647 
1648 
1649 /*static*/ status_t
1650 VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
1651 {
1652 	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1653 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1654 		// Note: Device cache creation is never VIP.
1655 
1656 	VMDeviceCache* cache
1657 		= new(gDeviceCacheObjectCache, allocationFlags) VMDeviceCache;
1658 	if (cache == NULL)
1659 		return B_NO_MEMORY;
1660 
1661 	status_t error = cache->Init(baseAddress, allocationFlags);
1662 	if (error != B_OK) {
1663 		cache->Delete();
1664 		return error;
1665 	}
1666 
1667 	T(Create(cache));
1668 
1669 	_cache = cache;
1670 	return B_OK;
1671 }
1672 
1673 
1674 /*static*/ status_t
1675 VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
1676 {
1677 	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1678 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1679 	if (priority >= VM_PRIORITY_VIP)
1680 		allocationFlags |= HEAP_PRIORITY_VIP;
1681 
1682 	VMNullCache* cache
1683 		= new(gNullCacheObjectCache, allocationFlags) VMNullCache;
1684 	if (cache == NULL)
1685 		return B_NO_MEMORY;
1686 
1687 	status_t error = cache->Init(allocationFlags);
1688 	if (error != B_OK) {
1689 		cache->Delete();
1690 		return error;
1691 	}
1692 
1693 	T(Create(cache));
1694 
1695 	_cache = cache;
1696 	return B_OK;
1697 }
1698