1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include <vm/VMCache.h>
12 
13 #include <stddef.h>
14 #include <stdlib.h>
15 
16 #include <algorithm>
17 
18 #include <arch/cpu.h>
19 #include <condition_variable.h>
20 #include <heap.h>
21 #include <int.h>
22 #include <kernel.h>
23 #include <slab/Slab.h>
24 #include <smp.h>
25 #include <tracing.h>
26 #include <util/AutoLock.h>
27 #include <vfs.h>
28 #include <vm/vm.h>
29 #include <vm/vm_page.h>
30 #include <vm/vm_priv.h>
31 #include <vm/vm_types.h>
32 #include <vm/VMAddressSpace.h>
33 #include <vm/VMArea.h>
34 
35 // needed for the factory only
36 #include "VMAnonymousCache.h"
37 #include "VMAnonymousNoSwapCache.h"
38 #include "VMDeviceCache.h"
39 #include "VMNullCache.h"
40 #include "../cache/vnode_store.h"
41 
42 
43 //#define TRACE_VM_CACHE
44 #ifdef TRACE_VM_CACHE
45 #	define TRACE(x) dprintf x
46 #else
47 #	define TRACE(x) ;
48 #endif
49 
50 
51 #if DEBUG_CACHE_LIST
52 VMCache* gDebugCacheList;
53 #endif
54 static mutex sCacheListLock = MUTEX_INITIALIZER("global VMCache list");
55 	// The lock is also needed when the debug feature is disabled.
56 
57 ObjectCache* gCacheRefObjectCache;
58 ObjectCache* gAnonymousCacheObjectCache;
59 ObjectCache* gAnonymousNoSwapCacheObjectCache;
60 ObjectCache* gVnodeCacheObjectCache;
61 ObjectCache* gDeviceCacheObjectCache;
62 ObjectCache* gNullCacheObjectCache;
63 
64 
65 struct VMCache::PageEventWaiter {
66 	Thread*				thread;
67 	PageEventWaiter*	next;
68 	vm_page*			page;
69 	uint32				events;
70 };
71 
72 
73 #if VM_CACHE_TRACING
74 
75 namespace VMCacheTracing {
76 
77 class VMCacheTraceEntry : public AbstractTraceEntry {
78 	public:
79 		VMCacheTraceEntry(VMCache* cache)
80 			:
81 			fCache(cache)
82 		{
83 #if VM_CACHE_TRACING_STACK_TRACE
84 			fStackTrace = capture_tracing_stack_trace(
85 				VM_CACHE_TRACING_STACK_TRACE, 0, true);
86 				// Don't capture userland stack trace to avoid potential
87 				// deadlocks.
88 #endif
89 		}
90 
91 #if VM_CACHE_TRACING_STACK_TRACE
92 		virtual void DumpStackTrace(TraceOutput& out)
93 		{
94 			out.PrintStackTrace(fStackTrace);
95 		}
96 #endif
97 
98 		VMCache* Cache() const
99 		{
100 			return fCache;
101 		}
102 
103 	protected:
104 		VMCache*	fCache;
105 #if VM_CACHE_TRACING_STACK_TRACE
106 		tracing_stack_trace* fStackTrace;
107 #endif
108 };
109 
110 
111 class Create : public VMCacheTraceEntry {
112 	public:
113 		Create(VMCache* cache)
114 			:
115 			VMCacheTraceEntry(cache)
116 		{
117 			Initialized();
118 		}
119 
120 		virtual void AddDump(TraceOutput& out)
121 		{
122 			out.Print("vm cache create: -> cache: %p", fCache);
123 		}
124 };
125 
126 
127 class Delete : public VMCacheTraceEntry {
128 	public:
129 		Delete(VMCache* cache)
130 			:
131 			VMCacheTraceEntry(cache)
132 		{
133 			Initialized();
134 		}
135 
136 		virtual void AddDump(TraceOutput& out)
137 		{
138 			out.Print("vm cache delete: cache: %p", fCache);
139 		}
140 };
141 
142 
143 class SetMinimalCommitment : public VMCacheTraceEntry {
144 	public:
145 		SetMinimalCommitment(VMCache* cache, off_t commitment)
146 			:
147 			VMCacheTraceEntry(cache),
148 			fOldCommitment(cache->committed_size),
149 			fCommitment(commitment)
150 		{
151 			Initialized();
152 		}
153 
154 		virtual void AddDump(TraceOutput& out)
155 		{
156 			out.Print("vm cache set min commitment: cache: %p, "
157 				"commitment: %" B_PRIdOFF " -> %" B_PRIdOFF, fCache,
158 				fOldCommitment, fCommitment);
159 		}
160 
161 	private:
162 		off_t	fOldCommitment;
163 		off_t	fCommitment;
164 };
165 
166 
167 class Resize : public VMCacheTraceEntry {
168 	public:
169 		Resize(VMCache* cache, off_t size)
170 			:
171 			VMCacheTraceEntry(cache),
172 			fOldSize(cache->virtual_end),
173 			fSize(size)
174 		{
175 			Initialized();
176 		}
177 
178 		virtual void AddDump(TraceOutput& out)
179 		{
180 			out.Print("vm cache resize: cache: %p, size: %" B_PRIdOFF " -> %"
181 				B_PRIdOFF, fCache, fOldSize, fSize);
182 		}
183 
184 	private:
185 		off_t	fOldSize;
186 		off_t	fSize;
187 };
188 
189 
190 class Rebase : public VMCacheTraceEntry {
191 	public:
192 		Rebase(VMCache* cache, off_t base)
193 			:
194 			VMCacheTraceEntry(cache),
195 			fOldBase(cache->virtual_base),
196 			fBase(base)
197 		{
198 			Initialized();
199 		}
200 
201 		virtual void AddDump(TraceOutput& out)
202 		{
203 			out.Print("vm cache rebase: cache: %p, base: %" B_PRIdOFF " -> %"
204 				B_PRIdOFF, fCache, fOldBase, fBase);
205 		}
206 
207 	private:
208 		off_t	fOldBase;
209 		off_t	fBase;
210 };
211 
212 
213 class AddConsumer : public VMCacheTraceEntry {
214 	public:
215 		AddConsumer(VMCache* cache, VMCache* consumer)
216 			:
217 			VMCacheTraceEntry(cache),
218 			fConsumer(consumer)
219 		{
220 			Initialized();
221 		}
222 
223 		virtual void AddDump(TraceOutput& out)
224 		{
225 			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
226 				fConsumer);
227 		}
228 
229 		VMCache* Consumer() const
230 		{
231 			return fConsumer;
232 		}
233 
234 	private:
235 		VMCache*	fConsumer;
236 };
237 
238 
239 class RemoveConsumer : public VMCacheTraceEntry {
240 	public:
241 		RemoveConsumer(VMCache* cache, VMCache* consumer)
242 			:
243 			VMCacheTraceEntry(cache),
244 			fConsumer(consumer)
245 		{
246 			Initialized();
247 		}
248 
249 		virtual void AddDump(TraceOutput& out)
250 		{
251 			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
252 				fCache, fConsumer);
253 		}
254 
255 	private:
256 		VMCache*	fConsumer;
257 };
258 
259 
260 class Merge : public VMCacheTraceEntry {
261 	public:
262 		Merge(VMCache* cache, VMCache* consumer)
263 			:
264 			VMCacheTraceEntry(cache),
265 			fConsumer(consumer)
266 		{
267 			Initialized();
268 		}
269 
270 		virtual void AddDump(TraceOutput& out)
271 		{
272 			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
273 				fCache, fConsumer);
274 		}
275 
276 	private:
277 		VMCache*	fConsumer;
278 };
279 
280 
281 class InsertArea : public VMCacheTraceEntry {
282 	public:
283 		InsertArea(VMCache* cache, VMArea* area)
284 			:
285 			VMCacheTraceEntry(cache),
286 			fArea(area)
287 		{
288 			Initialized();
289 		}
290 
291 		virtual void AddDump(TraceOutput& out)
292 		{
293 			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
294 				fArea);
295 		}
296 
297 		VMArea*	Area() const
298 		{
299 			return fArea;
300 		}
301 
302 	private:
303 		VMArea*	fArea;
304 };
305 
306 
307 class RemoveArea : public VMCacheTraceEntry {
308 	public:
309 		RemoveArea(VMCache* cache, VMArea* area)
310 			:
311 			VMCacheTraceEntry(cache),
312 			fArea(area)
313 		{
314 			Initialized();
315 		}
316 
317 		virtual void AddDump(TraceOutput& out)
318 		{
319 			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
320 				fArea);
321 		}
322 
323 	private:
324 		VMArea*	fArea;
325 };
326 
327 }	// namespace VMCacheTracing
328 
329 #	define T(x) new(std::nothrow) VMCacheTracing::x;
330 
331 #	if VM_CACHE_TRACING >= 2
332 
333 namespace VMCacheTracing {
334 
335 class InsertPage : public VMCacheTraceEntry {
336 	public:
337 		InsertPage(VMCache* cache, vm_page* page, off_t offset)
338 			:
339 			VMCacheTraceEntry(cache),
340 			fPage(page),
341 			fOffset(offset)
342 		{
343 			Initialized();
344 		}
345 
346 		virtual void AddDump(TraceOutput& out)
347 		{
348 			out.Print("vm cache insert page: cache: %p, page: %p, offset: %"
349 				B_PRIdOFF, fCache, fPage, fOffset);
350 		}
351 
352 	private:
353 		vm_page*	fPage;
354 		off_t		fOffset;
355 };
356 
357 
358 class RemovePage : public VMCacheTraceEntry {
359 	public:
360 		RemovePage(VMCache* cache, vm_page* page)
361 			:
362 			VMCacheTraceEntry(cache),
363 			fPage(page)
364 		{
365 			Initialized();
366 		}
367 
368 		virtual void AddDump(TraceOutput& out)
369 		{
370 			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
371 				fPage);
372 		}
373 
374 	private:
375 		vm_page*	fPage;
376 };
377 
378 }	// namespace VMCacheTracing
379 
380 #		define T2(x) new(std::nothrow) VMCacheTracing::x;
381 #	else
382 #		define T2(x) ;
383 #	endif
384 #else
385 #	define T(x) ;
386 #	define T2(x) ;
387 #endif
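
// Example (illustrative): with VM_CACHE_TRACING enabled, a statement such as
//	T(Resize(this, newSize));
// expands to
//	new(std::nothrow) VMCacheTracing::Resize(this, newSize);
// so a trace entry is allocated and published via Initialized(). With tracing
// disabled, both T() and T2() expand to empty statements.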
388 
389 
390 //	#pragma mark - debugger commands
391 
392 
393 #if VM_CACHE_TRACING
394 
395 
396 static void*
397 cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
398 {
399 	using namespace VMCacheTracing;
400 
401 	// find the previous "insert area" entry for the given area
402 	TraceEntryIterator iterator = baseIterator;
403 	TraceEntry* entry = iterator.Current();
404 	while (entry != NULL) {
405 		if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
406 			if (insertAreaEntry->Area() == area)
407 				return insertAreaEntry->Cache();
408 		}
409 
410 		entry = iterator.Previous();
411 	}
412 
413 	return NULL;
414 }
415 
416 
417 static void*
418 cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
419 {
420 	using namespace VMCacheTracing;
421 
422 	// find the previous "add consumer" or "create" entry for the given cache
423 	TraceEntryIterator iterator = baseIterator;
424 	TraceEntry* entry = iterator.Current();
425 	while (entry != NULL) {
426 		if (Create* createEntry = dynamic_cast<Create*>(entry)) {
427 			if (createEntry->Cache() == cache)
428 				return NULL;
429 		} else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
430 			if (addEntry->Consumer() == cache)
431 				return addEntry->Cache();
432 		}
433 
434 		entry = iterator.Previous();
435 	}
436 
437 	return NULL;
438 }
439 
440 
441 static int
442 command_cache_stack(int argc, char** argv)
443 {
444 	if (argc < 3 || argc > 4) {
445 		print_debugger_command_usage(argv[0]);
446 		return 0;
447 	}
448 
449 	bool isArea = false;
450 
451 	int argi = 1;
452 	if (argc == 4) {
453 		if (strcmp(argv[argi], "area") != 0) {
454 			print_debugger_command_usage(argv[0]);
455 			return 0;
456 		}
457 
458 		argi++;
459 		isArea = true;
460 	}
461 
462 	uint64 addressValue;
463 	uint64 debugEntryIndex;
464 	if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
465 		|| !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
466 		return 0;
467 	}
468 
469 	TraceEntryIterator baseIterator;
470 	if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
471 		kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
472 		return 0;
473 	}
474 
475 	void* address = (void*)(addr_t)addressValue;
476 
477 	kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
478 		isArea ? "area" : "cache", address, debugEntryIndex);
479 	if (isArea) {
480 		address = cache_stack_find_area_cache(baseIterator, address);
481 		if (address == NULL) {
482 			kprintf("  cache not found\n");
483 			return 0;
484 		}
485 	}
486 
487 	while (address != NULL) {
488 		kprintf("  %p\n", address);
489 		address = cache_stack_find_consumer(baseIterator, address);
490 	}
491 
492 	return 0;
493 }
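
// Example invocation from the kernel debugger (illustrative values only):
//	cache_stack area 0x801f3000 12345
// walks backwards from tracing entry 12345 and prints the consumer -> source
// ancestry for the cache of the area at the given address.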
494 
495 
496 #endif	// VM_CACHE_TRACING
497 
498 
499 //	#pragma mark -
500 
501 
502 status_t
503 vm_cache_init(kernel_args* args)
504 {
505 	// Create object caches for the structures we allocate here.
506 	gCacheRefObjectCache = create_object_cache("cache refs", sizeof(VMCacheRef),
507 		0, NULL, NULL, NULL);
508 	gAnonymousCacheObjectCache = create_object_cache("anon caches",
509 		sizeof(VMAnonymousCache), 0, NULL, NULL, NULL);
510 	gAnonymousNoSwapCacheObjectCache = create_object_cache(
511 		"anon no-swap caches", sizeof(VMAnonymousNoSwapCache), 0, NULL, NULL,
512 		NULL);
513 	gVnodeCacheObjectCache = create_object_cache("vnode caches",
514 		sizeof(VMVnodeCache), 0, NULL, NULL, NULL);
515 	gDeviceCacheObjectCache = create_object_cache("device caches",
516 		sizeof(VMDeviceCache), 0, NULL, NULL, NULL);
517 	gNullCacheObjectCache = create_object_cache("null caches",
518 		sizeof(VMNullCache), 0, NULL, NULL, NULL);
519 
520 	if (gCacheRefObjectCache == NULL || gAnonymousCacheObjectCache == NULL
521 		|| gAnonymousNoSwapCacheObjectCache == NULL
522 		|| gVnodeCacheObjectCache == NULL
523 		|| gDeviceCacheObjectCache == NULL
524 		|| gNullCacheObjectCache == NULL) {
525 		panic("vm_cache_init(): Failed to create object caches!");
526 		return B_NO_MEMORY;
527 	}
528 
529 	return B_OK;
530 }
531 
532 
533 void
534 vm_cache_init_post_heap()
535 {
536 #if VM_CACHE_TRACING
537 	add_debugger_command_etc("cache_stack", &command_cache_stack,
538 		"List the ancestors (sources) of a VMCache at the time given by "
539 			"tracing entry index",
540 		"[ \"area\" ] <address> <tracing entry index>\n"
541 		"All ancestors (sources) of a given VMCache at the time given by the\n"
542 		"tracing entry index are listed. If \"area\" is given the supplied\n"
543 		"address is an area instead of a cache address. The listing will\n"
544 		"start with the area's cache at that point.\n",
545 		0);
546 #endif	// VM_CACHE_TRACING
547 }
548 
549 
550 VMCache*
551 vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
552 {
553 	mutex_lock(&sCacheListLock);
554 
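	// Fast path for the dontWait case: only TryLock() is attempted. We give
	// up and return NULL if the page has no cache or its cache cannot be
	// locked immediately, and retry only when the page's cache changed
	// between reading the ref and acquiring the lock.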
555 	while (dontWait) {
556 		VMCacheRef* cacheRef = page->CacheRef();
557 		if (cacheRef == NULL) {
558 			mutex_unlock(&sCacheListLock);
559 			return NULL;
560 		}
561 
562 		VMCache* cache = cacheRef->cache;
563 		if (!cache->TryLock()) {
564 			mutex_unlock(&sCacheListLock);
565 			return NULL;
566 		}
567 
568 		if (cacheRef == page->CacheRef()) {
569 			mutex_unlock(&sCacheListLock);
570 			cache->AcquireRefLocked();
571 			return cache;
572 		}
573 
574 		// the cache changed in the meantime
575 		cache->Unlock();
576 	}
577 
578 	while (true) {
579 		VMCacheRef* cacheRef = page->CacheRef();
580 		if (cacheRef == NULL) {
581 			mutex_unlock(&sCacheListLock);
582 			return NULL;
583 		}
584 
585 		VMCache* cache = cacheRef->cache;
586 		if (!cache->SwitchLock(&sCacheListLock)) {
587 			// cache has been deleted
588 			mutex_lock(&sCacheListLock);
589 			continue;
590 		}
591 
592 		mutex_lock(&sCacheListLock);
593 		if (cache == page->Cache()) {
594 			mutex_unlock(&sCacheListLock);
595 			cache->AcquireRefLocked();
596 			return cache;
597 		}
598 
599 		// the cache changed in the meantime
600 		cache->Unlock();
601 	}
602 }
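
// Usage sketch (illustrative, not part of the original file): a caller that
// wants to operate on a page's cache without blocking could do the following;
// error handling and the actual work are omitted.
//
//	if (VMCache* cache = vm_cache_acquire_locked_page_cache(page, true)) {
//		// the cache is now locked and referenced
//		// ... inspect or modify the page ...
//		cache->ReleaseRefAndUnlock();
//	}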
603 
604 
605 // #pragma mark - VMCache
606 
607 
608 VMCacheRef::VMCacheRef(VMCache* cache)
609 	:
610 	cache(cache),
611 	ref_count(1)
612 {
613 }
614 
615 
616 // #pragma mark - VMCache
617 
618 
619 bool
620 VMCache::_IsMergeable() const
621 {
622 	return areas == NULL && temporary && !consumers.IsEmpty()
623 		&& consumers.Head() == consumers.Tail();
624 }
625 
626 
627 VMCache::VMCache()
628 	:
629 	fCacheRef(NULL)
630 {
631 }
632 
633 
634 VMCache::~VMCache()
635 {
636 	object_cache_delete(gCacheRefObjectCache, fCacheRef);
637 }
638 
639 
640 status_t
641 VMCache::Init(uint32 cacheType, uint32 allocationFlags)
642 {
643 	mutex_init(&fLock, "VMCache");
644 
645 	areas = NULL;
646 	fRefCount = 1;
647 	source = NULL;
648 	virtual_base = 0;
649 	virtual_end = 0;
650 	committed_size = 0;
651 	temporary = 0;
652 	page_count = 0;
653 	fWiredPagesCount = 0;
654 	type = cacheType;
655 	fPageEventWaiters = NULL;
656 
657 #if DEBUG_CACHE_LIST
658 	debug_previous = NULL;
659 	debug_next = NULL;
660 		// initialize in case the following fails
661 #endif
662 
663 	fCacheRef = new(gCacheRefObjectCache, allocationFlags) VMCacheRef(this);
664 	if (fCacheRef == NULL)
665 		return B_NO_MEMORY;
666 
667 #if DEBUG_CACHE_LIST
668 	mutex_lock(&sCacheListLock);
669 
670 	if (gDebugCacheList != NULL)
671 		gDebugCacheList->debug_previous = this;
672 	debug_next = gDebugCacheList;
673 	gDebugCacheList = this;
674 
675 	mutex_unlock(&sCacheListLock);
676 #endif
677 
678 	return B_OK;
679 }
680 
681 
682 void
683 VMCache::Delete()
684 {
685 	if (areas != NULL)
686 		panic("cache %p to be deleted still has areas", this);
687 	if (!consumers.IsEmpty())
688 		panic("cache %p to be deleted still has consumers", this);
689 
690 	T(Delete(this));
691 
692 	// free all of the pages in the cache
693 	while (vm_page* page = pages.Root()) {
694 		if (!page->mappings.IsEmpty() || page->WiredCount() != 0) {
695 			panic("remove page %p from cache %p: page still has mappings!\n"
696 				"@!page %p; cache %p", page, this, page, this);
697 		}
698 
699 		// remove it
700 		pages.Remove(page);
701 		page->SetCacheRef(NULL);
702 
703 		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
704 			page->physical_page_number));
705 		DEBUG_PAGE_ACCESS_START(page);
706 		vm_page_free(this, page);
707 	}
708 
709 	// remove the ref to the source
710 	if (source)
711 		source->_RemoveConsumer(this);
712 
713 	// We lock and unlock the sCacheListLock, even if the DEBUG_CACHE_LIST is
714 	// not enabled. This synchronization point is needed for
715 	// vm_cache_acquire_locked_page_cache().
716 	mutex_lock(&sCacheListLock);
717 
718 #if DEBUG_CACHE_LIST
719 	if (debug_previous)
720 		debug_previous->debug_next = debug_next;
721 	if (debug_next)
722 		debug_next->debug_previous = debug_previous;
723 	if (this == gDebugCacheList)
724 		gDebugCacheList = debug_next;
725 #endif
726 
727 	mutex_destroy(&fLock);
728 
729 	mutex_unlock(&sCacheListLock);
730 
731 	DeleteObject();
732 }
733 
734 
735 void
736 VMCache::Unlock(bool consumerLocked)
737 {
738 	while (fRefCount == 1 && _IsMergeable()) {
739 		VMCache* consumer = consumers.Head();
740 		if (consumerLocked) {
741 			_MergeWithOnlyConsumer();
742 		} else if (consumer->TryLock()) {
743 			_MergeWithOnlyConsumer();
744 			consumer->Unlock();
745 		} else {
746 			// Someone else has locked the consumer ATM. Unlock this cache and
747 			// wait for the consumer lock. Increment the cache's ref count
748 			// temporarily, so that no one else will try what we are doing or
749 			// delete the cache.
750 			fRefCount++;
751 			bool consumerLockedTemp = consumer->SwitchLock(&fLock);
752 			Lock();
753 			fRefCount--;
754 
755 			if (consumerLockedTemp) {
756 				if (fRefCount == 1 && _IsMergeable()
757 						&& consumer == consumers.Head()) {
758 					// nothing has changed in the meantime -- merge
759 					_MergeWithOnlyConsumer();
760 				}
761 
762 				consumer->Unlock();
763 			}
764 		}
765 	}
766 
767 	if (fRefCount == 0) {
768 		// delete this cache
769 		Delete();
770 	} else
771 		mutex_unlock(&fLock);
772 }
773 
774 
775 vm_page*
776 VMCache::LookupPage(off_t offset)
777 {
778 	AssertLocked();
779 
780 	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
781 
782 #if KDEBUG
783 	if (page != NULL && page->Cache() != this)
784 		panic("page %p not in cache %p\n", page, this);
785 #endif
786 
787 	return page;
788 }
789 
790 
791 void
792 VMCache::InsertPage(vm_page* page, off_t offset)
793 {
794 	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %" B_PRIdOFF "\n",
795 		this, page, offset));
796 	AssertLocked();
797 
798 	if (page->CacheRef() != NULL) {
799 		panic("insert page %p into cache %p: page cache is set to %p\n",
800 			page, this, page->Cache());
801 	}
802 
803 	T2(InsertPage(this, page, offset));
804 
805 	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
806 	page_count++;
807 	page->SetCacheRef(fCacheRef);
808 
809 #if KDEBUG
810 	vm_page* otherPage = pages.Lookup(page->cache_offset);
811 	if (otherPage != NULL) {
812 		panic("VMCache::InsertPage(): there's already page %p with cache "
813 			"offset %" B_PRIuPHYSADDR " in cache %p; inserting page %p",
814 			otherPage, page->cache_offset, this, page);
815 	}
816 #endif	// KDEBUG
817 
818 	pages.Insert(page);
819 
820 	if (page->WiredCount() > 0)
821 		IncrementWiredPagesCount();
822 }
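
// Usage sketch (illustrative): a typical sequence for adding a freshly
// allocated page to a cache. vm_page_allocate_page() and the reservation are
// assumptions about the page allocator API, shown only for context.
//
//	cache->Lock();
//	vm_page* page = vm_page_allocate_page(&reservation, PAGE_STATE_ACTIVE);
//	cache->InsertPage(page, offset);
//	cache->Unlock();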
823 
824 
825 /*!	Removes the vm_page from this cache. Of course, the page must
826 	really be in this cache or evil things will happen.
827 	The cache lock must be held.
828 */
829 void
830 VMCache::RemovePage(vm_page* page)
831 {
832 	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
833 	AssertLocked();
834 
835 	if (page->Cache() != this) {
836 		panic("remove page %p from cache %p: page cache is set to %p\n", page,
837 			this, page->Cache());
838 	}
839 
840 	T2(RemovePage(this, page));
841 
842 	pages.Remove(page);
843 	page_count--;
844 	page->SetCacheRef(NULL);
845 
846 	if (page->WiredCount() > 0)
847 		DecrementWiredPagesCount();
848 }
849 
850 
851 /*!	Moves the given page from its current cache and inserts it into this
852 	cache at the given offset.
853 	Both caches must be locked.
854 */
855 void
856 VMCache::MovePage(vm_page* page, off_t offset)
857 {
858 	VMCache* oldCache = page->Cache();
859 
860 	AssertLocked();
861 	oldCache->AssertLocked();
862 
863 	// remove from old cache
864 	oldCache->pages.Remove(page);
865 	oldCache->page_count--;
866 	T2(RemovePage(oldCache, page));
867 
868 	// change the offset
869 	page->cache_offset = offset >> PAGE_SHIFT;
870 
871 	// insert here
872 	pages.Insert(page);
873 	page_count++;
874 	page->SetCacheRef(fCacheRef);
875 
876 	if (page->WiredCount() > 0) {
877 		IncrementWiredPagesCount();
878 		oldCache->DecrementWiredPagesCount();
879 	}
880 
881 	T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
882 }
883 
884 /*!	Moves the given page from its current cache and inserts it into this
885 	cache, keeping its current offset. Both caches must be locked.
886 */
887 void
888 VMCache::MovePage(vm_page* page)
889 {
890 	MovePage(page, page->cache_offset << PAGE_SHIFT);
891 }
892 
893 
894 /*!	Moves all pages from the given cache to this one.
895 	Both caches must be locked. This cache must be empty.
896 */
897 void
898 VMCache::MoveAllPages(VMCache* fromCache)
899 {
900 	AssertLocked();
901 	fromCache->AssertLocked();
902 	ASSERT(page_count == 0);
903 
904 	std::swap(fromCache->pages, pages);
905 	page_count = fromCache->page_count;
906 	fromCache->page_count = 0;
907 	fWiredPagesCount = fromCache->fWiredPagesCount;
908 	fromCache->fWiredPagesCount = 0;
909 
910 	// swap the VMCacheRefs
911 	mutex_lock(&sCacheListLock);
912 	std::swap(fCacheRef, fromCache->fCacheRef);
913 	fCacheRef->cache = this;
914 	fromCache->fCacheRef->cache = fromCache;
915 	mutex_unlock(&sCacheListLock);
916 
917 #if VM_CACHE_TRACING >= 2
918 	for (VMCachePagesTree::Iterator it = pages.GetIterator();
919 			vm_page* page = it.Next();) {
920 		T2(RemovePage(fromCache, page));
921 		T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
922 	}
923 #endif
924 }
925 
926 
927 /*!	Waits until one or more events happened for a given page which belongs to
928 	this cache.
929 	The cache must be locked. It will be unlocked by the method. \a relock
930 	specifies whether the method shall re-lock the cache before returning.
931 	\param page The page for which to wait.
932 	\param events The mask of events the caller is interested in.
933 	\param relock If \c true, the cache will be locked when returning,
934 		otherwise it won't be locked.
935 */
936 void
937 VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
938 {
939 	PageEventWaiter waiter;
940 	waiter.thread = thread_get_current_thread();
941 	waiter.next = fPageEventWaiters;
942 	waiter.page = page;
943 	waiter.events = events;
944 
945 	fPageEventWaiters = &waiter;
946 
947 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER,
948 		"cache page events");
949 
950 	Unlock();
951 	thread_block();
952 
953 	if (relock)
954 		Lock();
955 }
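
// Usage sketch (illustrative): waiting until a busy page becomes usable
// again. The cache is locked on entry and, since relock is true, also when
// WaitForPageEvents() returns; the page is looked up again because it may
// have been removed or replaced while we were blocked.
//
//	vm_page* page = cache->LookupPage(offset);
//	while (page != NULL && page->busy) {
//		cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
//		page = cache->LookupPage(offset);
//	}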
956 
957 
958 /*!	Makes this cache the source of the \a consumer cache,
959 	and adds the \a consumer to its list.
960 	This also grabs a reference to the source cache.
961 	Assumes you have the cache and the consumer's lock held.
962 */
963 void
964 VMCache::AddConsumer(VMCache* consumer)
965 {
966 	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
967 	AssertLocked();
968 	consumer->AssertLocked();
969 
970 	T(AddConsumer(this, consumer));
971 
972 	consumer->source = this;
973 	consumers.Add(consumer);
974 
975 	AcquireRefLocked();
976 	AcquireStoreRef();
977 }
978 
979 
980 /*!	Adds the \a area to this cache.
981 	Assumes you have the cache locked.
982 */
983 status_t
984 VMCache::InsertAreaLocked(VMArea* area)
985 {
986 	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
987 	AssertLocked();
988 
989 	T(InsertArea(this, area));
990 
991 	area->cache_next = areas;
992 	if (area->cache_next)
993 		area->cache_next->cache_prev = area;
994 	area->cache_prev = NULL;
995 	areas = area;
996 
997 	AcquireStoreRef();
998 
999 	return B_OK;
1000 }
1001 
1002 
1003 status_t
1004 VMCache::RemoveArea(VMArea* area)
1005 {
1006 	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));
1007 
1008 	T(RemoveArea(this, area));
1009 
1010 	// We release the store reference first, since otherwise we would reverse
1011 	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
1012 	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
1013 	// Also cf. _RemoveConsumer().
1014 	ReleaseStoreRef();
1015 
1016 	AutoLocker<VMCache> locker(this);
1017 
1018 	if (area->cache_prev)
1019 		area->cache_prev->cache_next = area->cache_next;
1020 	if (area->cache_next)
1021 		area->cache_next->cache_prev = area->cache_prev;
1022 	if (areas == area)
1023 		areas = area->cache_next;
1024 
1025 	return B_OK;
1026 }
1027 
1028 
1029 /*!	Transfers the areas from \a fromCache to this cache. This cache must not
1030 	have areas yet. Both caches must be locked.
1031 */
1032 void
1033 VMCache::TransferAreas(VMCache* fromCache)
1034 {
1035 	AssertLocked();
1036 	fromCache->AssertLocked();
1037 	ASSERT(areas == NULL);
1038 
1039 	areas = fromCache->areas;
1040 	fromCache->areas = NULL;
1041 
1042 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1043 		area->cache = this;
1044 		AcquireRefLocked();
1045 		fromCache->ReleaseRefLocked();
1046 
1047 		T(RemoveArea(fromCache, area));
1048 		T(InsertArea(this, area));
1049 	}
1050 }
1051 
1052 
1053 uint32
1054 VMCache::CountWritableAreas(VMArea* ignoreArea) const
1055 {
1056 	uint32 count = 0;
1057 
1058 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1059 		if (area != ignoreArea
1060 			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
1061 			count++;
1062 		}
1063 	}
1064 
1065 	return count;
1066 }
1067 
1068 
1069 status_t
1070 VMCache::WriteModified()
1071 {
1072 	TRACE(("VMCache::WriteModified(cache = %p)\n", this));
1073 
1074 	if (temporary)
1075 		return B_OK;
1076 
1077 	Lock();
1078 	status_t status = vm_page_write_modified_pages(this);
1079 	Unlock();
1080 
1081 	return status;
1082 }
1083 
1084 
1085 /*!	Commits the memory to the store if the \a commitment is larger than
1086 	what's committed already.
1087 	Assumes you have the cache's lock held.
1088 */
1089 status_t
1090 VMCache::SetMinimalCommitment(off_t commitment, int priority)
1091 {
1092 	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %" B_PRIdOFF
1093 		")\n", this, commitment));
1094 	AssertLocked();
1095 
1096 	T(SetMinimalCommitment(this, commitment));
1097 
1098 	status_t status = B_OK;
1099 
1100 	// If we don't have enough committed space to cover through to the new end
1101 	// of the area...
1102 	if (committed_size < commitment) {
1103 		// ToDo: should we check if the cache's virtual size is large
1104 		//	enough for a commitment of that size?
1105 
1106 		// try to commit more memory
1107 		status = Commit(commitment, priority);
1108 	}
1109 
1110 	return status;
1111 }
1112 
1113 
1114 /*!	This function updates the virtual_end field of the cache.
1115 	If needed, it will free up all pages that don't belong to the cache anymore.
1116 	The cache lock must be held when you call it.
1117 	Since removed pages no longer belong to the cache, they are not written
1118 	back before being removed.
1119 
1120 	Note, this function may temporarily release the cache lock in case it
1121 	has to wait for busy pages.
1122 */
1123 status_t
1124 VMCache::Resize(off_t newSize, int priority)
1125 {
1126 	TRACE(("VMCache::Resize(cache %p, newSize %" B_PRIdOFF ") old size %"
1127 		B_PRIdOFF "\n", this, newSize, this->virtual_end));
1128 	this->AssertLocked();
1129 
1130 	T(Resize(this, newSize));
1131 
1132 	status_t status = Commit(newSize - virtual_base, priority);
1133 	if (status != B_OK)
1134 		return status;
1135 
1136 	uint32 oldPageCount = (uint32)((virtual_end + B_PAGE_SIZE - 1)
1137 		>> PAGE_SHIFT);
1138 	uint32 newPageCount = (uint32)((newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT);
1139 
1140 	if (newPageCount < oldPageCount) {
1141 		// we need to remove all pages in the cache outside of the new virtual
1142 		// size
1143 		for (VMCachePagesTree::Iterator it
1144 					= pages.GetIterator(newPageCount, true, true);
1145 				vm_page* page = it.Next();) {
1146 			if (page->busy) {
1147 				if (page->busy_writing) {
1148 					// We cannot wait for the page to become available
1149 					// as we might cause a deadlock this way
1150 					page->busy_writing = false;
1151 						// this will notify the writer to free the page
1152 				} else {
1153 					// wait for page to become unbusy
1154 					WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1155 
1156 					// restart from the start of the list
1157 					it = pages.GetIterator(newPageCount, true, true);
1158 				}
1159 				continue;
1160 			}
1161 
1162 			// remove the page and put it into the free queue
1163 			DEBUG_PAGE_ACCESS_START(page);
1164 			vm_remove_all_page_mappings(page);
1165 			ASSERT(page->WiredCount() == 0);
1166 				// TODO: Find a real solution! If the page is wired
1167 				// temporarily (e.g. by lock_memory()), we actually must not
1168 				// unmap it!
1169 			RemovePage(page);
1170 			vm_page_free(this, page);
1171 				// Note: When iterating through an IteratableSplayTree,
1172 				// removing the current node is safe.
1173 		}
1174 	}
1175 
1176 	virtual_end = newSize;
1177 	return B_OK;
1178 }
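
// Worked example (illustrative): with B_PAGE_SIZE == 4096, shrinking a cache
// from virtual_end == 10000 to newSize == 4096 yields oldPageCount == 3 and
// newPageCount == 1, so the iterator starts at cache_offset 1 and the pages
// covering bytes [4096, 10000) are unmapped and freed.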
1179 
1180 /*!	This function updates the virtual_base field of the cache.
1181 	If needed, it will free up all pages that don't belong to the cache anymore.
1182 	The cache lock must be held when you call it.
1183 	Since removed pages no longer belong to the cache, they are not written
1184 	back before being removed.
1185 
1186 	Note, this function may temporarily release the cache lock in case it
1187 	has to wait for busy pages.
1188 */
1189 status_t
1190 VMCache::Rebase(off_t newBase, int priority)
1191 {
1192 	TRACE(("VMCache::Rebase(cache %p, newBase %" B_PRIdOFF ") old base %"
1193 		B_PRIdOFF "\n", this, newBase, this->virtual_base));
1194 	this->AssertLocked();
1195 
1196 	T(Rebase(this, newBase));
1197 
1198 	status_t status = Commit(virtual_end - newBase, priority);
1199 	if (status != B_OK)
1200 		return status;
1201 
1202 	uint32 basePage = (uint32)(newBase >> PAGE_SHIFT);
1203 
1204 	if (newBase > virtual_base) {
1205 		// we need to remove all pages in the cache outside of the new virtual
1206 		// size
1207 		VMCachePagesTree::Iterator it = pages.GetIterator();
1208 		for (vm_page* page = it.Next();
1209 				page != NULL && page->cache_offset < basePage;
1210 				page = it.Next()) {
1211 			if (page->busy) {
1212 				if (page->busy_writing) {
1213 					// We cannot wait for the page to become available
1214 					// as we might cause a deadlock this way
1215 					page->busy_writing = false;
1216 						// this will notify the writer to free the page
1217 				} else {
1218 					// wait for page to become unbusy
1219 					WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1220 
1221 					// restart from the start of the list
1222 					it = pages.GetIterator();
1223 				}
1224 				continue;
1225 			}
1226 
1227 			// remove the page and put it into the free queue
1228 			DEBUG_PAGE_ACCESS_START(page);
1229 			vm_remove_all_page_mappings(page);
1230 			ASSERT(page->WiredCount() == 0);
1231 				// TODO: Find a real solution! If the page is wired
1232 				// temporarily (e.g. by lock_memory()), we actually must not
1233 				// unmap it!
1234 			RemovePage(page);
1235 			vm_page_free(this, page);
1236 				// Note: When iterating through an IteratableSplayTree,
1237 				// removing the current node is safe.
1238 		}
1239 	}
1240 
1241 	virtual_base = newBase;
1242 	return B_OK;
1243 }
1244 
1245 
1246 /*!	Moves pages in the given range from the source cache into this cache. Both
1247 	caches must be locked.
1248 */
1249 status_t
1250 VMCache::Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset)
1251 {
1252 	page_num_t startPage = offset >> PAGE_SHIFT;
1253 	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
1254 	off_t offsetChange = newOffset - offset;
1255 
1256 	VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true,
1257 		true);
1258 	for (vm_page* page = it.Next();
1259 				page != NULL && page->cache_offset < endPage;
1260 				page = it.Next()) {
1261 		MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange);
1262 	}
1263 
1264 	return B_OK;
1265 }
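
// Worked example (illustrative): with B_PAGE_SIZE == 4096, a call like
//	Adopt(source, 8192, 16384, 0)
// moves the source pages with cache_offset 2 through 5 into this cache and
// shifts each by offsetChange == -8192, so source offset 8192 becomes
// offset 0 here.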
1266 
1267 
1268 /*!	You have to call this function with the VMCache lock held. */
1269 status_t
1270 VMCache::FlushAndRemoveAllPages()
1271 {
1272 	ASSERT_LOCKED_MUTEX(&fLock);
1273 
1274 	while (page_count > 0) {
1275 		// write back modified pages
1276 		status_t status = vm_page_write_modified_pages(this);
1277 		if (status != B_OK)
1278 			return status;
1279 
1280 		// remove pages
1281 		for (VMCachePagesTree::Iterator it = pages.GetIterator();
1282 				vm_page* page = it.Next();) {
1283 			if (page->busy) {
1284 				// wait for page to become unbusy
1285 				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1286 
1287 				// restart from the start of the list
1288 				it = pages.GetIterator();
1289 				continue;
1290 			}
1291 
1292 			// skip modified pages -- they will be written back in the next
1293 			// iteration
1294 			if (page->State() == PAGE_STATE_MODIFIED)
1295 				continue;
1296 
1297 			// We can't remove mapped pages.
1298 			if (page->IsMapped())
1299 				return B_BUSY;
1300 
1301 			DEBUG_PAGE_ACCESS_START(page);
1302 			RemovePage(page);
1303 			vm_page_free(this, page);
1304 				// Note: When iterating through an IteratableSplayTree,
1305 				// removing the current node is safe.
1306 		}
1307 	}
1308 
1309 	return B_OK;
1310 }
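
// Usage sketch (illustrative): discarding the contents of a cache, for
// instance before tearing down its backing store. AutoLocker is the same
// pattern RemoveArea() uses above; B_BUSY indicates mapped pages remain.
//
//	AutoLocker<VMCache> locker(cache);
//	status_t status = cache->FlushAndRemoveAllPages();
//	if (status == B_BUSY) {
//		// some pages are still mapped and could not be removed
//	}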
1311 
1312 
1313 status_t
1314 VMCache::Commit(off_t size, int priority)
1315 {
1316 	committed_size = size;
1317 	return B_OK;
1318 }
1319 
1320 
1321 /*!	Returns whether the cache's underlying backing store could deliver the
1322 	page at the given offset.
1323 
1324 	Basically it returns whether a Read() at \a offset would at least read a
1325 	partial page (assuming that no unexpected errors occur or the situation
1326 	changes in the meantime).
1327 */
1328 bool
1329 VMCache::HasPage(off_t offset)
1330 {
1331 	// In accordance with Fault() the default implementation doesn't have a
1332 	// backing store and doesn't allow faults.
1333 	return false;
1334 }
1335 
1336 
1337 status_t
1338 VMCache::Read(off_t offset, const generic_io_vec *vecs, size_t count,
1339 	uint32 flags, generic_size_t *_numBytes)
1340 {
1341 	return B_ERROR;
1342 }
1343 
1344 
1345 status_t
1346 VMCache::Write(off_t offset, const generic_io_vec *vecs, size_t count,
1347 	uint32 flags, generic_size_t *_numBytes)
1348 {
1349 	return B_ERROR;
1350 }
1351 
1352 
1353 status_t
1354 VMCache::WriteAsync(off_t offset, const generic_io_vec* vecs, size_t count,
1355 	generic_size_t numBytes, uint32 flags, AsyncIOCallback* callback)
1356 {
1357 	// Not supported, fall back to the synchronous hook.
1358 	generic_size_t transferred = numBytes;
1359 	status_t error = Write(offset, vecs, count, flags, &transferred);
1360 
1361 	if (callback != NULL)
1362 		callback->IOFinished(error, transferred != numBytes, transferred);
1363 
1364 	return error;
1365 }
1366 
1367 
1368 /*!	\brief Returns whether the cache can write the page at the given offset.
1369 
1370 	The cache must be locked when this function is invoked.
1371 
1372 	@param offset The page offset.
1373 	@return \c true, if the page can be written, \c false otherwise.
1374 */
1375 bool
1376 VMCache::CanWritePage(off_t offset)
1377 {
1378 	return false;
1379 }
1380 
1381 
1382 status_t
1383 VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
1384 {
1385 	return B_BAD_ADDRESS;
1386 }
1387 
1388 
1389 void
1390 VMCache::Merge(VMCache* source)
1391 {
1392 	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
1393 			vm_page* page = it.Next();) {
1394 		// Note: Removing the current node while iterating through an
1395 		// IteratableSplayTree is safe.
1396 		vm_page* consumerPage = LookupPage(
1397 			(off_t)page->cache_offset << PAGE_SHIFT);
1398 		if (consumerPage == NULL) {
1399 			// the page is not yet in the consumer cache - move it upwards
1400 			MovePage(page);
1401 		}
1402 	}
1403 }
1404 
1405 
1406 status_t
1407 VMCache::AcquireUnreferencedStoreRef()
1408 {
1409 	return B_OK;
1410 }
1411 
1412 
1413 void
1414 VMCache::AcquireStoreRef()
1415 {
1416 }
1417 
1418 
1419 void
1420 VMCache::ReleaseStoreRef()
1421 {
1422 }
1423 
1424 
1425 /*!	Kernel debugger version of HasPage().
1426 	Does not do any locking.
1427 */
1428 bool
1429 VMCache::DebugHasPage(off_t offset)
1430 {
1431 	// default that works for all subclasses that don't lock anyway
1432 	return HasPage(offset);
1433 }
1434 
1435 
1436 /*!	Kernel debugger version of LookupPage().
1437 	Does not do any locking.
1438 */
1439 vm_page*
1440 VMCache::DebugLookupPage(off_t offset)
1441 {
1442 	return pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
1443 }
1444 
1445 
1446 void
1447 VMCache::Dump(bool showPages) const
1448 {
1449 	kprintf("CACHE %p:\n", this);
1450 	kprintf("  ref_count:    %" B_PRId32 "\n", RefCount());
1451 	kprintf("  source:       %p\n", source);
1452 	kprintf("  type:         %s\n", vm_cache_type_to_string(type));
1453 	kprintf("  virtual_base: 0x%" B_PRIx64 "\n", virtual_base);
1454 	kprintf("  virtual_end:  0x%" B_PRIx64 "\n", virtual_end);
1455 	kprintf("  temporary:    %" B_PRIu32 "\n", temporary);
1456 	kprintf("  lock:         %p\n", &fLock);
1457 #if KDEBUG
1458 	kprintf("  lock.holder:  %" B_PRId32 "\n", fLock.holder);
1459 #endif
1460 	kprintf("  areas:\n");
1461 
1462 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1463 		kprintf("    area 0x%" B_PRIx32 ", %s\n", area->id, area->name);
1464 		kprintf("\tbase_addr:  0x%lx, size: 0x%lx\n", area->Base(),
1465 			area->Size());
1466 		kprintf("\tprotection: 0x%" B_PRIx32 "\n", area->protection);
1467 		kprintf("\towner:      0x%" B_PRIx32 "\n", area->address_space->ID());
1468 	}
1469 
1470 	kprintf("  consumers:\n");
1471 	for (ConsumerList::ConstIterator it = consumers.GetIterator();
1472 		 	VMCache* consumer = it.Next();) {
1473 		kprintf("\t%p\n", consumer);
1474 	}
1475 
1476 	kprintf("  pages:\n");
1477 	if (showPages) {
1478 		for (VMCachePagesTree::ConstIterator it = pages.GetIterator();
1479 				vm_page* page = it.Next();) {
1480 			if (!vm_page_is_dummy(page)) {
1481 				kprintf("\t%p ppn %#" B_PRIxPHYSADDR " offset %#" B_PRIxPHYSADDR
1482 					" state %u (%s) wired_count %u\n", page,
1483 					page->physical_page_number, page->cache_offset,
1484 					page->State(), page_state_to_string(page->State()),
1485 					page->WiredCount());
1486 			} else {
1487 				kprintf("\t%p DUMMY PAGE state %u (%s)\n",
1488 					page, page->State(), page_state_to_string(page->State()));
1489 			}
1490 		}
1491 	} else
1492 		kprintf("\t%" B_PRIu32 " in cache\n", page_count);
1493 }
1494 
1495 
1496 /*!	Wakes up threads waiting for page events.
1497 	\param page The page for which events occurred.
1498 	\param events The mask of events that occurred.
1499 */
1500 void
1501 VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
1502 {
1503 	PageEventWaiter** it = &fPageEventWaiters;
1504 	while (PageEventWaiter* waiter = *it) {
1505 		if (waiter->page == page && (waiter->events & events) != 0) {
1506 			// remove from list and unblock
1507 			*it = waiter->next;
1508 			thread_unblock(waiter->thread, B_OK);
1509 		} else
1510 			it = &waiter->next;
1511 	}
1512 }
1513 
1514 
1515 /*!	Merges the given cache with its only consumer.
1516 	The caller must hold both the cache's and the consumer's lock. The method
1517 	releases neither lock.
1518 */
1519 void
1520 VMCache::_MergeWithOnlyConsumer()
1521 {
1522 	VMCache* consumer = consumers.RemoveHead();
1523 
1524 	TRACE(("merge vm cache %p (ref == %" B_PRId32 ") with vm cache %p\n",
1525 		this, this->fRefCount, consumer));
1526 
1527 	T(Merge(this, consumer));
1528 
1529 	// merge the cache
1530 	consumer->Merge(this);
1531 
1532 	// The remaining consumer has got a new source.
1533 	if (source != NULL) {
1534 		VMCache* newSource = source;
1535 
1536 		newSource->Lock();
1537 
1538 		newSource->consumers.Remove(this);
1539 		newSource->consumers.Add(consumer);
1540 		consumer->source = newSource;
1541 		source = NULL;
1542 
1543 		newSource->Unlock();
1544 	} else
1545 		consumer->source = NULL;
1546 
1547 	// Release the reference the cache's consumer owned. The consumer takes
1548 	// over the cache's ref to its source (if any) instead.
1549 	ReleaseRefLocked();
1550 }
1551 
1552 
1553 /*!	Removes the \a consumer from this cache.
1554 	It will also release the reference to the cache owned by the consumer.
1555 	Assumes you have the consumer's cache lock held. This cache must not be
1556 	locked.
1557 */
1558 void
1559 VMCache::_RemoveConsumer(VMCache* consumer)
1560 {
1561 	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
1562 	consumer->AssertLocked();
1563 
1564 	T(RemoveConsumer(this, consumer));
1565 
1566 	// Remove the store ref before locking the cache. Otherwise we'd call into
1567 	// the VFS while holding the cache lock, which would reverse the usual
1568 	// locking order.
1569 	ReleaseStoreRef();
1570 
1571 	// remove the consumer from the cache, but keep its reference until later
1572 	Lock();
1573 	consumers.Remove(consumer);
1574 	consumer->source = NULL;
1575 
1576 	ReleaseRefAndUnlock();
1577 }
1578 
1579 
1580 // #pragma mark - VMCacheFactory
1581 	// TODO: Move to own source file!
1582 
1583 
1584 /*static*/ status_t
1585 VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
1586 	int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
1587 	int priority)
1588 {
1589 	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1590 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1591 	if (priority >= VM_PRIORITY_VIP)
1592 		allocationFlags |= HEAP_PRIORITY_VIP;
1593 
1594 #if ENABLE_SWAP_SUPPORT
1595 	if (swappable) {
1596 		VMAnonymousCache* cache
1597 			= new(gAnonymousCacheObjectCache, allocationFlags) VMAnonymousCache;
1598 		if (cache == NULL)
1599 			return B_NO_MEMORY;
1600 
1601 		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1602 			numGuardPages, allocationFlags);
1603 		if (error != B_OK) {
1604 			cache->Delete();
1605 			return error;
1606 		}
1607 
1608 		T(Create(cache));
1609 
1610 		_cache = cache;
1611 		return B_OK;
1612 	}
1613 #endif
1614 
1615 	VMAnonymousNoSwapCache* cache
1616 		= new(gAnonymousNoSwapCacheObjectCache, allocationFlags)
1617 			VMAnonymousNoSwapCache;
1618 	if (cache == NULL)
1619 		return B_NO_MEMORY;
1620 
1621 	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1622 		numGuardPages, allocationFlags);
1623 	if (error != B_OK) {
1624 		cache->Delete();
1625 		return error;
1626 	}
1627 
1628 	T(Create(cache));
1629 
1630 	_cache = cache;
1631 	return B_OK;
1632 }
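
// Usage sketch (illustrative): creating a swappable, over-committing
// anonymous cache. The argument values and VM_PRIORITY_USER are example
// choices, not requirements of this factory.
//
//	VMCache* cache;
//	status_t error = VMCacheFactory::CreateAnonymousCache(cache,
//		true /* canOvercommit */, 0 /* numPrecommittedPages */,
//		0 /* numGuardPages */, true /* swappable */, VM_PRIORITY_USER);
//	if (error != B_OK)
//		return error;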
1633 
1634 
1635 /*static*/ status_t
1636 VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
1637 {
1638 	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1639 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1640 		// Note: Vnode cache creation is never VIP.
1641 
1642 	VMVnodeCache* cache
1643 		= new(gVnodeCacheObjectCache, allocationFlags) VMVnodeCache;
1644 	if (cache == NULL)
1645 		return B_NO_MEMORY;
1646 
1647 	status_t error = cache->Init(vnode, allocationFlags);
1648 	if (error != B_OK) {
1649 		cache->Delete();
1650 		return error;
1651 	}
1652 
1653 	T(Create(cache));
1654 
1655 	_cache = cache;
1656 	return B_OK;
1657 }
1658 
1659 
1660 /*static*/ status_t
1661 VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
1662 {
1663 	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1664 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1665 		// Note: Device cache creation is never VIP.
1666 
1667 	VMDeviceCache* cache
1668 		= new(gDeviceCacheObjectCache, allocationFlags) VMDeviceCache;
1669 	if (cache == NULL)
1670 		return B_NO_MEMORY;
1671 
1672 	status_t error = cache->Init(baseAddress, allocationFlags);
1673 	if (error != B_OK) {
1674 		cache->Delete();
1675 		return error;
1676 	}
1677 
1678 	T(Create(cache));
1679 
1680 	_cache = cache;
1681 	return B_OK;
1682 }
1683 
1684 
1685 /*static*/ status_t
1686 VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
1687 {
1688 	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1689 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1690 	if (priority >= VM_PRIORITY_VIP)
1691 		allocationFlags |= HEAP_PRIORITY_VIP;
1692 
1693 	VMNullCache* cache
1694 		= new(gNullCacheObjectCache, allocationFlags) VMNullCache;
1695 	if (cache == NULL)
1696 		return B_NO_MEMORY;
1697 
1698 	status_t error = cache->Init(allocationFlags);
1699 	if (error != B_OK) {
1700 		cache->Delete();
1701 		return error;
1702 	}
1703 
1704 	T(Create(cache));
1705 
1706 	_cache = cache;
1707 	return B_OK;
1708 }
1709