xref: /haiku/src/system/kernel/vm/VMCache.cpp (revision c6657ffe026e0d708e566d5467e0eb56de9d33a6)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include <vm/VMCache.h>
12 
13 #include <stddef.h>
14 #include <stdlib.h>
15 
16 #include <algorithm>
17 
18 #include <arch/cpu.h>
19 #include <condition_variable.h>
20 #include <heap.h>
21 #include <int.h>
22 #include <kernel.h>
23 #include <slab/Slab.h>
24 #include <smp.h>
25 #include <tracing.h>
26 #include <util/AutoLock.h>
27 #include <vfs.h>
28 #include <vm/vm.h>
29 #include <vm/vm_page.h>
30 #include <vm/vm_priv.h>
31 #include <vm/vm_types.h>
32 #include <vm/VMAddressSpace.h>
33 #include <vm/VMArea.h>
34 
35 // needed for the factory only
36 #include "VMAnonymousCache.h"
37 #include "VMAnonymousNoSwapCache.h"
38 #include "VMDeviceCache.h"
39 #include "VMNullCache.h"
40 #include "../cache/vnode_store.h"
41 
42 
43 //#define TRACE_VM_CACHE
44 #ifdef TRACE_VM_CACHE
45 #	define TRACE(x) dprintf x
46 #else
47 #	define TRACE(x) ;
48 #endif
49 
50 
51 #if DEBUG_CACHE_LIST
52 VMCache* gDebugCacheList;
53 #endif
54 static mutex sCacheListLock = MUTEX_INITIALIZER("global VMCache list");
55 	// The lock is also needed when the debug feature is disabled.
56 
57 ObjectCache* gCacheRefObjectCache;
58 ObjectCache* gAnonymousCacheObjectCache;
59 ObjectCache* gAnonymousNoSwapCacheObjectCache;
60 ObjectCache* gVnodeCacheObjectCache;
61 ObjectCache* gDeviceCacheObjectCache;
62 ObjectCache* gNullCacheObjectCache;
63 
64 
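// Bookkeeping for a thread blocked in WaitForPageEvents(): a singly linked
// list node recording the waiting thread, the page it waits for, and the
// event mask it is interested in. _NotifyPageEvents() walks this list to
// wake up matching waiters.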
65 struct VMCache::PageEventWaiter {
66 	Thread*				thread;
67 	PageEventWaiter*	next;
68 	vm_page*			page;
69 	uint32				events;
70 };
71 
72 
73 #if VM_CACHE_TRACING
74 
75 namespace VMCacheTracing {
76 
77 class VMCacheTraceEntry : public AbstractTraceEntry {
78 	public:
79 		VMCacheTraceEntry(VMCache* cache)
80 			:
81 			fCache(cache)
82 		{
83 #if VM_CACHE_TRACING_STACK_TRACE
84 			fStackTrace = capture_tracing_stack_trace(
85 				VM_CACHE_TRACING_STACK_TRACE, 0, true);
86 				// Don't capture userland stack trace to avoid potential
87 				// deadlocks.
88 #endif
89 		}
90 
91 #if VM_CACHE_TRACING_STACK_TRACE
92 		virtual void DumpStackTrace(TraceOutput& out)
93 		{
94 			out.PrintStackTrace(fStackTrace);
95 		}
96 #endif
97 
98 		VMCache* Cache() const
99 		{
100 			return fCache;
101 		}
102 
103 	protected:
104 		VMCache*	fCache;
105 #if VM_CACHE_TRACING_STACK_TRACE
106 		tracing_stack_trace* fStackTrace;
107 #endif
108 };
109 
110 
111 class Create : public VMCacheTraceEntry {
112 	public:
113 		Create(VMCache* cache)
114 			:
115 			VMCacheTraceEntry(cache)
116 		{
117 			Initialized();
118 		}
119 
120 		virtual void AddDump(TraceOutput& out)
121 		{
122 			out.Print("vm cache create: -> cache: %p", fCache);
123 		}
124 };
125 
126 
127 class Delete : public VMCacheTraceEntry {
128 	public:
129 		Delete(VMCache* cache)
130 			:
131 			VMCacheTraceEntry(cache)
132 		{
133 			Initialized();
134 		}
135 
136 		virtual void AddDump(TraceOutput& out)
137 		{
138 			out.Print("vm cache delete: cache: %p", fCache);
139 		}
140 };
141 
142 
143 class SetMinimalCommitment : public VMCacheTraceEntry {
144 	public:
145 		SetMinimalCommitment(VMCache* cache, off_t commitment)
146 			:
147 			VMCacheTraceEntry(cache),
148 			fOldCommitment(cache->committed_size),
149 			fCommitment(commitment)
150 		{
151 			Initialized();
152 		}
153 
154 		virtual void AddDump(TraceOutput& out)
155 		{
156 			out.Print("vm cache set min commitment: cache: %p, "
157 				"commitment: %" B_PRIdOFF " -> %" B_PRIdOFF, fCache,
158 				fOldCommitment, fCommitment);
159 		}
160 
161 	private:
162 		off_t	fOldCommitment;
163 		off_t	fCommitment;
164 };
165 
166 
167 class Resize : public VMCacheTraceEntry {
168 	public:
169 		Resize(VMCache* cache, off_t size)
170 			:
171 			VMCacheTraceEntry(cache),
172 			fOldSize(cache->virtual_end),
173 			fSize(size)
174 		{
175 			Initialized();
176 		}
177 
178 		virtual void AddDump(TraceOutput& out)
179 		{
180 			out.Print("vm cache resize: cache: %p, size: %" B_PRIdOFF " -> %"
181 				B_PRIdOFF, fCache, fOldSize, fSize);
182 		}
183 
184 	private:
185 		off_t	fOldSize;
186 		off_t	fSize;
187 };
188 
189 
190 class Rebase : public VMCacheTraceEntry {
191 	public:
192 		Rebase(VMCache* cache, off_t base)
193 			:
194 			VMCacheTraceEntry(cache),
195 			fOldBase(cache->virtual_base),
196 			fBase(base)
197 		{
198 			Initialized();
199 		}
200 
201 		virtual void AddDump(TraceOutput& out)
202 		{
203 			out.Print("vm cache rebase: cache: %p, base: %" B_PRIdOFF " -> %"
204 				B_PRIdOFF, fCache, fOldBase, fBase);
205 		}
206 
207 	private:
208 		off_t	fOldBase;
209 		off_t	fBase;
210 };
211 
212 
213 class AddConsumer : public VMCacheTraceEntry {
214 	public:
215 		AddConsumer(VMCache* cache, VMCache* consumer)
216 			:
217 			VMCacheTraceEntry(cache),
218 			fConsumer(consumer)
219 		{
220 			Initialized();
221 		}
222 
223 		virtual void AddDump(TraceOutput& out)
224 		{
225 			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
226 				fConsumer);
227 		}
228 
229 		VMCache* Consumer() const
230 		{
231 			return fConsumer;
232 		}
233 
234 	private:
235 		VMCache*	fConsumer;
236 };
237 
238 
239 class RemoveConsumer : public VMCacheTraceEntry {
240 	public:
241 		RemoveConsumer(VMCache* cache, VMCache* consumer)
242 			:
243 			VMCacheTraceEntry(cache),
244 			fConsumer(consumer)
245 		{
246 			Initialized();
247 		}
248 
249 		virtual void AddDump(TraceOutput& out)
250 		{
251 			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
252 				fCache, fConsumer);
253 		}
254 
255 	private:
256 		VMCache*	fConsumer;
257 };
258 
259 
260 class Merge : public VMCacheTraceEntry {
261 	public:
262 		Merge(VMCache* cache, VMCache* consumer)
263 			:
264 			VMCacheTraceEntry(cache),
265 			fConsumer(consumer)
266 		{
267 			Initialized();
268 		}
269 
270 		virtual void AddDump(TraceOutput& out)
271 		{
272 			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
273 				fCache, fConsumer);
274 		}
275 
276 	private:
277 		VMCache*	fConsumer;
278 };
279 
280 
281 class InsertArea : public VMCacheTraceEntry {
282 	public:
283 		InsertArea(VMCache* cache, VMArea* area)
284 			:
285 			VMCacheTraceEntry(cache),
286 			fArea(area)
287 		{
288 			Initialized();
289 		}
290 
291 		virtual void AddDump(TraceOutput& out)
292 		{
293 			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
294 				fArea);
295 		}
296 
297 		VMArea*	Area() const
298 		{
299 			return fArea;
300 		}
301 
302 	private:
303 		VMArea*	fArea;
304 };
305 
306 
307 class RemoveArea : public VMCacheTraceEntry {
308 	public:
309 		RemoveArea(VMCache* cache, VMArea* area)
310 			:
311 			VMCacheTraceEntry(cache),
312 			fArea(area)
313 		{
314 			Initialized();
315 		}
316 
317 		virtual void AddDump(TraceOutput& out)
318 		{
319 			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
320 				fArea);
321 		}
322 
323 	private:
324 		VMArea*	fArea;
325 };
326 
327 }	// namespace VMCacheTracing
328 
329 #	define T(x) new(std::nothrow) VMCacheTracing::x;
330 
331 #	if VM_CACHE_TRACING >= 2
332 
333 namespace VMCacheTracing {
334 
335 class InsertPage : public VMCacheTraceEntry {
336 	public:
337 		InsertPage(VMCache* cache, vm_page* page, off_t offset)
338 			:
339 			VMCacheTraceEntry(cache),
340 			fPage(page),
341 			fOffset(offset)
342 		{
343 			Initialized();
344 		}
345 
346 		virtual void AddDump(TraceOutput& out)
347 		{
348 			out.Print("vm cache insert page: cache: %p, page: %p, offset: %"
349 				B_PRIdOFF, fCache, fPage, fOffset);
350 		}
351 
352 	private:
353 		vm_page*	fPage;
354 		off_t		fOffset;
355 };
356 
357 
358 class RemovePage : public VMCacheTraceEntry {
359 	public:
360 		RemovePage(VMCache* cache, vm_page* page)
361 			:
362 			VMCacheTraceEntry(cache),
363 			fPage(page)
364 		{
365 			Initialized();
366 		}
367 
368 		virtual void AddDump(TraceOutput& out)
369 		{
370 			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
371 				fPage);
372 		}
373 
374 	private:
375 		vm_page*	fPage;
376 };
377 
378 }	// namespace VMCacheTracing
379 
380 #		define T2(x) new(std::nothrow) VMCacheTracing::x;
381 #	else
382 #		define T2(x) ;
383 #	endif
384 #else
385 #	define T(x) ;
386 #	define T2(x) ;
387 #endif
388 
389 
390 //	#pragma mark - debugger commands
391 
392 
393 #if VM_CACHE_TRACING
394 
395 
396 static void*
397 cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
398 {
399 	using namespace VMCacheTracing;
400 
401 	// find the previous "insert area" entry for the given area
402 	TraceEntryIterator iterator = baseIterator;
403 	TraceEntry* entry = iterator.Current();
404 	while (entry != NULL) {
405 		if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
406 			if (insertAreaEntry->Area() == area)
407 				return insertAreaEntry->Cache();
408 		}
409 
410 		entry = iterator.Previous();
411 	}
412 
413 	return NULL;
414 }
415 
416 
417 static void*
418 cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
419 {
420 	using namespace VMCacheTracing;
421 
422 	// find the previous "add consumer" or "create" entry for the given cache
423 	TraceEntryIterator iterator = baseIterator;
424 	TraceEntry* entry = iterator.Current();
425 	while (entry != NULL) {
426 		if (Create* createEntry = dynamic_cast<Create*>(entry)) {
427 			if (createEntry->Cache() == cache)
428 				return NULL;
429 		} else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
430 			if (addEntry->Consumer() == cache)
431 				return addEntry->Cache();
432 		}
433 
434 		entry = iterator.Previous();
435 	}
436 
437 	return NULL;
438 }
439 
440 
441 static int
442 command_cache_stack(int argc, char** argv)
443 {
444 	if (argc < 3 || argc > 4) {
445 		print_debugger_command_usage(argv[0]);
446 		return 0;
447 	}
448 
449 	bool isArea = false;
450 
451 	int argi = 1;
452 	if (argc == 4) {
453 		if (strcmp(argv[argi], "area") != 0) {
454 			print_debugger_command_usage(argv[0]);
455 			return 0;
456 		}
457 
458 		argi++;
459 		isArea = true;
460 	}
461 
462 	uint64 addressValue;
463 	uint64 debugEntryIndex;
464 	if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
465 		|| !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
466 		return 0;
467 	}
468 
469 	TraceEntryIterator baseIterator;
470 	if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
471 		kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
472 		return 0;
473 	}
474 
475 	void* address = (void*)(addr_t)addressValue;
476 
477 	kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
478 		isArea ? "area" : "cache", address, debugEntryIndex);
479 	if (isArea) {
480 		address = cache_stack_find_area_cache(baseIterator, address);
481 		if (address == NULL) {
482 			kprintf("  cache not found\n");
483 			return 0;
484 		}
485 	}
486 
487 	while (address != NULL) {
488 		kprintf("  %p\n", address);
489 		address = cache_stack_find_consumer(baseIterator, address);
490 	}
491 
492 	return 0;
493 }
494 
495 
496 #endif	// VM_CACHE_TRACING
497 
498 
499 //	#pragma mark -
500 
501 
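/*!	Creates the object caches used for VMCache-related allocations (the cache
	refs and the various VMCache subclasses). Called during early VM
	initialization; panics if any of the object caches cannot be created.
*/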
502 status_t
503 vm_cache_init(kernel_args* args)
504 {
505 	// Create object caches for the structures we allocate here.
506 	gCacheRefObjectCache = create_object_cache("cache refs", sizeof(VMCacheRef),
507 		0, NULL, NULL, NULL);
508 	gAnonymousCacheObjectCache = create_object_cache("anon caches",
509 		sizeof(VMAnonymousCache), 0, NULL, NULL, NULL);
510 	gAnonymousNoSwapCacheObjectCache = create_object_cache(
511 		"anon no-swap caches", sizeof(VMAnonymousNoSwapCache), 0, NULL, NULL,
512 		NULL);
513 	gVnodeCacheObjectCache = create_object_cache("vnode caches",
514 		sizeof(VMVnodeCache), 0, NULL, NULL, NULL);
515 	gDeviceCacheObjectCache = create_object_cache("device caches",
516 		sizeof(VMDeviceCache), 0, NULL, NULL, NULL);
517 	gNullCacheObjectCache = create_object_cache("null caches",
518 		sizeof(VMNullCache), 0, NULL, NULL, NULL);
519 
520 	if (gCacheRefObjectCache == NULL || gAnonymousCacheObjectCache == NULL
521 		|| gAnonymousNoSwapCacheObjectCache == NULL
522 		|| gVnodeCacheObjectCache == NULL
523 		|| gDeviceCacheObjectCache == NULL
524 		|| gNullCacheObjectCache == NULL) {
525 		panic("vm_cache_init(): Failed to create object caches!");
526 		return B_NO_MEMORY;
527 	}
528 
529 	return B_OK;
530 }
531 
532 
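/*!	Late initialization step, run once the heap is available. It only
	registers the "cache_stack" debugger command, and only when VM cache
	tracing is enabled.
*/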
533 void
534 vm_cache_init_post_heap()
535 {
536 #if VM_CACHE_TRACING
537 	add_debugger_command_etc("cache_stack", &command_cache_stack,
538 		"List the ancestors (sources) of a VMCache at the time given by "
539 			"tracing entry index",
540 		"[ \"area\" ] <address> <tracing entry index>\n"
541 		"All ancestors (sources) of a given VMCache at the time given by the\n"
542 		"tracing entry index are listed. If \"area\" is given the supplied\n"
543 		"address is an area instead of a cache address. The listing will\n"
544 		"start with the area's cache at that point.\n",
545 		0);
546 #endif	// VM_CACHE_TRACING
547 }
548 
549 
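/*!	Returns the cache the given \a page belongs to, locked and with a
	reference acquired, or \c NULL if the page does not (or no longer does)
	belong to a cache. If \a dontWait is \c true only TryLock() is attempted,
	so \c NULL may also be returned when the cache is currently locked by
	someone else. Because the page's cache may change while the lock is being
	acquired, the page's cache ref is re-checked afterwards and the attempt
	is repeated if it changed.
*/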
550 VMCache*
551 vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
552 {
553 	mutex_lock(&sCacheListLock);
554 
555 	while (dontWait) {
556 		VMCacheRef* cacheRef = page->CacheRef();
557 		if (cacheRef == NULL) {
558 			mutex_unlock(&sCacheListLock);
559 			return NULL;
560 		}
561 
562 		VMCache* cache = cacheRef->cache;
563 		if (!cache->TryLock()) {
564 			mutex_unlock(&sCacheListLock);
565 			return NULL;
566 		}
567 
568 		if (cacheRef == page->CacheRef()) {
569 			mutex_unlock(&sCacheListLock);
570 			cache->AcquireRefLocked();
571 			return cache;
572 		}
573 
574 		// the cache changed in the meantime
575 		cache->Unlock();
576 	}
577 
578 	while (true) {
579 		VMCacheRef* cacheRef = page->CacheRef();
580 		if (cacheRef == NULL) {
581 			mutex_unlock(&sCacheListLock);
582 			return NULL;
583 		}
584 
585 		VMCache* cache = cacheRef->cache;
586 		if (!cache->SwitchLock(&sCacheListLock)) {
587 			// cache has been deleted
588 			mutex_lock(&sCacheListLock);
589 			continue;
590 		}
591 
592 		mutex_lock(&sCacheListLock);
593 		if (cache == page->Cache()) {
594 			mutex_unlock(&sCacheListLock);
595 			cache->AcquireRefLocked();
596 			return cache;
597 		}
598 
599 		// the cache changed in the meantime
600 		cache->Unlock();
601 	}
602 }
603 
604 
605 // #pragma mark - VMCache
606 
607 
608 VMCacheRef::VMCacheRef(VMCache* cache)
609 	:
610 	cache(cache),
611 	ref_count(1)
612 {
613 }
614 
615 
616 // #pragma mark - VMCache
617 
618 
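/*!	Returns whether this cache can be merged with its consumer: it must have
	no areas attached, be a temporary cache, and have exactly one consumer.
*/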
619 bool
620 VMCache::_IsMergeable() const
621 {
622 	return areas == NULL && temporary && !consumers.IsEmpty()
623 		&& consumers.Head() == consumers.Tail();
624 }
625 
626 
627 VMCache::VMCache()
628 	:
629 	fCacheRef(NULL)
630 {
631 }
632 
633 
634 VMCache::~VMCache()
635 {
636 	object_cache_delete(gCacheRefObjectCache, fCacheRef);
637 }
638 
639 
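/*!	Initializes the cache: creates its lock, resets all fields to their
	defaults, and allocates the VMCacheRef. With DEBUG_CACHE_LIST enabled the
	cache is also added to the global debug cache list. Returns
	\c B_NO_MEMORY if the cache ref cannot be allocated.
*/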
640 status_t
641 VMCache::Init(uint32 cacheType, uint32 allocationFlags)
642 {
643 	mutex_init(&fLock, "VMCache");
644 
645 	areas = NULL;
646 	fRefCount = 1;
647 	source = NULL;
648 	virtual_base = 0;
649 	virtual_end = 0;
650 	committed_size = 0;
651 	temporary = 0;
652 	page_count = 0;
653 	fWiredPagesCount = 0;
654 	type = cacheType;
655 	fPageEventWaiters = NULL;
656 
657 #if DEBUG_CACHE_LIST
658 	debug_previous = NULL;
659 	debug_next = NULL;
660 		// initialize in case the following fails
661 #endif
662 
663 	fCacheRef = new(gCacheRefObjectCache, allocationFlags) VMCacheRef(this);
664 	if (fCacheRef == NULL)
665 		return B_NO_MEMORY;
666 
667 #if DEBUG_CACHE_LIST
668 	mutex_lock(&sCacheListLock);
669 
670 	if (gDebugCacheList != NULL)
671 		gDebugCacheList->debug_previous = this;
672 	debug_next = gDebugCacheList;
673 	gDebugCacheList = this;
674 
675 	mutex_unlock(&sCacheListLock);
676 #endif
677 
678 	return B_OK;
679 }
680 
681 
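/*!	Destroys the cache: frees all of its pages, detaches it from its source,
	and finally releases the object via DeleteObject(). The cache must be
	locked, and it must no longer have any areas or consumers attached.
*/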
682 void
683 VMCache::Delete()
684 {
685 	if (areas != NULL)
686 		panic("cache %p to be deleted still has areas", this);
687 	if (!consumers.IsEmpty())
688 		panic("cache %p to be deleted still has consumers", this);
689 
690 	T(Delete(this));
691 
692 	// free all of the pages in the cache
693 	while (vm_page* page = pages.Root()) {
694 		if (!page->mappings.IsEmpty() || page->WiredCount() != 0) {
695 			panic("remove page %p from cache %p: page still has mappings!\n"
696 				"@!page %p; cache %p", page, this, page, this);
697 		}
698 
699 		// remove it
700 		pages.Remove(page);
701 		page->SetCacheRef(NULL);
702 
703 		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
704 			page->physical_page_number));
705 		DEBUG_PAGE_ACCESS_START(page);
706 		vm_page_free(this, page);
707 	}
708 
709 	// remove the ref to the source
710 	if (source)
711 		source->_RemoveConsumer(this);
712 
713 	// We lock and unlock the sCacheListLock, even if the DEBUG_CACHE_LIST is
714 	// not enabled. This synchronization point is needed for
715 	// vm_cache_acquire_locked_page_cache().
716 	mutex_lock(&sCacheListLock);
717 
718 #if DEBUG_CACHE_LIST
719 	if (debug_previous)
720 		debug_previous->debug_next = debug_next;
721 	if (debug_next)
722 		debug_next->debug_previous = debug_previous;
723 	if (this == gDebugCacheList)
724 		gDebugCacheList = debug_next;
725 #endif
726 
727 	mutex_destroy(&fLock);
728 
729 	mutex_unlock(&sCacheListLock);
730 
731 	DeleteObject();
732 }
733 
734 
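/*!	Unlocks the cache. While only one reference is left and the cache is
	mergeable, it is first merged with its single consumer. If the reference
	count has dropped to zero, the cache is deleted instead of merely being
	unlocked.
	\param consumerLocked Whether the caller already holds the consumer's
		lock.
*/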
735 void
736 VMCache::Unlock(bool consumerLocked)
737 {
738 	while (fRefCount == 1 && _IsMergeable()) {
739 		VMCache* consumer = consumers.Head();
740 		if (consumerLocked) {
741 			_MergeWithOnlyConsumer();
742 		} else if (consumer->TryLock()) {
743 			_MergeWithOnlyConsumer();
744 			consumer->Unlock();
745 		} else {
746 			// Someone else has locked the consumer ATM. Unlock this cache and
747 			// wait for the consumer lock. Increment the cache's ref count
748 			// temporarily, so that no one else will try what we are doing or
749 			// delete the cache.
750 			fRefCount++;
751 			bool consumerLockedTemp = consumer->SwitchLock(&fLock);
752 			Lock();
753 			fRefCount--;
754 
755 			if (consumerLockedTemp) {
756 				if (fRefCount == 1 && _IsMergeable()
757 						&& consumer == consumers.Head()) {
758 					// nothing has changed in the meantime -- merge
759 					_MergeWithOnlyConsumer();
760 				}
761 
762 				consumer->Unlock();
763 			}
764 		}
765 	}
766 
767 	if (fRefCount == 0) {
768 		// delete this cache
769 		Delete();
770 	} else
771 		mutex_unlock(&fLock);
772 }
773 
774 
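/*!	Looks up the page at the given byte \a offset in this cache and returns
	it, or \c NULL if there is no such page. The cache must be locked.
*/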
775 vm_page*
776 VMCache::LookupPage(off_t offset)
777 {
778 	AssertLocked();
779 
780 	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
781 
782 #if KDEBUG
783 	if (page != NULL && page->Cache() != this)
784 		panic("page %p not in cache %p\n", page, this);
785 #endif
786 
787 	return page;
788 }
789 
790 
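/*!	Inserts \a page into this cache at the given byte \a offset. The page
	must not yet belong to a cache, and this cache must be locked.
*/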
791 void
792 VMCache::InsertPage(vm_page* page, off_t offset)
793 {
794 	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %" B_PRIdOFF "\n",
795 		this, page, offset));
796 	AssertLocked();
797 
798 	if (page->CacheRef() != NULL) {
799 		panic("insert page %p into cache %p: page cache is set to %p\n",
800 			page, this, page->Cache());
801 	}
802 
803 	T2(InsertPage(this, page, offset));
804 
805 	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
806 	page_count++;
807 	page->SetCacheRef(fCacheRef);
808 
809 #if KDEBUG
810 	vm_page* otherPage = pages.Lookup(page->cache_offset);
811 	if (otherPage != NULL) {
812 		panic("VMCache::InsertPage(): there's already page %p with cache "
813 			"offset %" B_PRIuPHYSADDR " in cache %p; inserting page %p",
814 			otherPage, page->cache_offset, this, page);
815 	}
816 #endif	// KDEBUG
817 
818 	pages.Insert(page);
819 
820 	if (page->WiredCount() > 0)
821 		IncrementWiredPagesCount();
822 }
823 
824 
825 /*!	Removes the vm_page from this cache. Of course, the page must
826 	really be in this cache or evil things will happen.
827 	The cache lock must be held.
828 */
829 void
830 VMCache::RemovePage(vm_page* page)
831 {
832 	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
833 	AssertLocked();
834 
835 	if (page->Cache() != this) {
836 		panic("remove page %p from cache %p: page cache is set to %p\n", page,
837 			this, page->Cache());
838 	}
839 
840 	T2(RemovePage(this, page));
841 
842 	pages.Remove(page);
843 	page_count--;
844 	page->SetCacheRef(NULL);
845 
846 	if (page->WiredCount() > 0)
847 		DecrementWiredPagesCount();
848 }
849 
850 
851 /*!	Moves the given page from its current cache and inserts it into this
852 	cache at the given offset.
853 	Both caches must be locked.
854 */
855 void
856 VMCache::MovePage(vm_page* page, off_t offset)
857 {
858 	VMCache* oldCache = page->Cache();
859 
860 	AssertLocked();
861 	oldCache->AssertLocked();
862 
863 	// remove from old cache
864 	oldCache->pages.Remove(page);
865 	oldCache->page_count--;
866 	T2(RemovePage(oldCache, page));
867 
868 	// change the offset
869 	page->cache_offset = offset >> PAGE_SHIFT;
870 
871 	// insert here
872 	pages.Insert(page);
873 	page_count++;
874 	page->SetCacheRef(fCacheRef);
875 
876 	if (page->WiredCount() > 0) {
877 		IncrementWiredPagesCount();
878 		oldCache->DecrementWiredPagesCount();
879 	}
880 
881 	T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
882 }
883 
884 /*!	Moves the given page from its current cache and inserts it into this
885 	cache, keeping its current offset. Both caches must be locked.
886 */
887 void
888 VMCache::MovePage(vm_page* page)
889 {
890 	MovePage(page, page->cache_offset << PAGE_SHIFT);
891 }
892 
893 
894 /*!	Moves all pages from the given cache to this one.
895 	Both caches must be locked. This cache must be empty.
896 */
897 void
898 VMCache::MoveAllPages(VMCache* fromCache)
899 {
900 	AssertLocked();
901 	fromCache->AssertLocked();
902 	ASSERT(page_count == 0);
903 
904 	std::swap(fromCache->pages, pages);
905 	page_count = fromCache->page_count;
906 	fromCache->page_count = 0;
907 	fWiredPagesCount = fromCache->fWiredPagesCount;
908 	fromCache->fWiredPagesCount = 0;
909 
910 	// swap the VMCacheRefs
911 	mutex_lock(&sCacheListLock);
912 	std::swap(fCacheRef, fromCache->fCacheRef);
913 	fCacheRef->cache = this;
914 	fromCache->fCacheRef->cache = fromCache;
915 	mutex_unlock(&sCacheListLock);
916 
917 #if VM_CACHE_TRACING >= 2
918 	for (VMCachePagesTree::Iterator it = pages.GetIterator();
919 			vm_page* page = it.Next();) {
920 		T2(RemovePage(fromCache, page));
921 		T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
922 	}
923 #endif
924 }
925 
926 
927 /*!	Moves the given pages from their current cache and inserts them into this
928 	cache. Both caches must be locked.
929 */
930 void
931 VMCache::MovePageRange(VMCache* source, off_t offset, off_t size,
932 		off_t newOffset)
933 {
934 	page_num_t startPage = offset >> PAGE_SHIFT;
935 	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
936 	off_t offsetChange = newOffset - offset;
937 
938 	VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true,
939 		true);
940 	for (vm_page* page = it.Next();
941 				page != NULL && page->cache_offset < endPage;
942 				page = it.Next()) {
943 		MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange);
944 	}
945 }
946 
947 
948 /*!	Waits until one or more of the given events have happened for a page that
949 	belongs to this cache.
950 	The cache must be locked. It will be unlocked by the method. \a relock
951 	specifies whether the method shall re-lock the cache before returning.
952 	\param page The page for which to wait.
953 	\param events The mask of events the caller is interested in.
954 	\param relock If \c true, the cache will be locked when returning,
955 		otherwise it won't be locked.
956 */
957 void
958 VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
959 {
960 	PageEventWaiter waiter;
961 	waiter.thread = thread_get_current_thread();
962 	waiter.next = fPageEventWaiters;
963 	waiter.page = page;
964 	waiter.events = events;
965 
966 	fPageEventWaiters = &waiter;
967 
968 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER,
969 		"cache page events");
970 
971 	Unlock();
972 	thread_block();
973 
974 	if (relock)
975 		Lock();
976 }
977 
978 
979 /*!	Makes this cache the source of the \a consumer cache,
980 	and adds the \a consumer to its list.
981 	This also grabs a reference to the source cache.
982 	Assumes you have the cache and the consumer's lock held.
983 */
984 void
985 VMCache::AddConsumer(VMCache* consumer)
986 {
987 	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
988 	AssertLocked();
989 	consumer->AssertLocked();
990 
991 	T(AddConsumer(this, consumer));
992 
993 	consumer->source = this;
994 	consumers.Add(consumer);
995 
996 	AcquireRefLocked();
997 	AcquireStoreRef();
998 }
999 
1000 
1001 /*!	Adds the \a area to this cache.
1002 	Assumes you have the cache locked.
1003 */
1004 status_t
1005 VMCache::InsertAreaLocked(VMArea* area)
1006 {
1007 	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
1008 	AssertLocked();
1009 
1010 	T(InsertArea(this, area));
1011 
1012 	area->cache_next = areas;
1013 	if (area->cache_next)
1014 		area->cache_next->cache_prev = area;
1015 	area->cache_prev = NULL;
1016 	areas = area;
1017 
1018 	AcquireStoreRef();
1019 
1020 	return B_OK;
1021 }
1022 
1023 
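/*!	Removes the \a area from this cache. The cache must not be locked by the
	caller; the store reference is released first and the cache is locked by
	the method itself (see the comment below regarding the locking order).
*/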
1024 status_t
1025 VMCache::RemoveArea(VMArea* area)
1026 {
1027 	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));
1028 
1029 	T(RemoveArea(this, area));
1030 
1031 	// We release the store reference first, since otherwise we would reverse
1032 	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
1033 	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
1034 	// Also cf. _RemoveConsumer().
1035 	ReleaseStoreRef();
1036 
1037 	AutoLocker<VMCache> locker(this);
1038 
1039 	if (area->cache_prev)
1040 		area->cache_prev->cache_next = area->cache_next;
1041 	if (area->cache_next)
1042 		area->cache_next->cache_prev = area->cache_prev;
1043 	if (areas == area)
1044 		areas = area->cache_next;
1045 
1046 	return B_OK;
1047 }
1048 
1049 
1050 /*!	Transfers the areas from \a fromCache to this cache. This cache must not
1051 	have areas yet. Both caches must be locked.
1052 */
1053 void
1054 VMCache::TransferAreas(VMCache* fromCache)
1055 {
1056 	AssertLocked();
1057 	fromCache->AssertLocked();
1058 	ASSERT(areas == NULL);
1059 
1060 	areas = fromCache->areas;
1061 	fromCache->areas = NULL;
1062 
1063 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1064 		area->cache = this;
1065 		AcquireRefLocked();
1066 		fromCache->ReleaseRefLocked();
1067 
1068 		T(RemoveArea(fromCache, area));
1069 		T(InsertArea(this, area));
1070 	}
1071 }
1072 
1073 
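/*!	Returns the number of areas attached to this cache that allow writing
	(i.e. have B_WRITE_AREA or B_KERNEL_WRITE_AREA set), not counting
	\a ignoreArea. The cache should be locked.
*/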
1074 uint32
1075 VMCache::CountWritableAreas(VMArea* ignoreArea) const
1076 {
1077 	uint32 count = 0;
1078 
1079 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1080 		if (area != ignoreArea
1081 			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
1082 			count++;
1083 		}
1084 	}
1085 
1086 	return count;
1087 }
1088 
1089 
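/*!	Writes back all modified pages of this cache. Does nothing for temporary
	caches. The method locks and unlocks the cache itself.
*/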
1090 status_t
1091 VMCache::WriteModified()
1092 {
1093 	TRACE(("VMCache::WriteModified(cache = %p)\n", this));
1094 
1095 	if (temporary)
1096 		return B_OK;
1097 
1098 	Lock();
1099 	status_t status = vm_page_write_modified_pages(this);
1100 	Unlock();
1101 
1102 	return status;
1103 }
1104 
1105 
1106 /*!	Commits the memory to the store if the \a commitment is larger than
1107 	what's committed already.
1108 	Assumes you have the cache's lock held.
1109 */
1110 status_t
1111 VMCache::SetMinimalCommitment(off_t commitment, int priority)
1112 {
1113 	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %" B_PRIdOFF
1114 		")\n", this, commitment));
1115 	AssertLocked();
1116 
1117 	T(SetMinimalCommitment(this, commitment));
1118 
1119 	status_t status = B_OK;
1120 
1121 	// If we don't have enough committed space to cover through to the new end
1122 	// of the area...
1123 	if (committed_size < commitment) {
1124 		// ToDo: should we check if the cache's virtual size is large
1125 		//	enough for a commitment of that size?
1126 
1127 		// try to commit more memory
1128 		status = Commit(commitment, priority);
1129 	}
1130 
1131 	return status;
1132 }
1133 
1134 
1135 /*!	This function updates the size field of the cache.
1136 	If needed, it will free up all pages that don't belong to the cache anymore.
1137 	The cache lock must be held when you call it.
1138 	Since removed pages no longer belong to the cache, they are not written
1139 	back before being removed.
1140 
1141 	Note that this function may temporarily release the cache lock if it has
1142 	to wait for busy pages.
1143 */
1144 status_t
1145 VMCache::Resize(off_t newSize, int priority)
1146 {
1147 	TRACE(("VMCache::Resize(cache %p, newSize %" B_PRIdOFF ") old size %"
1148 		B_PRIdOFF "\n", this, newSize, this->virtual_end));
1149 	this->AssertLocked();
1150 
1151 	T(Resize(this, newSize));
1152 
1153 	status_t status = Commit(newSize - virtual_base, priority);
1154 	if (status != B_OK)
1155 		return status;
1156 
1157 	uint32 oldPageCount = (uint32)((virtual_end + B_PAGE_SIZE - 1)
1158 		>> PAGE_SHIFT);
1159 	uint32 newPageCount = (uint32)((newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT);
1160 
1161 	if (newPageCount < oldPageCount) {
1162 		// we need to remove all pages in the cache outside of the new virtual
1163 		// size
1164 		for (VMCachePagesTree::Iterator it
1165 					= pages.GetIterator(newPageCount, true, true);
1166 				vm_page* page = it.Next();) {
1167 			if (page->busy) {
1168 				if (page->busy_writing) {
1169 					// We cannot wait for the page to become available
1170 					// as we might cause a deadlock this way
1171 					page->busy_writing = false;
1172 						// this will notify the writer to free the page
1173 				} else {
1174 					// wait for page to become unbusy
1175 					WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1176 
1177 					// restart from the start of the list
1178 					it = pages.GetIterator(newPageCount, true, true);
1179 				}
1180 				continue;
1181 			}
1182 
1183 			// remove the page and put it into the free queue
1184 			DEBUG_PAGE_ACCESS_START(page);
1185 			vm_remove_all_page_mappings(page);
1186 			ASSERT(page->WiredCount() == 0);
1187 				// TODO: Find a real solution! If the page is wired
1188 				// temporarily (e.g. by lock_memory()), we actually must not
1189 				// unmap it!
1190 			RemovePage(page);
1191 			vm_page_free(this, page);
1192 				// Note: When iterating through an IteratableSplayTree
1193 				// removing the current node is safe.
1194 		}
1195 	}
1196 
1197 	virtual_end = newSize;
1198 	return B_OK;
1199 }
1200 
1201 /*!	This function updates the virtual_base field of the cache.
1202 	If needed, it will free up all pages that don't belong to the cache anymore.
1203 	The cache lock must be held when you call it.
1204 	Since removed pages no longer belong to the cache, they are not written
1205 	back before being removed.
1206 
1207 	Note that this function may temporarily release the cache lock if it has
1208 	to wait for busy pages.
1209 */
1210 status_t
1211 VMCache::Rebase(off_t newBase, int priority)
1212 {
1213 	TRACE(("VMCache::Rebase(cache %p, newBase %" B_PRIdOFF ") old base %"
1214 		B_PRIdOFF "\n", this, newBase, this->virtual_base));
1215 	this->AssertLocked();
1216 
1217 	T(Rebase(this, newBase));
1218 
1219 	status_t status = Commit(virtual_end - newBase, priority);
1220 	if (status != B_OK)
1221 		return status;
1222 
1223 	uint32 basePage = (uint32)(newBase >> PAGE_SHIFT);
1224 
1225 	if (newBase > virtual_base) {
1226 		// we need to remove all pages in the cache outside of the new virtual
1227 		// size
1228 		VMCachePagesTree::Iterator it = pages.GetIterator();
1229 		for (vm_page* page = it.Next();
1230 				page != NULL && page->cache_offset < basePage;
1231 				page = it.Next()) {
1232 			if (page->busy) {
1233 				if (page->busy_writing) {
1234 					// We cannot wait for the page to become available
1235 					// as we might cause a deadlock this way
1236 					page->busy_writing = false;
1237 						// this will notify the writer to free the page
1238 				} else {
1239 					// wait for page to become unbusy
1240 					WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1241 
1242 					// restart from the start of the list
1243 					it = pages.GetIterator();
1244 				}
1245 				continue;
1246 			}
1247 
1248 			// remove the page and put it into the free queue
1249 			DEBUG_PAGE_ACCESS_START(page);
1250 			vm_remove_all_page_mappings(page);
1251 			ASSERT(page->WiredCount() == 0);
1252 				// TODO: Find a real solution! If the page is wired
1253 				// temporarily (e.g. by lock_memory()), we actually must not
1254 				// unmap it!
1255 			RemovePage(page);
1256 			vm_page_free(this, page);
1257 				// Note: When iterating through an IteratableSplayTree
1258 				// removing the current node is safe.
1259 		}
1260 	}
1261 
1262 	virtual_base = newBase;
1263 	return B_OK;
1264 }
1265 
1266 
1267 /*!	You have to call this function with the VMCache lock held. */
1268 status_t
1269 VMCache::FlushAndRemoveAllPages()
1270 {
1271 	ASSERT_LOCKED_MUTEX(&fLock);
1272 
1273 	while (page_count > 0) {
1274 		// write back modified pages
1275 		status_t status = vm_page_write_modified_pages(this);
1276 		if (status != B_OK)
1277 			return status;
1278 
1279 		// remove pages
1280 		for (VMCachePagesTree::Iterator it = pages.GetIterator();
1281 				vm_page* page = it.Next();) {
1282 			if (page->busy) {
1283 				// wait for page to become unbusy
1284 				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1285 
1286 				// restart from the start of the list
1287 				it = pages.GetIterator();
1288 				continue;
1289 			}
1290 
1291 			// skip modified pages -- they will be written back in the next
1292 			// iteration
1293 			if (page->State() == PAGE_STATE_MODIFIED)
1294 				continue;
1295 
1296 			// We can't remove mapped pages.
1297 			if (page->IsMapped())
1298 				return B_BUSY;
1299 
1300 			DEBUG_PAGE_ACCESS_START(page);
1301 			RemovePage(page);
1302 			vm_page_free(this, page);
1303 				// Note: When iterating through an IteratableSplayTree
1304 				// removing the current node is safe.
1305 		}
1306 	}
1307 
1308 	return B_OK;
1309 }
1310 
1311 
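/*!	Default implementation of the commit hook: it merely records \a size as
	the new committed_size and always succeeds. Subclasses that actually need
	to reserve memory or swap space override this.
*/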
1312 status_t
1313 VMCache::Commit(off_t size, int priority)
1314 {
1315 	committed_size = size;
1316 	return B_OK;
1317 }
1318 
1319 
1320 /*!	Returns whether the cache's underlying backing store could deliver the
1321 	page at the given offset.
1322 
1323 	Basically it returns whether a Read() at \a offset would at least read a
1324 	partial page (assuming that no unexpected errors occur or the situation
1325 	changes in the meantime).
1326 */
1327 bool
1328 VMCache::HasPage(off_t offset)
1329 {
1330 	// In accordance with Fault() the default implementation doesn't have a
1331 	// backing store and doesn't allow faults.
1332 	return false;
1333 }
1334 
1335 
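// Default Read()/Write() implementations: the base class has no backing
// store, so any I/O request fails with B_ERROR. Subclasses with a backing
// store (such as the vnode cache) override these hooks.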
1336 status_t
1337 VMCache::Read(off_t offset, const generic_io_vec *vecs, size_t count,
1338 	uint32 flags, generic_size_t *_numBytes)
1339 {
1340 	return B_ERROR;
1341 }
1342 
1343 
1344 status_t
1345 VMCache::Write(off_t offset, const generic_io_vec *vecs, size_t count,
1346 	uint32 flags, generic_size_t *_numBytes)
1347 {
1348 	return B_ERROR;
1349 }
1350 
1351 
1352 status_t
1353 VMCache::WriteAsync(off_t offset, const generic_io_vec* vecs, size_t count,
1354 	generic_size_t numBytes, uint32 flags, AsyncIOCallback* callback)
1355 {
1356 	// Not supported, fall back to the synchronous hook.
1357 	generic_size_t transferred = numBytes;
1358 	status_t error = Write(offset, vecs, count, flags, &transferred);
1359 
1360 	if (callback != NULL)
1361 		callback->IOFinished(error, transferred != numBytes, transferred);
1362 
1363 	return error;
1364 }
1365 
1366 
1367 /*!	\brief Returns whether the cache can write the page at the given offset.
1368 
1369 	The cache must be locked when this function is invoked.
1370 
1371 	\param offset The page offset.
1372 	\return \c true, if the page can be written, \c false otherwise.
1373 */
1374 bool
1375 VMCache::CanWritePage(off_t offset)
1376 {
1377 	return false;
1378 }
1379 
1380 
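// Default fault hook: the base class cannot provide pages on demand, so page
// faults on it fail with B_BAD_ADDRESS.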
1381 status_t
1382 VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
1383 {
1384 	return B_BAD_ADDRESS;
1385 }
1386 
1387 
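/*!	Moves all pages of \a source that are not yet present in this cache into
	this cache; pages that already exist here shadow their counterparts in
	\a source and are left untouched. Both caches must be locked.
*/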
1388 void
1389 VMCache::Merge(VMCache* source)
1390 {
1391 	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
1392 			vm_page* page = it.Next();) {
1393 		// Note: Removing the current node while iterating through an
1394 		// IteratableSplayTree is safe.
1395 		vm_page* consumerPage = LookupPage(
1396 			(off_t)page->cache_offset << PAGE_SHIFT);
1397 		if (consumerPage == NULL) {
1398 			// the page is not yet in the consumer cache - move it upwards
1399 			MovePage(page);
1400 		}
1401 	}
1402 }
1403 
1404 
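// Hooks for managing a reference to the cache's backing store (e.g. the
// vnode of a vnode cache). The base class has no backing store, so the
// default implementations do nothing.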
1405 status_t
1406 VMCache::AcquireUnreferencedStoreRef()
1407 {
1408 	return B_OK;
1409 }
1410 
1411 
1412 void
1413 VMCache::AcquireStoreRef()
1414 {
1415 }
1416 
1417 
1418 void
1419 VMCache::ReleaseStoreRef()
1420 {
1421 }
1422 
1423 
1424 /*!	Kernel debugger version of HasPage().
1425 	Does not do any locking.
1426 */
1427 bool
1428 VMCache::DebugHasPage(off_t offset)
1429 {
1430 	// default that works for all subclasses that don't lock anyway
1431 	return HasPage(offset);
1432 }
1433 
1434 
1435 /*!	Kernel debugger version of LookupPage().
1436 	Does not do any locking.
1437 */
1438 vm_page*
1439 VMCache::DebugLookupPage(off_t offset)
1440 {
1441 	return pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
1442 }
1443 
1444 
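/*!	Dumps the cache's state to the kernel debugger output. If \a showPages is
	\c true every page is listed individually, otherwise only the total page
	count is printed.
*/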
1445 void
1446 VMCache::Dump(bool showPages) const
1447 {
1448 	kprintf("CACHE %p:\n", this);
1449 	kprintf("  ref_count:    %" B_PRId32 "\n", RefCount());
1450 	kprintf("  source:       %p\n", source);
1451 	kprintf("  type:         %s\n", vm_cache_type_to_string(type));
1452 	kprintf("  virtual_base: 0x%" B_PRIx64 "\n", virtual_base);
1453 	kprintf("  virtual_end:  0x%" B_PRIx64 "\n", virtual_end);
1454 	kprintf("  temporary:    %" B_PRIu32 "\n", temporary);
1455 	kprintf("  lock:         %p\n", &fLock);
1456 #if KDEBUG
1457 	kprintf("  lock.holder:  %" B_PRId32 "\n", fLock.holder);
1458 #endif
1459 	kprintf("  areas:\n");
1460 
1461 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1462 		kprintf("    area 0x%" B_PRIx32 ", %s\n", area->id, area->name);
1463 		kprintf("\tbase_addr:  0x%lx, size: 0x%lx\n", area->Base(),
1464 			area->Size());
1465 		kprintf("\tprotection: 0x%" B_PRIx32 "\n", area->protection);
1466 		kprintf("\towner:      0x%" B_PRIx32 "\n", area->address_space->ID());
1467 	}
1468 
1469 	kprintf("  consumers:\n");
1470 	for (ConsumerList::ConstIterator it = consumers.GetIterator();
1471 		 	VMCache* consumer = it.Next();) {
1472 		kprintf("\t%p\n", consumer);
1473 	}
1474 
1475 	kprintf("  pages:\n");
1476 	if (showPages) {
1477 		for (VMCachePagesTree::ConstIterator it = pages.GetIterator();
1478 				vm_page* page = it.Next();) {
1479 			if (!vm_page_is_dummy(page)) {
1480 				kprintf("\t%p ppn %#" B_PRIxPHYSADDR " offset %#" B_PRIxPHYSADDR
1481 					" state %u (%s) wired_count %u\n", page,
1482 					page->physical_page_number, page->cache_offset,
1483 					page->State(), page_state_to_string(page->State()),
1484 					page->WiredCount());
1485 			} else {
1486 				kprintf("\t%p DUMMY PAGE state %u (%s)\n",
1487 					page, page->State(), page_state_to_string(page->State()));
1488 			}
1489 		}
1490 	} else
1491 		kprintf("\t%" B_PRIu32 " in cache\n", page_count);
1492 }
1493 
1494 
1495 /*!	Wakes up threads waiting for page events.
1496 	\param page The page for which events occurred.
1497 	\param events The mask of events that occurred.
1498 */
1499 void
1500 VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
1501 {
1502 	PageEventWaiter** it = &fPageEventWaiters;
1503 	while (PageEventWaiter* waiter = *it) {
1504 		if (waiter->page == page && (waiter->events & events) != 0) {
1505 			// remove from list and unblock
1506 			*it = waiter->next;
1507 			thread_unblock(waiter->thread, B_OK);
1508 		} else
1509 			it = &waiter->next;
1510 	}
1511 }
1512 
1513 
1514 /*!	Merges this cache with its only consumer.
1515 	The caller must hold both the cache's and the consumer's lock. The method
1516 	releases neither lock.
1517 */
1518 void
1519 VMCache::_MergeWithOnlyConsumer()
1520 {
1521 	VMCache* consumer = consumers.RemoveHead();
1522 
1523 	TRACE(("merge vm cache %p (ref == %" B_PRId32 ") with vm cache %p\n",
1524 		this, this->fRefCount, consumer));
1525 
1526 	T(Merge(this, consumer));
1527 
1528 	// merge the cache
1529 	consumer->Merge(this);
1530 
1531 	// The remaining consumer has got a new source.
1532 	if (source != NULL) {
1533 		VMCache* newSource = source;
1534 
1535 		newSource->Lock();
1536 
1537 		newSource->consumers.Remove(this);
1538 		newSource->consumers.Add(consumer);
1539 		consumer->source = newSource;
1540 		source = NULL;
1541 
1542 		newSource->Unlock();
1543 	} else
1544 		consumer->source = NULL;
1545 
1546 	// Release the reference the cache's consumer owned. The consumer takes
1547 	// over the cache's ref to its source (if any) instead.
1548 	ReleaseRefLocked();
1549 }
1550 
1551 
1552 /*!	Removes the \a consumer from this cache.
1553 	It will also release the reference to the cache owned by the consumer.
1554 	Assumes you have the consumer's cache lock held. This cache must not be
1555 	locked.
1556 */
1557 void
1558 VMCache::_RemoveConsumer(VMCache* consumer)
1559 {
1560 	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
1561 	consumer->AssertLocked();
1562 
1563 	T(RemoveConsumer(this, consumer));
1564 
1565 	// Remove the store ref before locking the cache. Otherwise we'd call into
1566 	// the VFS while holding the cache lock, which would reverse the usual
1567 	// locking order.
1568 	ReleaseStoreRef();
1569 
1570 	// remove the consumer from the cache, but keep its reference until later
1571 	Lock();
1572 	consumers.Remove(consumer);
1573 	consumer->source = NULL;
1574 
1575 	ReleaseRefAndUnlock();
1576 }
1577 
1578 
1579 // #pragma mark - VMCacheFactory
1580 	// TODO: Move to own source file!
1581 
1582 
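/*!	Creates an anonymous (RAM-backed) cache. With swap support compiled in
	and \a swappable being \c true a VMAnonymousCache is created, otherwise a
	VMAnonymousNoSwapCache. Allocations never wait for memory or lock the
	kernel address space, and VIP priority additionally sets
	HEAP_PRIORITY_VIP.
*/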
1583 /*static*/ status_t
1584 VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
1585 	int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
1586 	int priority)
1587 {
1588 	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1589 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1590 	if (priority >= VM_PRIORITY_VIP)
1591 		allocationFlags |= HEAP_PRIORITY_VIP;
1592 
1593 #if ENABLE_SWAP_SUPPORT
1594 	if (swappable) {
1595 		VMAnonymousCache* cache
1596 			= new(gAnonymousCacheObjectCache, allocationFlags) VMAnonymousCache;
1597 		if (cache == NULL)
1598 			return B_NO_MEMORY;
1599 
1600 		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1601 			numGuardPages, allocationFlags);
1602 		if (error != B_OK) {
1603 			cache->Delete();
1604 			return error;
1605 		}
1606 
1607 		T(Create(cache));
1608 
1609 		_cache = cache;
1610 		return B_OK;
1611 	}
1612 #endif
1613 
1614 	VMAnonymousNoSwapCache* cache
1615 		= new(gAnonymousNoSwapCacheObjectCache, allocationFlags)
1616 			VMAnonymousNoSwapCache;
1617 	if (cache == NULL)
1618 		return B_NO_MEMORY;
1619 
1620 	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1621 		numGuardPages, allocationFlags);
1622 	if (error != B_OK) {
1623 		cache->Delete();
1624 		return error;
1625 	}
1626 
1627 	T(Create(cache));
1628 
1629 	_cache = cache;
1630 	return B_OK;
1631 }
1632 
1633 
1634 /*static*/ status_t
1635 VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
1636 {
1637 	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1638 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1639 		// Note: Vnode cache creation is never VIP.
1640 
1641 	VMVnodeCache* cache
1642 		= new(gVnodeCacheObjectCache, allocationFlags) VMVnodeCache;
1643 	if (cache == NULL)
1644 		return B_NO_MEMORY;
1645 
1646 	status_t error = cache->Init(vnode, allocationFlags);
1647 	if (error != B_OK) {
1648 		cache->Delete();
1649 		return error;
1650 	}
1651 
1652 	T(Create(cache));
1653 
1654 	_cache = cache;
1655 	return B_OK;
1656 }
1657 
1658 
1659 /*static*/ status_t
1660 VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
1661 {
1662 	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1663 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1664 		// Note: Device cache creation is never VIP.
1665 
1666 	VMDeviceCache* cache
1667 		= new(gDeviceCacheObjectCache, allocationFlags) VMDeviceCache;
1668 	if (cache == NULL)
1669 		return B_NO_MEMORY;
1670 
1671 	status_t error = cache->Init(baseAddress, allocationFlags);
1672 	if (error != B_OK) {
1673 		cache->Delete();
1674 		return error;
1675 	}
1676 
1677 	T(Create(cache));
1678 
1679 	_cache = cache;
1680 	return B_OK;
1681 }
1682 
1683 
1684 /*static*/ status_t
1685 VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
1686 {
1687 	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1688 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1689 	if (priority >= VM_PRIORITY_VIP)
1690 		allocationFlags |= HEAP_PRIORITY_VIP;
1691 
1692 	VMNullCache* cache
1693 		= new(gNullCacheObjectCache, allocationFlags) VMNullCache;
1694 	if (cache == NULL)
1695 		return B_NO_MEMORY;
1696 
1697 	status_t error = cache->Init(allocationFlags);
1698 	if (error != B_OK) {
1699 		cache->Delete();
1700 		return error;
1701 	}
1702 
1703 	T(Create(cache));
1704 
1705 	_cache = cache;
1706 	return B_OK;
1707 }
1708