/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <vm/VMCache.h>

#include <stddef.h>
#include <stdlib.h>

#include <algorithm>

#include <arch/cpu.h>
#include <condition_variable.h>
#include <heap.h>
#include <int.h>
#include <kernel.h>
#include <slab/Slab.h>
#include <smp.h>
#include <thread.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>

// needed for the factory only
#include "VMAnonymousCache.h"
#include "VMAnonymousNoSwapCache.h"
#include "VMDeviceCache.h"
#include "VMNullCache.h"
#include "../cache/vnode_store.h"


//#define TRACE_VM_CACHE
#ifdef TRACE_VM_CACHE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#if DEBUG_CACHE_LIST
VMCache* gDebugCacheList;
#endif
static mutex sCacheListLock = MUTEX_INITIALIZER("global VMCache list");
	// The lock is also needed when the debug feature is disabled.

ObjectCache* gCacheRefObjectCache;
#if ENABLE_SWAP_SUPPORT
ObjectCache* gAnonymousCacheObjectCache;
#endif
ObjectCache* gAnonymousNoSwapCacheObjectCache;
ObjectCache* gVnodeCacheObjectCache;
ObjectCache* gDeviceCacheObjectCache;
ObjectCache* gNullCacheObjectCache;


struct VMCache::PageEventWaiter {
	Thread*				thread;
	PageEventWaiter*	next;
	vm_page*			page;
	uint32				events;
};


#if VM_CACHE_TRACING

namespace VMCacheTracing {

class VMCacheTraceEntry : public AbstractTraceEntry {
	public:
		VMCacheTraceEntry(VMCache* cache)
			:
			fCache(cache)
		{
#if VM_CACHE_TRACING_STACK_TRACE
			fStackTrace = capture_tracing_stack_trace(
				VM_CACHE_TRACING_STACK_TRACE, 0, true);
				// Don't capture userland stack trace to avoid potential
				// deadlocks.
#endif
		}

#if VM_CACHE_TRACING_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

		VMCache* Cache() const
		{
			return fCache;
		}

	protected:
		VMCache*	fCache;
#if VM_CACHE_TRACING_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};


class Create : public VMCacheTraceEntry {
	public:
		Create(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache create: -> cache: %p", fCache);
		}
};


class Delete : public VMCacheTraceEntry {
	public:
		Delete(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache delete: cache: %p", fCache);
		}
};


class SetMinimalCommitment : public VMCacheTraceEntry {
	public:
		SetMinimalCommitment(VMCache* cache, off_t commitment)
			:
			VMCacheTraceEntry(cache),
			fOldCommitment(cache->committed_size),
			fCommitment(commitment)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache set min commitment: cache: %p, "
				"commitment: %" B_PRIdOFF " -> %" B_PRIdOFF, fCache,
				fOldCommitment, fCommitment);
		}

	private:
		off_t	fOldCommitment;
		off_t	fCommitment;
};


class Resize : public VMCacheTraceEntry {
	public:
		Resize(VMCache* cache, off_t size)
			:
			VMCacheTraceEntry(cache),
			fOldSize(cache->virtual_end),
			fSize(size)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache resize: cache: %p, size: %" B_PRIdOFF " -> %"
				B_PRIdOFF, fCache, fOldSize, fSize);
		}

	private:
		off_t	fOldSize;
		off_t	fSize;
};


class Rebase : public VMCacheTraceEntry {
	public:
		Rebase(VMCache* cache, off_t base)
			:
			VMCacheTraceEntry(cache),
			fOldBase(cache->virtual_base),
			fBase(base)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache rebase: cache: %p, base: %" B_PRIdOFF " -> %"
				B_PRIdOFF, fCache, fOldBase, fBase);
		}

	private:
		off_t	fOldBase;
		off_t	fBase;
};


class AddConsumer : public VMCacheTraceEntry {
	public:
		AddConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
				fConsumer);
		}

		VMCache* Consumer() const
		{
			return fConsumer;
		}

	private:
		VMCache*	fConsumer;
};


class RemoveConsumer : public VMCacheTraceEntry {
	public:
		RemoveConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class Merge : public VMCacheTraceEntry {
	public:
		Merge(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class InsertArea : public VMCacheTraceEntry {
	public:
		InsertArea(VMCache* cache, VMArea* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
				fArea);
		}

		VMArea*	Area() const
		{
			return fArea;
		}

	private:
		VMArea*	fArea;
};


class RemoveArea : public VMCacheTraceEntry {
	public:
		RemoveArea(VMCache* cache, VMArea* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
				fArea);
		}

	private:
		VMArea*	fArea;
};

}	// namespace VMCacheTracing

#	define T(x) new(std::nothrow) VMCacheTracing::x;

#	if VM_CACHE_TRACING >= 2

namespace VMCacheTracing {

class InsertPage : public VMCacheTraceEntry {
	public:
		InsertPage(VMCache* cache, vm_page* page, off_t offset)
			:
			VMCacheTraceEntry(cache),
			fPage(page),
			fOffset(offset)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert page: cache: %p, page: %p, offset: %"
				B_PRIdOFF, fCache, fPage, fOffset);
		}

	private:
		vm_page*	fPage;
		off_t		fOffset;
};


class RemovePage : public VMCacheTraceEntry {
	public:
		RemovePage(VMCache* cache, vm_page* page)
			:
			VMCacheTraceEntry(cache),
			fPage(page)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
				fPage);
		}

	private:
		vm_page*	fPage;
};

}	// namespace VMCacheTracing

#		define T2(x) new(std::nothrow) VMCacheTracing::x;
#	else
#		define T2(x) ;
#	endif
#else
#	define T(x) ;
#	define T2(x) ;
#endif


//	#pragma mark - debugger commands


#if VM_CACHE_TRACING


static void*
cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
{
	using namespace VMCacheTracing;

	// find the previous "insert area" entry for the given area
	TraceEntryIterator iterator = baseIterator;
	TraceEntry* entry = iterator.Current();
	while (entry != NULL) {
		if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
			if (insertAreaEntry->Area() == area)
				return insertAreaEntry->Cache();
		}

		entry = iterator.Previous();
	}

	return NULL;
}


static void*
cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
{
	using namespace VMCacheTracing;

	// find the previous "add consumer" or "create" entry for the given cache
	TraceEntryIterator iterator = baseIterator;
	TraceEntry* entry = iterator.Current();
	while (entry != NULL) {
		if (Create* createEntry = dynamic_cast<Create*>(entry)) {
			if (createEntry->Cache() == cache)
				return NULL;
		} else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
			if (addEntry->Consumer() == cache)
				return addEntry->Cache();
		}

		entry = iterator.Previous();
	}

	return NULL;
}


static int
command_cache_stack(int argc, char** argv)
{
	if (argc < 3 || argc > 4) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	bool isArea = false;

	int argi = 1;
	if (argc == 4) {
		if (strcmp(argv[argi], "area") != 0) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}

		argi++;
		isArea = true;
	}

	uint64 addressValue;
	uint64 debugEntryIndex;
	if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
		|| !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
		return 0;
	}

	TraceEntryIterator baseIterator;
	if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
		kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
		return 0;
	}

	void* address = (void*)(addr_t)addressValue;

	kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
		isArea ? "area" : "cache", address, debugEntryIndex);
	if (isArea) {
		address = cache_stack_find_area_cache(baseIterator, address);
		if (address == NULL) {
			kprintf("  cache not found\n");
			return 0;
		}
	}

	while (address != NULL) {
		kprintf("  %p\n", address);
		address = cache_stack_find_consumer(baseIterator, address);
	}

	return 0;
}


#endif	// VM_CACHE_TRACING


//	#pragma mark -


status_t
vm_cache_init(kernel_args* args)
{
	// Create object caches for the structures we allocate here.
	gCacheRefObjectCache = create_object_cache("cache refs", sizeof(VMCacheRef),
		0, NULL, NULL, NULL);
#if ENABLE_SWAP_SUPPORT
	gAnonymousCacheObjectCache = create_object_cache("anon caches",
		sizeof(VMAnonymousCache), 0, NULL, NULL, NULL);
#endif
	gAnonymousNoSwapCacheObjectCache = create_object_cache(
		"anon no-swap caches", sizeof(VMAnonymousNoSwapCache), 0, NULL, NULL,
		NULL);
	gVnodeCacheObjectCache = create_object_cache("vnode caches",
		sizeof(VMVnodeCache), 0, NULL, NULL, NULL);
	gDeviceCacheObjectCache = create_object_cache("device caches",
		sizeof(VMDeviceCache), 0, NULL, NULL, NULL);
	gNullCacheObjectCache = create_object_cache("null caches",
		sizeof(VMNullCache), 0, NULL, NULL, NULL);

	if (gCacheRefObjectCache == NULL
#if ENABLE_SWAP_SUPPORT
		|| gAnonymousCacheObjectCache == NULL
#endif
		|| gAnonymousNoSwapCacheObjectCache == NULL
		|| gVnodeCacheObjectCache == NULL
		|| gDeviceCacheObjectCache == NULL
		|| gNullCacheObjectCache == NULL) {
		panic("vm_cache_init(): Failed to create object caches!");
		return B_NO_MEMORY;
	}

	return B_OK;
}


void
vm_cache_init_post_heap()
{
#if VM_CACHE_TRACING
	add_debugger_command_etc("cache_stack", &command_cache_stack,
		"List the ancestors (sources) of a VMCache at the time given by "
			"tracing entry index",
		"[ \"area\" ] <address> <tracing entry index>\n"
		"All ancestors (sources) of a given VMCache at the time given by the\n"
		"tracing entry index are listed. If \"area\" is given the supplied\n"
		"address is an area instead of a cache address. The listing will\n"
		"start with the area's cache at that point.\n",
		0);
#endif	// VM_CACHE_TRACING
}


VMCache*
vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
{
	mutex_lock(&sCacheListLock);

	while (dontWait) {
		VMCacheRef* cacheRef = page->CacheRef();
		if (cacheRef == NULL) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		VMCache* cache = cacheRef->cache;
		if (!cache->TryLock()) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		if (cacheRef == page->CacheRef()) {
			mutex_unlock(&sCacheListLock);
			cache->AcquireRefLocked();
			return cache;
		}

		// the cache changed in the meantime
		cache->Unlock();
	}

	while (true) {
		VMCacheRef* cacheRef = page->CacheRef();
		if (cacheRef == NULL) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		VMCache* cache = cacheRef->cache;
		if (!cache->SwitchLock(&sCacheListLock)) {
			// cache has been deleted
			mutex_lock(&sCacheListLock);
			continue;
		}

		mutex_lock(&sCacheListLock);
		if (cache == page->Cache()) {
			mutex_unlock(&sCacheListLock);
			cache->AcquireRefLocked();
			return cache;
		}

		// the cache changed in the meantime
		cache->Unlock();
	}
}
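

// Example (illustrative sketch, not compiled): the typical caller pattern for
// vm_cache_acquire_locked_page_cache(). On success the page's cache is
// returned locked and with an extra reference; both must be released when
// done. The function name example_touch_page_cache is hypothetical.
#if 0
static void
example_touch_page_cache(vm_page* page)
{
	VMCache* cache = vm_cache_acquire_locked_page_cache(page, false);
	if (cache == NULL)
		return;
			// the page does not (or no longer) belong to a cache

	// ... operate on the page while its cache is locked ...

	cache->ReleaseRefAndUnlock();
}
#endif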


// #pragma mark - VMCacheRef


VMCacheRef::VMCacheRef(VMCache* cache)
	:
	cache(cache),
	ref_count(1)
{
}


// #pragma mark - VMCache


bool
VMCache::_IsMergeable() const
{
	return areas == NULL && temporary && !consumers.IsEmpty()
		&& consumers.Head() == consumers.Tail();
}


VMCache::VMCache()
	:
	fCacheRef(NULL)
{
}


VMCache::~VMCache()
{
	object_cache_delete(gCacheRefObjectCache, fCacheRef);
}


status_t
VMCache::Init(uint32 cacheType, uint32 allocationFlags)
{
	mutex_init(&fLock, "VMCache");

	areas = NULL;
	fRefCount = 1;
	source = NULL;
	virtual_base = 0;
	virtual_end = 0;
	committed_size = 0;
	temporary = 0;
	page_count = 0;
	fWiredPagesCount = 0;
	type = cacheType;
	fPageEventWaiters = NULL;

#if DEBUG_CACHE_LIST
	debug_previous = NULL;
	debug_next = NULL;
		// initialize in case the following fails
#endif

	fCacheRef = new(gCacheRefObjectCache, allocationFlags) VMCacheRef(this);
	if (fCacheRef == NULL)
		return B_NO_MEMORY;

#if DEBUG_CACHE_LIST
	mutex_lock(&sCacheListLock);

	if (gDebugCacheList != NULL)
		gDebugCacheList->debug_previous = this;
	debug_next = gDebugCacheList;
	gDebugCacheList = this;

	mutex_unlock(&sCacheListLock);
#endif

	return B_OK;
}


void
VMCache::Delete()
{
	if (areas != NULL)
		panic("cache %p to be deleted still has areas", this);
	if (!consumers.IsEmpty())
		panic("cache %p to be deleted still has consumers", this);

	T(Delete(this));

	// free all of the pages in the cache
	while (vm_page* page = pages.Root()) {
		if (!page->mappings.IsEmpty() || page->WiredCount() != 0) {
			panic("remove page %p from cache %p: page still has mappings!\n"
				"@!page %p; cache %p", page, this, page, this);
		}

		// remove it
		pages.Remove(page);
		page->SetCacheRef(NULL);

		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
			page->physical_page_number));
		DEBUG_PAGE_ACCESS_START(page);
		vm_page_free(this, page);
	}

	// remove the ref to the source
	if (source)
		source->_RemoveConsumer(this);

	// We lock and unlock the sCacheListLock, even if the DEBUG_CACHE_LIST is
	// not enabled. This synchronization point is needed for
	// vm_cache_acquire_locked_page_cache().
	mutex_lock(&sCacheListLock);

#if DEBUG_CACHE_LIST
	if (debug_previous)
		debug_previous->debug_next = debug_next;
	if (debug_next)
		debug_next->debug_previous = debug_previous;
	if (this == gDebugCacheList)
		gDebugCacheList = debug_next;
#endif

	mutex_destroy(&fLock);

	mutex_unlock(&sCacheListLock);

	DeleteObject();
}


void
VMCache::Unlock(bool consumerLocked)
{
	while (fRefCount == 1 && _IsMergeable()) {
		VMCache* consumer = consumers.Head();
		if (consumerLocked) {
			_MergeWithOnlyConsumer();
		} else if (consumer->TryLock()) {
			_MergeWithOnlyConsumer();
			consumer->Unlock();
		} else {
			// Someone else has locked the consumer ATM. Unlock this cache and
			// wait for the consumer lock. Increment the cache's ref count
			// temporarily, so that no one else will try what we are doing or
			// delete the cache.
			fRefCount++;
			bool consumerLockedTemp = consumer->SwitchLock(&fLock);
			Lock();
			fRefCount--;

			if (consumerLockedTemp) {
				if (fRefCount == 1 && _IsMergeable()
						&& consumer == consumers.Head()) {
					// nothing has changed in the meantime -- merge
					_MergeWithOnlyConsumer();
				}

				consumer->Unlock();
			}
		}
	}

	if (fRefCount == 0) {
		// delete this cache
		Delete();
	} else
		mutex_unlock(&fLock);
}


vm_page*
VMCache::LookupPage(off_t offset)
{
	AssertLocked();

	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));

#if KDEBUG
	if (page != NULL && page->Cache() != this)
		panic("page %p not in cache %p\n", page, this);
#endif

	return page;
}


void
VMCache::InsertPage(vm_page* page, off_t offset)
{
	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %" B_PRIdOFF "\n",
		this, page, offset));
	AssertLocked();

	if (page->CacheRef() != NULL) {
		panic("insert page %p into cache %p: page cache is set to %p\n",
			page, this, page->Cache());
	}

	T2(InsertPage(this, page, offset));

	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
	page_count++;
	page->SetCacheRef(fCacheRef);

#if KDEBUG
	vm_page* otherPage = pages.Lookup(page->cache_offset);
	if (otherPage != NULL) {
		panic("VMCache::InsertPage(): there's already page %p with cache "
			"offset %" B_PRIuPHYSADDR " in cache %p; inserting page %p",
			otherPage, page->cache_offset, this, page);
	}
#endif	// KDEBUG

	pages.Insert(page);

	if (page->WiredCount() > 0)
		IncrementWiredPagesCount();
}


/*!	Removes the vm_page from this cache. Of course, the page must
	really be in this cache or evil things will happen.
	The cache lock must be held.
*/
void
VMCache::RemovePage(vm_page* page)
{
	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
	AssertLocked();

	if (page->Cache() != this) {
		panic("remove page %p from cache %p: page cache is set to %p\n", page,
			this, page->Cache());
	}

	T2(RemovePage(this, page));

	pages.Remove(page);
	page_count--;
	page->SetCacheRef(NULL);

	if (page->WiredCount() > 0)
		DecrementWiredPagesCount();
}
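

// Example (illustrative sketch, not compiled): the basic page bookkeeping
// calls. InsertPage()/LookupPage()/RemovePage() all require the cache to be
// locked, and the page must not belong to another cache when inserted. The
// function name example_page_bookkeeping and the offset 0 are hypothetical.
#if 0
static void
example_page_bookkeeping(VMCache* cache, vm_page* page)
{
	AutoLocker<VMCache> locker(cache);

	cache->InsertPage(page, 0);
		// the page is now looked up by its cache offset
	ASSERT(cache->LookupPage(0) == page);

	cache->RemovePage(page);
		// the page no longer belongs to any cache
}
#endif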


/*!	Moves the given page from its current cache and inserts it into this
	cache at the given offset.
	Both caches must be locked.
*/
void
VMCache::MovePage(vm_page* page, off_t offset)
{
	VMCache* oldCache = page->Cache();

	AssertLocked();
	oldCache->AssertLocked();

	// remove from old cache
	oldCache->pages.Remove(page);
	oldCache->page_count--;
	T2(RemovePage(oldCache, page));

	// change the offset
	page->cache_offset = offset >> PAGE_SHIFT;

	// insert here
	pages.Insert(page);
	page_count++;
	page->SetCacheRef(fCacheRef);

	if (page->WiredCount() > 0) {
		IncrementWiredPagesCount();
		oldCache->DecrementWiredPagesCount();
	}

	T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
}


/*!	Moves the given page from its current cache and inserts it into this
	cache, keeping its current offset.
	Both caches must be locked.
*/
void
VMCache::MovePage(vm_page* page)
{
	MovePage(page, page->cache_offset << PAGE_SHIFT);
}


/*!	Moves all pages from the given cache to this one.
	Both caches must be locked. This cache must be empty.
*/
void
VMCache::MoveAllPages(VMCache* fromCache)
{
	AssertLocked();
	fromCache->AssertLocked();
	ASSERT(page_count == 0);

	std::swap(fromCache->pages, pages);
	page_count = fromCache->page_count;
	fromCache->page_count = 0;
	fWiredPagesCount = fromCache->fWiredPagesCount;
	fromCache->fWiredPagesCount = 0;

	// swap the VMCacheRefs
	mutex_lock(&sCacheListLock);
	std::swap(fCacheRef, fromCache->fCacheRef);
	fCacheRef->cache = this;
	fromCache->fCacheRef->cache = fromCache;
	mutex_unlock(&sCacheListLock);

#if VM_CACHE_TRACING >= 2
	for (VMCachePagesTree::Iterator it = pages.GetIterator();
			vm_page* page = it.Next();) {
		T2(RemovePage(fromCache, page));
		T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
	}
#endif
}


/*!	Waits until one or more events happened for a given page which belongs to
	this cache.
	The cache must be locked. It will be unlocked by the method. \a relock
	specifies whether the method shall re-lock the cache before returning.
	\param page The page for which to wait.
	\param events The mask of events the caller is interested in.
	\param relock If \c true, the cache will be locked when returning,
		otherwise it won't be locked.
*/
void
VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
{
	PageEventWaiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = fPageEventWaiters;
	waiter.page = page;
	waiter.events = events;

	fPageEventWaiters = &waiter;

	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER,
		"cache page events");

	Unlock();
	thread_block();

	if (relock)
		Lock();
}
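

// Example (illustrative sketch, not compiled): the busy-page pattern built on
// WaitForPageEvents(). Since waiting drops the cache lock, the caller must
// look the page up again afterwards -- it may have been freed or moved in the
// meantime. The function name example_wait_for_unbusy_page is hypothetical.
#if 0
static void
example_wait_for_unbusy_page(VMCache* cache, off_t offset)
{
	// caller holds the cache lock
	vm_page* page;
	while ((page = cache->LookupPage(offset)) != NULL && page->busy) {
		cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
			// the cache was unlocked and relocked; restart the lookup
	}
}
#endif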


/*!	Makes this cache the source of the \a consumer cache,
	and adds the \a consumer to its list.
	This also grabs a reference to the source cache.
	Assumes you have both the cache's and the consumer's locks held.
*/
void
VMCache::AddConsumer(VMCache* consumer)
{
	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
	AssertLocked();
	consumer->AssertLocked();

	T(AddConsumer(this, consumer));

	consumer->source = this;
	consumers.Add(consumer);

	AcquireRefLocked();
	AcquireStoreRef();
}


/*!	Adds the \a area to this cache.
	Assumes you have locked the cache.
*/
status_t
VMCache::InsertAreaLocked(VMArea* area)
{
	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
	AssertLocked();

	T(InsertArea(this, area));

	area->cache_next = areas;
	if (area->cache_next)
		area->cache_next->cache_prev = area;
	area->cache_prev = NULL;
	areas = area;

	AcquireStoreRef();

	return B_OK;
}


status_t
VMCache::RemoveArea(VMArea* area)
{
	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));

	T(RemoveArea(this, area));

	// We release the store reference first, since otherwise we would reverse
	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
	// Also cf. _RemoveConsumer().
	ReleaseStoreRef();

	AutoLocker<VMCache> locker(this);

	if (area->cache_prev)
		area->cache_prev->cache_next = area->cache_next;
	if (area->cache_next)
		area->cache_next->cache_prev = area->cache_prev;
	if (areas == area)
		areas = area->cache_next;

	return B_OK;
}


/*!	Transfers the areas from \a fromCache to this cache. This cache must not
	have areas yet. Both caches must be locked.
*/
void
VMCache::TransferAreas(VMCache* fromCache)
{
	AssertLocked();
	fromCache->AssertLocked();
	ASSERT(areas == NULL);

	areas = fromCache->areas;
	fromCache->areas = NULL;

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		area->cache = this;
		AcquireRefLocked();
		fromCache->ReleaseRefLocked();

		T(RemoveArea(fromCache, area));
		T(InsertArea(this, area));
	}
}


uint32
VMCache::CountWritableAreas(VMArea* ignoreArea) const
{
	uint32 count = 0;

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		if (area != ignoreArea
			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
			count++;
		}
	}

	return count;
}


status_t
VMCache::WriteModified()
{
	TRACE(("VMCache::WriteModified(cache = %p)\n", this));

	if (temporary)
		return B_OK;

	Lock();
	status_t status = vm_page_write_modified_pages(this);
	Unlock();

	return status;
}


/*!	Commits the memory to the store if the \a commitment is larger than
	what's committed already.
	Assumes you have the cache's lock held.
*/
status_t
VMCache::SetMinimalCommitment(off_t commitment, int priority)
{
	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %" B_PRIdOFF
		")\n", this, commitment));
	AssertLocked();

	T(SetMinimalCommitment(this, commitment));

	status_t status = B_OK;

	// If we don't have enough committed space to cover through to the new end
	// of the area...
	if (committed_size < commitment) {
		// ToDo: should we check if the cache's virtual size is large
		//	enough for a commitment of that size?

		// try to commit more memory
		status = Commit(commitment, priority);
	}

	return status;
}


bool
VMCache::_FreePageRange(VMCachePagesTree::Iterator it,
	page_num_t* toPage = NULL)
{
	for (vm_page* page = it.Next();
		page != NULL && (toPage == NULL || page->cache_offset < *toPage);
		page = it.Next()) {

		if (page->busy) {
			if (page->busy_writing) {
				// We cannot wait for the page to become available
				// as we might cause a deadlock this way
				page->busy_writing = false;
					// this will notify the writer to free the page
				continue;
			}

			// wait for page to become unbusy
			WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
			return true;
		}

		// remove the page and put it into the free queue
		DEBUG_PAGE_ACCESS_START(page);
		vm_remove_all_page_mappings(page);
		ASSERT(page->WiredCount() == 0);
			// TODO: Find a real solution! If the page is wired
			// temporarily (e.g. by lock_memory()), we actually must not
			// unmap it!
		RemovePage(page);
			// Note: When iterating through an IteratableSplayTree
			// removing the current node is safe.

		vm_page_free(this, page);
	}

	return false;
}


/*!	This function updates the size field of the cache.
	If needed, it will free up all pages that don't belong to the cache
	anymore.
	The cache lock must be held when you call it.
	Since removed pages no longer belong to the cache, they are not written
	back before being removed.

	Note that this function may temporarily release the cache lock if it has
	to wait for busy pages.
*/
status_t
VMCache::Resize(off_t newSize, int priority)
{
	TRACE(("VMCache::Resize(cache %p, newSize %" B_PRIdOFF ") old size %"
		B_PRIdOFF "\n", this, newSize, this->virtual_end));
	this->AssertLocked();

	T(Resize(this, newSize));

	status_t status = Commit(newSize - virtual_base, priority);
	if (status != B_OK)
		return status;

	page_num_t oldPageCount = (page_num_t)((virtual_end + B_PAGE_SIZE - 1)
		>> PAGE_SHIFT);
	page_num_t newPageCount = (page_num_t)((newSize + B_PAGE_SIZE - 1)
		>> PAGE_SHIFT);

	if (newPageCount < oldPageCount) {
		// we need to remove all pages in the cache outside of the new virtual
		// size
		while (_FreePageRange(pages.GetIterator(newPageCount, true, true)))
			;
	}

	virtual_end = newSize;
	return B_OK;
}
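

// Example (illustrative sketch, not compiled): shrinking a cache with
// Resize(). The cache must be locked; note that the lock may be dropped
// temporarily while Resize() waits for busy pages. The function name
// example_shrink_cache is hypothetical.
#if 0
static status_t
example_shrink_cache(VMCache* cache, off_t newSize)
{
	AutoLocker<VMCache> locker(cache);
	return cache->Resize(newSize, VM_PRIORITY_USER);
}
#endif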


/*!	This function updates the virtual_base field of the cache.
	If needed, it will free up all pages that don't belong to the cache
	anymore.
	The cache lock must be held when you call it.
	Since removed pages no longer belong to the cache, they are not written
	back before being removed.

	Note that this function may temporarily release the cache lock if it has
	to wait for busy pages.
*/
status_t
VMCache::Rebase(off_t newBase, int priority)
{
	TRACE(("VMCache::Rebase(cache %p, newBase %" B_PRIdOFF ") old base %"
		B_PRIdOFF "\n", this, newBase, this->virtual_base));
	this->AssertLocked();

	T(Rebase(this, newBase));

	status_t status = Commit(virtual_end - newBase, priority);
	if (status != B_OK)
		return status;

	page_num_t basePage = (page_num_t)(newBase >> PAGE_SHIFT);

	if (newBase > virtual_base) {
		// we need to remove all pages in the cache outside of the new virtual
		// base
		while (_FreePageRange(pages.GetIterator(), &basePage))
			;
	}

	virtual_base = newBase;
	return B_OK;
}


/*!	Moves pages in the given range from the source cache into this cache. Both
	caches must be locked.
*/
status_t
VMCache::Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset)
{
	page_num_t startPage = offset >> PAGE_SHIFT;
	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
	off_t offsetChange = newOffset - offset;

	VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true,
		true);
	for (vm_page* page = it.Next();
				page != NULL && page->cache_offset < endPage;
				page = it.Next()) {
		MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange);
	}

	return B_OK;
}


/*! Discards pages in the given range. */
status_t
VMCache::Discard(off_t offset, off_t size)
{
	page_num_t startPage = offset >> PAGE_SHIFT;
	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
	while (_FreePageRange(pages.GetIterator(startPage, true, true), &endPage))
		;

	return B_OK;
}


/*!	You have to call this function with the VMCache lock held. */
status_t
VMCache::FlushAndRemoveAllPages()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	while (page_count > 0) {
		// write back modified pages
		status_t status = vm_page_write_modified_pages(this);
		if (status != B_OK)
			return status;

		// remove pages
		for (VMCachePagesTree::Iterator it = pages.GetIterator();
				vm_page* page = it.Next();) {
			if (page->busy) {
				// wait for page to become unbusy
				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);

				// restart from the start of the list
				it = pages.GetIterator();
				continue;
			}

			// skip modified pages -- they will be written back in the next
			// iteration
			if (page->State() == PAGE_STATE_MODIFIED)
				continue;

			// We can't remove mapped pages.
			if (page->IsMapped())
				return B_BUSY;

			DEBUG_PAGE_ACCESS_START(page);
			RemovePage(page);
			vm_page_free(this, page);
				// Note: When iterating through an IteratableSplayTree
				// removing the current node is safe.
		}
	}

	return B_OK;
}
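

// Example (illustrative sketch, not compiled): flushing a cache before
// discarding it. FlushAndRemoveAllPages() writes back modified pages and
// frees the rest, failing with B_BUSY if mapped pages remain. The function
// name example_flush_cache is hypothetical.
#if 0
static status_t
example_flush_cache(VMCache* cache)
{
	AutoLocker<VMCache> locker(cache);
	return cache->FlushAndRemoveAllPages();
}
#endif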


status_t
VMCache::Commit(off_t size, int priority)
{
	committed_size = size;
	return B_OK;
}


/*!	Returns whether the cache's underlying backing store could deliver the
	page at the given offset.

	Basically it returns whether a Read() at \a offset would at least read a
	partial page (assuming that no unexpected errors occur or the situation
	changes in the meantime).
*/
bool
VMCache::HasPage(off_t offset)
{
	// In accordance with Fault() the default implementation doesn't have a
	// backing store and doesn't allow faults.
	return false;
}


status_t
VMCache::Read(off_t offset, const generic_io_vec *vecs, size_t count,
	uint32 flags, generic_size_t *_numBytes)
{
	return B_ERROR;
}


status_t
VMCache::Write(off_t offset, const generic_io_vec *vecs, size_t count,
	uint32 flags, generic_size_t *_numBytes)
{
	return B_ERROR;
}


status_t
VMCache::WriteAsync(off_t offset, const generic_io_vec* vecs, size_t count,
	generic_size_t numBytes, uint32 flags, AsyncIOCallback* callback)
{
	// Not supported, fall back to the synchronous hook.
	generic_size_t transferred = numBytes;
	status_t error = Write(offset, vecs, count, flags, &transferred);

	if (callback != NULL)
		callback->IOFinished(error, transferred != numBytes, transferred);

	return error;
}


/*!	\brief Returns whether the cache can write the page at the given offset.

	The cache must be locked when this function is invoked.

	@param offset The page offset.
	@return \c true, if the page can be written, \c false otherwise.
*/
bool
VMCache::CanWritePage(off_t offset)
{
	return false;
}


status_t
VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
{
	return B_BAD_ADDRESS;
}


void
VMCache::Merge(VMCache* source)
{
	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
			vm_page* page = it.Next();) {
		// Note: Removing the current node while iterating through an
		// IteratableSplayTree is safe.
		vm_page* consumerPage = LookupPage(
			(off_t)page->cache_offset << PAGE_SHIFT);
		if (consumerPage == NULL) {
			// the page is not yet in the consumer cache - move it upwards
			MovePage(page);
		}
	}
}


status_t
VMCache::AcquireUnreferencedStoreRef()
{
	return B_OK;
}


void
VMCache::AcquireStoreRef()
{
}


void
VMCache::ReleaseStoreRef()
{
}


/*!	Kernel debugger version of HasPage().
	Does not do any locking.
*/
bool
VMCache::DebugHasPage(off_t offset)
{
	// default that works for all subclasses that don't lock anyway
	return HasPage(offset);
}


/*!	Kernel debugger version of LookupPage().
	Does not do any locking.
*/
vm_page*
VMCache::DebugLookupPage(off_t offset)
{
	return pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
}


void
VMCache::Dump(bool showPages) const
{
	kprintf("CACHE %p:\n", this);
	kprintf("  ref_count:    %" B_PRId32 "\n", RefCount());
	kprintf("  source:       %p\n", source);
	kprintf("  type:         %s\n", vm_cache_type_to_string(type));
	kprintf("  virtual_base: 0x%" B_PRIx64 "\n", virtual_base);
	kprintf("  virtual_end:  0x%" B_PRIx64 "\n", virtual_end);
	kprintf("  temporary:    %" B_PRIu32 "\n", temporary);
	kprintf("  lock:         %p\n", &fLock);
#if KDEBUG
	kprintf("  lock.holder:  %" B_PRId32 "\n", fLock.holder);
#endif
	kprintf("  areas:\n");

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		kprintf("    area 0x%" B_PRIx32 ", %s\n", area->id, area->name);
		kprintf("\tbase_addr:  0x%lx, size: 0x%lx\n", area->Base(),
			area->Size());
		kprintf("\tprotection: 0x%" B_PRIx32 "\n", area->protection);
		kprintf("\towner:      0x%" B_PRIx32 "\n", area->address_space->ID());
	}

	kprintf("  consumers:\n");
	for (ConsumerList::ConstIterator it = consumers.GetIterator();
		 	VMCache* consumer = it.Next();) {
		kprintf("\t%p\n", consumer);
	}

	kprintf("  pages:\n");
	if (showPages) {
		for (VMCachePagesTree::ConstIterator it = pages.GetIterator();
				vm_page* page = it.Next();) {
			if (!vm_page_is_dummy(page)) {
				kprintf("\t%p ppn %#" B_PRIxPHYSADDR " offset %#" B_PRIxPHYSADDR
					" state %u (%s) wired_count %u\n", page,
					page->physical_page_number, page->cache_offset,
					page->State(), page_state_to_string(page->State()),
					page->WiredCount());
			} else {
				kprintf("\t%p DUMMY PAGE state %u (%s)\n",
					page, page->State(), page_state_to_string(page->State()));
			}
		}
	} else
		kprintf("\t%" B_PRIu32 " in cache\n", page_count);
}


/*!	Wakes up threads waiting for page events.
	\param page The page for which events occurred.
	\param events The mask of events that occurred.
*/
void
VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
{
	PageEventWaiter** it = &fPageEventWaiters;
	while (PageEventWaiter* waiter = *it) {
		if (waiter->page == page && (waiter->events & events) != 0) {
			// remove from list and unblock
			*it = waiter->next;
			thread_unblock(waiter->thread, B_OK);
		} else
			it = &waiter->next;
	}
}


/*!	Merges the given cache with its only consumer.
	The caller must hold both the cache's and the consumer's lock. The method
	releases neither lock.
*/
void
VMCache::_MergeWithOnlyConsumer()
{
	VMCache* consumer = consumers.RemoveHead();

	TRACE(("merge vm cache %p (ref == %" B_PRId32 ") with vm cache %p\n",
		this, this->fRefCount, consumer));

	T(Merge(this, consumer));

	// merge the cache
	consumer->Merge(this);

	// The remaining consumer has got a new source.
	if (source != NULL) {
		VMCache* newSource = source;

		newSource->Lock();

		newSource->consumers.Remove(this);
		newSource->consumers.Add(consumer);
		consumer->source = newSource;
		source = NULL;

		newSource->Unlock();
	} else
		consumer->source = NULL;

	// Release the reference the cache's consumer owned. The consumer takes
	// over the cache's ref to its source (if any) instead.
	ReleaseRefLocked();
}


/*!	Removes the \a consumer from this cache.
	It will also release the reference to the cache owned by the consumer.
	Assumes you have the consumer's cache lock held. This cache must not be
	locked.
*/
void
VMCache::_RemoveConsumer(VMCache* consumer)
{
	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
	consumer->AssertLocked();

	T(RemoveConsumer(this, consumer));

	// Remove the store ref before locking the cache. Otherwise we'd call into
	// the VFS while holding the cache lock, which would reverse the usual
	// locking order.
	ReleaseStoreRef();

	// remove the consumer from the cache, but keep its reference until later
	Lock();
	consumers.Remove(consumer);
	consumer->source = NULL;

	ReleaseRefAndUnlock();
}


// #pragma mark - VMCacheFactory
	// TODO: Move to own source file!


/*static*/ status_t
VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
	int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
	int priority)
{
	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
	if (priority >= VM_PRIORITY_VIP)
		allocationFlags |= HEAP_PRIORITY_VIP;

#if ENABLE_SWAP_SUPPORT
	if (swappable) {
		VMAnonymousCache* cache
			= new(gAnonymousCacheObjectCache, allocationFlags) VMAnonymousCache;
		if (cache == NULL)
			return B_NO_MEMORY;

		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
			numGuardPages, allocationFlags);
		if (error != B_OK) {
			cache->Delete();
			return error;
		}

		T(Create(cache));

		_cache = cache;
		return B_OK;
	}
#endif

	VMAnonymousNoSwapCache* cache
		= new(gAnonymousNoSwapCacheObjectCache, allocationFlags)
			VMAnonymousNoSwapCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
		numGuardPages, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}
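

// Example (illustrative sketch, not compiled): creating an anonymous cache
// via the factory. The cache is returned with one reference; most operations
// additionally require it to be locked. The function name
// example_create_anonymous_cache is hypothetical.
#if 0
static status_t
example_create_anonymous_cache()
{
	VMCache* cache;
	status_t error = VMCacheFactory::CreateAnonymousCache(cache,
		false /* canOvercommit */, 0 /* precommitted pages */,
		0 /* guard pages */, false /* swappable */, VM_PRIORITY_SYSTEM);
	if (error != B_OK)
		return error;

	cache->Lock();
	// ... insert pages, attach areas, etc. ...
	cache->ReleaseRefAndUnlock();
	return B_OK;
}
#endif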


/*static*/ status_t
VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
{
	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
		// Note: Vnode cache creation is never VIP.

	VMVnodeCache* cache
		= new(gVnodeCacheObjectCache, allocationFlags) VMVnodeCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(vnode, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
{
	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
		// Note: Device cache creation is never VIP.

	VMDeviceCache* cache
		= new(gDeviceCacheObjectCache, allocationFlags) VMDeviceCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(baseAddress, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
{
	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
	if (priority >= VM_PRIORITY_VIP)
		allocationFlags |= HEAP_PRIORITY_VIP;

	VMNullCache* cache
		= new(gNullCacheObjectCache, allocationFlags) VMNullCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}