xref: /haiku/src/system/kernel/vm/VMCache.cpp (revision b3fe70844e087a579563b43cf4c1b2525946ca27)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include <vm/VMCache.h>
12 
13 #include <stddef.h>
14 #include <stdlib.h>
15 
16 #include <algorithm>
17 
18 #include <arch/cpu.h>
19 #include <condition_variable.h>
20 #include <heap.h>
21 #include <int.h>
22 #include <kernel.h>
23 #include <slab/Slab.h>
24 #include <smp.h>
25 #include <tracing.h>
26 #include <util/khash.h>
27 #include <util/AutoLock.h>
28 #include <vfs.h>
29 #include <vm/vm.h>
30 #include <vm/vm_page.h>
31 #include <vm/vm_priv.h>
32 #include <vm/vm_types.h>
33 #include <vm/VMAddressSpace.h>
34 #include <vm/VMArea.h>
35 
36 // needed for the factory only
37 #include "VMAnonymousCache.h"
38 #include "VMAnonymousNoSwapCache.h"
39 #include "VMDeviceCache.h"
40 #include "VMNullCache.h"
41 #include "../cache/vnode_store.h"
42 
43 
44 //#define TRACE_VM_CACHE
45 #ifdef TRACE_VM_CACHE
46 #	define TRACE(x) dprintf x
47 #else
48 #	define TRACE(x) ;
49 #endif
50 
51 
52 #if DEBUG_CACHE_LIST
53 VMCache* gDebugCacheList;
54 #endif
55 static mutex sCacheListLock = MUTEX_INITIALIZER("global VMCache list");
56 	// The lock is also needed when the debug feature is disabled.
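	// Besides gDebugCacheList, this lock is the synchronization point for
	// vm_cache_acquire_locked_page_cache(): VMCache::Delete() acquires it
	// before the cache goes away, so a page's CacheRef can be resolved safely
	// while the list lock is held.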
57 
58 ObjectCache* gCacheRefObjectCache;
59 ObjectCache* gAnonymousCacheObjectCache;
60 ObjectCache* gAnonymousNoSwapCacheObjectCache;
61 ObjectCache* gVnodeCacheObjectCache;
62 ObjectCache* gDeviceCacheObjectCache;
63 ObjectCache* gNullCacheObjectCache;
64 
65 
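// One waiter per blocked thread; the structure lives on the waiting thread's
// stack and is linked into the cache's fPageEventWaiters list (see
// WaitForPageEvents() and _NotifyPageEvents()).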
66 struct VMCache::PageEventWaiter {
67 	Thread*				thread;
68 	PageEventWaiter*	next;
69 	vm_page*			page;
70 	uint32				events;
71 };
72 
73 
74 #if VM_CACHE_TRACING
75 
76 namespace VMCacheTracing {
77 
78 class VMCacheTraceEntry : public AbstractTraceEntry {
79 	public:
80 		VMCacheTraceEntry(VMCache* cache)
81 			:
82 			fCache(cache)
83 		{
84 #if VM_CACHE_TRACING_STACK_TRACE
85 			fStackTrace = capture_tracing_stack_trace(
86 				VM_CACHE_TRACING_STACK_TRACE, 0, true);
87 				// Don't capture userland stack trace to avoid potential
88 				// deadlocks.
89 #endif
90 		}
91 
92 #if VM_CACHE_TRACING_STACK_TRACE
93 		virtual void DumpStackTrace(TraceOutput& out)
94 		{
95 			out.PrintStackTrace(fStackTrace);
96 		}
97 #endif
98 
99 		VMCache* Cache() const
100 		{
101 			return fCache;
102 		}
103 
104 	protected:
105 		VMCache*	fCache;
106 #if VM_CACHE_TRACING_STACK_TRACE
107 		tracing_stack_trace* fStackTrace;
108 #endif
109 };
110 
111 
112 class Create : public VMCacheTraceEntry {
113 	public:
114 		Create(VMCache* cache)
115 			:
116 			VMCacheTraceEntry(cache)
117 		{
118 			Initialized();
119 		}
120 
121 		virtual void AddDump(TraceOutput& out)
122 		{
123 			out.Print("vm cache create: -> cache: %p", fCache);
124 		}
125 };
126 
127 
128 class Delete : public VMCacheTraceEntry {
129 	public:
130 		Delete(VMCache* cache)
131 			:
132 			VMCacheTraceEntry(cache)
133 		{
134 			Initialized();
135 		}
136 
137 		virtual void AddDump(TraceOutput& out)
138 		{
139 			out.Print("vm cache delete: cache: %p", fCache);
140 		}
141 };
142 
143 
144 class SetMinimalCommitment : public VMCacheTraceEntry {
145 	public:
146 		SetMinimalCommitment(VMCache* cache, off_t commitment)
147 			:
148 			VMCacheTraceEntry(cache),
149 			fOldCommitment(cache->committed_size),
150 			fCommitment(commitment)
151 		{
152 			Initialized();
153 		}
154 
155 		virtual void AddDump(TraceOutput& out)
156 		{
157 			out.Print("vm cache set min commitment: cache: %p, "
158 				"commitment: %" B_PRIdOFF " -> %" B_PRIdOFF, fCache,
159 				fOldCommitment, fCommitment);
160 		}
161 
162 	private:
163 		off_t	fOldCommitment;
164 		off_t	fCommitment;
165 };
166 
167 
168 class Resize : public VMCacheTraceEntry {
169 	public:
170 		Resize(VMCache* cache, off_t size)
171 			:
172 			VMCacheTraceEntry(cache),
173 			fOldSize(cache->virtual_end),
174 			fSize(size)
175 		{
176 			Initialized();
177 		}
178 
179 		virtual void AddDump(TraceOutput& out)
180 		{
181 			out.Print("vm cache resize: cache: %p, size: %" B_PRIdOFF " -> %"
182 				B_PRIdOFF, fCache, fOldSize, fSize);
183 		}
184 
185 	private:
186 		off_t	fOldSize;
187 		off_t	fSize;
188 };
189 
190 
191 class AddConsumer : public VMCacheTraceEntry {
192 	public:
193 		AddConsumer(VMCache* cache, VMCache* consumer)
194 			:
195 			VMCacheTraceEntry(cache),
196 			fConsumer(consumer)
197 		{
198 			Initialized();
199 		}
200 
201 		virtual void AddDump(TraceOutput& out)
202 		{
203 			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
204 				fConsumer);
205 		}
206 
207 		VMCache* Consumer() const
208 		{
209 			return fConsumer;
210 		}
211 
212 	private:
213 		VMCache*	fConsumer;
214 };
215 
216 
217 class RemoveConsumer : public VMCacheTraceEntry {
218 	public:
219 		RemoveConsumer(VMCache* cache, VMCache* consumer)
220 			:
221 			VMCacheTraceEntry(cache),
222 			fConsumer(consumer)
223 		{
224 			Initialized();
225 		}
226 
227 		virtual void AddDump(TraceOutput& out)
228 		{
229 			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
230 				fCache, fConsumer);
231 		}
232 
233 	private:
234 		VMCache*	fConsumer;
235 };
236 
237 
238 class Merge : public VMCacheTraceEntry {
239 	public:
240 		Merge(VMCache* cache, VMCache* consumer)
241 			:
242 			VMCacheTraceEntry(cache),
243 			fConsumer(consumer)
244 		{
245 			Initialized();
246 		}
247 
248 		virtual void AddDump(TraceOutput& out)
249 		{
250 			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
251 				fCache, fConsumer);
252 		}
253 
254 	private:
255 		VMCache*	fConsumer;
256 };
257 
258 
259 class InsertArea : public VMCacheTraceEntry {
260 	public:
261 		InsertArea(VMCache* cache, VMArea* area)
262 			:
263 			VMCacheTraceEntry(cache),
264 			fArea(area)
265 		{
266 			Initialized();
267 		}
268 
269 		virtual void AddDump(TraceOutput& out)
270 		{
271 			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
272 				fArea);
273 		}
274 
275 		VMArea*	Area() const
276 		{
277 			return fArea;
278 		}
279 
280 	private:
281 		VMArea*	fArea;
282 };
283 
284 
285 class RemoveArea : public VMCacheTraceEntry {
286 	public:
287 		RemoveArea(VMCache* cache, VMArea* area)
288 			:
289 			VMCacheTraceEntry(cache),
290 			fArea(area)
291 		{
292 			Initialized();
293 		}
294 
295 		virtual void AddDump(TraceOutput& out)
296 		{
297 			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
298 				fArea);
299 		}
300 
301 	private:
302 		VMArea*	fArea;
303 };
304 
305 }	// namespace VMCacheTracing
306 
307 #	define T(x) new(std::nothrow) VMCacheTracing::x;
308 
309 #	if VM_CACHE_TRACING >= 2
310 
311 namespace VMCacheTracing {
312 
313 class InsertPage : public VMCacheTraceEntry {
314 	public:
315 		InsertPage(VMCache* cache, vm_page* page, off_t offset)
316 			:
317 			VMCacheTraceEntry(cache),
318 			fPage(page),
319 			fOffset(offset)
320 		{
321 			Initialized();
322 		}
323 
324 		virtual void AddDump(TraceOutput& out)
325 		{
326 			out.Print("vm cache insert page: cache: %p, page: %p, offset: %"
327 				B_PRIdOFF, fCache, fPage, fOffset);
328 		}
329 
330 	private:
331 		vm_page*	fPage;
332 		off_t		fOffset;
333 };
334 
335 
336 class RemovePage : public VMCacheTraceEntry {
337 	public:
338 		RemovePage(VMCache* cache, vm_page* page)
339 			:
340 			VMCacheTraceEntry(cache),
341 			fPage(page)
342 		{
343 			Initialized();
344 		}
345 
346 		virtual void AddDump(TraceOutput& out)
347 		{
348 			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
349 				fPage);
350 		}
351 
352 	private:
353 		vm_page*	fPage;
354 };
355 
356 }	// namespace VMCacheTracing
357 
358 #		define T2(x) new(std::nothrow) VMCacheTracing::x;
359 #	else
360 #		define T2(x) ;
361 #	endif
362 #else
363 #	define T(x) ;
364 #	define T2(x) ;
365 #endif
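// T() records cache-level tracing entries (create, delete, resize, ...);
// T2() additionally records the per-page insert/remove entries and is only
// active when VM_CACHE_TRACING >= 2.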
366 
367 
368 //	#pragma mark - debugger commands
369 
370 
371 #if VM_CACHE_TRACING
372 
373 
374 static void*
375 cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
376 {
377 	using namespace VMCacheTracing;
378 
379 	// find the previous "insert area" entry for the given area
380 	TraceEntryIterator iterator = baseIterator;
381 	TraceEntry* entry = iterator.Current();
382 	while (entry != NULL) {
383 		if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
384 			if (insertAreaEntry->Area() == area)
385 				return insertAreaEntry->Cache();
386 		}
387 
388 		entry = iterator.Previous();
389 	}
390 
391 	return NULL;
392 }
393 
394 
395 static void*
396 cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
397 {
398 	using namespace VMCacheTracing;
399 
400 	// find the previous "add consumer" or "create" entry for the given cache
401 	TraceEntryIterator iterator = baseIterator;
402 	TraceEntry* entry = iterator.Current();
403 	while (entry != NULL) {
404 		if (Create* createEntry = dynamic_cast<Create*>(entry)) {
405 			if (createEntry->Cache() == cache)
406 				return NULL;
407 		} else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
408 			if (addEntry->Consumer() == cache)
409 				return addEntry->Cache();
410 		}
411 
412 		entry = iterator.Previous();
413 	}
414 
415 	return NULL;
416 }
417 
418 
419 static int
420 command_cache_stack(int argc, char** argv)
421 {
422 	if (argc < 3 || argc > 4) {
423 		print_debugger_command_usage(argv[0]);
424 		return 0;
425 	}
426 
427 	bool isArea = false;
428 
429 	int argi = 1;
430 	if (argc == 4) {
431 		if (strcmp(argv[argi], "area") != 0) {
432 			print_debugger_command_usage(argv[0]);
433 			return 0;
434 		}
435 
436 		argi++;
437 		isArea = true;
438 	}
439 
440 	uint64 addressValue;
441 	uint64 debugEntryIndex;
442 	if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
443 		|| !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
444 		return 0;
445 	}
446 
447 	TraceEntryIterator baseIterator;
448 	if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
449 		kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
450 		return 0;
451 	}
452 
453 	void* address = (void*)(addr_t)addressValue;
454 
455 	kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
456 		isArea ? "area" : "cache", address, debugEntryIndex);
457 	if (isArea) {
458 		address = cache_stack_find_area_cache(baseIterator, address);
459 		if (address == NULL) {
460 			kprintf("  cache not found\n");
461 			return 0;
462 		}
463 	}
464 
465 	while (address != NULL) {
466 		kprintf("  %p\n", address);
467 		address = cache_stack_find_consumer(baseIterator, address);
468 	}
469 
470 	return 0;
471 }
472 
473 
474 #endif	// VM_CACHE_TRACING
475 
476 
477 //	#pragma mark -
478 
479 
480 status_t
481 vm_cache_init(kernel_args* args)
482 {
483 	// Create object caches for the structures we allocate here.
484 	gCacheRefObjectCache = create_object_cache("cache refs", sizeof(VMCacheRef),
485 		0, NULL, NULL, NULL);
486 	gAnonymousCacheObjectCache = create_object_cache("anon caches",
487 		sizeof(VMAnonymousCache), 0, NULL, NULL, NULL);
488 	gAnonymousNoSwapCacheObjectCache = create_object_cache(
489 		"anon no-swap caches", sizeof(VMAnonymousNoSwapCache), 0, NULL, NULL,
490 		NULL);
491 	gVnodeCacheObjectCache = create_object_cache("vnode caches",
492 		sizeof(VMVnodeCache), 0, NULL, NULL, NULL);
493 	gDeviceCacheObjectCache = create_object_cache("device caches",
494 		sizeof(VMDeviceCache), 0, NULL, NULL, NULL);
495 	gNullCacheObjectCache = create_object_cache("null caches",
496 		sizeof(VMNullCache), 0, NULL, NULL, NULL);
497 
498 	if (gCacheRefObjectCache == NULL || gAnonymousCacheObjectCache == NULL
499 		|| gAnonymousNoSwapCacheObjectCache == NULL
500 		|| gVnodeCacheObjectCache == NULL
501 		|| gDeviceCacheObjectCache == NULL
502 		|| gNullCacheObjectCache == NULL) {
503 		panic("vm_cache_init(): Failed to create object caches!");
504 		return B_NO_MEMORY;
505 	}
506 
507 	return B_OK;
508 }
509 
510 
511 void
512 vm_cache_init_post_heap()
513 {
514 #if VM_CACHE_TRACING
515 	add_debugger_command_etc("cache_stack", &command_cache_stack,
516 		"List the ancestors (sources) of a VMCache at the time given by "
517 			"tracing entry index",
518 		"[ \"area\" ] <address> <tracing entry index>\n"
519 		"All ancestors (sources) of a given VMCache at the time given by the\n"
520 		"tracing entry index are listed. If \"area\" is given the supplied\n"
521 		"address is an area instead of a cache address. The listing will\n"
522 		"start with the area's cache at that point.\n",
523 		0);
524 #endif	// VM_CACHE_TRACING
525 }
526 
527 
528 VMCache*
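/*!	Resolves the cache the given page currently belongs to, locks it, and
	returns it with an additional reference; returns \c NULL if the page has
	no cache or, if \a dontWait is \c true, when the cache lock cannot be
	taken immediately.
	Since the page's cache may change or be deleted while we wait for its
	lock, the page-to-cache association is re-checked under sCacheListLock
	after the lock has been acquired, and the attempt is retried if it
	changed.

	A hypothetical caller (sketch, not taken from this file) releases both the
	lock and the reference when done:

		if (VMCache* cache = vm_cache_acquire_locked_page_cache(page, true)) {
			// ... work with the page while its cache is locked ...
			cache->ReleaseRefAndUnlock();
		}
*/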
529 vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
530 {
531 	mutex_lock(&sCacheListLock);
532 
533 	while (dontWait) {
534 		VMCacheRef* cacheRef = page->CacheRef();
535 		if (cacheRef == NULL) {
536 			mutex_unlock(&sCacheListLock);
537 			return NULL;
538 		}
539 
540 		VMCache* cache = cacheRef->cache;
541 		if (!cache->TryLock()) {
542 			mutex_unlock(&sCacheListLock);
543 			return NULL;
544 		}
545 
546 		if (cacheRef == page->CacheRef()) {
547 			mutex_unlock(&sCacheListLock);
548 			cache->AcquireRefLocked();
549 			return cache;
550 		}
551 
552 		// the cache changed in the meantime
553 		cache->Unlock();
554 	}
555 
556 	while (true) {
557 		VMCacheRef* cacheRef = page->CacheRef();
558 		if (cacheRef == NULL) {
559 			mutex_unlock(&sCacheListLock);
560 			return NULL;
561 		}
562 
563 		VMCache* cache = cacheRef->cache;
564 		if (!cache->SwitchLock(&sCacheListLock)) {
565 			// cache has been deleted
566 			mutex_lock(&sCacheListLock);
567 			continue;
568 		}
569 
570 		mutex_lock(&sCacheListLock);
571 		if (cache == page->Cache()) {
572 			mutex_unlock(&sCacheListLock);
573 			cache->AcquireRefLocked();
574 			return cache;
575 		}
576 
577 		// the cache changed in the meantime
578 		cache->Unlock();
579 	}
580 }
581 
582 
583 // #pragma mark - VMCache
584 
585 
586 VMCacheRef::VMCacheRef(VMCache* cache)
587 	:
588 	cache(cache),
589 	ref_count(1)
590 {
591 }
592 
593 
594 // #pragma mark - VMCache
595 
596 
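/*!	A cache can be merged into its consumer only if it is a temporary cache,
	no longer backs any area directly, and has exactly one consumer (the
	consumer list is non-empty and its head equals its tail).
*/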
597 bool
598 VMCache::_IsMergeable() const
599 {
600 	return areas == NULL && temporary && !consumers.IsEmpty()
601 		&& consumers.Head() == consumers.Tail();
602 }
603 
604 
605 VMCache::VMCache()
606 	:
607 	fCacheRef(NULL)
608 {
609 }
610 
611 
612 VMCache::~VMCache()
613 {
614 	object_cache_delete(gCacheRefObjectCache, fCacheRef);
615 }
616 
617 
618 status_t
619 VMCache::Init(uint32 cacheType, uint32 allocationFlags)
620 {
621 	mutex_init(&fLock, "VMCache");
622 
623 	areas = NULL;
624 	fRefCount = 1;
625 	source = NULL;
626 	virtual_base = 0;
627 	virtual_end = 0;
628 	committed_size = 0;
629 	temporary = 0;
630 	page_count = 0;
631 	fWiredPagesCount = 0;
632 	type = cacheType;
633 	fPageEventWaiters = NULL;
634 
635 #if DEBUG_CACHE_LIST
636 	debug_previous = NULL;
637 	debug_next = NULL;
638 		// initialize in case the following fails
639 #endif
640 
641 	fCacheRef = new(gCacheRefObjectCache, allocationFlags) VMCacheRef(this);
642 	if (fCacheRef == NULL)
643 		return B_NO_MEMORY;
644 
645 #if DEBUG_CACHE_LIST
646 	mutex_lock(&sCacheListLock);
647 
648 	if (gDebugCacheList != NULL)
649 		gDebugCacheList->debug_previous = this;
650 	debug_next = gDebugCacheList;
651 	gDebugCacheList = this;
652 
653 	mutex_unlock(&sCacheListLock);
654 #endif
655 
656 	return B_OK;
657 }
658 
659 
660 void
661 VMCache::Delete()
662 {
663 	if (areas != NULL)
664 		panic("cache %p to be deleted still has areas", this);
665 	if (!consumers.IsEmpty())
666 		panic("cache %p to be deleted still has consumers", this);
667 
668 	T(Delete(this));
669 
670 	// free all of the pages in the cache
671 	while (vm_page* page = pages.Root()) {
672 		if (!page->mappings.IsEmpty() || page->WiredCount() != 0) {
673 			panic("remove page %p from cache %p: page still has mappings!\n"
674 				"@!page %p; cache %p", page, this, page, this);
675 		}
676 
677 		// remove it
678 		pages.Remove(page);
679 		page->SetCacheRef(NULL);
680 
681 		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
682 			page->physical_page_number));
683 		DEBUG_PAGE_ACCESS_START(page);
684 		vm_page_free(this, page);
685 	}
686 
687 	// remove the ref to the source
688 	if (source)
689 		source->_RemoveConsumer(this);
690 
691 	// We lock and unlock the sCacheListLock, even if DEBUG_CACHE_LIST is
692 	// not enabled. This synchronization point is needed for
693 	// vm_cache_acquire_locked_page_cache().
694 	mutex_lock(&sCacheListLock);
695 
696 #if DEBUG_CACHE_LIST
697 	if (debug_previous)
698 		debug_previous->debug_next = debug_next;
699 	if (debug_next)
700 		debug_next->debug_previous = debug_previous;
701 	if (this == gDebugCacheList)
702 		gDebugCacheList = debug_next;
703 #endif
704 
705 	mutex_destroy(&fLock);
706 
707 	mutex_unlock(&sCacheListLock);
708 
709 	DeleteObject();
710 }
711 
712 
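/*!	Releases the cache's lock. As a side effect, a cache whose reference count
	has dropped to 1 and that has become mergeable (cf. _IsMergeable()) is
	first merged into its only consumer, and a cache whose reference count has
	reached 0 is deleted instead of merely being unlocked.
*/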
713 void
714 VMCache::Unlock(bool consumerLocked)
715 {
716 	while (fRefCount == 1 && _IsMergeable()) {
717 		VMCache* consumer = consumers.Head();
718 		if (consumerLocked) {
719 			_MergeWithOnlyConsumer();
720 		} else if (consumer->TryLock()) {
721 			_MergeWithOnlyConsumer();
722 			consumer->Unlock();
723 		} else {
724 			// Someone else has locked the consumer ATM. Unlock this cache and
725 			// wait for the consumer lock. Increment the cache's ref count
726 			// temporarily, so that no one else will try what we are doing or
727 			// delete the cache.
728 			fRefCount++;
729 			bool consumerLockedTemp = consumer->SwitchLock(&fLock);
730 			Lock();
731 			fRefCount--;
732 
733 			if (consumerLockedTemp) {
734 				if (fRefCount == 1 && _IsMergeable()
735 						&& consumer == consumers.Head()) {
736 					// nothing has changed in the meantime -- merge
737 					_MergeWithOnlyConsumer();
738 				}
739 
740 				consumer->Unlock();
741 			}
742 		}
743 	}
744 
745 	if (fRefCount == 0) {
746 		// delete this cache
747 		Delete();
748 	} else
749 		mutex_unlock(&fLock);
750 }
751 
752 
753 vm_page*
754 VMCache::LookupPage(off_t offset)
755 {
756 	AssertLocked();
757 
758 	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
759 
760 #if KDEBUG
761 	if (page != NULL && page->Cache() != this)
762 		panic("page %p not in cache %p\n", page, this);
763 #endif
764 
765 	return page;
766 }
767 
768 
769 void
770 VMCache::InsertPage(vm_page* page, off_t offset)
771 {
772 	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %" B_PRIdOFF "\n",
773 		this, page, offset));
774 	AssertLocked();
775 
776 	if (page->CacheRef() != NULL) {
777 		panic("insert page %p into cache %p: page cache is set to %p\n",
778 			page, this, page->Cache());
779 	}
780 
781 	T2(InsertPage(this, page, offset));
782 
783 	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
784 	page_count++;
785 	page->SetCacheRef(fCacheRef);
786 
787 #if KDEBUG
788 	vm_page* otherPage = pages.Lookup(page->cache_offset);
789 	if (otherPage != NULL) {
790 		panic("VMCache::InsertPage(): there's already page %p with cache "
791 			"offset %" B_PRIuPHYSADDR " in cache %p; inserting page %p",
792 			otherPage, page->cache_offset, this, page);
793 	}
794 #endif	// KDEBUG
795 
796 	pages.Insert(page);
797 
798 	if (page->WiredCount() > 0)
799 		IncrementWiredPagesCount();
800 }
801 
802 
803 /*!	Removes the vm_page from this cache. Of course, the page must
804 	really be in this cache or evil things will happen.
805 	The cache lock must be held.
806 */
807 void
808 VMCache::RemovePage(vm_page* page)
809 {
810 	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
811 	AssertLocked();
812 
813 	if (page->Cache() != this) {
814 		panic("remove page %p from cache %p: page cache is set to %p\n", page,
815 			this, page->Cache());
816 	}
817 
818 	T2(RemovePage(this, page));
819 
820 	pages.Remove(page);
821 	page_count--;
822 	page->SetCacheRef(NULL);
823 
824 	if (page->WiredCount() > 0)
825 		DecrementWiredPagesCount();
826 }
827 
828 
829 /*!	Moves the given page from its current cache and inserts it into this cache.
830 	Both caches must be locked.
831 */
832 void
833 VMCache::MovePage(vm_page* page)
834 {
835 	VMCache* oldCache = page->Cache();
836 
837 	AssertLocked();
838 	oldCache->AssertLocked();
839 
840 	// remove from old cache
841 	oldCache->pages.Remove(page);
842 	oldCache->page_count--;
843 	T2(RemovePage(oldCache, page));
844 
845 	// insert here
846 	pages.Insert(page);
847 	page_count++;
848 	page->SetCacheRef(fCacheRef);
849 
850 	if (page->WiredCount() > 0) {
851 		IncrementWiredPagesCount();
852 		oldCache->DecrementWiredPagesCount();
853 	}
854 
855 	T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
856 }
857 
858 
859 /*!	Moves all pages from the given cache to this one.
860 	Both caches must be locked. This cache must be empty.
861 */
862 void
863 VMCache::MoveAllPages(VMCache* fromCache)
864 {
865 	AssertLocked();
866 	fromCache->AssertLocked();
867 	ASSERT(page_count == 0);
868 
869 	std::swap(fromCache->pages, pages);
870 	page_count = fromCache->page_count;
871 	fromCache->page_count = 0;
872 	fWiredPagesCount = fromCache->fWiredPagesCount;
873 	fromCache->fWiredPagesCount = 0;
874 
875 	// swap the VMCacheRefs
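	// The pages themselves still point at fromCache's VMCacheRef; rather than
	// updating every page's reference, we swap the two ref objects and
	// retarget their cache pointers, so page->Cache() resolves correctly
	// afterwards.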
876 	mutex_lock(&sCacheListLock);
877 	std::swap(fCacheRef, fromCache->fCacheRef);
878 	fCacheRef->cache = this;
879 	fromCache->fCacheRef->cache = fromCache;
880 	mutex_unlock(&sCacheListLock);
881 
882 #if VM_CACHE_TRACING >= 2
883 	for (VMCachePagesTree::Iterator it = pages.GetIterator();
884 			vm_page* page = it.Next();) {
885 		T2(RemovePage(fromCache, page));
886 		T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
887 	}
888 #endif
889 }
890 
891 
892 /*!	Waits until one or more of the given events happen for a page that
893 	belongs to this cache.
894 	The cache must be locked. It will be unlocked by the method. \a relock
895 	specifies whether the method shall re-lock the cache before returning.
896 	\param page The page for which to wait.
897 	\param events The mask of events the caller is interested in.
898 	\param relock If \c true, the cache will be locked when returning,
899 		otherwise it won't be locked.
900 */
901 void
902 VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
903 {
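	// The waiter lives on this thread's stack and is pushed onto the cache's
	// fPageEventWaiters list while the cache is still locked. We prepare to
	// block before unlocking, so a _NotifyPageEvents() that fires between
	// Unlock() and thread_block() is not lost -- thread_block() then returns
	// immediately.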
904 	PageEventWaiter waiter;
905 	waiter.thread = thread_get_current_thread();
906 	waiter.next = fPageEventWaiters;
907 	waiter.page = page;
908 	waiter.events = events;
909 
910 	fPageEventWaiters = &waiter;
911 
912 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER,
913 		"cache page events");
914 
915 	Unlock();
916 	thread_block();
917 
918 	if (relock)
919 		Lock();
920 }
921 
922 
923 /*!	Makes this cache the source of the \a consumer cache,
924 	and adds the \a consumer to its list.
925 	This also grabs a reference to the source cache.
926 	Assumes you have the cache and the consumer's lock held.
927 */
928 void
929 VMCache::AddConsumer(VMCache* consumer)
930 {
931 	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
932 	AssertLocked();
933 	consumer->AssertLocked();
934 
935 	T(AddConsumer(this, consumer));
936 
937 	consumer->source = this;
938 	consumers.Add(consumer);
939 
940 	AcquireRefLocked();
941 	AcquireStoreRef();
942 }
943 
944 
945 /*!	Adds the \a area to this cache.
946 	Assumes you have the cache locked.
947 */
948 status_t
949 VMCache::InsertAreaLocked(VMArea* area)
950 {
951 	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
952 	AssertLocked();
953 
954 	T(InsertArea(this, area));
955 
956 	area->cache_next = areas;
957 	if (area->cache_next)
958 		area->cache_next->cache_prev = area;
959 	area->cache_prev = NULL;
960 	areas = area;
961 
962 	AcquireStoreRef();
963 
964 	return B_OK;
965 }
966 
967 
968 status_t
969 VMCache::RemoveArea(VMArea* area)
970 {
971 	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));
972 
973 	T(RemoveArea(this, area));
974 
975 	// We release the store reference first, since otherwise we would reverse
976 	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
977 	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
978 	// Also cf. _RemoveConsumer().
979 	ReleaseStoreRef();
980 
981 	AutoLocker<VMCache> locker(this);
982 
983 	if (area->cache_prev)
984 		area->cache_prev->cache_next = area->cache_next;
985 	if (area->cache_next)
986 		area->cache_next->cache_prev = area->cache_prev;
987 	if (areas == area)
988 		areas = area->cache_next;
989 
990 	return B_OK;
991 }
992 
993 
994 /*!	Transfers the areas from \a fromCache to this cache. This cache must not
995 	have areas yet. Both caches must be locked.
996 */
997 void
998 VMCache::TransferAreas(VMCache* fromCache)
999 {
1000 	AssertLocked();
1001 	fromCache->AssertLocked();
1002 	ASSERT(areas == NULL);
1003 
1004 	areas = fromCache->areas;
1005 	fromCache->areas = NULL;
1006 
1007 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1008 		area->cache = this;
1009 		AcquireRefLocked();
1010 		fromCache->ReleaseRefLocked();
1011 
1012 		T(RemoveArea(fromCache, area));
1013 		T(InsertArea(this, area));
1014 	}
1015 }
1016 
1017 
1018 uint32
1019 VMCache::CountWritableAreas(VMArea* ignoreArea) const
1020 {
1021 	uint32 count = 0;
1022 
1023 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1024 		if (area != ignoreArea
1025 			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
1026 			count++;
1027 		}
1028 	}
1029 
1030 	return count;
1031 }
1032 
1033 
1034 status_t
1035 VMCache::WriteModified()
1036 {
1037 	TRACE(("VMCache::WriteModified(cache = %p)\n", this));
1038 
1039 	if (temporary)
1040 		return B_OK;
1041 
1042 	Lock();
1043 	status_t status = vm_page_write_modified_pages(this);
1044 	Unlock();
1045 
1046 	return status;
1047 }
1048 
1049 
1050 /*!	Commits the memory to the store if the \a commitment is larger than
1051 	what's committed already.
1052 	Assumes you have the cache's lock held.
1053 */
1054 status_t
1055 VMCache::SetMinimalCommitment(off_t commitment, int priority)
1056 {
1057 	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %" B_PRIdOFF
1058 		")\n", this, commitment));
1059 	AssertLocked();
1060 
1061 	T(SetMinimalCommitment(this, commitment));
1062 
1063 	status_t status = B_OK;
1064 
1065 	// If we don't have enough committed space to cover through to the new end
1066 	// of the area...
1067 	if (committed_size < commitment) {
1068 		// ToDo: should we check if the cache's virtual size is large
1069 		//	enough for a commitment of that size?
1070 
1071 		// try to commit more memory
1072 		status = Commit(commitment, priority);
1073 	}
1074 
1075 	return status;
1076 }
1077 
1078 
1079 /*!	This function updates the size field of the cache.
1080 	If needed, it will free up all pages that don't belong to the cache anymore.
1081 	The cache lock must be held when you call it.
1082 	Since removed pages don't belong to the cache any longer, they are not
1083 	written back before being removed.
1084 
1085 	Note that this function may temporarily release the cache lock if it
1086 	has to wait for busy pages.
1087 */
1088 status_t
1089 VMCache::Resize(off_t newSize, int priority)
1090 {
1091 	TRACE(("VMCache::Resize(cache %p, newSize %" B_PRIdOFF ") old size %"
1092 		B_PRIdOFF "\n", this, newSize, this->virtual_end));
1093 	this->AssertLocked();
1094 
1095 	T(Resize(this, newSize));
1096 
1097 	status_t status = Commit(newSize - virtual_base, priority);
1098 	if (status != B_OK)
1099 		return status;
1100 
1101 	uint32 oldPageCount = (uint32)((virtual_end + B_PAGE_SIZE - 1)
1102 		>> PAGE_SHIFT);
1103 	uint32 newPageCount = (uint32)((newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT);
1104 
1105 	if (newPageCount < oldPageCount) {
1106 		// we need to remove all pages in the cache outside of the new virtual
1107 		// size
1108 		for (VMCachePagesTree::Iterator it
1109 					= pages.GetIterator(newPageCount, true, true);
1110 				vm_page* page = it.Next();) {
1111 			if (page->busy) {
1112 				if (page->busy_writing) {
1113 					// We cannot wait for the page to become available
1114 					// as we might cause a deadlock this way
1115 					page->busy_writing = false;
1116 						// this will notify the writer to free the page
1117 				} else {
1118 					// wait for page to become unbusy
1119 					WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1120 
1121 					// restart from the start of the list
1122 					it = pages.GetIterator(newPageCount, true, true);
1123 				}
1124 				continue;
1125 			}
1126 
1127 			// remove the page and put it into the free queue
1128 			DEBUG_PAGE_ACCESS_START(page);
1129 			vm_remove_all_page_mappings(page);
1130 			ASSERT(page->WiredCount() == 0);
1131 				// TODO: Find a real solution! If the page is wired
1132 				// temporarily (e.g. by lock_memory()), we actually must not
1133 				// unmap it!
1134 			RemovePage(page);
1135 			vm_page_free(this, page);
1136 				// Note: When iterating through an IteratableSplayTree
1137 				// removing the current node is safe.
1138 		}
1139 	}
1140 
1141 	virtual_end = newSize;
1142 	return B_OK;
1143 }
1144 
1145 
1146 /*!	You have to call this function with the VMCache lock held. */
1147 status_t
1148 VMCache::FlushAndRemoveAllPages()
1149 {
1150 	ASSERT_LOCKED_MUTEX(&fLock);
1151 
1152 	while (page_count > 0) {
1153 		// write back modified pages
1154 		status_t status = vm_page_write_modified_pages(this);
1155 		if (status != B_OK)
1156 			return status;
1157 
1158 		// remove pages
1159 		for (VMCachePagesTree::Iterator it = pages.GetIterator();
1160 				vm_page* page = it.Next();) {
1161 			if (page->busy) {
1162 				// wait for page to become unbusy
1163 				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1164 
1165 				// restart from the start of the list
1166 				it = pages.GetIterator();
1167 				continue;
1168 			}
1169 
1170 			// skip modified pages -- they will be written back in the next
1171 			// iteration
1172 			if (page->State() == PAGE_STATE_MODIFIED)
1173 				continue;
1174 
1175 			// We can't remove mapped pages.
1176 			if (page->IsMapped())
1177 				return B_BUSY;
1178 
1179 			DEBUG_PAGE_ACCESS_START(page);
1180 			RemovePage(page);
1181 			vm_page_free(this, page);
1182 				// Note: When iterating through an IteratableSplayTree
1183 				// removing the current node is safe.
1184 		}
1185 	}
1186 
1187 	return B_OK;
1188 }
1189 
1190 
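/*!	Default implementation: merely records the commitment. Subclasses that
	manage a real backing store (memory or swap) are expected to override this
	and actually reserve the space.
*/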
1191 status_t
1192 VMCache::Commit(off_t size, int priority)
1193 {
1194 	committed_size = size;
1195 	return B_OK;
1196 }
1197 
1198 
1199 /*!	Returns whether the cache's underlying backing store could deliver the
1200 	page at the given offset.
1201 
1202 	Basically it returns whether a Read() at \a offset would at least read a
1203 	partial page (assuming that no unexpected errors occur or the situation
1204 	changes in the meantime).
1205 */
1206 bool
1207 VMCache::HasPage(off_t offset)
1208 {
1209 	// In accordance with Fault() the default implementation doesn't have a
1210 	// backing store and doesn't allow faults.
1211 	return false;
1212 }
1213 
1214 
1215 status_t
1216 VMCache::Read(off_t offset, const generic_io_vec *vecs, size_t count,
1217 	uint32 flags, generic_size_t *_numBytes)
1218 {
1219 	return B_ERROR;
1220 }
1221 
1222 
1223 status_t
1224 VMCache::Write(off_t offset, const generic_io_vec *vecs, size_t count,
1225 	uint32 flags, generic_size_t *_numBytes)
1226 {
1227 	return B_ERROR;
1228 }
1229 
1230 
1231 status_t
1232 VMCache::WriteAsync(off_t offset, const generic_io_vec* vecs, size_t count,
1233 	generic_size_t numBytes, uint32 flags, AsyncIOCallback* callback)
1234 {
1235 	// Not supported, fall back to the synchronous hook.
1236 	generic_size_t transferred = numBytes;
1237 	status_t error = Write(offset, vecs, count, flags, &transferred);
1238 
1239 	if (callback != NULL)
1240 		callback->IOFinished(error, transferred != numBytes, transferred);
1241 
1242 	return error;
1243 }
1244 
1245 
1246 /*!	\brief Returns whether the cache can write the page at the given offset.
1247 
1248 	The cache must be locked when this function is invoked.
1249 
1250 	@param offset The page offset.
1251 	@return \c true, if the page can be written, \c false otherwise.
1252 */
1253 bool
1254 VMCache::CanWritePage(off_t offset)
1255 {
1256 	return false;
1257 }
1258 
1259 
1260 status_t
1261 VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
1262 {
1263 	return B_BAD_ADDRESS;
1264 }
1265 
1266 
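/*!	Default merge implementation: every page of \a source that this cache does
	not already contain at the same offset is moved up into this cache; pages
	shadowed by an existing page remain in \a source and are freed when the
	source cache is deleted. Both caches must be locked.
*/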
1267 void
1268 VMCache::Merge(VMCache* source)
1269 {
1270 	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
1271 			vm_page* page = it.Next();) {
1272 		// Note: Removing the current node while iterating through an
1273 		// IteratableSplayTree is safe.
1274 		vm_page* consumerPage = LookupPage(
1275 			(off_t)page->cache_offset << PAGE_SHIFT);
1276 		if (consumerPage == NULL) {
1277 			// the page is not yet in the consumer cache - move it upwards
1278 			MovePage(page);
1279 		}
1280 	}
1281 }
1282 
1283 
1284 status_t
1285 VMCache::AcquireUnreferencedStoreRef()
1286 {
1287 	return B_OK;
1288 }
1289 
1290 
1291 void
1292 VMCache::AcquireStoreRef()
1293 {
1294 }
1295 
1296 
1297 void
1298 VMCache::ReleaseStoreRef()
1299 {
1300 }
1301 
1302 
1303 /*!	Kernel debugger version of HasPage().
1304 	Does not do any locking.
1305 */
1306 bool
1307 VMCache::DebugHasPage(off_t offset)
1308 {
1309 	// default that works for all subclasses that don't lock anyway
1310 	return HasPage(offset);
1311 }
1312 
1313 
1314 /*!	Kernel debugger version of LookupPage().
1315 	Does not do any locking.
1316 */
1317 vm_page*
1318 VMCache::DebugLookupPage(off_t offset)
1319 {
1320 	return pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
1321 }
1322 
1323 
1324 void
1325 VMCache::Dump(bool showPages) const
1326 {
1327 	kprintf("CACHE %p:\n", this);
1328 	kprintf("  ref_count:    %" B_PRId32 "\n", RefCount());
1329 	kprintf("  source:       %p\n", source);
1330 	kprintf("  type:         %s\n", vm_cache_type_to_string(type));
1331 	kprintf("  virtual_base: 0x%" B_PRIx64 "\n", virtual_base);
1332 	kprintf("  virtual_end:  0x%" B_PRIx64 "\n", virtual_end);
1333 	kprintf("  temporary:    %" B_PRIu32 "\n", temporary);
1334 	kprintf("  lock:         %p\n", &fLock);
1335 #if KDEBUG
1336 	kprintf("  lock.holder:  %" B_PRId32 "\n", fLock.holder);
1337 #endif
1338 	kprintf("  areas:\n");
1339 
1340 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1341 		kprintf("    area 0x%" B_PRIx32 ", %s\n", area->id, area->name);
1342 		kprintf("\tbase_addr:  0x%lx, size: 0x%lx\n", area->Base(),
1343 			area->Size());
1344 		kprintf("\tprotection: 0x%" B_PRIx32 "\n", area->protection);
1345 		kprintf("\towner:      0x%" B_PRIx32 "\n", area->address_space->ID());
1346 	}
1347 
1348 	kprintf("  consumers:\n");
1349 	for (ConsumerList::ConstIterator it = consumers.GetIterator();
1350 		 	VMCache* consumer = it.Next();) {
1351 		kprintf("\t%p\n", consumer);
1352 	}
1353 
1354 	kprintf("  pages:\n");
1355 	if (showPages) {
1356 		for (VMCachePagesTree::ConstIterator it = pages.GetIterator();
1357 				vm_page* page = it.Next();) {
1358 			if (!vm_page_is_dummy(page)) {
1359 				kprintf("\t%p ppn %#" B_PRIxPHYSADDR " offset %#" B_PRIxPHYSADDR
1360 					" state %u (%s) wired_count %u\n", page,
1361 					page->physical_page_number, page->cache_offset,
1362 					page->State(), page_state_to_string(page->State()),
1363 					page->WiredCount());
1364 			} else {
1365 				kprintf("\t%p DUMMY PAGE state %u (%s)\n",
1366 					page, page->State(), page_state_to_string(page->State()));
1367 			}
1368 		}
1369 	} else
1370 		kprintf("\t%" B_PRIu32 " in cache\n", page_count);
1371 }
1372 
1373 
1374 /*!	Wakes up threads waiting for page events.
1375 	\param page The page for which events occurred.
1376 	\param events The mask of events that occurred.
1377 */
1378 void
1379 VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
1380 {
1381 	PageEventWaiter** it = &fPageEventWaiters;
1382 	while (PageEventWaiter* waiter = *it) {
1383 		if (waiter->page == page && (waiter->events & events) != 0) {
1384 			// remove from list and unblock
1385 			*it = waiter->next;
1386 			thread_unblock(waiter->thread, B_OK);
1387 		} else
1388 			it = &waiter->next;
1389 	}
1390 }
1391 
1392 
1393 /*!	Merges the given cache with its only consumer.
1394 	The caller must hold both the cache's and the consumer's lock. The method
1395 	releases neither lock.
1396 */
1397 void
1398 VMCache::_MergeWithOnlyConsumer()
1399 {
1400 	VMCache* consumer = consumers.RemoveHead();
1401 
1402 	TRACE(("merge vm cache %p (ref == %" B_PRId32 ") with vm cache %p\n",
1403 		this, this->fRefCount, consumer));
1404 
1405 	T(Merge(this, consumer));
1406 
1407 	// merge the cache
1408 	consumer->Merge(this);
1409 
1410 	// The remaining consumer has got a new source.
1411 	if (source != NULL) {
1412 		VMCache* newSource = source;
1413 
1414 		newSource->Lock();
1415 
1416 		newSource->consumers.Remove(this);
1417 		newSource->consumers.Add(consumer);
1418 		consumer->source = newSource;
1419 		source = NULL;
1420 
1421 		newSource->Unlock();
1422 	} else
1423 		consumer->source = NULL;
1424 
1425 	// Release the reference the cache's consumer owned. The consumer takes
1426 	// over the cache's ref to its source (if any) instead.
1427 	ReleaseRefLocked();
1428 }
1429 
1430 
1431 /*!	Removes the \a consumer from this cache.
1432 	It will also release the reference to the cache owned by the consumer.
1433 	Assumes you have the consumer's lock held. This cache must not be
1434 	locked.
1435 */
1436 void
1437 VMCache::_RemoveConsumer(VMCache* consumer)
1438 {
1439 	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
1440 	consumer->AssertLocked();
1441 
1442 	T(RemoveConsumer(this, consumer));
1443 
1444 	// Remove the store ref before locking the cache. Otherwise we'd call into
1445 	// the VFS while holding the cache lock, which would reverse the usual
1446 	// locking order.
1447 	ReleaseStoreRef();
1448 
1449 	// remove the consumer from the cache, but keep its reference until later
1450 	Lock();
1451 	consumers.Remove(consumer);
1452 	consumer->source = NULL;
1453 
1454 	ReleaseRefAndUnlock();
1455 }
1456 
1457 
1458 // #pragma mark - VMCacheFactory
1459 	// TODO: Move to own source file!
1460 
1461 
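// A hypothetical call site (sketch, not from this file), creating a swappable,
// overcommitting anonymous cache without precommitted or guard pages at
// normal (VM_PRIORITY_USER) priority:
//
//	VMCache* cache;
//	status_t status = VMCacheFactory::CreateAnonymousCache(cache,
//		true /* canOvercommit */, 0, 0, true /* swappable */,
//		VM_PRIORITY_USER);
//	if (status != B_OK)
//		return status;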
1462 /*static*/ status_t
1463 VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
1464 	int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
1465 	int priority)
1466 {
1467 	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1468 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1469 	if (priority >= VM_PRIORITY_VIP)
1470 		allocationFlags |= HEAP_PRIORITY_VIP;
1471 
1472 #if ENABLE_SWAP_SUPPORT
1473 	if (swappable) {
1474 		VMAnonymousCache* cache
1475 			= new(gAnonymousCacheObjectCache, allocationFlags) VMAnonymousCache;
1476 		if (cache == NULL)
1477 			return B_NO_MEMORY;
1478 
1479 		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1480 			numGuardPages, allocationFlags);
1481 		if (error != B_OK) {
1482 			cache->Delete();
1483 			return error;
1484 		}
1485 
1486 		T(Create(cache));
1487 
1488 		_cache = cache;
1489 		return B_OK;
1490 	}
1491 #endif
1492 
1493 	VMAnonymousNoSwapCache* cache
1494 		= new(gAnonymousNoSwapCacheObjectCache, allocationFlags)
1495 			VMAnonymousNoSwapCache;
1496 	if (cache == NULL)
1497 		return B_NO_MEMORY;
1498 
1499 	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1500 		numGuardPages, allocationFlags);
1501 	if (error != B_OK) {
1502 		cache->Delete();
1503 		return error;
1504 	}
1505 
1506 	T(Create(cache));
1507 
1508 	_cache = cache;
1509 	return B_OK;
1510 }
1511 
1512 
1513 /*static*/ status_t
1514 VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
1515 {
1516 	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1517 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1518 		// Note: Vnode cache creation is never VIP.
1519 
1520 	VMVnodeCache* cache
1521 		= new(gVnodeCacheObjectCache, allocationFlags) VMVnodeCache;
1522 	if (cache == NULL)
1523 		return B_NO_MEMORY;
1524 
1525 	status_t error = cache->Init(vnode, allocationFlags);
1526 	if (error != B_OK) {
1527 		cache->Delete();
1528 		return error;
1529 	}
1530 
1531 	T(Create(cache));
1532 
1533 	_cache = cache;
1534 	return B_OK;
1535 }
1536 
1537 
1538 /*static*/ status_t
1539 VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
1540 {
1541 	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1542 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1543 		// Note: Device cache creation is never VIP.
1544 
1545 	VMDeviceCache* cache
1546 		= new(gDeviceCacheObjectCache, allocationFlags) VMDeviceCache;
1547 	if (cache == NULL)
1548 		return B_NO_MEMORY;
1549 
1550 	status_t error = cache->Init(baseAddress, allocationFlags);
1551 	if (error != B_OK) {
1552 		cache->Delete();
1553 		return error;
1554 	}
1555 
1556 	T(Create(cache));
1557 
1558 	_cache = cache;
1559 	return B_OK;
1560 }
1561 
1562 
1563 /*static*/ status_t
1564 VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
1565 {
1566 	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1567 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1568 	if (priority >= VM_PRIORITY_VIP)
1569 		allocationFlags |= HEAP_PRIORITY_VIP;
1570 
1571 	VMNullCache* cache
1572 		= new(gNullCacheObjectCache, allocationFlags) VMNullCache;
1573 	if (cache == NULL)
1574 		return B_NO_MEMORY;
1575 
1576 	status_t error = cache->Init(allocationFlags);
1577 	if (error != B_OK) {
1578 		cache->Delete();
1579 		return error;
1580 	}
1581 
1582 	T(Create(cache));
1583 
1584 	_cache = cache;
1585 	return B_OK;
1586 }
1587