xref: /haiku/src/system/kernel/vm/VMCache.cpp (revision 9a6a20d4689307142a7ed26a1437ba47e244e73f)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include <vm/VMCache.h>
12 
13 #include <stddef.h>
14 #include <stdlib.h>
15 
16 #include <algorithm>
17 
18 #include <arch/cpu.h>
19 #include <condition_variable.h>
20 #include <heap.h>
21 #include <int.h>
22 #include <kernel.h>
23 #include <slab/Slab.h>
24 #include <smp.h>
25 #include <thread.h>
26 #include <tracing.h>
27 #include <util/AutoLock.h>
28 #include <vfs.h>
29 #include <vm/vm.h>
30 #include <vm/vm_page.h>
31 #include <vm/vm_priv.h>
32 #include <vm/vm_types.h>
33 #include <vm/VMAddressSpace.h>
34 #include <vm/VMArea.h>
35 
36 // needed for the factory only
37 #include "VMAnonymousCache.h"
38 #include "VMAnonymousNoSwapCache.h"
39 #include "VMDeviceCache.h"
40 #include "VMNullCache.h"
41 #include "../cache/vnode_store.h"
42 
43 
44 //#define TRACE_VM_CACHE
45 #ifdef TRACE_VM_CACHE
46 #	define TRACE(x) dprintf x
47 #else
48 #	define TRACE(x) ;
49 #endif
50 
51 
52 #if DEBUG_CACHE_LIST
53 VMCache* gDebugCacheList;
54 #endif
55 static rw_lock sCacheListLock = RW_LOCK_INITIALIZER("global VMCache list");
56 	// The lock is also needed when the debug feature is disabled.
57 
58 ObjectCache* gCacheRefObjectCache;
59 #if ENABLE_SWAP_SUPPORT
60 ObjectCache* gAnonymousCacheObjectCache;
61 #endif
62 ObjectCache* gAnonymousNoSwapCacheObjectCache;
63 ObjectCache* gVnodeCacheObjectCache;
64 ObjectCache* gDeviceCacheObjectCache;
65 ObjectCache* gNullCacheObjectCache;
66 
67 
68 struct VMCache::PageEventWaiter {
69 	Thread*				thread;
70 	PageEventWaiter*	next;
71 	vm_page*			page;
72 	uint32				events;
73 };
74 
75 
76 #if VM_CACHE_TRACING
77 
78 namespace VMCacheTracing {
79 
80 class VMCacheTraceEntry : public AbstractTraceEntry {
81 	public:
82 		VMCacheTraceEntry(VMCache* cache)
83 			:
84 			fCache(cache)
85 		{
86 #if VM_CACHE_TRACING_STACK_TRACE
87 			fStackTrace = capture_tracing_stack_trace(
88 				VM_CACHE_TRACING_STACK_TRACE, 0, true);
89 				// Don't capture userland stack trace to avoid potential
90 				// deadlocks.
91 #endif
92 		}
93 
94 #if VM_CACHE_TRACING_STACK_TRACE
95 		virtual void DumpStackTrace(TraceOutput& out)
96 		{
97 			out.PrintStackTrace(fStackTrace);
98 		}
99 #endif
100 
101 		VMCache* Cache() const
102 		{
103 			return fCache;
104 		}
105 
106 	protected:
107 		VMCache*	fCache;
108 #if VM_CACHE_TRACING_STACK_TRACE
109 		tracing_stack_trace* fStackTrace;
110 #endif
111 };
112 
113 
114 class Create : public VMCacheTraceEntry {
115 	public:
116 		Create(VMCache* cache)
117 			:
118 			VMCacheTraceEntry(cache)
119 		{
120 			Initialized();
121 		}
122 
123 		virtual void AddDump(TraceOutput& out)
124 		{
125 			out.Print("vm cache create: -> cache: %p", fCache);
126 		}
127 };
128 
129 
130 class Delete : public VMCacheTraceEntry {
131 	public:
132 		Delete(VMCache* cache)
133 			:
134 			VMCacheTraceEntry(cache)
135 		{
136 			Initialized();
137 		}
138 
139 		virtual void AddDump(TraceOutput& out)
140 		{
141 			out.Print("vm cache delete: cache: %p", fCache);
142 		}
143 };
144 
145 
146 class SetMinimalCommitment : public VMCacheTraceEntry {
147 	public:
148 		SetMinimalCommitment(VMCache* cache, off_t commitment)
149 			:
150 			VMCacheTraceEntry(cache),
151 			fOldCommitment(cache->committed_size),
152 			fCommitment(commitment)
153 		{
154 			Initialized();
155 		}
156 
157 		virtual void AddDump(TraceOutput& out)
158 		{
159 			out.Print("vm cache set min commitment: cache: %p, "
160 				"commitment: %" B_PRIdOFF " -> %" B_PRIdOFF, fCache,
161 				fOldCommitment, fCommitment);
162 		}
163 
164 	private:
165 		off_t	fOldCommitment;
166 		off_t	fCommitment;
167 };
168 
169 
170 class Resize : public VMCacheTraceEntry {
171 	public:
172 		Resize(VMCache* cache, off_t size)
173 			:
174 			VMCacheTraceEntry(cache),
175 			fOldSize(cache->virtual_end),
176 			fSize(size)
177 		{
178 			Initialized();
179 		}
180 
181 		virtual void AddDump(TraceOutput& out)
182 		{
183 			out.Print("vm cache resize: cache: %p, size: %" B_PRIdOFF " -> %"
184 				B_PRIdOFF, fCache, fOldSize, fSize);
185 		}
186 
187 	private:
188 		off_t	fOldSize;
189 		off_t	fSize;
190 };
191 
192 
193 class Rebase : public VMCacheTraceEntry {
194 	public:
195 		Rebase(VMCache* cache, off_t base)
196 			:
197 			VMCacheTraceEntry(cache),
198 			fOldBase(cache->virtual_base),
199 			fBase(base)
200 		{
201 			Initialized();
202 		}
203 
204 		virtual void AddDump(TraceOutput& out)
205 		{
206 			out.Print("vm cache rebase: cache: %p, base: %lld -> %lld", fCache,
207 				fOldBase, fBase);
208 		}
209 
210 	private:
211 		off_t	fOldBase;
212 		off_t	fBase;
213 };
214 
215 
216 class AddConsumer : public VMCacheTraceEntry {
217 	public:
218 		AddConsumer(VMCache* cache, VMCache* consumer)
219 			:
220 			VMCacheTraceEntry(cache),
221 			fConsumer(consumer)
222 		{
223 			Initialized();
224 		}
225 
226 		virtual void AddDump(TraceOutput& out)
227 		{
228 			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
229 				fConsumer);
230 		}
231 
232 		VMCache* Consumer() const
233 		{
234 			return fConsumer;
235 		}
236 
237 	private:
238 		VMCache*	fConsumer;
239 };
240 
241 
242 class RemoveConsumer : public VMCacheTraceEntry {
243 	public:
244 		RemoveConsumer(VMCache* cache, VMCache* consumer)
245 			:
246 			VMCacheTraceEntry(cache),
247 			fConsumer(consumer)
248 		{
249 			Initialized();
250 		}
251 
252 		virtual void AddDump(TraceOutput& out)
253 		{
254 			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
255 				fCache, fConsumer);
256 		}
257 
258 	private:
259 		VMCache*	fConsumer;
260 };
261 
262 
263 class Merge : public VMCacheTraceEntry {
264 	public:
265 		Merge(VMCache* cache, VMCache* consumer)
266 			:
267 			VMCacheTraceEntry(cache),
268 			fConsumer(consumer)
269 		{
270 			Initialized();
271 		}
272 
273 		virtual void AddDump(TraceOutput& out)
274 		{
275 			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
276 				fCache, fConsumer);
277 		}
278 
279 	private:
280 		VMCache*	fConsumer;
281 };
282 
283 
284 class InsertArea : public VMCacheTraceEntry {
285 	public:
286 		InsertArea(VMCache* cache, VMArea* area)
287 			:
288 			VMCacheTraceEntry(cache),
289 			fArea(area)
290 		{
291 			Initialized();
292 		}
293 
294 		virtual void AddDump(TraceOutput& out)
295 		{
296 			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
297 				fArea);
298 		}
299 
300 		VMArea*	Area() const
301 		{
302 			return fArea;
303 		}
304 
305 	private:
306 		VMArea*	fArea;
307 };
308 
309 
310 class RemoveArea : public VMCacheTraceEntry {
311 	public:
312 		RemoveArea(VMCache* cache, VMArea* area)
313 			:
314 			VMCacheTraceEntry(cache),
315 			fArea(area)
316 		{
317 			Initialized();
318 		}
319 
320 		virtual void AddDump(TraceOutput& out)
321 		{
322 			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
323 				fArea);
324 		}
325 
326 	private:
327 		VMArea*	fArea;
328 };
329 
330 }	// namespace VMCacheTracing
331 
332 #	define T(x) new(std::nothrow) VMCacheTracing::x;
333 
334 #	if VM_CACHE_TRACING >= 2
335 
336 namespace VMCacheTracing {
337 
338 class InsertPage : public VMCacheTraceEntry {
339 	public:
340 		InsertPage(VMCache* cache, vm_page* page, off_t offset)
341 			:
342 			VMCacheTraceEntry(cache),
343 			fPage(page),
344 			fOffset(offset)
345 		{
346 			Initialized();
347 		}
348 
349 		virtual void AddDump(TraceOutput& out)
350 		{
351 			out.Print("vm cache insert page: cache: %p, page: %p, offset: %"
352 				B_PRIdOFF, fCache, fPage, fOffset);
353 		}
354 
355 	private:
356 		vm_page*	fPage;
357 		off_t		fOffset;
358 };
359 
360 
361 class RemovePage : public VMCacheTraceEntry {
362 	public:
363 		RemovePage(VMCache* cache, vm_page* page)
364 			:
365 			VMCacheTraceEntry(cache),
366 			fPage(page)
367 		{
368 			Initialized();
369 		}
370 
371 		virtual void AddDump(TraceOutput& out)
372 		{
373 			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
374 				fPage);
375 		}
376 
377 	private:
378 		vm_page*	fPage;
379 };
380 
381 }	// namespace VMCacheTracing
382 
383 #		define T2(x) new(std::nothrow) VMCacheTracing::x;
384 #	else
385 #		define T2(x) ;
386 #	endif
387 #else
388 #	define T(x) ;
389 #	define T2(x) ;
390 #endif
391 
392 
393 //	#pragma mark - debugger commands
394 
395 
396 #if VM_CACHE_TRACING
397 
398 
399 static void*
400 cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
401 {
402 	using namespace VMCacheTracing;
403 
404 	// find the previous "insert area" entry for the given area
405 	TraceEntryIterator iterator = baseIterator;
406 	TraceEntry* entry = iterator.Current();
407 	while (entry != NULL) {
408 		if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
409 			if (insertAreaEntry->Area() == area)
410 				return insertAreaEntry->Cache();
411 		}
412 
413 		entry = iterator.Previous();
414 	}
415 
416 	return NULL;
417 }
418 
419 
420 static void*
421 cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
422 {
423 	using namespace VMCacheTracing;
424 
425 	// find the previous "add consumer" or "create" entry for the given cache
426 	TraceEntryIterator iterator = baseIterator;
427 	TraceEntry* entry = iterator.Current();
428 	while (entry != NULL) {
429 		if (Create* createEntry = dynamic_cast<Create*>(entry)) {
430 			if (createEntry->Cache() == cache)
431 				return NULL;
432 		} else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
433 			if (addEntry->Consumer() == cache)
434 				return addEntry->Cache();
435 		}
436 
437 		entry = iterator.Previous();
438 	}
439 
440 	return NULL;
441 }
442 
443 
444 static int
445 command_cache_stack(int argc, char** argv)
446 {
447 	if (argc < 3 || argc > 4) {
448 		print_debugger_command_usage(argv[0]);
449 		return 0;
450 	}
451 
452 	bool isArea = false;
453 
454 	int argi = 1;
455 	if (argc == 4) {
456 		if (strcmp(argv[argi], "area") != 0) {
457 			print_debugger_command_usage(argv[0]);
458 			return 0;
459 		}
460 
461 		argi++;
462 		isArea = true;
463 	}
464 
465 	uint64 addressValue;
466 	uint64 debugEntryIndex;
467 	if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
468 		|| !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
469 		return 0;
470 	}
471 
472 	TraceEntryIterator baseIterator;
473 	if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
474 		kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
475 		return 0;
476 	}
477 
478 	void* address = (void*)(addr_t)addressValue;
479 
480 	kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
481 		isArea ? "area" : "cache", address, debugEntryIndex);
482 	if (isArea) {
483 		address = cache_stack_find_area_cache(baseIterator, address);
484 		if (address == NULL) {
485 			kprintf("  cache not found\n");
486 			return 0;
487 		}
488 	}
489 
490 	while (address != NULL) {
491 		kprintf("  %p\n", address);
492 		address = cache_stack_find_consumer(baseIterator, address);
493 	}
494 
495 	return 0;
496 }
497 
498 
499 #endif	// VM_CACHE_TRACING
500 
501 
502 //	#pragma mark -
503 
504 
505 status_t
506 vm_cache_init(kernel_args* args)
507 {
508 	// Create object caches for the structures we allocate here.
509 	gCacheRefObjectCache = create_object_cache("cache refs", sizeof(VMCacheRef),
510 		0, NULL, NULL, NULL);
511 #if ENABLE_SWAP_SUPPORT
512 	gAnonymousCacheObjectCache = create_object_cache("anon caches",
513 		sizeof(VMAnonymousCache), 0, NULL, NULL, NULL);
514 #endif
515 	gAnonymousNoSwapCacheObjectCache = create_object_cache(
516 		"anon no-swap caches", sizeof(VMAnonymousNoSwapCache), 0, NULL, NULL,
517 		NULL);
518 	gVnodeCacheObjectCache = create_object_cache("vnode caches",
519 		sizeof(VMVnodeCache), 0, NULL, NULL, NULL);
520 	gDeviceCacheObjectCache = create_object_cache("device caches",
521 		sizeof(VMDeviceCache), 0, NULL, NULL, NULL);
522 	gNullCacheObjectCache = create_object_cache("null caches",
523 		sizeof(VMNullCache), 0, NULL, NULL, NULL);
524 
525 	if (gCacheRefObjectCache == NULL
526 #if ENABLE_SWAP_SUPPORT
527 		|| gAnonymousCacheObjectCache == NULL
528 #endif
529 		|| gAnonymousNoSwapCacheObjectCache == NULL
530 		|| gVnodeCacheObjectCache == NULL
531 		|| gDeviceCacheObjectCache == NULL
532 		|| gNullCacheObjectCache == NULL) {
533 		panic("vm_cache_init(): Failed to create object caches!");
534 		return B_NO_MEMORY;
535 	}
536 
537 	return B_OK;
538 }
539 
540 
541 void
542 vm_cache_init_post_heap()
543 {
544 #if VM_CACHE_TRACING
545 	add_debugger_command_etc("cache_stack", &command_cache_stack,
546 		"List the ancestors (sources) of a VMCache at the time given by "
547 			"tracing entry index",
548 		"[ \"area\" ] <address> <tracing entry index>\n"
549 		"All ancestors (sources) of a given VMCache at the time given by the\n"
550 		"tracing entry index are listed. If \"area\" is given the supplied\n"
551 		"address is an area instead of a cache address. The listing will\n"
552 		"start with the area's cache at that point.\n",
553 		0);
554 #endif	// VM_CACHE_TRACING
555 }
556 
557 
558 VMCache*
559 vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
560 {
561 	rw_lock_read_lock(&sCacheListLock);
562 
563 	while (true) {
564 		VMCacheRef* cacheRef = page->CacheRef();
565 		if (cacheRef == NULL) {
566 			rw_lock_read_unlock(&sCacheListLock);
567 			return NULL;
568 		}
569 
570 		VMCache* cache = cacheRef->cache;
571 		if (dontWait) {
572 			if (!cache->TryLock()) {
573 				rw_lock_read_unlock(&sCacheListLock);
574 				return NULL;
575 			}
576 		} else {
577 			if (!cache->SwitchFromReadLock(&sCacheListLock)) {
578 				// cache has been deleted
579 				rw_lock_read_lock(&sCacheListLock);
580 				continue;
581 			}
582 			rw_lock_read_lock(&sCacheListLock);
583 		}
584 
585 		if (cache == page->Cache()) {
586 			rw_lock_read_unlock(&sCacheListLock);
587 			cache->AcquireRefLocked();
588 			return cache;
589 		}
590 
591 		// the cache changed in the meantime
592 		cache->Unlock();
593 	}
594 }
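

/*	Illustrative usage sketch (a minimal pattern, not a specific caller in this
	file): a caller that only holds a vm_page pointer uses the function above
	to get the page's cache in locked state and afterwards drops both the lock
	and the reference it was handed.

		vm_page* page = ...;
			// assumed: a page the caller already knows about
		VMCache* cache = vm_cache_acquire_locked_page_cache(page, false);
		if (cache != NULL) {
			// ... inspect or modify the page while its cache is locked ...
			cache->ReleaseRefAndUnlock();
		}
*/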
595 
596 
597 // #pragma mark - VMCache
598 
599 
600 VMCacheRef::VMCacheRef(VMCache* cache)
601 	:
602 	cache(cache),
603 	ref_count(1)
604 {
605 }
606 
607 
608 // #pragma mark - VMCache
609 
610 
611 bool
612 VMCache::_IsMergeable() const
613 {
614 	return areas == NULL && temporary && !unmergeable
615 		&& !consumers.IsEmpty() && consumers.Head() == consumers.Tail();
616 }
617 
618 
619 VMCache::VMCache()
620 	:
621 	fCacheRef(NULL)
622 {
623 }
624 
625 
626 VMCache::~VMCache()
627 {
628 	object_cache_delete(gCacheRefObjectCache, fCacheRef);
629 }
630 
631 
632 status_t
633 VMCache::Init(uint32 cacheType, uint32 allocationFlags)
634 {
635 	mutex_init(&fLock, "VMCache");
636 
637 	areas = NULL;
638 	fRefCount = 1;
639 	source = NULL;
640 	virtual_base = 0;
641 	virtual_end = 0;
642 	committed_size = 0;
643 	temporary = 0;
644 	unmergeable = 0;
645 	page_count = 0;
646 	fWiredPagesCount = 0;
647 	type = cacheType;
648 	fPageEventWaiters = NULL;
649 
650 #if DEBUG_CACHE_LIST
651 	debug_previous = NULL;
652 	debug_next = NULL;
653 		// initialize in case the following fails
654 #endif
655 
656 	fCacheRef = new(gCacheRefObjectCache, allocationFlags) VMCacheRef(this);
657 	if (fCacheRef == NULL)
658 		return B_NO_MEMORY;
659 
660 #if DEBUG_CACHE_LIST
661 	rw_lock_write_lock(&sCacheListLock);
662 
663 	if (gDebugCacheList != NULL)
664 		gDebugCacheList->debug_previous = this;
665 	debug_next = gDebugCacheList;
666 	gDebugCacheList = this;
667 
668 	rw_lock_write_unlock(&sCacheListLock);
669 #endif
670 
671 	return B_OK;
672 }
673 
674 
675 void
676 VMCache::Delete()
677 {
678 	if (areas != NULL)
679 		panic("cache %p to be deleted still has areas", this);
680 	if (!consumers.IsEmpty())
681 		panic("cache %p to be deleted still has consumers", this);
682 
683 	T(Delete(this));
684 
685 	// free all of the pages in the cache
686 	while (vm_page* page = pages.Root()) {
687 		if (!page->mappings.IsEmpty() || page->WiredCount() != 0) {
688 			panic("remove page %p from cache %p: page still has mappings!\n"
689 				"@!page %p; cache %p", page, this, page, this);
690 		}
691 
692 		// remove it
693 		pages.Remove(page);
694 		page->SetCacheRef(NULL);
695 
696 		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
697 			page->physical_page_number));
698 		DEBUG_PAGE_ACCESS_START(page);
699 		vm_page_free(this, page);
700 	}
701 
702 	// remove the ref to the source
703 	if (source)
704 		source->_RemoveConsumer(this);
705 
706 	// We lock and unlock the sCacheListLock, even if DEBUG_CACHE_LIST is
707 	// not enabled. This synchronization point is needed for
708 	// vm_cache_acquire_locked_page_cache().
709 	rw_lock_write_lock(&sCacheListLock);
710 
711 #if DEBUG_CACHE_LIST
712 	if (debug_previous)
713 		debug_previous->debug_next = debug_next;
714 	if (debug_next)
715 		debug_next->debug_previous = debug_previous;
716 	if (this == gDebugCacheList)
717 		gDebugCacheList = debug_next;
718 #endif
719 
720 	mutex_destroy(&fLock);
721 
722 	rw_lock_write_unlock(&sCacheListLock);
723 
724 	DeleteObject();
725 }
726 
727 
728 void
729 VMCache::Unlock(bool consumerLocked)
730 {
731 	while (fRefCount == 1 && _IsMergeable()) {
732 		VMCache* consumer = consumers.Head();
733 		if (consumerLocked) {
734 			_MergeWithOnlyConsumer();
735 		} else if (consumer->TryLock()) {
736 			_MergeWithOnlyConsumer();
737 			consumer->Unlock();
738 		} else {
739 			// Someone else has locked the consumer ATM. Unlock this cache and
740 			// wait for the consumer lock. Increment the cache's ref count
741 			// temporarily, so that no one else will try what we are doing or
742 			// delete the cache.
743 			fRefCount++;
744 			bool consumerLockedTemp = consumer->SwitchLock(&fLock);
745 			Lock();
746 			fRefCount--;
747 
748 			if (consumerLockedTemp) {
749 				if (fRefCount == 1 && _IsMergeable()
750 						&& consumer == consumers.Head()) {
751 					// nothing has changed in the meantime -- merge
752 					_MergeWithOnlyConsumer();
753 				}
754 
755 				consumer->Unlock();
756 			}
757 		}
758 	}
759 
760 	if (fRefCount == 0) {
761 		// delete this cache
762 		Delete();
763 	} else
764 		mutex_unlock(&fLock);
765 }
766 
767 
768 vm_page*
769 VMCache::LookupPage(off_t offset)
770 {
771 	AssertLocked();
772 
773 	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
774 
775 #if KDEBUG
776 	if (page != NULL && page->Cache() != this)
777 		panic("page %p not in cache %p\n", page, this);
778 #endif
779 
780 	return page;
781 }
782 
783 
784 void
785 VMCache::InsertPage(vm_page* page, off_t offset)
786 {
787 	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %" B_PRIdOFF "\n",
788 		this, page, offset));
789 	AssertLocked();
790 
791 	if (page->CacheRef() != NULL) {
792 		panic("insert page %p into cache %p: page cache is set to %p\n",
793 			page, this, page->Cache());
794 	}
795 
796 	T2(InsertPage(this, page, offset));
797 
798 	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
799 	page_count++;
800 	page->SetCacheRef(fCacheRef);
801 
802 #if KDEBUG
803 	vm_page* otherPage = pages.Lookup(page->cache_offset);
804 	if (otherPage != NULL) {
805 		panic("VMCache::InsertPage(): there's already page %p with cache "
806 			"offset %" B_PRIuPHYSADDR " in cache %p; inserting page %p",
807 			otherPage, page->cache_offset, this, page);
808 	}
809 #endif	// KDEBUG
810 
811 	pages.Insert(page);
812 
813 	if (page->WiredCount() > 0)
814 		IncrementWiredPagesCount();
815 }
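

/*	Illustrative lookup-or-insert sketch, with assumptions: "cache" is a locked
	VMCache, "page" a freshly allocated vm_page not yet owned by any cache, and
	"offset" is page aligned.

		vm_page* existing = cache->LookupPage(offset);
		if (existing == NULL)
			cache->InsertPage(page, offset);
		// otherwise the freshly allocated page would have to be released again
*/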
816 
817 
818 /*!	Removes the vm_page from this cache. Of course, the page must
819 	really be in this cache or evil things will happen.
820 	The cache lock must be held.
821 */
822 void
823 VMCache::RemovePage(vm_page* page)
824 {
825 	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
826 	AssertLocked();
827 
828 	if (page->Cache() != this) {
829 		panic("remove page %p from cache %p: page cache is set to %p\n", page,
830 			this, page->Cache());
831 	}
832 
833 	T2(RemovePage(this, page));
834 
835 	pages.Remove(page);
836 	page_count--;
837 	page->SetCacheRef(NULL);
838 
839 	if (page->WiredCount() > 0)
840 		DecrementWiredPagesCount();
841 }
842 
843 
844 /*!	Moves the given page from its current cache and inserts it into this
845 	cache at the given offset.
846 	Both caches must be locked.
847 */
848 void
849 VMCache::MovePage(vm_page* page, off_t offset)
850 {
851 	VMCache* oldCache = page->Cache();
852 
853 	AssertLocked();
854 	oldCache->AssertLocked();
855 
856 	// remove from old cache
857 	oldCache->pages.Remove(page);
858 	oldCache->page_count--;
859 	T2(RemovePage(oldCache, page));
860 
861 	// change the offset
862 	page->cache_offset = offset >> PAGE_SHIFT;
863 
864 	// insert here
865 	pages.Insert(page);
866 	page_count++;
867 	page->SetCacheRef(fCacheRef);
868 
869 	if (page->WiredCount() > 0) {
870 		IncrementWiredPagesCount();
871 		oldCache->DecrementWiredPagesCount();
872 	}
873 
874 	T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
875 }
876 
877 /*!	Moves the given page from its current cache and inserts it into this
878 	cache. Both caches must be locked.
879 */
880 void
881 VMCache::MovePage(vm_page* page)
882 {
883 	MovePage(page, page->cache_offset << PAGE_SHIFT);
884 }
885 
886 
887 /*!	Moves all pages from the given cache to this one.
888 	Both caches must be locked. This cache must be empty.
889 */
890 void
891 VMCache::MoveAllPages(VMCache* fromCache)
892 {
893 	AssertLocked();
894 	fromCache->AssertLocked();
895 	ASSERT(page_count == 0);
896 
897 	std::swap(fromCache->pages, pages);
898 	page_count = fromCache->page_count;
899 	fromCache->page_count = 0;
900 	fWiredPagesCount = fromCache->fWiredPagesCount;
901 	fromCache->fWiredPagesCount = 0;
902 
903 	// swap the VMCacheRefs
904 	rw_lock_write_lock(&sCacheListLock);
905 	std::swap(fCacheRef, fromCache->fCacheRef);
906 	fCacheRef->cache = this;
907 	fromCache->fCacheRef->cache = fromCache;
908 	rw_lock_write_unlock(&sCacheListLock);
909 
910 #if VM_CACHE_TRACING >= 2
911 	for (VMCachePagesTree::Iterator it = pages.GetIterator();
912 			vm_page* page = it.Next();) {
913 		T2(RemovePage(fromCache, page));
914 		T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
915 	}
916 #endif
917 }
918 
919 
920 /*!	Waits until one or more of the given events happen for a page that
921 	belongs to this cache.
922 	The cache must be locked. It will be unlocked by the method. \a relock
923 	specifies whether the method shall re-lock the cache before returning.
924 	\param page The page for which to wait.
925 	\param events The mask of events the caller is interested in.
926 	\param relock If \c true, the cache will be locked when returning,
927 		otherwise it won't be locked.
928 */
929 void
930 VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
931 {
932 	PageEventWaiter waiter;
933 	waiter.thread = thread_get_current_thread();
934 	waiter.next = fPageEventWaiters;
935 	waiter.page = page;
936 	waiter.events = events;
937 
938 	fPageEventWaiters = &waiter;
939 
940 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER_OBJECT, page);
941 
942 	Unlock();
943 	thread_block();
944 
945 	if (relock)
946 		Lock();
947 }
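

/*	Illustrative wait pattern (it mirrors what _FreePageRange() and
	FlushAndRemoveAllPages() do further below): when a looked-up page is busy,
	the caller waits for it to become unbusy and then re-validates its state,
	since the cache lock was dropped while waiting.

		vm_page* page = cache->LookupPage(offset);
		if (page != NULL && page->busy) {
			cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
			// the cache was unlocked and re-locked -- re-check everything
		}
*/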
948 
949 
950 /*!	Makes this cache the source of the \a consumer cache,
951 	and adds the \a consumer to its list.
952 	This also grabs a reference to the source cache.
953 	Assumes you have the cache and the consumer's lock held.
954 */
955 void
956 VMCache::AddConsumer(VMCache* consumer)
957 {
958 	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
959 	AssertLocked();
960 	consumer->AssertLocked();
961 
962 	T(AddConsumer(this, consumer));
963 
964 	consumer->source = this;
965 	consumers.Add(consumer);
966 
967 	AcquireRefLocked();
968 	AcquireStoreRef();
969 }
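

/*	Illustrative sketch of building a source/consumer chain (both caches are
	assumed to be locked, as required above): afterwards "upper" reads through
	to "lower" for pages it does not contain itself, the way copy-on-write
	chains are typically stacked.

		lower->AddConsumer(upper);
		// now upper->source == lower, and lower lists upper as a consumer
*/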
970 
971 
972 /*!	Adds the \a area to this cache.
973 	Assumes you have the cache locked.
974 */
975 status_t
976 VMCache::InsertAreaLocked(VMArea* area)
977 {
978 	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
979 	AssertLocked();
980 
981 	T(InsertArea(this, area));
982 
983 	area->cache_next = areas;
984 	if (area->cache_next)
985 		area->cache_next->cache_prev = area;
986 	area->cache_prev = NULL;
987 	areas = area;
988 
989 	AcquireStoreRef();
990 
991 	return B_OK;
992 }
993 
994 
995 status_t
996 VMCache::RemoveArea(VMArea* area)
997 {
998 	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));
999 
1000 	T(RemoveArea(this, area));
1001 
1002 	// We release the store reference first, since otherwise we would reverse
1003 	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
1004 	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
1005 	// Also cf. _RemoveConsumer().
1006 	ReleaseStoreRef();
1007 
1008 	AutoLocker<VMCache> locker(this);
1009 
1010 	if (area->cache_prev)
1011 		area->cache_prev->cache_next = area->cache_next;
1012 	if (area->cache_next)
1013 		area->cache_next->cache_prev = area->cache_prev;
1014 	if (areas == area)
1015 		areas = area->cache_next;
1016 
1017 	return B_OK;
1018 }
1019 
1020 
1021 /*!	Transfers the areas from \a fromCache to this cache. This cache must not
1022 	have areas yet. Both caches must be locked.
1023 */
1024 void
1025 VMCache::TransferAreas(VMCache* fromCache)
1026 {
1027 	AssertLocked();
1028 	fromCache->AssertLocked();
1029 	ASSERT(areas == NULL);
1030 
1031 	areas = fromCache->areas;
1032 	fromCache->areas = NULL;
1033 
1034 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1035 		area->cache = this;
1036 		AcquireRefLocked();
1037 		fromCache->ReleaseRefLocked();
1038 
1039 		T(RemoveArea(fromCache, area));
1040 		T(InsertArea(this, area));
1041 	}
1042 }
1043 
1044 
1045 uint32
1046 VMCache::CountWritableAreas(VMArea* ignoreArea) const
1047 {
1048 	uint32 count = 0;
1049 
1050 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1051 		if (area != ignoreArea
1052 			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
1053 			count++;
1054 		}
1055 	}
1056 
1057 	return count;
1058 }
1059 
1060 
1061 status_t
1062 VMCache::WriteModified()
1063 {
1064 	TRACE(("VMCache::WriteModified(cache = %p)\n", this));
1065 
1066 	if (temporary)
1067 		return B_OK;
1068 
1069 	Lock();
1070 	status_t status = vm_page_write_modified_pages(this);
1071 	Unlock();
1072 
1073 	return status;
1074 }
1075 
1076 
1077 /*!	Commits the memory to the store if the \a commitment is larger than
1078 	what's committed already.
1079 	Assumes you have the cache's lock held.
1080 */
1081 status_t
1082 VMCache::SetMinimalCommitment(off_t commitment, int priority)
1083 {
1084 	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %" B_PRIdOFF
1085 		")\n", this, commitment));
1086 	AssertLocked();
1087 
1088 	T(SetMinimalCommitment(this, commitment));
1089 
1090 	status_t status = B_OK;
1091 
1092 	// If we don't have enough committed space to cover through to the new end
1093 	// of the area...
1094 	if (committed_size < commitment) {
1095 		// ToDo: should we check if the cache's virtual size is large
1096 		//	enough for a commitment of that size?
1097 
1098 		// try to commit more memory
1099 		status = Commit(commitment, priority);
1100 	}
1101 
1102 	return status;
1103 }
1104 
1105 
1106 bool
1107 VMCache::_FreePageRange(VMCachePagesTree::Iterator it,
1108 	page_num_t* toPage = NULL)
1109 {
1110 	for (vm_page* page = it.Next();
1111 		page != NULL && (toPage == NULL || page->cache_offset < *toPage);
1112 		page = it.Next()) {
1113 
1114 		if (page->busy) {
1115 			if (page->busy_writing) {
1116 				// We cannot wait for the page to become available
1117 				// as we might cause a deadlock this way
1118 				page->busy_writing = false;
1119 					// this will notify the writer to free the page
1120 				continue;
1121 			}
1122 
1123 			// wait for page to become unbusy
1124 			WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1125 			return true;
1126 		}
1127 
1128 		// remove the page and put it into the free queue
1129 		DEBUG_PAGE_ACCESS_START(page);
1130 		vm_remove_all_page_mappings(page);
1131 		ASSERT(page->WiredCount() == 0);
1132 			// TODO: Find a real solution! If the page is wired
1133 			// temporarily (e.g. by lock_memory()), we actually must not
1134 			// unmap it!
1135 		RemovePage(page);
1136 			// Note: When iterating through an IteratableSplayTree
1137 			// removing the current node is safe.
1138 
1139 		vm_page_free(this, page);
1140 	}
1141 
1142 	return false;
1143 }
1144 
1145 
1146 /*!	This function updates the virtual_end field of the cache.
1147 	If needed, it will free up all pages that don't belong to the cache anymore.
1148 	The cache lock must be held when you call it.
1149 	Since removed pages no longer belong to the cache, they are not written
1150 	back before being removed.
1151 
1152 	Note, this function may temporarily release the cache lock in case it
1153 	has to wait for busy pages.
1154 */
1155 status_t
1156 VMCache::Resize(off_t newSize, int priority)
1157 {
1158 	TRACE(("VMCache::Resize(cache %p, newSize %" B_PRIdOFF ") old size %"
1159 		B_PRIdOFF "\n", this, newSize, this->virtual_end));
1160 	this->AssertLocked();
1161 
1162 	T(Resize(this, newSize));
1163 
1164 	status_t status = Commit(newSize - virtual_base, priority);
1165 	if (status != B_OK)
1166 		return status;
1167 
1168 	page_num_t oldPageCount = (page_num_t)((virtual_end + B_PAGE_SIZE - 1)
1169 		>> PAGE_SHIFT);
1170 	page_num_t newPageCount = (page_num_t)((newSize + B_PAGE_SIZE - 1)
1171 		>> PAGE_SHIFT);
1172 
1173 	if (newPageCount < oldPageCount) {
1174 		// we need to remove all pages in the cache outside of the new virtual
1175 		// size
1176 		while (_FreePageRange(pages.GetIterator(newPageCount, true, true)))
1177 			;
1178 	}
1179 
1180 	virtual_end = newSize;
1181 	return B_OK;
1182 }
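

/*	Worked example for the rounding above (assuming 4 KiB pages): shrinking a
	cache from virtual_end == 10240 (10 KiB) to newSize == 6144 (6 KiB) yields
	oldPageCount == 3 and newPageCount == 2, so every page with
	cache_offset >= 2 is freed before virtual_end is updated.
*/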
1183 
1184 /*!	This function updates the virtual_base field of the cache.
1185 	If needed, it will free up all pages that don't belong to the cache anymore.
1186 	The cache lock must be held when you call it.
1187 	Since removed pages no longer belong to the cache, they are not written
1188 	back before being removed.
1189 
1190 	Note, this function may temporarily release the cache lock in case it
1191 	has to wait for busy pages.
1192 */
1193 status_t
1194 VMCache::Rebase(off_t newBase, int priority)
1195 {
1196 	TRACE(("VMCache::Rebase(cache %p, newBase %lld) old base %lld\n",
1197 		this, newBase, this->virtual_base));
1198 	this->AssertLocked();
1199 
1200 	T(Rebase(this, newBase));
1201 
1202 	status_t status = Commit(virtual_end - newBase, priority);
1203 	if (status != B_OK)
1204 		return status;
1205 
1206 	page_num_t basePage = (page_num_t)(newBase >> PAGE_SHIFT);
1207 
1208 	if (newBase > virtual_base) {
1209 		// we need to remove all pages in the cache outside of the new virtual
1210 		// base
1211 		while (_FreePageRange(pages.GetIterator(), &basePage))
1212 			;
1213 	}
1214 
1215 	virtual_base = newBase;
1216 	return B_OK;
1217 }
1218 
1219 
1220 /*!	Moves pages in the given range from the source cache into this cache. Both
1221 	caches must be locked.
1222 */
1223 status_t
1224 VMCache::Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset)
1225 {
1226 	page_num_t startPage = offset >> PAGE_SHIFT;
1227 	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
1228 	off_t offsetChange = newOffset - offset;
1229 
1230 	VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true,
1231 		true);
1232 	for (vm_page* page = it.Next();
1233 				page != NULL && page->cache_offset < endPage;
1234 				page = it.Next()) {
1235 		MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange);
1236 	}
1237 
1238 	return B_OK;
1239 }
1240 
1241 
1242 /*! Discards pages in the given range. */
1243 status_t
1244 VMCache::Discard(off_t offset, off_t size)
1245 {
1246 	page_num_t startPage = offset >> PAGE_SHIFT;
1247 	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
1248 	while (_FreePageRange(pages.GetIterator(startPage, true, true), &endPage))
1249 		;
1250 
1251 	return B_OK;
1252 }
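

/*	Illustrative usage sketch ("cache" assumed locked): dropping the cached
	contents of a single page at "offset" without changing the cache's size.

		cache->Discard(offset, B_PAGE_SIZE);
*/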
1253 
1254 
1255 /*!	You have to call this function with the VMCache lock held. */
1256 status_t
1257 VMCache::FlushAndRemoveAllPages()
1258 {
1259 	ASSERT_LOCKED_MUTEX(&fLock);
1260 
1261 	while (page_count > 0) {
1262 		// write back modified pages
1263 		status_t status = vm_page_write_modified_pages(this);
1264 		if (status != B_OK)
1265 			return status;
1266 
1267 		// remove pages
1268 		for (VMCachePagesTree::Iterator it = pages.GetIterator();
1269 				vm_page* page = it.Next();) {
1270 			if (page->busy) {
1271 				// wait for page to become unbusy
1272 				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1273 
1274 				// restart from the start of the list
1275 				it = pages.GetIterator();
1276 				continue;
1277 			}
1278 
1279 			// skip modified pages -- they will be written back in the next
1280 			// iteration
1281 			if (page->State() == PAGE_STATE_MODIFIED)
1282 				continue;
1283 
1284 			// We can't remove mapped pages.
1285 			if (page->IsMapped())
1286 				return B_BUSY;
1287 
1288 			DEBUG_PAGE_ACCESS_START(page);
1289 			RemovePage(page);
1290 			vm_page_free(this, page);
1291 				// Note: When iterating through an IteratableSplayTree
1292 				// removing the current node is safe.
1293 		}
1294 	}
1295 
1296 	return B_OK;
1297 }
1298 
1299 
1300 status_t
1301 VMCache::Commit(off_t size, int priority)
1302 {
1303 	committed_size = size;
1304 	return B_OK;
1305 }
1306 
1307 
1308 /*!	Returns whether the cache's underlying backing store could deliver the
1309 	page at the given offset.
1310 
1311 	Basically it returns whether a Read() at \a offset would at least read a
1312 	partial page (assuming that no unexpected errors occur or the situation
1313 	changes in the meantime).
1314 */
1315 bool
1316 VMCache::HasPage(off_t offset)
1317 {
1318 	// In accordance with Fault() the default implementation doesn't have a
1319 	// backing store and doesn't allow faults.
1320 	return false;
1321 }
1322 
1323 
1324 status_t
1325 VMCache::Read(off_t offset, const generic_io_vec *vecs, size_t count,
1326 	uint32 flags, generic_size_t *_numBytes)
1327 {
1328 	return B_ERROR;
1329 }
1330 
1331 
1332 status_t
1333 VMCache::Write(off_t offset, const generic_io_vec *vecs, size_t count,
1334 	uint32 flags, generic_size_t *_numBytes)
1335 {
1336 	return B_ERROR;
1337 }
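

/*	Illustrative sketch of how a backing-store subclass hooks into the defaults
	above: a hypothetical MyStoreCache (name made up) overrides HasPage(),
	Read() and Write() while reusing VMCache's generic page bookkeeping,
	similar to what VMVnodeCache does for files.

		class MyStoreCache : public VMCache {
		public:
			virtual bool HasPage(off_t offset)
			{
				return offset < virtual_end;
			}

			virtual status_t Read(off_t offset, const generic_io_vec* vecs,
				size_t count, uint32 flags, generic_size_t* _numBytes)
			{
				// fill the vecs from the backing store, update *_numBytes
				return B_OK;
			}
		};
*/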
1338 
1339 
1340 status_t
1341 VMCache::WriteAsync(off_t offset, const generic_io_vec* vecs, size_t count,
1342 	generic_size_t numBytes, uint32 flags, AsyncIOCallback* callback)
1343 {
1344 	// Not supported, fall back to the synchronous hook.
1345 	generic_size_t transferred = numBytes;
1346 	status_t error = Write(offset, vecs, count, flags, &transferred);
1347 
1348 	if (callback != NULL)
1349 		callback->IOFinished(error, transferred != numBytes, transferred);
1350 
1351 	return error;
1352 }
1353 
1354 
1355 /*!	\brief Returns whether the cache can write the page at the given offset.
1356 
1357 	The cache must be locked when this function is invoked.
1358 
1359 	@param offset The page offset.
1360 	@return \c true, if the page can be written, \c false otherwise.
1361 */
1362 bool
1363 VMCache::CanWritePage(off_t offset)
1364 {
1365 	return false;
1366 }
1367 
1368 
1369 status_t
1370 VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
1371 {
1372 	return B_BAD_ADDRESS;
1373 }
1374 
1375 
1376 void
1377 VMCache::Merge(VMCache* source)
1378 {
1379 	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
1380 			vm_page* page = it.Next();) {
1381 		// Note: Removing the current node while iterating through an
1382 		// IteratableSplayTree is safe.
1383 		vm_page* consumerPage = LookupPage(
1384 			(off_t)page->cache_offset << PAGE_SHIFT);
1385 		if (consumerPage == NULL) {
1386 			// the page is not yet in the consumer cache - move it upwards
1387 			MovePage(page);
1388 		}
1389 	}
1390 }
1391 
1392 
1393 status_t
1394 VMCache::AcquireUnreferencedStoreRef()
1395 {
1396 	return B_OK;
1397 }
1398 
1399 
1400 void
1401 VMCache::AcquireStoreRef()
1402 {
1403 }
1404 
1405 
1406 void
1407 VMCache::ReleaseStoreRef()
1408 {
1409 }
1410 
1411 
1412 /*!	Kernel debugger version of HasPage().
1413 	Does not do any locking.
1414 */
1415 bool
1416 VMCache::DebugHasPage(off_t offset)
1417 {
1418 	// default that works for all subclasses that don't lock anyway
1419 	return HasPage(offset);
1420 }
1421 
1422 
1423 /*!	Kernel debugger version of LookupPage().
1424 	Does not do any locking.
1425 */
1426 vm_page*
1427 VMCache::DebugLookupPage(off_t offset)
1428 {
1429 	return pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
1430 }
1431 
1432 
1433 void
1434 VMCache::Dump(bool showPages) const
1435 {
1436 	kprintf("CACHE %p:\n", this);
1437 	kprintf("  ref_count:    %" B_PRId32 "\n", RefCount());
1438 	kprintf("  source:       %p\n", source);
1439 	kprintf("  type:         %s\n", vm_cache_type_to_string(type));
1440 	kprintf("  virtual_base: 0x%" B_PRIx64 "\n", virtual_base);
1441 	kprintf("  virtual_end:  0x%" B_PRIx64 "\n", virtual_end);
1442 	kprintf("  temporary:    %" B_PRIu32 "\n", uint32(temporary));
1443 	kprintf("  lock:         %p\n", &fLock);
1444 #if KDEBUG
1445 	kprintf("  lock.holder:  %" B_PRId32 "\n", fLock.holder);
1446 #endif
1447 	kprintf("  areas:\n");
1448 
1449 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
1450 		kprintf("    area 0x%" B_PRIx32 ", %s\n", area->id, area->name);
1451 		kprintf("\tbase_addr:  0x%lx, size: 0x%lx\n", area->Base(),
1452 			area->Size());
1453 		kprintf("\tprotection: 0x%" B_PRIx32 "\n", area->protection);
1454 		kprintf("\towner:      0x%" B_PRIx32 "\n", area->address_space->ID());
1455 	}
1456 
1457 	kprintf("  consumers:\n");
1458 	for (ConsumerList::ConstIterator it = consumers.GetIterator();
1459 		 	VMCache* consumer = it.Next();) {
1460 		kprintf("\t%p\n", consumer);
1461 	}
1462 
1463 	kprintf("  pages:\n");
1464 	if (showPages) {
1465 		for (VMCachePagesTree::ConstIterator it = pages.GetIterator();
1466 				vm_page* page = it.Next();) {
1467 			if (!vm_page_is_dummy(page)) {
1468 				kprintf("\t%p ppn %#" B_PRIxPHYSADDR " offset %#" B_PRIxPHYSADDR
1469 					" state %u (%s) wired_count %u\n", page,
1470 					page->physical_page_number, page->cache_offset,
1471 					page->State(), page_state_to_string(page->State()),
1472 					page->WiredCount());
1473 			} else {
1474 				kprintf("\t%p DUMMY PAGE state %u (%s)\n",
1475 					page, page->State(), page_state_to_string(page->State()));
1476 			}
1477 		}
1478 	} else
1479 		kprintf("\t%" B_PRIu32 " in cache\n", page_count);
1480 }
1481 
1482 
1483 /*!	Wakes up threads waiting for page events.
1484 	\param page The page for which events occurred.
1485 	\param events The mask of events that occurred.
1486 */
1487 void
1488 VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
1489 {
1490 	PageEventWaiter** it = &fPageEventWaiters;
1491 	while (PageEventWaiter* waiter = *it) {
1492 		if (waiter->page == page && (waiter->events & events) != 0) {
1493 			// remove from list and unblock
1494 			*it = waiter->next;
1495 			thread_unblock(waiter->thread, B_OK);
1496 		} else
1497 			it = &waiter->next;
1498 	}
1499 }
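

/*	Illustrative notify-side sketch: code that marked a page busy clears the
	flag again and wakes any waiters registered via WaitForPageEvents(). The
	public helpers for this (MarkPageUnbusy()/NotifyPageEvents(), declared in
	VMCache.h) end up calling the method above.

		cache->MarkPageUnbusy(page);
			// clears page->busy and notifies PAGE_EVENT_NOT_BUSY waiters
*/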
1500 
1501 
1502 /*!	Merges the given cache with its only consumer.
1503 	The caller must hold both the cache's and the consumer's lock. The method
1504 	releases neither lock.
1505 */
1506 void
1507 VMCache::_MergeWithOnlyConsumer()
1508 {
1509 	VMCache* consumer = consumers.RemoveHead();
1510 
1511 	TRACE(("merge vm cache %p (ref == %" B_PRId32 ") with vm cache %p\n",
1512 		this, this->fRefCount, consumer));
1513 
1514 	T(Merge(this, consumer));
1515 
1516 	// merge the cache
1517 	consumer->Merge(this);
1518 
1519 	// The remaining consumer has got a new source.
1520 	if (source != NULL) {
1521 		VMCache* newSource = source;
1522 
1523 		newSource->Lock();
1524 
1525 		newSource->consumers.Remove(this);
1526 		newSource->consumers.Add(consumer);
1527 		consumer->source = newSource;
1528 		source = NULL;
1529 
1530 		newSource->Unlock();
1531 	} else
1532 		consumer->source = NULL;
1533 
1534 	// Release the reference the cache's consumer owned. The consumer takes
1535 	// over the cache's ref to its source (if any) instead.
1536 	ReleaseRefLocked();
1537 }
1538 
1539 
1540 /*!	Removes the \a consumer from this cache.
1541 	It will also release the reference to the cache owned by the consumer.
1542 	Assumes you have the consumer's cache lock held. This cache must not be
1543 	locked.
1544 */
1545 void
1546 VMCache::_RemoveConsumer(VMCache* consumer)
1547 {
1548 	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
1549 	consumer->AssertLocked();
1550 
1551 	T(RemoveConsumer(this, consumer));
1552 
1553 	// Remove the store ref before locking the cache. Otherwise we'd call into
1554 	// the VFS while holding the cache lock, which would reverse the usual
1555 	// locking order.
1556 	ReleaseStoreRef();
1557 
1558 	// remove the consumer from the cache, but keep its reference until later
1559 	Lock();
1560 	consumers.Remove(consumer);
1561 	consumer->source = NULL;
1562 
1563 	ReleaseRefAndUnlock();
1564 }
1565 
1566 
1567 // #pragma mark - VMCacheFactory
1568 	// TODO: Move to own source file!
1569 
1570 
1571 /*static*/ status_t
1572 VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
1573 	int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
1574 	int priority)
1575 {
1576 	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1577 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1578 	if (priority >= VM_PRIORITY_VIP)
1579 		allocationFlags |= HEAP_PRIORITY_VIP;
1580 
1581 #if ENABLE_SWAP_SUPPORT
1582 	if (swappable) {
1583 		VMAnonymousCache* cache
1584 			= new(gAnonymousCacheObjectCache, allocationFlags) VMAnonymousCache;
1585 		if (cache == NULL)
1586 			return B_NO_MEMORY;
1587 
1588 		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1589 			numGuardPages, allocationFlags);
1590 		if (error != B_OK) {
1591 			cache->Delete();
1592 			return error;
1593 		}
1594 
1595 		T(Create(cache));
1596 
1597 		_cache = cache;
1598 		return B_OK;
1599 	}
1600 #endif
1601 
1602 	VMAnonymousNoSwapCache* cache
1603 		= new(gAnonymousNoSwapCacheObjectCache, allocationFlags)
1604 			VMAnonymousNoSwapCache;
1605 	if (cache == NULL)
1606 		return B_NO_MEMORY;
1607 
1608 	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1609 		numGuardPages, allocationFlags);
1610 	if (error != B_OK) {
1611 		cache->Delete();
1612 		return error;
1613 	}
1614 
1615 	T(Create(cache));
1616 
1617 	_cache = cache;
1618 	return B_OK;
1619 }
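

/*	Illustrative usage sketch: creating a swappable, over-committing anonymous
	cache, e.g. for a heap-like area. VM_PRIORITY_USER and the handling of the
	returned (unlocked) cache are assumptions of this sketch.

		VMCache* cache;
		status_t status = VMCacheFactory::CreateAnonymousCache(cache, true, 0,
			0, true, VM_PRIORITY_USER);
		if (status == B_OK) {
			cache->Lock();
			// ... resize the cache, attach an area, insert pages, ...
			cache->Unlock();
		}
*/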
1620 
1621 
1622 /*static*/ status_t
1623 VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
1624 {
1625 	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1626 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1627 		// Note: Vnode cache creation is never VIP.
1628 
1629 	VMVnodeCache* cache
1630 		= new(gVnodeCacheObjectCache, allocationFlags) VMVnodeCache;
1631 	if (cache == NULL)
1632 		return B_NO_MEMORY;
1633 
1634 	status_t error = cache->Init(vnode, allocationFlags);
1635 	if (error != B_OK) {
1636 		cache->Delete();
1637 		return error;
1638 	}
1639 
1640 	T(Create(cache));
1641 
1642 	_cache = cache;
1643 	return B_OK;
1644 }
1645 
1646 
1647 /*static*/ status_t
1648 VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
1649 {
1650 	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1651 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1652 		// Note: Device cache creation is never VIP.
1653 
1654 	VMDeviceCache* cache
1655 		= new(gDeviceCacheObjectCache, allocationFlags) VMDeviceCache;
1656 	if (cache == NULL)
1657 		return B_NO_MEMORY;
1658 
1659 	status_t error = cache->Init(baseAddress, allocationFlags);
1660 	if (error != B_OK) {
1661 		cache->Delete();
1662 		return error;
1663 	}
1664 
1665 	T(Create(cache));
1666 
1667 	_cache = cache;
1668 	return B_OK;
1669 }
1670 
1671 
1672 /*static*/ status_t
1673 VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
1674 {
1675 	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
1676 		| HEAP_DONT_LOCK_KERNEL_SPACE;
1677 	if (priority >= VM_PRIORITY_VIP)
1678 		allocationFlags |= HEAP_PRIORITY_VIP;
1679 
1680 	VMNullCache* cache
1681 		= new(gNullCacheObjectCache, allocationFlags) VMNullCache;
1682 	if (cache == NULL)
1683 		return B_NO_MEMORY;
1684 
1685 	status_t error = cache->Init(allocationFlags);
1686 	if (error != B_OK) {
1687 		cache->Delete();
1688 		return error;
1689 	}
1690 
1691 	T(Create(cache));
1692 
1693 	_cache = cache;
1694 	return B_OK;
1695 }
1696