/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <vm/VMCache.h>

#include <stddef.h>
#include <stdlib.h>

#include <arch/cpu.h>
#include <condition_variable.h>
#include <debug.h>
#include <heap.h>
#include <int.h>
#include <kernel.h>
#include <smp.h>
#include <tracing.h>
#include <util/khash.h>
#include <util/AutoLock.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/vm_types.h>
#include <vm/VMArea.h>


//#define TRACE_VM_CACHE
#ifdef TRACE_VM_CACHE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#if DEBUG_CACHE_LIST
VMCache* gDebugCacheList;
#endif
static mutex sCacheListLock = MUTEX_INITIALIZER("global VMCache list");
	// The lock is also needed when the debug feature is disabled.


struct VMCache::PageEventWaiter {
	struct thread*		thread;
	PageEventWaiter*	next;
	vm_page*			page;
	uint32				events;
};


#if VM_CACHE_TRACING

namespace VMCacheTracing {

class VMCacheTraceEntry : public AbstractTraceEntry {
	public:
		VMCacheTraceEntry(VMCache* cache)
			:
			fCache(cache)
		{
		}

	protected:
		VMCache*	fCache;
};


class Create : public VMCacheTraceEntry {
	public:
		Create(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache create: -> cache: %p", fCache);
		}
};


class Delete : public VMCacheTraceEntry {
	public:
		Delete(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache delete: cache: %p", fCache);
		}
};


class SetMinimalCommitment : public VMCacheTraceEntry {
	public:
		SetMinimalCommitment(VMCache* cache, off_t commitment)
			:
			VMCacheTraceEntry(cache),
			fOldCommitment(cache->committed_size),
			fCommitment(commitment)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache set min commitment: cache: %p, "
				"commitment: %lld -> %lld", fCache, fOldCommitment,
				fCommitment);
		}

	private:
		off_t	fOldCommitment;
		off_t	fCommitment;
};


class Resize : public VMCacheTraceEntry {
	public:
		Resize(VMCache* cache, off_t size)
			:
			VMCacheTraceEntry(cache),
			fOldSize(cache->virtual_end),
			fSize(size)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache resize: cache: %p, size: %lld -> %lld", fCache,
				fOldSize, fSize);
		}

	private:
		off_t	fOldSize;
		off_t	fSize;
};


class AddConsumer : public VMCacheTraceEntry {
	public:
		AddConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
				fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class RemoveConsumer : public VMCacheTraceEntry {
	public:
		RemoveConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class Merge : public VMCacheTraceEntry {
	public:
		Merge(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class InsertArea : public VMCacheTraceEntry {
	public:
		InsertArea(VMCache* cache, VMArea* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
				fArea);
		}

	private:
		VMArea*	fArea;
};


class RemoveArea : public VMCacheTraceEntry {
	public:
		RemoveArea(VMCache* cache, VMArea* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
				fArea);
		}

	private:
		VMArea*	fArea;
};

}	// namespace VMCacheTracing

#	define T(x) new(std::nothrow) VMCacheTracing::x;

#	if VM_CACHE_TRACING >= 2

namespace VMCacheTracing {

class InsertPage : public VMCacheTraceEntry {
	public:
		InsertPage(VMCache* cache, vm_page* page, off_t offset)
			:
			VMCacheTraceEntry(cache),
			fPage(page),
			fOffset(offset)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert page: cache: %p, page: %p, offset: %lld",
				fCache, fPage, fOffset);
		}

	private:
		vm_page*	fPage;
		off_t		fOffset;
};


class RemovePage : public VMCacheTraceEntry {
	public:
		RemovePage(VMCache* cache, vm_page* page)
			:
			VMCacheTraceEntry(cache),
			fPage(page)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
				fPage);
		}

	private:
		vm_page*	fPage;
};

}	// namespace VMCacheTracing

#		define T2(x) new(std::nothrow) VMCacheTracing::x;
#	else
#		define T2(x) ;
#	endif
#else
#	define T(x) ;
#	define T2(x) ;
#endif


//	#pragma mark -


status_t
vm_cache_init(kernel_args* args)
{
	return B_OK;
}


VMCache*
vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
{
	mutex_lock(&sCacheListLock);

	while (dontWait) {
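		// Non-blocking path: try-lock whatever cache the page currently
		// points to; if the try-lock fails, give up immediately. If the page
		// moved to another cache in the meantime, drop the lock and retry
		// with the new cache.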
		VMCache* cache = page->cache;
		if (cache == NULL || !cache->TryLock()) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		if (cache == page->cache) {
			cache->AcquireRefLocked();
			mutex_unlock(&sCacheListLock);
			return cache;
		}

		// the cache changed in the meantime
		cache->Unlock();
	}

	while (true) {
		VMCache* cache = page->cache;
		if (cache == NULL) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		// TODO: this is problematic, as it requires the caller not to have
		// a lock on this cache (it might be called via
		// vm_page_allocate_page(..., false)).
		if (!cache->SwitchLock(&sCacheListLock)) {
			// cache has been deleted
			mutex_lock(&sCacheListLock);
			continue;
		}

		if (cache == page->cache) {
			cache->AcquireRefLocked();
			return cache;
		}

		// the cache changed in the meantime
		cache->Unlock();
		mutex_lock(&sCacheListLock);
	}
}
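/*	Illustrative usage sketch (not part of the kernel logic itself; the page
	pointer is assumed to be a vm_page the caller already knows about): a
	caller that only wants to peek at a page's cache without blocking passes
	dontWait = true and must cope with a NULL result:

		VMCache* cache = vm_cache_acquire_locked_page_cache(page, true);
		if (cache != NULL) {
			// ... inspect the page while holding the cache lock ...
			cache->ReleaseRefAndUnlock();
		}

	ReleaseRefAndUnlock() drops both the reference and the lock acquired by
	the call above.
*/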


// #pragma mark - VMCache


bool
VMCache::_IsMergeable() const
{
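	// Mergeable means: no area maps this cache, it is temporary (there is no
	// backing store to keep coherent), and it has exactly one consumer. In a
	// one-element doubly linked list the head's next and prev links point at
	// the same node, which is what the last comparison checks.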
	return (areas == NULL && temporary
		&& !list_is_empty(const_cast<list*>(&consumers))
		&& consumers.link.next == consumers.link.prev);
}


VMCache::VMCache()
{
}


VMCache::~VMCache()
{
}


status_t
VMCache::Init(uint32 cacheType)
{
	mutex_init(&fLock, "VMCache");
	VMCache dummyCache;
	list_init_etc(&consumers, offset_of_member(dummyCache, consumer_link));
	areas = NULL;
	fRefCount = 1;
	source = NULL;
	virtual_base = 0;
	virtual_end = 0;
	committed_size = 0;
	temporary = 0;
	scan_skip = 0;
	page_count = 0;
	type = cacheType;
	fPageEventWaiters = NULL;

#if DEBUG_CACHE_LIST
	mutex_lock(&sCacheListLock);

	if (gDebugCacheList)
		gDebugCacheList->debug_previous = this;
	debug_previous = NULL;
	debug_next = gDebugCacheList;
	gDebugCacheList = this;

	mutex_unlock(&sCacheListLock);
#endif

	return B_OK;
}


void
VMCache::Delete()
{
	if (areas != NULL)
		panic("cache %p to be deleted still has areas", this);
	if (!list_is_empty(&consumers))
		panic("cache %p to be deleted still has consumers", this);

	T(Delete(this));

	// free all of the pages in the cache
	while (vm_page* page = pages.Root()) {
		if (!page->mappings.IsEmpty() || page->wired_count != 0) {
			panic("remove page %p from cache %p: page still has mappings!\n",
				page, this);
		}

		// remove it
		pages.Remove(page);
		page->cache = NULL;
		// TODO: we also need to remove all of the page's mappings!

		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
			page->physical_page_number));
		vm_page_free(this, page);
	}

	// remove the ref to the source
	if (source)
		source->_RemoveConsumer(this);

	// We lock and unlock the sCacheListLock, even if the DEBUG_CACHE_LIST is
	// not enabled. This synchronization point is needed for
	// vm_cache_acquire_locked_page_cache().
	mutex_lock(&sCacheListLock);

#if DEBUG_CACHE_LIST
	if (debug_previous)
		debug_previous->debug_next = debug_next;
	if (debug_next)
		debug_next->debug_previous = debug_previous;
	if (this == gDebugCacheList)
		gDebugCacheList = debug_next;
#endif

	mutex_destroy(&fLock);

	mutex_unlock(&sCacheListLock);

	delete this;
}


void
VMCache::Unlock()
{
	while (fRefCount == 1 && _IsMergeable()) {
		VMCache* consumer = (VMCache*)list_get_first_item(&consumers);
		if (consumer->TryLock()) {
			_MergeWithOnlyConsumer();
		} else {
			// Someone else has locked the consumer ATM. Unlock this cache and
			// wait for the consumer lock. Increment the cache's ref count
			// temporarily, so that no one else will try what we are doing or
			// delete the cache.
			fRefCount++;
			bool consumerLocked = consumer->SwitchLock(&fLock);
			Lock();
			fRefCount--;

			if (consumerLocked) {
				if (fRefCount == 1 && _IsMergeable()
						&& consumer == list_get_first_item(&consumers)) {
					_MergeWithOnlyConsumer();
				} else {
					// something changed, get rid of the consumer lock
					consumer->Unlock();
				}
			}
		}
	}

	if (fRefCount == 0) {
		// delete this cache
		Delete();
	} else
		mutex_unlock(&fLock);
}


void
VMCache::AcquireRefLocked()
{
// TODO: Inline!
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount++;
}


void
VMCache::AcquireRef()
{
	Lock();
	fRefCount++;
	Unlock();
}


void
VMCache::ReleaseRefLocked()
{
// TODO: Inline!
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount--;
}


void
VMCache::ReleaseRef()
{
	Lock();
	fRefCount--;
	Unlock();
}


vm_page*
VMCache::LookupPage(off_t offset)
{
	AssertLocked();

	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));

#if KDEBUG
	if (page != NULL && page->cache != this)
		panic("page %p not in cache %p\n", page, this);
#endif

	return page;
}


void
VMCache::InsertPage(vm_page* page, off_t offset)
{
	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %Ld\n",
		this, page, offset));
	AssertLocked();

	if (page->cache != NULL) {
		panic("insert page %p into cache %p: page cache is set to %p\n",
			page, this, page->cache);
	}

	T2(InsertPage(this, page, offset));

	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
	page_count++;
	page->usage_count = 2;
	page->cache = this;

#if KDEBUG
	vm_page* otherPage = pages.Lookup(page->cache_offset);
	if (otherPage != NULL) {
		panic("VMCache::InsertPage(): there's already page %p with cache "
			"offset %lu in cache %p; inserting page %p", otherPage,
			page->cache_offset, this, page);
	}
#endif	// KDEBUG

	pages.Insert(page);
}


/*!	Removes the vm_page from this cache. Of course, the page must
	really be in this cache or evil things will happen.
	The cache lock must be held.
*/
void
VMCache::RemovePage(vm_page* page)
{
	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
	AssertLocked();

	if (page->cache != this) {
		panic("remove page %p from cache %p: page cache is set to %p\n", page,
			this, page->cache);
	}

	T2(RemovePage(this, page));

	pages.Remove(page);
	page->cache = NULL;
	page_count--;
}


/*!	Waits until one or more events happened for a given page which belongs to
	this cache.
	The cache must be locked. It will be unlocked by the method. \a relock
	specifies whether the method shall re-lock the cache before returning.
	\param page The page for which to wait.
	\param events The mask of events the caller is interested in.
	\param relock If \c true, the cache will be locked when returning,
		otherwise it won't be locked.
*/
void
VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
{
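	// The waiter lives on this thread's stack. _NotifyPageEvents() unlinks it
	// from fPageEventWaiters before unblocking the thread, so it is never
	// referenced after this method returns.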
	PageEventWaiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = fPageEventWaiters;
	waiter.page = page;
	waiter.events = events;

	fPageEventWaiters = &waiter;

	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER,
		"cache page events");

	Unlock();
	thread_block();

	if (relock)
		Lock();
}
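/*	Waiting-side sketch, modeled on the uses in Resize() and
	FlushAndRemoveAllPages() below: a caller that runs into a busy page waits
	for it and then re-evaluates the situation from scratch, since the page
	may have changed or been removed while the cache was unlocked:

		if (page->state == PAGE_STATE_BUSY) {
			WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
			// restart the lookup/iteration here
		}

	The notifying side clears the busy state and wakes the waiters through the
	page event mechanism (_NotifyPageEvents()).
*/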


/*!	Makes this cache the source of the \a consumer cache,
	and adds the \a consumer to its list.
	This also grabs a reference to the source cache.
	Assumes you have the cache and the consumer's lock held.
*/
void
VMCache::AddConsumer(VMCache* consumer)
{
	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
	AssertLocked();
	consumer->AssertLocked();

	T(AddConsumer(this, consumer));

	consumer->source = this;
	list_add_item(&consumers, consumer);

	AcquireRefLocked();
	AcquireStoreRef();
}


/*!	Adds the \a area to this cache.
	Assumes you have locked the cache.
*/
status_t
VMCache::InsertAreaLocked(VMArea* area)
{
	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
	AssertLocked();

	T(InsertArea(this, area));

	area->cache_next = areas;
	if (area->cache_next)
		area->cache_next->cache_prev = area;
	area->cache_prev = NULL;
	areas = area;

	AcquireStoreRef();

	return B_OK;
}


status_t
VMCache::RemoveArea(VMArea* area)
{
	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));

	T(RemoveArea(this, area));

	// We release the store reference first, since otherwise we would reverse
	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
	// Also cf. _RemoveConsumer().
	ReleaseStoreRef();

	AutoLocker<VMCache> locker(this);

	if (area->cache_prev)
		area->cache_prev->cache_next = area->cache_next;
	if (area->cache_next)
		area->cache_next->cache_prev = area->cache_prev;
	if (areas == area)
		areas = area->cache_next;

	return B_OK;
}


status_t
VMCache::WriteModified()
{
	TRACE(("VMCache::WriteModified(cache = %p)\n", this));

	if (temporary)
		return B_OK;

	Lock();
	status_t status = vm_page_write_modified_pages(this);
	Unlock();

	return status;
}


/*!	Commits the memory to the store if the \a commitment is larger than
	what's committed already.
	Assumes you have the cache's lock held.
*/
status_t
VMCache::SetMinimalCommitment(off_t commitment)
{
	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %Ld)\n",
		this, commitment));
	AssertLocked();

	T(SetMinimalCommitment(this, commitment));

	status_t status = B_OK;

	// If we don't have enough committed space to cover through to the new end
	// of the area...
	if (committed_size < commitment) {
		// ToDo: should we check if the cache's virtual size is large
		//	enough for a commitment of that size?

		// try to commit more memory
		status = Commit(commitment);
	}

	return status;
}


/*!	This function updates the size field of the cache.
	If needed, it will free up all pages that don't belong to the cache anymore.
	The cache lock must be held when you call it.
	Since removed pages don't belong to the cache any longer, they are not
	written back before they will be removed.

	Note, this function may temporarily release the cache lock in case it
	has to wait for busy pages.
*/
status_t
VMCache::Resize(off_t newSize)
{
	TRACE(("VMCache::Resize(cache %p, newSize %Ld) old size %Ld\n",
		this, newSize, this->virtual_end));
	this->AssertLocked();

	T(Resize(this, newSize));

	status_t status = Commit(newSize - virtual_base);
	if (status != B_OK)
		return status;

	uint32 oldPageCount = (uint32)((virtual_end + B_PAGE_SIZE - 1)
		>> PAGE_SHIFT);
	uint32 newPageCount = (uint32)((newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT);

	if (newPageCount < oldPageCount) {
		// we need to remove all pages in the cache outside of the new virtual
		// size
		for (VMCachePagesTree::Iterator it
					= pages.GetIterator(newPageCount, true, true);
				vm_page* page = it.Next();) {
			if (page->state == PAGE_STATE_BUSY) {
				if (page->busy_writing) {
					// We cannot wait for the page to become available
					// as we might cause a deadlock this way
					page->busy_writing = false;
						// this will notify the writer to free the page
				} else {
					// wait for page to become unbusy
					WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);

					// restart from the start of the list
					it = pages.GetIterator(newPageCount, true, true);
				}
				continue;
			}

			// remove the page and put it into the free queue
			vm_remove_all_page_mappings(page, NULL);
			ASSERT(page->wired_count == 0);
				// TODO: Find a real solution! Unmapping is probably fine, but
				// we have no way of unmapping wired pages here.
			RemovePage(page);
			vm_page_free(this, page);
				// Note: When iterating through an IteratableSplayTree
				// removing the current node is safe.
		}
	}

	virtual_end = newSize;
	return B_OK;
}


/*!	You have to call this function with the VMCache lock held. */
status_t
VMCache::FlushAndRemoveAllPages()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	while (page_count > 0) {
		// write back modified pages
		status_t status = vm_page_write_modified_pages(this);
		if (status != B_OK)
			return status;

		// remove pages
		for (VMCachePagesTree::Iterator it = pages.GetIterator();
				vm_page* page = it.Next();) {
			if (page->state == PAGE_STATE_BUSY) {
				// wait for page to become unbusy
				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);

				// restart from the start of the list
				it = pages.GetIterator();
				continue;
			}

			// skip modified pages -- they will be written back in the next
			// iteration
			if (page->state == PAGE_STATE_MODIFIED)
				continue;

			// We can't remove mapped pages.
			if (page->wired_count > 0 || !page->mappings.IsEmpty())
				return B_BUSY;

			RemovePage(page);
			vm_page_free(this, page);
				// Note: When iterating through an IteratableSplayTree
				// removing the current node is safe.
		}
	}

	return B_OK;
}


status_t
VMCache::Commit(off_t size)
{
	committed_size = size;
	return B_OK;
}


bool
VMCache::HasPage(off_t offset)
{
	return offset >= virtual_base && offset <= virtual_end;
}


status_t
VMCache::Read(off_t offset, const iovec *vecs, size_t count, uint32 flags,
	size_t *_numBytes)
{
	return B_ERROR;
}


status_t
VMCache::Write(off_t offset, const iovec *vecs, size_t count, uint32 flags,
	size_t *_numBytes)
{
	return B_ERROR;
}


status_t
VMCache::WriteAsync(off_t offset, const iovec* vecs, size_t count,
	size_t numBytes, uint32 flags, AsyncIOCallback* callback)
{
	// Not supported, fall back to the synchronous hook.
	size_t transferred = numBytes;
	status_t error = Write(offset, vecs, count, flags, &transferred);

	if (callback != NULL)
		callback->IOFinished(error, transferred != numBytes, transferred);

	return error;
}


/*!	\brief Returns whether the cache can write the page at the given offset.

	The cache must be locked when this function is invoked.

	@param offset The page offset.
	@return \c true, if the page can be written, \c false otherwise.
*/
bool
VMCache::CanWritePage(off_t offset)
{
	return false;
}


status_t
VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
{
	return B_BAD_ADDRESS;
}


void
VMCache::Merge(VMCache* source)
{
	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
			vm_page* page = it.Next();) {
		// Note: Removing the current node while iterating through an
		// IteratableSplayTree is safe.
		vm_page* consumerPage = LookupPage(
			(off_t)page->cache_offset << PAGE_SHIFT);
		if (consumerPage == NULL) {
			// the page is not yet in the consumer cache - move it upwards
			source->RemovePage(page);
			InsertPage(page, (off_t)page->cache_offset << PAGE_SHIFT);
#if DEBUG_PAGE_CACHE_TRANSITIONS
		} else {
			page->debug_flags = 0;
			if (consumerPage->state == PAGE_STATE_BUSY)
				page->debug_flags |= 0x1;
			if (consumerPage->type == PAGE_TYPE_DUMMY)
				page->debug_flags |= 0x2;
			page->collided_page = consumerPage;
			consumerPage->collided_page = page;
#endif	// DEBUG_PAGE_CACHE_TRANSITIONS
		}
	}
}


status_t
VMCache::AcquireUnreferencedStoreRef()
{
	return B_OK;
}


void
VMCache::AcquireStoreRef()
{
}


void
VMCache::ReleaseStoreRef()
{
}


/*!	Wakes up threads waiting for page events.
	\param page The page for which events occurred.
	\param events The mask of events that occurred.
*/
void
VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
{
	PageEventWaiter** it = &fPageEventWaiters;
	while (PageEventWaiter* waiter = *it) {
		if (waiter->page == page && (waiter->events & events) != 0) {
			// remove from list and unblock
			*it = waiter->next;
			InterruptsSpinLocker threadsLocker(gThreadSpinlock);
			thread_unblock_locked(waiter->thread, B_OK);
		} else
			it = &waiter->next;
	}
}


/*!	Merges the given cache with its only consumer.
	The caller must hold both the cache's and the consumer's lock. The method
	will unlock the consumer lock.
*/
void
VMCache::_MergeWithOnlyConsumer()
{
	VMCache* consumer = (VMCache*)list_remove_head_item(&consumers);

	TRACE(("merge vm cache %p (ref == %ld) with vm cache %p\n",
		this, this->fRefCount, consumer));

	T(Merge(this, consumer));

	// merge the cache
	consumer->Merge(this);

	// The remaining consumer has got a new source.
	if (source != NULL) {
		VMCache* newSource = source;

		newSource->Lock();

		list_remove_item(&newSource->consumers, this);
		list_add_item(&newSource->consumers, consumer);
		consumer->source = newSource;
		source = NULL;

		newSource->Unlock();
	} else
		consumer->source = NULL;

	// Release the reference the cache's consumer owned. The consumer takes
	// over the cache's ref to its source (if any) instead.
	ReleaseRefLocked();

	consumer->Unlock();
}


/*!	Removes the \a consumer from this cache.
	It will also release the reference to the cache owned by the consumer.
	Assumes you have the consumer's cache lock held. This cache must not be
	locked.
*/
void
VMCache::_RemoveConsumer(VMCache* consumer)
{
	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
	consumer->AssertLocked();

	T(RemoveConsumer(this, consumer));

	// Remove the store ref before locking the cache. Otherwise we'd call into
	// the VFS while holding the cache lock, which would reverse the usual
	// locking order.
	ReleaseStoreRef();

	// remove the consumer from the cache, but keep its reference until later
	Lock();
	list_remove_item(&consumers, consumer);
	consumer->source = NULL;

	ReleaseRefAndUnlock();
}


// #pragma mark - VMCacheFactory
	// TODO: Move to own source file!


#include <heap.h>

#include "VMAnonymousCache.h"
#include "VMAnonymousNoSwapCache.h"
#include "VMDeviceCache.h"
#include "VMNullCache.h"
#include "../cache/vnode_store.h"


/*static*/ status_t
VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
	int32 numPrecommittedPages, int32 numGuardPages, bool swappable)
{
#if ENABLE_SWAP_SUPPORT
	if (swappable) {
		VMAnonymousCache* cache = new(nogrow) VMAnonymousCache;
		if (cache == NULL)
			return B_NO_MEMORY;

		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
			numGuardPages);
		if (error != B_OK) {
			cache->Delete();
			return error;
		}

		T(Create(cache));

		_cache = cache;
		return B_OK;
	}
#endif

	VMAnonymousNoSwapCache* cache = new(nogrow) VMAnonymousNoSwapCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
		numGuardPages);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}
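/*	Creation sketch (illustrative only; the size below is a made-up example
	value): an anonymous, swappable cache backing a new area would typically
	be created like this, with the caller sizing and marking the cache
	afterwards while holding its lock:

		VMCache* cache;
		status_t status = VMCacheFactory::CreateAnonymousCache(cache, true, 0,
			0, true);
		if (status == B_OK) {
			cache->Lock();
			cache->temporary = 1;
			cache->virtual_end = 16 * B_PAGE_SIZE;
			cache->Unlock();
		}
*/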


/*static*/ status_t
VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
{
	VMVnodeCache* cache = new(nogrow) VMVnodeCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(vnode);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
{
	VMDeviceCache* cache = new(nogrow) VMDeviceCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(baseAddress);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateNullCache(VMCache*& _cache)
{
	VMNullCache* cache = new(nogrow) VMNullCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init();
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}
1205