xref: /haiku/src/system/kernel/vm/VMCache.cpp (revision f34a1dd5d701373687b6f3f0e6e76bd2b1ae6007)
1 /*
2  * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 #include <vm/VMCache.h>
11 
12 #include <stddef.h>
13 #include <stdlib.h>
14 
15 #include <arch/cpu.h>
16 #include <condition_variable.h>
17 #include <debug.h>
18 #include <heap.h>
19 #include <int.h>
20 #include <kernel.h>
21 #include <smp.h>
22 #include <tracing.h>
23 #include <util/khash.h>
24 #include <util/AutoLock.h>
25 #include <vfs.h>
26 #include <vm/vm.h>
27 #include <vm/vm_page.h>
28 #include <vm/vm_priv.h>
29 #include <vm/vm_types.h>
30 #include <vm/VMArea.h>
31 
32 
33 //#define TRACE_VM_CACHE
34 #ifdef TRACE_VM_CACHE
35 #	define TRACE(x) dprintf x
36 #else
37 #	define TRACE(x) ;
38 #endif
39 
40 
41 #if DEBUG_CACHE_LIST
42 VMCache* gDebugCacheList;
43 #endif
44 static mutex sCacheListLock = MUTEX_INITIALIZER("global VMCache list");
45 	// The lock is also needed when the debug feature is disabled.
46 
47 
48 #if VM_CACHE_TRACING
49 
50 namespace VMCacheTracing {
51 
52 class VMCacheTraceEntry : public AbstractTraceEntry {
53 	public:
54 		VMCacheTraceEntry(VMCache* cache)
55 			:
56 			fCache(cache)
57 		{
58 		}
59 
60 	protected:
61 		VMCache*	fCache;
62 };
63 
64 
65 class Create : public VMCacheTraceEntry {
66 	public:
67 		Create(VMCache* cache)
68 			:
69 			VMCacheTraceEntry(cache)
70 		{
71 			Initialized();
72 		}
73 
74 		virtual void AddDump(TraceOutput& out)
75 		{
76 			out.Print("vm cache create: -> cache: %p", fCache);
77 		}
78 };
79 
80 
81 class Delete : public VMCacheTraceEntry {
82 	public:
83 		Delete(VMCache* cache)
84 			:
85 			VMCacheTraceEntry(cache)
86 		{
87 			Initialized();
88 		}
89 
90 		virtual void AddDump(TraceOutput& out)
91 		{
92 			out.Print("vm cache delete: cache: %p", fCache);
93 		}
94 };
95 
96 
97 class SetMinimalCommitment : public VMCacheTraceEntry {
98 	public:
99 		SetMinimalCommitment(VMCache* cache, off_t commitment)
100 			:
101 			VMCacheTraceEntry(cache),
102 			fOldCommitment(cache->committed_size),
103 			fCommitment(commitment)
104 		{
105 			Initialized();
106 		}
107 
108 		virtual void AddDump(TraceOutput& out)
109 		{
110 			out.Print("vm cache set min commitment: cache: %p, "
111 				"commitment: %lld -> %lld", fCache, fOldCommitment,
112 				fCommitment);
113 		}
114 
115 	private:
116 		off_t	fOldCommitment;
117 		off_t	fCommitment;
118 };
119 
120 
121 class Resize : public VMCacheTraceEntry {
122 	public:
123 		Resize(VMCache* cache, off_t size)
124 			:
125 			VMCacheTraceEntry(cache),
126 			fOldSize(cache->virtual_end),
127 			fSize(size)
128 		{
129 			Initialized();
130 		}
131 
132 		virtual void AddDump(TraceOutput& out)
133 		{
134 			out.Print("vm cache resize: cache: %p, size: %lld -> %lld", fCache,
135 				fOldSize, fSize);
136 		}
137 
138 	private:
139 		off_t	fOldSize;
140 		off_t	fSize;
141 };
142 
143 
144 class AddConsumer : public VMCacheTraceEntry {
145 	public:
146 		AddConsumer(VMCache* cache, VMCache* consumer)
147 			:
148 			VMCacheTraceEntry(cache),
149 			fConsumer(consumer)
150 		{
151 			Initialized();
152 		}
153 
154 		virtual void AddDump(TraceOutput& out)
155 		{
156 			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
157 				fConsumer);
158 		}
159 
160 	private:
161 		VMCache*	fConsumer;
162 };
163 
164 
165 class RemoveConsumer : public VMCacheTraceEntry {
166 	public:
167 		RemoveConsumer(VMCache* cache, VMCache* consumer)
168 			:
169 			VMCacheTraceEntry(cache),
170 			fConsumer(consumer)
171 		{
172 			Initialized();
173 		}
174 
175 		virtual void AddDump(TraceOutput& out)
176 		{
177 			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
178 				fCache, fConsumer);
179 		}
180 
181 	private:
182 		VMCache*	fConsumer;
183 };
184 
185 
186 class Merge : public VMCacheTraceEntry {
187 	public:
188 		Merge(VMCache* cache, VMCache* consumer)
189 			:
190 			VMCacheTraceEntry(cache),
191 			fConsumer(consumer)
192 		{
193 			Initialized();
194 		}
195 
196 		virtual void AddDump(TraceOutput& out)
197 		{
198 			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
199 				fCache, fConsumer);
200 		}
201 
202 	private:
203 		VMCache*	fConsumer;
204 };
205 
206 
207 class InsertArea : public VMCacheTraceEntry {
208 	public:
209 		InsertArea(VMCache* cache, VMArea* area)
210 			:
211 			VMCacheTraceEntry(cache),
212 			fArea(area)
213 		{
214 			Initialized();
215 		}
216 
217 		virtual void AddDump(TraceOutput& out)
218 		{
219 			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
220 				fArea);
221 		}
222 
223 	private:
224 		VMArea*	fArea;
225 };
226 
227 
228 class RemoveArea : public VMCacheTraceEntry {
229 	public:
230 		RemoveArea(VMCache* cache, VMArea* area)
231 			:
232 			VMCacheTraceEntry(cache),
233 			fArea(area)
234 		{
235 			Initialized();
236 		}
237 
238 		virtual void AddDump(TraceOutput& out)
239 		{
240 			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
241 				fArea);
242 		}
243 
244 	private:
245 		VMArea*	fArea;
246 };
247 
248 }	// namespace VMCacheTracing
249 
250 #	define T(x) new(std::nothrow) VMCacheTracing::x;
251 
252 #	if VM_CACHE_TRACING >= 2
253 
254 namespace VMCacheTracing {
255 
256 class InsertPage : public VMCacheTraceEntry {
257 	public:
258 		InsertPage(VMCache* cache, vm_page* page, off_t offset)
259 			:
260 			VMCacheTraceEntry(cache),
261 			fPage(page),
262 			fOffset(offset)
263 		{
264 			Initialized();
265 		}
266 
267 		virtual void AddDump(TraceOutput& out)
268 		{
269 			out.Print("vm cache insert page: cache: %p, page: %p, offset: %lld",
270 				fCache, fPage, fOffset);
271 		}
272 
273 	private:
274 		vm_page*	fPage;
275 		off_t		fOffset;
276 };
277 
278 
279 class RemovePage : public VMCacheTraceEntry {
280 	public:
281 		RemovePage(VMCache* cache, vm_page* page)
282 			:
283 			VMCacheTraceEntry(cache),
284 			fPage(page)
285 		{
286 			Initialized();
287 		}
288 
289 		virtual void AddDump(TraceOutput& out)
290 		{
291 			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
292 				fPage);
293 		}
294 
295 	private:
296 		vm_page*	fPage;
297 };
298 
299 }	// namespace VMCacheTracing
300 
301 #		define T2(x) new(std::nothrow) VMCacheTracing::x;
302 #	else
303 #		define T2(x) ;
304 #	endif
305 #else
306 #	define T(x) ;
307 #	define T2(x) ;
308 #endif
309 
310 
311 //	#pragma mark -
312 
313 
314 status_t
315 vm_cache_init(kernel_args* args)
316 {
317 	return B_OK;
318 }
319 
320 
321 VMCache*
322 vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
323 {
324 	mutex_lock(&sCacheListLock);
325 
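	// Fast path for callers that must not block: only try-lock the cache and
	// give up immediately if that fails. page->cache is re-checked after the
	// lock has been acquired, since the page may have been moved to another
	// cache in the meantime.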
326 	while (dontWait) {
327 		VMCache* cache = page->cache;
328 		if (cache == NULL || !cache->TryLock()) {
329 			mutex_unlock(&sCacheListLock);
330 			return NULL;
331 		}
332 
333 		if (cache == page->cache) {
334 			cache->AcquireRefLocked();
335 			mutex_unlock(&sCacheListLock);
336 			return cache;
337 		}
338 
339 		// the cache changed in the meantime
340 		cache->Unlock();
341 	}
342 
343 	while (true) {
344 		VMCache* cache = page->cache;
345 		if (cache == NULL) {
346 			mutex_unlock(&sCacheListLock);
347 			return NULL;
348 		}
349 
350 		// TODO: this is problematic, as it requires the caller not to have
351 		// a lock on this cache (it might be called via
352 		// vm_page_allocate_page(..., false)).
353 		if (!cache->SwitchLock(&sCacheListLock)) {
354 			// cache has been deleted
355 			mutex_lock(&sCacheListLock);
356 			continue;
357 		}
358 
359 		if (cache == page->cache) {
360 			cache->AcquireRefLocked();
361 			return cache;
362 		}
363 
364 		// the cache changed in the meantime
365 		cache->Unlock();
366 		mutex_lock(&sCacheListLock);
367 	}
368 }
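
// A minimal usage sketch (illustrative only, not part of this file): the
// returned cache is locked and referenced, so the caller releases both when
// done.
//
//	if (VMCache* cache = vm_cache_acquire_locked_page_cache(page, true)) {
//		// ... inspect or modify the page while the cache is locked ...
//		cache->ReleaseRefAndUnlock();
//	}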
369 
370 
371 // #pragma mark - VMCache
372 
373 
374 bool
375 VMCache::_IsMergeable() const
376 {
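	// A cache can be merged into its consumer only if it is temporary, is not
	// mapped by any area, and has exactly one consumer -- for a single-element
	// list link.next == link.prev.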
377 	return (areas == NULL && temporary
378 		&& !list_is_empty(const_cast<list*>(&consumers))
379 		&& consumers.link.next == consumers.link.prev);
380 }
381 
382 
383 VMCache::VMCache()
384 {
385 }
386 
387 
388 VMCache::~VMCache()
389 {
390 }
391 
392 
393 status_t
394 VMCache::Init(uint32 cacheType)
395 {
396 	mutex_init(&fLock, "VMCache");
397 	VMCache dummyCache;
398 	list_init_etc(&consumers, offset_of_member(dummyCache, consumer_link));
399 	areas = NULL;
400 	fRefCount = 1;
401 	source = NULL;
402 	virtual_base = 0;
403 	virtual_end = 0;
404 	committed_size = 0;
405 	temporary = 0;
406 	scan_skip = 0;
407 	page_count = 0;
408 	type = cacheType;
409 
410 #if DEBUG_CACHE_LIST
411 	mutex_lock(&sCacheListLock);
412 
413 	if (gDebugCacheList)
414 		gDebugCacheList->debug_previous = this;
415 	debug_previous = NULL;
416 	debug_next = gDebugCacheList;
417 	gDebugCacheList = this;
418 
419 	mutex_unlock(&sCacheListLock);
420 #endif
421 
422 	return B_OK;
423 }
424 
425 
426 void
427 VMCache::Delete()
428 {
429 	if (areas != NULL)
430 		panic("cache %p to be deleted still has areas", this);
431 	if (!list_is_empty(&consumers))
432 		panic("cache %p to be deleted still has consumers", this);
433 
434 	T(Delete(this));
435 
436 	// free all of the pages in the cache
437 	while (vm_page* page = pages.Root()) {
438 		if (!page->mappings.IsEmpty() || page->wired_count != 0) {
439 			panic("remove page %p from cache %p: page still has mappings!\n",
440 				page, this);
441 		}
442 
443 		// remove it
444 		pages.Remove(page);
445 		page->cache = NULL;
446 		// TODO: we also need to remove all of the page's mappings!
447 
448 		TRACE(("VMCache::Delete(): freeing page 0x%lx\n",
449 			page->physical_page_number));
450 		vm_page_free(this, page);
451 	}
452 
453 	// remove the ref to the source
454 	if (source)
455 		source->_RemoveConsumer(this);
456 
457 	// We lock and unlock the sCacheListLock, even if DEBUG_CACHE_LIST is
458 	// not enabled. This synchronization point is needed for
459 	// vm_cache_acquire_locked_page_cache().
460 	mutex_lock(&sCacheListLock);
461 
462 #if DEBUG_CACHE_LIST
463 	if (debug_previous)
464 		debug_previous->debug_next = debug_next;
465 	if (debug_next)
466 		debug_next->debug_previous = debug_previous;
467 	if (this == gDebugCacheList)
468 		gDebugCacheList = debug_next;
469 #endif
470 
471 	mutex_destroy(&fLock);
472 
473 	mutex_unlock(&sCacheListLock);
474 
475 	delete this;
476 }
477 
478 
479 void
480 VMCache::Unlock()
481 {
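	// If only one reference remains and the cache has become mergeable,
	// collapse it into its single consumer so that chains of shadow
	// (anonymous) caches do not grow unboundedly.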
482 	while (fRefCount == 1 && _IsMergeable()) {
483 		VMCache* consumer = (VMCache*)list_get_first_item(&consumers);
484 		if (consumer->TryLock()) {
485 			_MergeWithOnlyConsumer();
486 		} else {
487 			// Someone else has locked the consumer ATM. Unlock this cache and
488 			// wait for the consumer lock. Increment the cache's ref count
489 			// temporarily, so that no one else will try what we are doing or
490 			// delete the cache.
491 			fRefCount++;
492 			bool consumerLocked = consumer->SwitchLock(&fLock);
493 			Lock();
494 			fRefCount--;
495 
496 			if (consumerLocked) {
497 				if (fRefCount == 1 && _IsMergeable()
498 						&& consumer == list_get_first_item(&consumers)) {
499 					_MergeWithOnlyConsumer();
500 				} else {
501 					// something changed, get rid of the consumer lock
502 					consumer->Unlock();
503 				}
504 			}
505 		}
506 	}
507 
508 	if (fRefCount == 0) {
509 		// delete this cache
510 		Delete();
511 	} else
512 		mutex_unlock(&fLock);
513 }
514 
515 
516 void
517 VMCache::AcquireRefLocked()
518 {
519 // TODO: Inline!
520 	ASSERT_LOCKED_MUTEX(&fLock);
521 
522 	fRefCount++;
523 }
524 
525 
526 void
527 VMCache::AcquireRef()
528 {
529 	Lock();
530 	fRefCount++;
531 	Unlock();
532 }
533 
534 
535 void
536 VMCache::ReleaseRefLocked()
537 {
538 // TODO: Inline!
539 	ASSERT_LOCKED_MUTEX(&fLock);
540 
541 	fRefCount--;
542 }
543 
544 
545 void
546 VMCache::ReleaseRef()
547 {
548 	Lock();
549 	fRefCount--;
550 	Unlock();
551 }
552 
553 
554 vm_page*
555 VMCache::LookupPage(off_t offset)
556 {
557 	AssertLocked();
558 
559 	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
560 
561 #if KDEBUG
562 	if (page != NULL && page->cache != this)
563 		panic("page %p not in cache %p\n", page, this);
564 #endif
565 
566 	return page;
567 }
568 
569 
570 void
571 VMCache::InsertPage(vm_page* page, off_t offset)
572 {
573 	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %Ld\n",
574 		this, page, offset));
575 	AssertLocked();
576 
577 	if (page->cache != NULL) {
578 		panic("insert page %p into cache %p: page cache is set to %p\n",
579 			page, this, page->cache);
580 	}
581 
582 	T2(InsertPage(this, page, offset));
583 
584 	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
585 	page_count++;
586 	page->usage_count = 2;
587 	page->cache = this;
588 
589 #if KDEBUG
590 	vm_page* otherPage = pages.Lookup(page->cache_offset);
591 	if (otherPage != NULL) {
592 		panic("VMCache::InsertPage(): there's already page %p with cache "
593 			"offset %lu in cache %p; inserting page %p", otherPage,
594 			page->cache_offset, this, page);
595 	}
596 #endif	// KDEBUG
597 
598 	pages.Insert(page);
599 }
600 
601 
602 /*!	Removes the vm_page from this cache. The page must actually be in
603 	this cache; otherwise the cache's bookkeeping will be corrupted.
604 	The cache lock must be held.
605 */
606 void
607 VMCache::RemovePage(vm_page* page)
608 {
609 	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
610 	AssertLocked();
611 
612 	if (page->cache != this) {
613 		panic("remove page %p from cache %p: page cache is set to %p\n", page,
614 			this, page->cache);
615 	}
616 
617 	T2(RemovePage(this, page));
618 
619 	pages.Remove(page);
620 	page->cache = NULL;
621 	page_count--;
622 }
623 
624 
625 /*!	Makes this cache the source of the \a consumer cache,
626 	and adds the \a consumer to its list.
627 	This also grabs a reference to the source cache.
628 	Assumes you have the cache and the consumer's lock held.
629 */
630 void
631 VMCache::AddConsumer(VMCache* consumer)
632 {
633 	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
634 	AssertLocked();
635 	consumer->AssertLocked();
636 
637 	T(AddConsumer(this, consumer));
638 
639 	consumer->source = this;
640 	list_add_item(&consumers, consumer);
641 
642 	AcquireRefLocked();
643 	AcquireStoreRef();
644 }
645 
646 
647 /*!	Adds the \a area to this cache.
648 	Assumes you have the cache locked.
649 */
650 status_t
651 VMCache::InsertAreaLocked(VMArea* area)
652 {
653 	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
654 	AssertLocked();
655 
656 	T(InsertArea(this, area));
657 
658 	area->cache_next = areas;
659 	if (area->cache_next)
660 		area->cache_next->cache_prev = area;
661 	area->cache_prev = NULL;
662 	areas = area;
663 
664 	AcquireStoreRef();
665 
666 	return B_OK;
667 }
668 
669 
670 status_t
671 VMCache::RemoveArea(VMArea* area)
672 {
673 	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));
674 
675 	T(RemoveArea(this, area));
676 
677 	// We release the store reference first, since otherwise we would reverse
678 	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
679 	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
680 	// Also cf. _RemoveConsumer().
681 	ReleaseStoreRef();
682 
683 	AutoLocker<VMCache> locker(this);
684 
685 	if (area->cache_prev)
686 		area->cache_prev->cache_next = area->cache_next;
687 	if (area->cache_next)
688 		area->cache_next->cache_prev = area->cache_prev;
689 	if (areas == area)
690 		areas = area->cache_next;
691 
692 	return B_OK;
693 }
694 
695 
696 status_t
697 VMCache::WriteModified()
698 {
699 	TRACE(("VMCache::WriteModified(cache = %p)\n", this));
700 
701 	if (temporary)
702 		return B_OK;
703 
704 	Lock();
705 	status_t status = vm_page_write_modified_pages(this);
706 	Unlock();
707 
708 	return status;
709 }
710 
711 
712 /*!	Commits the memory to the store if the \a commitment is larger than
713 	what's committed already.
714 	Assumes you have the cache's lock held.
715 */
716 status_t
717 VMCache::SetMinimalCommitment(off_t commitment)
718 {
719 	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %Ld)\n",
720 		this, commitment));
721 	AssertLocked();
722 
723 	T(SetMinimalCommitment(this, commitment));
724 
725 	status_t status = B_OK;
726 
727 	// If we don't have enough committed space to cover through to the new end
728 	// of the area...
729 	if (committed_size < commitment) {
730 		// ToDo: should we check if the cache's virtual size is large
731 		//	enough for a commitment of that size?
732 
733 		// try to commit more memory
734 		status = Commit(commitment);
735 	}
736 
737 	return status;
738 }
739 
740 
741 /*!	This function updates the size field of the cache.
742 	If needed, it frees all pages that no longer fall within the new size.
743 	The cache lock must be held when you call it.
744 	Since removed pages no longer belong to the cache, they are not
745 	written back before being removed.
746 
747 	Note, this function may temporarily release the cache lock in case it
748 	has to wait for busy pages.
749 */
750 status_t
751 VMCache::Resize(off_t newSize)
752 {
753 	TRACE(("VMCache::Resize(cache %p, newSize %Ld) old size %Ld\n",
754 		this, newSize, this->virtual_end));
755 	this->AssertLocked();
756 
757 	T(Resize(this, newSize));
758 
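	// Adjust the commitment first: if the store cannot commit the new size,
	// we bail out without having modified the cache.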
759 	status_t status = Commit(newSize - virtual_base);
760 	if (status != B_OK)
761 		return status;
762 
763 	uint32 oldPageCount = (uint32)((virtual_end + B_PAGE_SIZE - 1)
764 		>> PAGE_SHIFT);
765 	uint32 newPageCount = (uint32)((newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT);
766 
767 	if (newPageCount < oldPageCount) {
768 		// we need to remove all pages in the cache outside of the new virtual
769 		// size
770 		for (VMCachePagesTree::Iterator it
771 					= pages.GetIterator(newPageCount, true, true);
772 				vm_page* page = it.Next();) {
773 			if (page->state == PAGE_STATE_BUSY) {
774 				if (page->busy_writing) {
775 					// We cannot wait for the page to become available
776 					// as we might cause a deadlock this way
777 					page->busy_writing = false;
778 						// this will notify the writer to free the page
779 				} else {
780 					// wait for page to become unbusy
781 					ConditionVariableEntry entry;
782 					entry.Add(page);
783 					Unlock();
784 					entry.Wait();
785 					Lock();
786 
787 					// restart from the start of the list
788 					it = pages.GetIterator(newPageCount, true, true);
789 				}
790 				continue;
791 			}
792 
793 			// remove the page and put it into the free queue
794 			vm_remove_all_page_mappings(page, NULL);
795 			ASSERT(page->wired_count == 0);
796 				// TODO: Find a real solution! Unmapping is probably fine, but
797 				// we have no way of unmapping wired pages here.
798 			RemovePage(page);
799 			vm_page_free(this, page);
800 				// Note: When iterating through an IteratableSplayTree
801 				// removing the current node is safe.
802 		}
803 	}
804 
805 	virtual_end = newSize;
806 	return B_OK;
807 }
808 
809 
810 /*!	You have to call this function with the VMCache lock held. */
811 status_t
812 VMCache::FlushAndRemoveAllPages()
813 {
814 	ASSERT_LOCKED_MUTEX(&fLock);
815 
816 	while (page_count > 0) {
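	// Alternate between writing back modified pages and removing clean ones
	// until the cache is empty; pages that are modified (or become busy) in
	// between are picked up by the next round.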
817 		// write back modified pages
818 		status_t status = vm_page_write_modified_pages(this);
819 		if (status != B_OK)
820 			return status;
821 
822 		// remove pages
823 		for (VMCachePagesTree::Iterator it = pages.GetIterator();
824 				vm_page* page = it.Next();) {
825 			if (page->state == PAGE_STATE_BUSY) {
826 				// wait for page to become unbusy
827 				ConditionVariableEntry entry;
828 				entry.Add(page);
829 				Unlock();
830 				entry.Wait();
831 				Lock();
832 
833 				// restart from the start of the list
834 				it = pages.GetIterator();
835 				continue;
836 			}
837 
838 			// skip modified pages -- they will be written back in the next
839 			// iteration
840 			if (page->state == PAGE_STATE_MODIFIED)
841 				continue;
842 
843 			// We can't remove mapped pages.
844 			if (page->wired_count > 0 || !page->mappings.IsEmpty())
845 				return B_BUSY;
846 
847 			RemovePage(page);
848 			vm_page_free(this, page);
849 				// Note: When iterating through an IteratableSplayTree
850 				// removing the current node is safe.
851 		}
852 	}
853 
854 	return B_OK;
855 }
856 
857 
858 status_t
859 VMCache::Commit(off_t size)
860 {
861 	committed_size = size;
862 	return B_OK;
863 }
864 
865 
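/*!	Default implementation: reports a page as available for every offset
	within the cache's virtual range. Caches with a backing store typically
	override this.
*/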
866 bool
867 VMCache::HasPage(off_t offset)
868 {
869 	return offset >= virtual_base && offset <= virtual_end;
870 }
871 
872 
873 status_t
874 VMCache::Read(off_t offset, const iovec *vecs, size_t count, uint32 flags,
875 	size_t *_numBytes)
876 {
877 	return B_ERROR;
878 }
879 
880 
881 status_t
882 VMCache::Write(off_t offset, const iovec *vecs, size_t count, uint32 flags,
883 	size_t *_numBytes)
884 {
885 	return B_ERROR;
886 }
887 
888 
889 status_t
890 VMCache::WriteAsync(off_t offset, const iovec* vecs, size_t count,
891 	size_t numBytes, uint32 flags, AsyncIOCallback* callback)
892 {
893 	// Not supported, fall back to the synchronous hook.
894 	size_t transferred = numBytes;
895 	status_t error = Write(offset, vecs, count, flags, &transferred);
896 
897 	if (callback != NULL)
898 		callback->IOFinished(error, transferred != numBytes, transferred);
899 
900 	return error;
901 }
902 
903 
904 /*!	\brief Returns whether the cache can write the page at the given offset.
905 
906 	The cache must be locked when this function is invoked.
907 
908 	@param offset The page offset.
909 	@return \c true, if the page can be written, \c false otherwise.
910 */
911 bool
912 VMCache::CanWritePage(off_t offset)
913 {
914 	return false;
915 }
916 
917 
918 status_t
919 VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
920 {
921 	return B_BAD_ADDRESS;
922 }
923 
924 
925 void
926 VMCache::Merge(VMCache* source)
927 {
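	// Move each of the source's pages up into this cache. Pages for which
	// this cache already has its own (shadowing) copy are left in the
	// source, where they will be freed when the source cache is deleted.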
928 	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
929 			vm_page* page = it.Next();) {
930 		// Note: Removing the current node while iterating through an
931 		// IteratableSplayTree is safe.
932 		vm_page* consumerPage = LookupPage(
933 			(off_t)page->cache_offset << PAGE_SHIFT);
934 		if (consumerPage == NULL) {
935 			// the page is not yet in the consumer cache - move it upwards
936 			source->RemovePage(page);
937 			InsertPage(page, (off_t)page->cache_offset << PAGE_SHIFT);
938 #if DEBUG_PAGE_CACHE_TRANSITIONS
939 		} else {
940 			page->debug_flags = 0;
941 			if (consumerPage->state == PAGE_STATE_BUSY)
942 				page->debug_flags |= 0x1;
943 			if (consumerPage->type == PAGE_TYPE_DUMMY)
944 				page->debug_flags |= 0x2;
945 			page->collided_page = consumerPage;
946 			consumerPage->collided_page = page;
947 #endif	// DEBUG_PAGE_CACHE_TRANSITIONS
948 		}
949 	}
950 }
951 
952 
953 status_t
954 VMCache::AcquireUnreferencedStoreRef()
955 {
956 	return B_OK;
957 }
958 
959 
960 void
961 VMCache::AcquireStoreRef()
962 {
963 }
964 
965 
966 void
967 VMCache::ReleaseStoreRef()
968 {
969 }
970 
971 
972 /*!	Merges the given cache with its only consumer.
973 	The caller must hold both the cache's and the consumer's lock. The method
974 	will unlock the consumer lock.
975 */
976 void
977 VMCache::_MergeWithOnlyConsumer()
978 {
979 	VMCache* consumer = (VMCache*)list_remove_head_item(&consumers);
980 
981 	TRACE(("merge vm cache %p (ref == %ld) with vm cache %p\n",
982 		this, this->fRefCount, consumer));
983 
984 	T(Merge(this, consumer));
985 
986 	// move this cache's pages up into its only consumer
987 	consumer->Merge(this);
988 
989 	// The remaining consumer has got a new source.
990 	if (source != NULL) {
991 		VMCache* newSource = source;
992 
993 		newSource->Lock();
994 
995 		list_remove_item(&newSource->consumers, this);
996 		list_add_item(&newSource->consumers, consumer);
997 		consumer->source = newSource;
998 		source = NULL;
999 
1000 		newSource->Unlock();
1001 	} else
1002 		consumer->source = NULL;
1003 
1004 	// Release the reference the cache's consumer owned. The consumer takes
1005 	// over the cache's ref to its source (if any) instead.
1006 	ReleaseRefLocked();
1007 
1008 	consumer->Unlock();
1009 }
1010 
1011 
1012 /*!	Removes the \a consumer from this cache.
1013 	It will also release the reference to the cache owned by the consumer.
1014 	Assumes you have the consumer's cache lock held. This cache must not be
1015 	locked.
1016 */
1017 void
1018 VMCache::_RemoveConsumer(VMCache* consumer)
1019 {
1020 	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
1021 	consumer->AssertLocked();
1022 
1023 	T(RemoveConsumer(this, consumer));
1024 
1025 	// Remove the store ref before locking the cache. Otherwise we'd call into
1026 	// the VFS while holding the cache lock, which would reverse the usual
1027 	// locking order.
1028 	ReleaseStoreRef();
1029 
1030 	// remove the consumer from the cache, but keep its reference until later
1031 	Lock();
1032 	list_remove_item(&consumers, consumer);
1033 	consumer->source = NULL;
1034 
1035 	ReleaseRefAndUnlock();
1036 }
1037 
1038 
1039 // #pragma mark - VMCacheFactory
1040 	// TODO: Move to own source file!
1041 
1042 
1043 #include <heap.h>
1044 
1045 #include "VMAnonymousCache.h"
1046 #include "VMAnonymousNoSwapCache.h"
1047 #include "VMDeviceCache.h"
1048 #include "VMNullCache.h"
1049 #include "../cache/vnode_store.h"
1050 
1051 
1052 /*static*/ status_t
1053 VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
1054 	int32 numPrecommittedPages, int32 numGuardPages, bool swappable)
1055 {
1056 #if ENABLE_SWAP_SUPPORT
1057 	if (swappable) {
1058 		VMAnonymousCache* cache = new(nogrow) VMAnonymousCache;
1059 		if (cache == NULL)
1060 			return B_NO_MEMORY;
1061 
1062 		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1063 			numGuardPages);
1064 		if (error != B_OK) {
1065 			cache->Delete();
1066 			return error;
1067 		}
1068 
1069 		T(Create(cache));
1070 
1071 		_cache = cache;
1072 		return B_OK;
1073 	}
1074 #endif
1075 
1076 	VMAnonymousNoSwapCache* cache = new(nogrow) VMAnonymousNoSwapCache;
1077 	if (cache == NULL)
1078 		return B_NO_MEMORY;
1079 
1080 	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1081 		numGuardPages);
1082 	if (error != B_OK) {
1083 		cache->Delete();
1084 		return error;
1085 	}
1086 
1087 	T(Create(cache));
1088 
1089 	_cache = cache;
1090 	return B_OK;
1091 }
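
// A minimal usage sketch (illustrative only, not part of this file),
// assuming the caller checks the returned status:
//
//	VMCache* cache;
//	status_t status = VMCacheFactory::CreateAnonymousCache(cache,
//		true /* canOvercommit */, 0, 0, false /* not swappable */);
//	if (status == B_OK) {
//		cache->Lock();
//		// ... insert pages, attach areas ...
//		cache->ReleaseRefAndUnlock();
//	}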
1092 
1093 
1094 /*static*/ status_t
1095 VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
1096 {
1097 	VMVnodeCache* cache = new(nogrow) VMVnodeCache;
1098 	if (cache == NULL)
1099 		return B_NO_MEMORY;
1100 
1101 	status_t error = cache->Init(vnode);
1102 	if (error != B_OK) {
1103 		cache->Delete();
1104 		return error;
1105 	}
1106 
1107 	T(Create(cache));
1108 
1109 	_cache = cache;
1110 	return B_OK;
1111 }
1112 
1113 
1114 /*static*/ status_t
1115 VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
1116 {
1117 	VMDeviceCache* cache = new(nogrow) VMDeviceCache;
1118 	if (cache == NULL)
1119 		return B_NO_MEMORY;
1120 
1121 	status_t error = cache->Init(baseAddress);
1122 	if (error != B_OK) {
1123 		cache->Delete();
1124 		return error;
1125 	}
1126 
1127 	T(Create(cache));
1128 
1129 	_cache = cache;
1130 	return B_OK;
1131 }
1132 
1133 
1134 /*static*/ status_t
1135 VMCacheFactory::CreateNullCache(VMCache*& _cache)
1136 {
1137 	VMNullCache* cache = new(nogrow) VMNullCache;
1138 	if (cache == NULL)
1139 		return B_NO_MEMORY;
1140 
1141 	status_t error = cache->Init();
1142 	if (error != B_OK) {
1143 		cache->Delete();
1144 		return error;
1145 	}
1146 
1147 	T(Create(cache));
1148 
1149 	_cache = cache;
1150 	return B_OK;
1151 }
1152