xref: /haiku/src/system/kernel/vm/VMCache.cpp (revision 9760dcae2038d47442f4658c2575844c6cf92c40)
1 /*
2  * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include <vm/VMCache.h>
12 
13 #include <stddef.h>
14 #include <stdlib.h>
15 
16 #include <algorithm>
17 
18 #include <arch/cpu.h>
19 #include <condition_variable.h>
20 #include <heap.h>
21 #include <int.h>
22 #include <kernel.h>
23 #include <smp.h>
24 #include <tracing.h>
25 #include <util/khash.h>
26 #include <util/AutoLock.h>
27 #include <vfs.h>
28 #include <vm/vm.h>
29 #include <vm/vm_page.h>
30 #include <vm/vm_priv.h>
31 #include <vm/vm_types.h>
32 #include <vm/VMArea.h>
33 
34 
35 //#define TRACE_VM_CACHE
36 #ifdef TRACE_VM_CACHE
37 #	define TRACE(x) dprintf x
38 #else
39 #	define TRACE(x) ;
40 #endif
41 
42 
43 #if DEBUG_CACHE_LIST
44 VMCache* gDebugCacheList;
45 #endif
46 static mutex sCacheListLock = MUTEX_INITIALIZER("global VMCache list");
47 	// The lock is also needed when the debug feature is disabled.
48 
49 
50 struct VMCache::PageEventWaiter {
51 	struct thread*		thread;
52 	PageEventWaiter*	next;
53 	vm_page*			page;
54 	uint32				events;
55 };
56 
57 
58 #if VM_CACHE_TRACING
59 
60 namespace VMCacheTracing {
61 
62 class VMCacheTraceEntry : public AbstractTraceEntry {
63 	public:
64 		VMCacheTraceEntry(VMCache* cache)
65 			:
66 			fCache(cache)
67 		{
68 #if VM_CACHE_TRACING_STACK_TRACE
69 			fStackTrace = capture_tracing_stack_trace(
70 				VM_CACHE_TRACING_STACK_TRACE, 0, true);
71 				// Don't capture userland stack trace to avoid potential
72 				// deadlocks.
73 #endif
74 		}
75 
76 #if VM_CACHE_TRACING_STACK_TRACE
77 		virtual void DumpStackTrace(TraceOutput& out)
78 		{
79 			out.PrintStackTrace(fStackTrace);
80 		}
81 #endif
82 
83 		VMCache* Cache() const
84 		{
85 			return fCache;
86 		}
87 
88 	protected:
89 		VMCache*	fCache;
90 #if VM_CACHE_TRACING_STACK_TRACE
91 		tracing_stack_trace* fStackTrace;
92 #endif
93 };
94 
95 
96 class Create : public VMCacheTraceEntry {
97 	public:
98 		Create(VMCache* cache)
99 			:
100 			VMCacheTraceEntry(cache)
101 		{
102 			Initialized();
103 		}
104 
105 		virtual void AddDump(TraceOutput& out)
106 		{
107 			out.Print("vm cache create: -> cache: %p", fCache);
108 		}
109 };
110 
111 
112 class Delete : public VMCacheTraceEntry {
113 	public:
114 		Delete(VMCache* cache)
115 			:
116 			VMCacheTraceEntry(cache)
117 		{
118 			Initialized();
119 		}
120 
121 		virtual void AddDump(TraceOutput& out)
122 		{
123 			out.Print("vm cache delete: cache: %p", fCache);
124 		}
125 };
126 
127 
128 class SetMinimalCommitment : public VMCacheTraceEntry {
129 	public:
130 		SetMinimalCommitment(VMCache* cache, off_t commitment)
131 			:
132 			VMCacheTraceEntry(cache),
133 			fOldCommitment(cache->committed_size),
134 			fCommitment(commitment)
135 		{
136 			Initialized();
137 		}
138 
139 		virtual void AddDump(TraceOutput& out)
140 		{
141 			out.Print("vm cache set min commitment: cache: %p, "
142 				"commitment: %lld -> %lld", fCache, fOldCommitment,
143 				fCommitment);
144 		}
145 
146 	private:
147 		off_t	fOldCommitment;
148 		off_t	fCommitment;
149 };
150 
151 
152 class Resize : public VMCacheTraceEntry {
153 	public:
154 		Resize(VMCache* cache, off_t size)
155 			:
156 			VMCacheTraceEntry(cache),
157 			fOldSize(cache->virtual_end),
158 			fSize(size)
159 		{
160 			Initialized();
161 		}
162 
163 		virtual void AddDump(TraceOutput& out)
164 		{
165 			out.Print("vm cache resize: cache: %p, size: %lld -> %lld", fCache,
166 				fOldSize, fSize);
167 		}
168 
169 	private:
170 		off_t	fOldSize;
171 		off_t	fSize;
172 };
173 
174 
175 class AddConsumer : public VMCacheTraceEntry {
176 	public:
177 		AddConsumer(VMCache* cache, VMCache* consumer)
178 			:
179 			VMCacheTraceEntry(cache),
180 			fConsumer(consumer)
181 		{
182 			Initialized();
183 		}
184 
185 		virtual void AddDump(TraceOutput& out)
186 		{
187 			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
188 				fConsumer);
189 		}
190 
191 		VMCache* Consumer() const
192 		{
193 			return fConsumer;
194 		}
195 
196 	private:
197 		VMCache*	fConsumer;
198 };
199 
200 
201 class RemoveConsumer : public VMCacheTraceEntry {
202 	public:
203 		RemoveConsumer(VMCache* cache, VMCache* consumer)
204 			:
205 			VMCacheTraceEntry(cache),
206 			fConsumer(consumer)
207 		{
208 			Initialized();
209 		}
210 
211 		virtual void AddDump(TraceOutput& out)
212 		{
213 			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
214 				fCache, fConsumer);
215 		}
216 
217 	private:
218 		VMCache*	fConsumer;
219 };
220 
221 
222 class Merge : public VMCacheTraceEntry {
223 	public:
224 		Merge(VMCache* cache, VMCache* consumer)
225 			:
226 			VMCacheTraceEntry(cache),
227 			fConsumer(consumer)
228 		{
229 			Initialized();
230 		}
231 
232 		virtual void AddDump(TraceOutput& out)
233 		{
234 			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
235 				fCache, fConsumer);
236 		}
237 
238 	private:
239 		VMCache*	fConsumer;
240 };
241 
242 
243 class InsertArea : public VMCacheTraceEntry {
244 	public:
245 		InsertArea(VMCache* cache, VMArea* area)
246 			:
247 			VMCacheTraceEntry(cache),
248 			fArea(area)
249 		{
250 			Initialized();
251 		}
252 
253 		virtual void AddDump(TraceOutput& out)
254 		{
255 			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
256 				fArea);
257 		}
258 
259 		VMArea*	Area() const
260 		{
261 			return fArea;
262 		}
263 
264 	private:
265 		VMArea*	fArea;
266 };
267 
268 
269 class RemoveArea : public VMCacheTraceEntry {
270 	public:
271 		RemoveArea(VMCache* cache, VMArea* area)
272 			:
273 			VMCacheTraceEntry(cache),
274 			fArea(area)
275 		{
276 			Initialized();
277 		}
278 
279 		virtual void AddDump(TraceOutput& out)
280 		{
281 			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
282 				fArea);
283 		}
284 
285 	private:
286 		VMArea*	fArea;
287 };
288 
289 }	// namespace VMCacheTracing
290 
291 #	define T(x) new(std::nothrow) VMCacheTracing::x;
292 
293 #	if VM_CACHE_TRACING >= 2
294 
295 namespace VMCacheTracing {
296 
297 class InsertPage : public VMCacheTraceEntry {
298 	public:
299 		InsertPage(VMCache* cache, vm_page* page, off_t offset)
300 			:
301 			VMCacheTraceEntry(cache),
302 			fPage(page),
303 			fOffset(offset)
304 		{
305 			Initialized();
306 		}
307 
308 		virtual void AddDump(TraceOutput& out)
309 		{
310 			out.Print("vm cache insert page: cache: %p, page: %p, offset: %lld",
311 				fCache, fPage, fOffset);
312 		}
313 
314 	private:
315 		vm_page*	fPage;
316 		off_t		fOffset;
317 };
318 
319 
320 class RemovePage : public VMCacheTraceEntry {
321 	public:
322 		RemovePage(VMCache* cache, vm_page* page)
323 			:
324 			VMCacheTraceEntry(cache),
325 			fPage(page)
326 		{
327 			Initialized();
328 		}
329 
330 		virtual void AddDump(TraceOutput& out)
331 		{
332 			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
333 				fPage);
334 		}
335 
336 	private:
337 		vm_page*	fPage;
338 };
339 
340 }	// namespace VMCacheTracing
341 
342 #		define T2(x) new(std::nothrow) VMCacheTracing::x;
343 #	else
344 #		define T2(x) ;
345 #	endif
346 #else
347 #	define T(x) ;
348 #	define T2(x) ;
349 #endif
350 
351 
352 //	#pragma mark - debugger commands
353 
354 
355 #if VM_CACHE_TRACING
356 
357 
358 static void*
359 cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
360 {
361 	using namespace VMCacheTracing;
362 
363 	// find the previous "insert area" entry for the given area
364 	TraceEntryIterator iterator = baseIterator;
365 	TraceEntry* entry = iterator.Current();
366 	while (entry != NULL) {
367 		if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
368 			if (insertAreaEntry->Area() == area)
369 				return insertAreaEntry->Cache();
370 		}
371 
372 		entry = iterator.Previous();
373 	}
374 
375 	return NULL;
376 }
377 
378 
379 static void*
380 cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
381 {
382 	using namespace VMCacheTracing;
383 
384 	// find the previous "add consumer" or "create" entry for the given cache
385 	TraceEntryIterator iterator = baseIterator;
386 	TraceEntry* entry = iterator.Current();
387 	while (entry != NULL) {
388 		if (Create* createEntry = dynamic_cast<Create*>(entry)) {
389 			if (createEntry->Cache() == cache)
390 				return NULL;
391 		} else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
392 			if (addEntry->Consumer() == cache)
393 				return addEntry->Cache();
394 		}
395 
396 		entry = iterator.Previous();
397 	}
398 
399 	return NULL;
400 }
401 
402 
403 static int
404 command_cache_stack(int argc, char** argv)
405 {
406 	if (argc < 3 || argc > 4) {
407 		print_debugger_command_usage(argv[0]);
408 		return 0;
409 	}
410 
411 	bool isArea = false;
412 
413 	int argi = 1;
414 	if (argc == 4) {
415 		if (strcmp(argv[argi], "area") != 0) {
416 			print_debugger_command_usage(argv[0]);
417 			return 0;
418 		}
419 
420 		argi++;
421 		isArea = true;
422 	}
423 
424 	uint64 addressValue;
425 	uint64 debugEntryIndex;
426 	if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
427 		|| !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
428 		return 0;
429 	}
430 
431 	TraceEntryIterator baseIterator;
432 	if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
433 		kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
434 		return 0;
435 	}
436 
437 	void* address = (void*)(addr_t)addressValue;
438 
439 	kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
440 		isArea ? "area" : "cache", address, debugEntryIndex);
441 	if (isArea) {
442 		address = cache_stack_find_area_cache(baseIterator, address);
443 		if (address == NULL) {
444 			kprintf("  cache not found\n");
445 			return 0;
446 		}
447 	}
448 
449 	while (address != NULL) {
450 		kprintf("  %p\n", address);
451 		address = cache_stack_find_consumer(baseIterator, address);
452 	}
453 
454 	return 0;
455 }
456 
457 
458 #endif	// VM_CACHE_TRACING
459 
460 
461 //	#pragma mark -
462 
463 
464 status_t
465 vm_cache_init(kernel_args* args)
466 {
467 	return B_OK;
468 }
469 
470 
471 void
472 vm_cache_init_post_heap()
473 {
474 #if VM_CACHE_TRACING
475 	add_debugger_command_etc("cache_stack", &command_cache_stack,
476 		"List the ancestors (sources) of a VMCache at the time given by "
477 			"tracing entry index",
478 		"[ \"area\" ] <address> <tracing entry index>\n"
479 		"All ancestors (sources) of a given VMCache at the time given by the\n"
480 		"tracing entry index are listed. If \"area\" is given the supplied\n"
481 		"address is an area instead of a cache address. The listing will\n"
482 		"start with the area's cache at that point.\n",
483 		0);
484 #endif	// VM_CACHE_TRACING
485 }
486 
487 
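/*!	Looks up the cache the given page belongs to, locks it, and acquires a
	reference to it. Returns NULL if the page does not belong to a cache, or,
	if \a dontWait is \c true, if the cache lock cannot be acquired without
	blocking. sCacheListLock serves as a guard so that the cache cannot go
	away while we are trying to lock it.
*/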
488 VMCache*
489 vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
490 {
491 	mutex_lock(&sCacheListLock);
492 
493 	while (dontWait) {
494 		VMCache* cache = page->cache;
495 		if (cache == NULL || !cache->TryLock()) {
496 			mutex_unlock(&sCacheListLock);
497 			return NULL;
498 		}
499 
500 		if (cache == page->cache) {
501 			cache->AcquireRefLocked();
502 			mutex_unlock(&sCacheListLock);
503 			return cache;
504 		}
505 
506 		// the cache changed in the meantime
507 		cache->Unlock();
508 	}
509 
510 	while (true) {
511 		VMCache* cache = page->cache;
512 		if (cache == NULL) {
513 			mutex_unlock(&sCacheListLock);
514 			return NULL;
515 		}
516 
517 		if (!cache->SwitchLock(&sCacheListLock)) {
518 			// cache has been deleted
519 			mutex_lock(&sCacheListLock);
520 			continue;
521 		}
522 
523 		if (cache == page->cache) {
524 			cache->AcquireRefLocked();
525 			return cache;
526 		}
527 
528 		// the cache changed in the meantime
529 		cache->Unlock();
530 		mutex_lock(&sCacheListLock);
531 	}
532 }
533 
534 
535 // #pragma mark - VMCache
536 
537 
538 bool
539 VMCache::_IsMergeable() const
540 {
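	// A cache is mergeable if it is temporary, has no areas, and has exactly
	// one consumer (a non-empty consumers list whose head's next and prev
	// links point to the same node).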
541 	return (areas == NULL && temporary
542 		&& !list_is_empty(const_cast<list*>(&consumers))
543 		&& consumers.link.next == consumers.link.prev);
544 }
545 
546 
547 VMCache::VMCache()
548 {
549 }
550 
551 
552 VMCache::~VMCache()
553 {
554 }
555 
556 
557 status_t
558 VMCache::Init(uint32 cacheType)
559 {
560 	mutex_init(&fLock, "VMCache");
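	// list_init_etc() needs the byte offset of the link member within the
	// list elements; offset_of_member() wants an object, hence the dummy.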
561 	VMCache dummyCache;
562 	list_init_etc(&consumers, offset_of_member(dummyCache, consumer_link));
563 	areas = NULL;
564 	fRefCount = 1;
565 	source = NULL;
566 	virtual_base = 0;
567 	virtual_end = 0;
568 	committed_size = 0;
569 	temporary = 0;
570 	scan_skip = 0;
571 	page_count = 0;
572 	type = cacheType;
573 	fPageEventWaiters = NULL;
574 
575 #if DEBUG_CACHE_LIST
576 	mutex_lock(&sCacheListLock);
577 
578 	if (gDebugCacheList)
579 		gDebugCacheList->debug_previous = this;
580 	debug_previous = NULL;
581 	debug_next = gDebugCacheList;
582 	gDebugCacheList = this;
583 
584 	mutex_unlock(&sCacheListLock);
585 #endif
586 
587 	return B_OK;
588 }
589 
590 
591 void
592 VMCache::Delete()
593 {
594 	if (areas != NULL)
595 		panic("cache %p to be deleted still has areas", this);
596 	if (!list_is_empty(&consumers))
597 		panic("cache %p to be deleted still has consumers", this);
598 
599 	T(Delete(this));
600 
601 	// free all of the pages in the cache
602 	while (vm_page* page = pages.Root()) {
603 		if (!page->mappings.IsEmpty() || page->wired_count != 0) {
604 			panic("remove page %p from cache %p: page still has mappings!\n",
605 				page, this);
606 		}
607 
608 		// remove it
609 		pages.Remove(page);
610 		page->cache = NULL;
611 		// TODO: we also need to remove all of the page's mappings!
612 
613 		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
614 			page->physical_page_number));
615 		DEBUG_PAGE_ACCESS_START(page);
616 		vm_page_free(this, page);
617 	}
618 
619 	// remove the ref to the source
620 	if (source)
621 		source->_RemoveConsumer(this);
622 
623 	// We lock and unlock the sCacheListLock even if DEBUG_CACHE_LIST is
624 	// not enabled. This synchronization point is needed for
625 	// vm_cache_acquire_locked_page_cache().
626 	mutex_lock(&sCacheListLock);
627 
628 #if DEBUG_CACHE_LIST
629 	if (debug_previous)
630 		debug_previous->debug_next = debug_next;
631 	if (debug_next)
632 		debug_next->debug_previous = debug_previous;
633 	if (this == gDebugCacheList)
634 		gDebugCacheList = debug_next;
635 #endif
636 
637 	mutex_destroy(&fLock);
638 
639 	mutex_unlock(&sCacheListLock);
640 
641 	delete this;
642 }
643 
644 
645 void
646 VMCache::Unlock()
647 {
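	// While we hold the only reference and the cache is mergeable, try to
	// fold it into its single consumer before finally releasing the lock.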
648 	while (fRefCount == 1 && _IsMergeable()) {
649 		VMCache* consumer = (VMCache*)list_get_first_item(&consumers);
650 		if (consumer->TryLock()) {
651 			_MergeWithOnlyConsumer();
652 		} else {
653 			// Someone else has locked the consumer ATM. Unlock this cache and
654 			// wait for the consumer lock. Increment the cache's ref count
655 			// temporarily, so that no one else will try what we are doing or
656 			// delete the cache.
657 			fRefCount++;
658 			bool consumerLocked = consumer->SwitchLock(&fLock);
659 			Lock();
660 			fRefCount--;
661 
662 			if (consumerLocked) {
663 				if (fRefCount == 1 && _IsMergeable()
664 						&& consumer == list_get_first_item(&consumers)) {
665 					_MergeWithOnlyConsumer();
666 				} else {
667 					// something changed, get rid of the consumer lock
668 					consumer->Unlock();
669 				}
670 			}
671 		}
672 	}
673 
674 	if (fRefCount == 0) {
675 		// delete this cache
676 		Delete();
677 	} else
678 		mutex_unlock(&fLock);
679 }
680 
681 
682 vm_page*
683 VMCache::LookupPage(off_t offset)
684 {
685 	AssertLocked();
686 
687 	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
688 
689 #if KDEBUG
690 	if (page != NULL && page->cache != this)
691 		panic("page %p not in cache %p\n", page, this);
692 #endif
693 
694 	return page;
695 }
696 
697 
698 void
699 VMCache::InsertPage(vm_page* page, off_t offset)
700 {
701 	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %Ld\n",
702 		this, page, offset));
703 	AssertLocked();
704 
705 	if (page->cache != NULL) {
706 		panic("insert page %p into cache %p: page cache is set to %p\n",
707 			page, this, page->cache);
708 	}
709 
710 	T2(InsertPage(this, page, offset));
711 
712 	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
713 	page_count++;
714 	page->usage_count = 2;
715 	page->cache = this;
716 
717 #if KDEBUG
718 	vm_page* otherPage = pages.Lookup(page->cache_offset);
719 	if (otherPage != NULL) {
720 		panic("VMCache::InsertPage(): there's already page %p with cache "
721 			"offset %lu in cache %p; inserting page %p", otherPage,
722 			page->cache_offset, this, page);
723 	}
724 #endif	// KDEBUG
725 
726 	pages.Insert(page);
727 }
728 
729 
730 /*!	Removes the vm_page from this cache. Of course, the page must
731 	really be in this cache or evil things will happen.
732 	The cache lock must be held.
733 */
734 void
735 VMCache::RemovePage(vm_page* page)
736 {
737 	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
738 	AssertLocked();
739 
740 	if (page->cache != this) {
741 		panic("remove page %p from cache %p: page cache is set to %p\n", page,
742 			this, page->cache);
743 	}
744 
745 	T2(RemovePage(this, page));
746 
747 	pages.Remove(page);
748 	page->cache = NULL;
749 	page_count--;
750 }
751 
752 
753 /*!	Moves the given page from its current cache and inserts it into this cache.
754 	Both caches must be locked.
755 */
756 void
757 VMCache::MovePage(vm_page* page)
758 {
759 	VMCache* oldCache = page->cache;
760 
761 	AssertLocked();
762 	oldCache->AssertLocked();
763 
764 	// remove from old cache
765 	oldCache->pages.Remove(page);
766 	oldCache->page_count--;
767 	T2(RemovePage(oldCache, page));
768 
769 	// insert here
770 	pages.Insert(page);
771 	page_count++;
772 	page->cache = this;
773 	T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
774 }
775 
776 
777 /*!	Moves all pages from the given cache to this one.
778 	Both caches must be locked. This cache must be empty.
779 */
780 void
781 VMCache::MoveAllPages(VMCache* fromCache)
782 {
783 	AssertLocked();
784 	fromCache->AssertLocked();
785 	ASSERT(page_count == 0);
786 
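	// Since this cache is empty, we can simply take over the other cache's
	// page tree by swapping and then fix up the back pointers of the pages.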
787 	std::swap(fromCache->pages, pages);
788 	page_count = fromCache->page_count;
789 	fromCache->page_count = 0;
790 
791 	for (VMCachePagesTree::Iterator it = pages.GetIterator();
792 			vm_page* page = it.Next();) {
793 		page->cache = this;
794 		T2(RemovePage(fromCache, page));
795 		T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
796 	}
797 }
798 
799 
800 /*!	Waits until one or more of the given events have happened for the given
801 	page, which must belong to this cache.
802 	The cache must be locked. It will be unlocked by the method. \a relock
803 	specifies whether the method shall re-lock the cache before returning.
804 	\param page The page for which to wait.
805 	\param events The mask of events the caller is interested in.
806 	\param relock If \c true, the cache will be locked when returning,
807 		otherwise it won't be locked.
808 */
809 void
810 VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
811 {
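	// The waiter lives on our stack and is registered while the cache is
	// still locked; _NotifyPageEvents() unlinks and unblocks it. Preparing
	// to block before unlocking ensures that no notification can be missed.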
812 	PageEventWaiter waiter;
813 	waiter.thread = thread_get_current_thread();
814 	waiter.next = fPageEventWaiters;
815 	waiter.page = page;
816 	waiter.events = events;
817 
818 	fPageEventWaiters = &waiter;
819 
820 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER,
821 		"cache page events");
822 
823 	Unlock();
824 	thread_block();
825 
826 	if (relock)
827 		Lock();
828 }
829 
830 
831 /*!	Makes this cache the source of the \a consumer cache,
832 	and adds the \a consumer to its list.
833 	This also grabs a reference to the source cache.
834 	Assumes you have the cache and the consumer's lock held.
835 */
836 void
837 VMCache::AddConsumer(VMCache* consumer)
838 {
839 	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
840 	AssertLocked();
841 	consumer->AssertLocked();
842 
843 	T(AddConsumer(this, consumer));
844 
845 	consumer->source = this;
846 	list_add_item(&consumers, consumer);
847 
848 	AcquireRefLocked();
849 	AcquireStoreRef();
850 }
851 
852 
853 /*!	Adds the \a area to this cache.
854 	Assumes you have the cache locked.
855 */
856 status_t
857 VMCache::InsertAreaLocked(VMArea* area)
858 {
859 	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
860 	AssertLocked();
861 
862 	T(InsertArea(this, area));
863 
864 	area->cache_next = areas;
865 	if (area->cache_next)
866 		area->cache_next->cache_prev = area;
867 	area->cache_prev = NULL;
868 	areas = area;
869 
870 	AcquireStoreRef();
871 
872 	return B_OK;
873 }
874 
875 
876 status_t
877 VMCache::RemoveArea(VMArea* area)
878 {
879 	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));
880 
881 	T(RemoveArea(this, area));
882 
883 	// We release the store reference first, since otherwise we would reverse
884 	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
885 	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
886 	// Also cf. _RemoveConsumer().
887 	ReleaseStoreRef();
888 
889 	AutoLocker<VMCache> locker(this);
890 
891 	if (area->cache_prev)
892 		area->cache_prev->cache_next = area->cache_next;
893 	if (area->cache_next)
894 		area->cache_next->cache_prev = area->cache_prev;
895 	if (areas == area)
896 		areas = area->cache_next;
897 
898 	return B_OK;
899 }
900 
901 
902 /*!	Transfers the areas from \a fromCache to this cache. This cache must not
903 	have areas yet. Both caches must be locked.
904 */
905 void
906 VMCache::TransferAreas(VMCache* fromCache)
907 {
908 	AssertLocked();
909 	fromCache->AssertLocked();
910 	ASSERT(areas == NULL);
911 
912 	areas = fromCache->areas;
913 	fromCache->areas = NULL;
914 
915 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
916 		area->cache = this;
917 		AcquireRefLocked();
918 		fromCache->ReleaseRefLocked();
919 
920 		T(RemoveArea(fromCache, area));
921 		T(InsertArea(this, area));
922 	}
923 }
924 
925 
926 uint32
927 VMCache::CountWritableAreas(VMArea* ignoreArea) const
928 {
929 	uint32 count = 0;
930 
931 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
932 		if (area != ignoreArea
933 			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
934 			count++;
935 		}
936 	}
937 
938 	return count;
939 }
940 
941 
942 status_t
943 VMCache::WriteModified()
944 {
945 	TRACE(("VMCache::WriteModified(cache = %p)\n", this));
946 
947 	if (temporary)
948 		return B_OK;
949 
950 	Lock();
951 	status_t status = vm_page_write_modified_pages(this);
952 	Unlock();
953 
954 	return status;
955 }
956 
957 
958 /*!	Commits the memory to the store if the \a commitment is larger than
959 	what's committed already.
960 	Assumes you have the cache's lock held.
961 */
962 status_t
963 VMCache::SetMinimalCommitment(off_t commitment)
964 {
965 	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %Ld)\n",
966 		this, commitment));
967 	AssertLocked();
968 
969 	T(SetMinimalCommitment(this, commitment));
970 
971 	status_t status = B_OK;
972 
973 	// If we don't have enough committed space to cover through to the new end
974 	// of the area...
975 	if (committed_size < commitment) {
976 		// ToDo: should we check if the cache's virtual size is large
977 		//	enough for a commitment of that size?
978 
979 		// try to commit more memory
980 		status = Commit(commitment);
981 	}
982 
983 	return status;
984 }
985 
986 
987 /*!	This function updates the size field of the cache.
988 	If needed, it will free up all pages that don't belong to the cache anymore.
989 	The cache lock must be held when you call it.
990 	Since removed pages no longer belong to the cache, they are not written
991 	back before being removed.
992 
993 	Note that this function may temporarily release the cache lock if it has
994 	to wait for busy pages.
995 */
996 status_t
997 VMCache::Resize(off_t newSize)
998 {
999 // TODO: This method must be virtual as VMAnonymousCache needs to free allocated
1000 // swap pages!
1001 	TRACE(("VMCache::Resize(cache %p, newSize %Ld) old size %Ld\n",
1002 		this, newSize, this->virtual_end));
1003 	this->AssertLocked();
1004 
1005 	T(Resize(this, newSize));
1006 
1007 	status_t status = Commit(newSize - virtual_base);
1008 	if (status != B_OK)
1009 		return status;
1010 
1011 	uint32 oldPageCount = (uint32)((virtual_end + B_PAGE_SIZE - 1)
1012 		>> PAGE_SHIFT);
1013 	uint32 newPageCount = (uint32)((newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT);
1014 
1015 	if (newPageCount < oldPageCount) {
1016 		// we need to remove all pages in the cache outside of the new virtual
1017 		// size
1018 		for (VMCachePagesTree::Iterator it
1019 					= pages.GetIterator(newPageCount, true, true);
1020 				vm_page* page = it.Next();) {
1021 			if (page->state == PAGE_STATE_BUSY) {
1022 				if (page->busy_writing) {
1023 					// We cannot wait for the page to become available
1024 					// as we might cause a deadlock this way
1025 					page->busy_writing = false;
1026 						// this will notify the writer to free the page
1027 				} else {
1028 					// wait for page to become unbusy
1029 					WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1030 
1031 					// restart from the start of the list
1032 					it = pages.GetIterator(newPageCount, true, true);
1033 				}
1034 				continue;
1035 			}
1036 
1037 			// remove the page and put it into the free queue
1038 			DEBUG_PAGE_ACCESS_START(page);
1039 			vm_remove_all_page_mappings(page, NULL);
1040 			ASSERT(page->wired_count == 0);
1041 				// TODO: Find a real solution! Unmapping is probably fine, but
1042 				// we have no way of unmapping wired pages here.
1043 			RemovePage(page);
1044 			vm_page_free(this, page);
1045 				// Note: When iterating through an IteratableSplayTree,
1046 				// removing the current node is safe.
1047 		}
1048 	}
1049 
1050 	virtual_end = newSize;
1051 	return B_OK;
1052 }
1053 
1054 
1055 /*!	You have to call this function with the VMCache lock held. */
1056 status_t
1057 VMCache::FlushAndRemoveAllPages()
1058 {
1059 	ASSERT_LOCKED_MUTEX(&fLock);
1060 
1061 	while (page_count > 0) {
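	// Alternate between writing back modified pages and freeing the clean
	// ones until the cache is empty. Mapped or wired pages cannot be removed
	// and make us bail out with B_BUSY.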
1062 		// write back modified pages
1063 		status_t status = vm_page_write_modified_pages(this);
1064 		if (status != B_OK)
1065 			return status;
1066 
1067 		// remove pages
1068 		for (VMCachePagesTree::Iterator it = pages.GetIterator();
1069 				vm_page* page = it.Next();) {
1070 			if (page->state == PAGE_STATE_BUSY) {
1071 				// wait for page to become unbusy
1072 				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1073 
1074 				// restart from the start of the list
1075 				it = pages.GetIterator();
1076 				continue;
1077 			}
1078 
1079 			// skip modified pages -- they will be written back in the next
1080 			// iteration
1081 			if (page->state == PAGE_STATE_MODIFIED)
1082 				continue;
1083 
1084 			// We can't remove mapped pages.
1085 			if (page->wired_count > 0 || !page->mappings.IsEmpty())
1086 				return B_BUSY;
1087 
1088 			DEBUG_PAGE_ACCESS_START(page);
1089 			RemovePage(page);
1090 			vm_page_free(this, page);
1091 				// Note: When iterating through an IteratableSplayTree,
1092 				// removing the current node is safe.
1093 		}
1094 	}
1095 
1096 	return B_OK;
1097 }
1098 
1099 
1100 status_t
1101 VMCache::Commit(off_t size)
1102 {
1103 	committed_size = size;
1104 	return B_OK;
1105 }
1106 
1107 
1108 bool
1109 VMCache::HasPage(off_t offset)
1110 {
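	// By default a cache is assumed to be able to contain any offset within
	// its virtual range; subclasses may override this.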
1111 	return offset >= virtual_base && offset <= virtual_end;
1112 }
1113 
1114 
1115 status_t
1116 VMCache::Read(off_t offset, const iovec *vecs, size_t count, uint32 flags,
1117 	size_t *_numBytes)
1118 {
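	// The generic cache has no backing store to read from; caches backed by
	// a store (e.g. the vnode cache) override this hook.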
1119 	return B_ERROR;
1120 }
1121 
1122 
1123 status_t
1124 VMCache::Write(off_t offset, const iovec *vecs, size_t count, uint32 flags,
1125 	size_t *_numBytes)
1126 {
1127 	return B_ERROR;
1128 }
1129 
1130 
1131 status_t
1132 VMCache::WriteAsync(off_t offset, const iovec* vecs, size_t count,
1133 	size_t numBytes, uint32 flags, AsyncIOCallback* callback)
1134 {
1135 	// Not supported, fall back to the synchronous hook.
1136 	size_t transferred = numBytes;
1137 	status_t error = Write(offset, vecs, count, flags, &transferred);
1138 
1139 	if (callback != NULL)
1140 		callback->IOFinished(error, transferred != numBytes, transferred);
1141 
1142 	return error;
1143 }
1144 
1145 
1146 /*!	\brief Returns whether the cache can write the page at the given offset.
1147 
1148 	The cache must be locked when this function is invoked.
1149 
1150 	\param offset The page offset.
1151 	\return \c true if the page can be written, \c false otherwise.
1152 */
1153 bool
1154 VMCache::CanWritePage(off_t offset)
1155 {
1156 	return false;
1157 }
1158 
1159 
1160 status_t
1161 VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
1162 {
1163 	return B_BAD_ADDRESS;
1164 }
1165 
1166 
1167 void
1168 VMCache::Merge(VMCache* source)
1169 {
1170 	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
1171 			vm_page* page = it.Next();) {
1172 		// Note: Removing the current node while iterating through an
1173 		// IteratableSplayTree is safe.
1174 		vm_page* consumerPage = LookupPage(
1175 			(off_t)page->cache_offset << PAGE_SHIFT);
1176 		if (consumerPage == NULL) {
1177 			// the page is not yet in the consumer cache - move it upwards
1178 			MovePage(page);
1179 		}
1180 	}
1181 }
1182 
1183 
1184 status_t
1185 VMCache::AcquireUnreferencedStoreRef()
1186 {
1187 	return B_OK;
1188 }
1189 
1190 
1191 void
1192 VMCache::AcquireStoreRef()
1193 {
1194 }
1195 
1196 
1197 void
1198 VMCache::ReleaseStoreRef()
1199 {
1200 }
1201 
1202 
1203 /*!	Wakes up threads waiting for page events.
1204 	\param page The page for which events occurred.
1205 	\param events The mask of events that occurred.
1206 */
1207 void
1208 VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
1209 {
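	// Walk the singly linked waiter list, unlinking and unblocking every
	// waiter interested in one of the events that occurred for this page.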
1210 	PageEventWaiter** it = &fPageEventWaiters;
1211 	while (PageEventWaiter* waiter = *it) {
1212 		if (waiter->page == page && (waiter->events & events) != 0) {
1213 			// remove from list and unblock
1214 			*it = waiter->next;
1215 			InterruptsSpinLocker threadsLocker(gThreadSpinlock);
1216 			thread_unblock_locked(waiter->thread, B_OK);
1217 		} else
1218 			it = &waiter->next;
1219 	}
1220 }
1221 
1222 
1223 /*!	Merges the given cache with its only consumer.
1224 	The caller must hold both the cache's and the consumer's lock. The method
1225 	will unlock the consumer lock.
1226 */
1227 void
1228 VMCache::_MergeWithOnlyConsumer()
1229 {
1230 	VMCache* consumer = (VMCache*)list_remove_head_item(&consumers);
1231 
1232 	TRACE(("merge vm cache %p (ref == %ld) with vm cache %p\n",
1233 		this, this->fRefCount, consumer));
1234 
1235 	T(Merge(this, consumer));
1236 
1237 	// merge the cache
1238 	consumer->Merge(this);
1239 
1240 	// The remaining consumer has got a new source.
1241 	if (source != NULL) {
1242 		VMCache* newSource = source;
1243 
1244 		newSource->Lock();
1245 
1246 		list_remove_item(&newSource->consumers, this);
1247 		list_add_item(&newSource->consumers, consumer);
1248 		consumer->source = newSource;
1249 		source = NULL;
1250 
1251 		newSource->Unlock();
1252 	} else
1253 		consumer->source = NULL;
1254 
1255 	// Release the reference the cache's consumer owned. The consumer takes
1256 	// over the cache's ref to its source (if any) instead.
1257 	ReleaseRefLocked();
1258 
1259 	consumer->Unlock();
1260 }
1261 
1262 
1263 /*!	Removes the \a consumer from this cache.
1264 	It will also release the reference to the cache owned by the consumer.
1265 	Assumes you have the consumer's cache lock held. This cache must not be
1266 	locked.
1267 */
1268 void
1269 VMCache::_RemoveConsumer(VMCache* consumer)
1270 {
1271 	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
1272 	consumer->AssertLocked();
1273 
1274 	T(RemoveConsumer(this, consumer));
1275 
1276 	// Remove the store ref before locking the cache. Otherwise we'd call into
1277 	// the VFS while holding the cache lock, which would reverse the usual
1278 	// locking order.
1279 	ReleaseStoreRef();
1280 
1281 	// remove the consumer from the cache, but keep its reference until later
1282 	Lock();
1283 	list_remove_item(&consumers, consumer);
1284 	consumer->source = NULL;
1285 
1286 	ReleaseRefAndUnlock();
1287 }
1288 
1289 
1290 // #pragma mark - VMCacheFactory
1291 	// TODO: Move to own source file!
1292 
1293 
1294 #include <heap.h>
1295 
1296 #include "VMAnonymousCache.h"
1297 #include "VMAnonymousNoSwapCache.h"
1298 #include "VMDeviceCache.h"
1299 #include "VMNullCache.h"
1300 #include "../cache/vnode_store.h"
1301 
1302 
1303 /*static*/ status_t
1304 VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
1305 	int32 numPrecommittedPages, int32 numGuardPages, bool swappable)
1306 {
1307 #if ENABLE_SWAP_SUPPORT
1308 	if (swappable) {
1309 		VMAnonymousCache* cache = new(nogrow) VMAnonymousCache;
1310 		if (cache == NULL)
1311 			return B_NO_MEMORY;
1312 
1313 		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1314 			numGuardPages);
1315 		if (error != B_OK) {
1316 			cache->Delete();
1317 			return error;
1318 		}
1319 
1320 		T(Create(cache));
1321 
1322 		_cache = cache;
1323 		return B_OK;
1324 	}
1325 #endif
1326 
1327 	VMAnonymousNoSwapCache* cache = new(nogrow) VMAnonymousNoSwapCache;
1328 	if (cache == NULL)
1329 		return B_NO_MEMORY;
1330 
1331 	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1332 		numGuardPages);
1333 	if (error != B_OK) {
1334 		cache->Delete();
1335 		return error;
1336 	}
1337 
1338 	T(Create(cache));
1339 
1340 	_cache = cache;
1341 	return B_OK;
1342 }
1343 
1344 
1345 /*static*/ status_t
1346 VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
1347 {
1348 	VMVnodeCache* cache = new(nogrow) VMVnodeCache;
1349 	if (cache == NULL)
1350 		return B_NO_MEMORY;
1351 
1352 	status_t error = cache->Init(vnode);
1353 	if (error != B_OK) {
1354 		cache->Delete();
1355 		return error;
1356 	}
1357 
1358 	T(Create(cache));
1359 
1360 	_cache = cache;
1361 	return B_OK;
1362 }
1363 
1364 
1365 /*static*/ status_t
1366 VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
1367 {
1368 	VMDeviceCache* cache = new(nogrow) VMDeviceCache;
1369 	if (cache == NULL)
1370 		return B_NO_MEMORY;
1371 
1372 	status_t error = cache->Init(baseAddress);
1373 	if (error != B_OK) {
1374 		cache->Delete();
1375 		return error;
1376 	}
1377 
1378 	T(Create(cache));
1379 
1380 	_cache = cache;
1381 	return B_OK;
1382 }
1383 
1384 
1385 /*static*/ status_t
1386 VMCacheFactory::CreateNullCache(VMCache*& _cache)
1387 {
1388 	VMNullCache* cache = new(nogrow) VMNullCache;
1389 	if (cache == NULL)
1390 		return B_NO_MEMORY;
1391 
1392 	status_t error = cache->Init();
1393 	if (error != B_OK) {
1394 		cache->Delete();
1395 		return error;
1396 	}
1397 
1398 	T(Create(cache));
1399 
1400 	_cache = cache;
1401 	return B_OK;
1402 }
1403