xref: /haiku/src/system/kernel/vm/VMCache.cpp (revision eb8dc1ebfbe911a6af06efe02d003aa37687faad)
1 /*
2  * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include <vm/VMCache.h>
12 
13 #include <stddef.h>
14 #include <stdlib.h>
15 
16 #include <algorithm>
17 
18 #include <arch/cpu.h>
19 #include <condition_variable.h>
20 #include <debug.h>
21 #include <heap.h>
22 #include <int.h>
23 #include <kernel.h>
24 #include <smp.h>
25 #include <tracing.h>
26 #include <util/khash.h>
27 #include <util/AutoLock.h>
28 #include <vfs.h>
29 #include <vm/vm.h>
30 #include <vm/vm_page.h>
31 #include <vm/vm_priv.h>
32 #include <vm/vm_types.h>
33 #include <vm/VMArea.h>
34 
35 
36 //#define TRACE_VM_CACHE
37 #ifdef TRACE_VM_CACHE
38 #	define TRACE(x) dprintf x
39 #else
40 #	define TRACE(x) ;
41 #endif
42 
43 
44 #if DEBUG_CACHE_LIST
45 VMCache* gDebugCacheList;
46 #endif
47 static mutex sCacheListLock = MUTEX_INITIALIZER("global VMCache list");
48 	// The lock is also needed when the debug feature is disabled.
49 
50 
51 struct VMCache::PageEventWaiter {
52 	struct thread*		thread;
53 	PageEventWaiter*	next;
54 	vm_page*			page;
55 	uint32				events;
56 };
57 
58 
59 #if VM_CACHE_TRACING
60 
61 namespace VMCacheTracing {
62 
63 class VMCacheTraceEntry : public AbstractTraceEntry {
64 	public:
65 		VMCacheTraceEntry(VMCache* cache)
66 			:
67 			fCache(cache)
68 		{
69 #if VM_CACHE_TRACING_STACK_TRACE
70 			fStackTrace = capture_tracing_stack_trace(
71 				VM_CACHE_TRACING_STACK_TRACE, 0, true);
72 				// Don't capture userland stack trace to avoid potential
73 				// deadlocks.
74 #endif
75 		}
76 
77 #if VM_CACHE_TRACING_STACK_TRACE
78 		virtual void DumpStackTrace(TraceOutput& out)
79 		{
80 			out.PrintStackTrace(fStackTrace);
81 		}
82 #endif
83 
84 		VMCache* Cache() const
85 		{
86 			return fCache;
87 		}
88 
89 	protected:
90 		VMCache*	fCache;
91 #if VM_CACHE_TRACING_STACK_TRACE
92 		tracing_stack_trace* fStackTrace;
93 #endif
94 };
95 
96 
97 class Create : public VMCacheTraceEntry {
98 	public:
99 		Create(VMCache* cache)
100 			:
101 			VMCacheTraceEntry(cache)
102 		{
103 			Initialized();
104 		}
105 
106 		virtual void AddDump(TraceOutput& out)
107 		{
108 			out.Print("vm cache create: -> cache: %p", fCache);
109 		}
110 };
111 
112 
113 class Delete : public VMCacheTraceEntry {
114 	public:
115 		Delete(VMCache* cache)
116 			:
117 			VMCacheTraceEntry(cache)
118 		{
119 			Initialized();
120 		}
121 
122 		virtual void AddDump(TraceOutput& out)
123 		{
124 			out.Print("vm cache delete: cache: %p", fCache);
125 		}
126 };
127 
128 
129 class SetMinimalCommitment : public VMCacheTraceEntry {
130 	public:
131 		SetMinimalCommitment(VMCache* cache, off_t commitment)
132 			:
133 			VMCacheTraceEntry(cache),
134 			fOldCommitment(cache->committed_size),
135 			fCommitment(commitment)
136 		{
137 			Initialized();
138 		}
139 
140 		virtual void AddDump(TraceOutput& out)
141 		{
142 			out.Print("vm cache set min commitment: cache: %p, "
143 				"commitment: %lld -> %lld", fCache, fOldCommitment,
144 				fCommitment);
145 		}
146 
147 	private:
148 		off_t	fOldCommitment;
149 		off_t	fCommitment;
150 };
151 
152 
153 class Resize : public VMCacheTraceEntry {
154 	public:
155 		Resize(VMCache* cache, off_t size)
156 			:
157 			VMCacheTraceEntry(cache),
158 			fOldSize(cache->virtual_end),
159 			fSize(size)
160 		{
161 			Initialized();
162 		}
163 
164 		virtual void AddDump(TraceOutput& out)
165 		{
166 			out.Print("vm cache resize: cache: %p, size: %lld -> %lld", fCache,
167 				fOldSize, fSize);
168 		}
169 
170 	private:
171 		off_t	fOldSize;
172 		off_t	fSize;
173 };
174 
175 
176 class AddConsumer : public VMCacheTraceEntry {
177 	public:
178 		AddConsumer(VMCache* cache, VMCache* consumer)
179 			:
180 			VMCacheTraceEntry(cache),
181 			fConsumer(consumer)
182 		{
183 			Initialized();
184 		}
185 
186 		virtual void AddDump(TraceOutput& out)
187 		{
188 			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
189 				fConsumer);
190 		}
191 
192 		VMCache* Consumer() const
193 		{
194 			return fConsumer;
195 		}
196 
197 	private:
198 		VMCache*	fConsumer;
199 };
200 
201 
202 class RemoveConsumer : public VMCacheTraceEntry {
203 	public:
204 		RemoveConsumer(VMCache* cache, VMCache* consumer)
205 			:
206 			VMCacheTraceEntry(cache),
207 			fConsumer(consumer)
208 		{
209 			Initialized();
210 		}
211 
212 		virtual void AddDump(TraceOutput& out)
213 		{
214 			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
215 				fCache, fConsumer);
216 		}
217 
218 	private:
219 		VMCache*	fConsumer;
220 };
221 
222 
223 class Merge : public VMCacheTraceEntry {
224 	public:
225 		Merge(VMCache* cache, VMCache* consumer)
226 			:
227 			VMCacheTraceEntry(cache),
228 			fConsumer(consumer)
229 		{
230 			Initialized();
231 		}
232 
233 		virtual void AddDump(TraceOutput& out)
234 		{
235 			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
236 				fCache, fConsumer);
237 		}
238 
239 	private:
240 		VMCache*	fConsumer;
241 };
242 
243 
244 class InsertArea : public VMCacheTraceEntry {
245 	public:
246 		InsertArea(VMCache* cache, VMArea* area)
247 			:
248 			VMCacheTraceEntry(cache),
249 			fArea(area)
250 		{
251 			Initialized();
252 		}
253 
254 		virtual void AddDump(TraceOutput& out)
255 		{
256 			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
257 				fArea);
258 		}
259 
260 		VMArea*	Area() const
261 		{
262 			return fArea;
263 		}
264 
265 	private:
266 		VMArea*	fArea;
267 };
268 
269 
270 class RemoveArea : public VMCacheTraceEntry {
271 	public:
272 		RemoveArea(VMCache* cache, VMArea* area)
273 			:
274 			VMCacheTraceEntry(cache),
275 			fArea(area)
276 		{
277 			Initialized();
278 		}
279 
280 		virtual void AddDump(TraceOutput& out)
281 		{
282 			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
283 				fArea);
284 		}
285 
286 	private:
287 		VMArea*	fArea;
288 };
289 
290 }	// namespace VMCacheTracing
291 
292 #	define T(x) new(std::nothrow) VMCacheTracing::x;
293 
294 #	if VM_CACHE_TRACING >= 2
295 
296 namespace VMCacheTracing {
297 
298 class InsertPage : public VMCacheTraceEntry {
299 	public:
300 		InsertPage(VMCache* cache, vm_page* page, off_t offset)
301 			:
302 			VMCacheTraceEntry(cache),
303 			fPage(page),
304 			fOffset(offset)
305 		{
306 			Initialized();
307 		}
308 
309 		virtual void AddDump(TraceOutput& out)
310 		{
311 			out.Print("vm cache insert page: cache: %p, page: %p, offset: %lld",
312 				fCache, fPage, fOffset);
313 		}
314 
315 	private:
316 		vm_page*	fPage;
317 		off_t		fOffset;
318 };
319 
320 
321 class RemovePage : public VMCacheTraceEntry {
322 	public:
323 		RemovePage(VMCache* cache, vm_page* page)
324 			:
325 			VMCacheTraceEntry(cache),
326 			fPage(page)
327 		{
328 			Initialized();
329 		}
330 
331 		virtual void AddDump(TraceOutput& out)
332 		{
333 			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
334 				fPage);
335 		}
336 
337 	private:
338 		vm_page*	fPage;
339 };
340 
341 }	// namespace VMCacheTracing
342 
343 #		define T2(x) new(std::nothrow) VMCacheTracing::x;
344 #	else
345 #		define T2(x) ;
346 #	endif
347 #else
348 #	define T(x) ;
349 #	define T2(x) ;
350 #endif
351 
352 
353 //	#pragma mark - debugger commands
354 
355 
356 #if VM_CACHE_TRACING
357 
358 
359 static void*
360 cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
361 {
362 	using namespace VMCacheTracing;
363 
364 	// find the previous "insert area" entry for the given area
365 	TraceEntryIterator iterator = baseIterator;
366 	TraceEntry* entry = iterator.Current();
367 	while (entry != NULL) {
368 		if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
369 			if (insertAreaEntry->Area() == area)
370 				return insertAreaEntry->Cache();
371 		}
372 
373 		entry = iterator.Previous();
374 	}
375 
376 	return NULL;
377 }
378 
379 
380 static void*
381 cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
382 {
383 	using namespace VMCacheTracing;
384 
385 	// find the previous "add consumer" or "create" entry for the given cache
386 	TraceEntryIterator iterator = baseIterator;
387 	TraceEntry* entry = iterator.Current();
388 	while (entry != NULL) {
389 		if (Create* createEntry = dynamic_cast<Create*>(entry)) {
390 			if (createEntry->Cache() == cache)
391 				return NULL;
392 		} else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
393 			if (addEntry->Consumer() == cache)
394 				return addEntry->Cache();
395 		}
396 
397 		entry = iterator.Previous();
398 	}
399 
400 	return NULL;
401 }
402 
403 
404 static int
405 command_cache_stack(int argc, char** argv)
406 {
407 	if (argc < 3 || argc > 4) {
408 		print_debugger_command_usage(argv[0]);
409 		return 0;
410 	}
411 
412 	bool isArea = false;
413 
414 	int argi = 1;
415 	if (argc == 4) {
416 		if (strcmp(argv[argi], "area") != 0) {
417 			print_debugger_command_usage(argv[0]);
418 			return 0;
419 		}
420 
421 		argi++;
422 		isArea = true;
423 	}
424 
425 	uint64 addressValue;
426 	uint64 debugEntryIndex;
427 	if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
428 		|| !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
429 		return 0;
430 	}
431 
432 	TraceEntryIterator baseIterator;
433 	if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
434 		kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
435 		return 0;
436 	}
437 
438 	void* address = (void*)(addr_t)addressValue;
439 
440 	kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
441 		isArea ? "area" : "cache", address, debugEntryIndex);
442 	if (isArea) {
443 		address = cache_stack_find_area_cache(baseIterator, address);
444 		if (address == NULL) {
445 			kprintf("  cache not found\n");
446 			return 0;
447 		}
448 	}
449 
450 	while (address != NULL) {
451 		kprintf("  %p\n", address);
452 		address = cache_stack_find_consumer(baseIterator, address);
453 	}
454 
455 	return 0;
456 }
457 
458 
459 #endif	// VM_CACHE_TRACING
460 
461 
462 //	#pragma mark -
463 
464 
465 status_t
466 vm_cache_init(kernel_args* args)
467 {
468 	return B_OK;
469 }
470 
471 
472 void
473 vm_cache_init_post_heap()
474 {
475 #if VM_CACHE_TRACING
476 	add_debugger_command_etc("cache_stack", &command_cache_stack,
477 		"List the ancestors (sources) of a VMCache at the time given by "
478 			"tracing entry index",
479 		"[ \"area\" ] <address> <tracing entry index>\n"
480 		"All ancestors (sources) of a given VMCache at the time given by the\n"
481 		"tracing entry index are listed. If \"area\" is given the supplied\n"
482 		"address is an area instead of a cache address. The listing will\n"
483 		"start with the area's cache at that point.\n",
484 		0);
485 #endif	// VM_CACHE_TRACING
486 }
487 
488 
489 VMCache*
490 vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
491 {
492 	mutex_lock(&sCacheListLock);
493 
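	// Non-blocking path: try-lock the page's current cache while holding the
	// list lock. If the page moved to another cache in the meantime, retry
	// with the new one; never block, give up and return NULL instead.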
494 	while (dontWait) {
495 		VMCache* cache = page->cache;
496 		if (cache == NULL || !cache->TryLock()) {
497 			mutex_unlock(&sCacheListLock);
498 			return NULL;
499 		}
500 
501 		if (cache == page->cache) {
502 			cache->AcquireRefLocked();
503 			mutex_unlock(&sCacheListLock);
504 			return cache;
505 		}
506 
507 		// the cache changed in the meantime
508 		cache->Unlock();
509 	}
510 
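	// Blocking path: trade the list lock for the cache's lock. If the cache
	// got deleted or the page moved to another cache in the meantime, take
	// the list lock again and retry.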
511 	while (true) {
512 		VMCache* cache = page->cache;
513 		if (cache == NULL) {
514 			mutex_unlock(&sCacheListLock);
515 			return NULL;
516 		}
517 
518 		// TODO: this is problematic, as it requires the caller not to have
519 		// a lock on this cache (it might be called via
520 		// vm_page_allocate_page(..., false)).
521 		if (!cache->SwitchLock(&sCacheListLock)) {
522 			// cache has been deleted
523 			mutex_lock(&sCacheListLock);
524 			continue;
525 		}
526 
527 		if (cache == page->cache) {
528 			cache->AcquireRefLocked();
529 			return cache;
530 		}
531 
532 		// the cache changed in the meantime
533 		cache->Unlock();
534 		mutex_lock(&sCacheListLock);
535 	}
536 }
537 
538 
539 // #pragma mark - VMCache
540 
541 
542 bool
543 VMCache::_IsMergeable() const
544 {
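	// A cache can be merged into its consumer when it is anonymous
	// (temporary), is not used by any area, and has exactly one consumer --
	// with a single element in the circular list, the head's next and prev
	// links both point to that element.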
545 	return (areas == NULL && temporary
546 		&& !list_is_empty(const_cast<list*>(&consumers))
547 		&& consumers.link.next == consumers.link.prev);
548 }
549 
550 
551 VMCache::VMCache()
552 {
553 }
554 
555 
556 VMCache::~VMCache()
557 {
558 }
559 
560 
561 status_t
562 VMCache::Init(uint32 cacheType)
563 {
564 	mutex_init(&fLock, "VMCache");
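	// The dummy instance below is only used to compute the byte offset of
	// consumer_link, which list_init_etc() needs in order to link consumer
	// caches through that member.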
565 	VMCache dummyCache;
566 	list_init_etc(&consumers, offset_of_member(dummyCache, consumer_link));
567 	areas = NULL;
568 	fRefCount = 1;
569 	source = NULL;
570 	virtual_base = 0;
571 	virtual_end = 0;
572 	committed_size = 0;
573 	temporary = 0;
574 	scan_skip = 0;
575 	page_count = 0;
576 	type = cacheType;
577 	fPageEventWaiters = NULL;
578 
579 #if DEBUG_CACHE_LIST
580 	mutex_lock(&sCacheListLock);
581 
582 	if (gDebugCacheList)
583 		gDebugCacheList->debug_previous = this;
584 	debug_previous = NULL;
585 	debug_next = gDebugCacheList;
586 	gDebugCacheList = this;
587 
588 	mutex_unlock(&sCacheListLock);
589 #endif
590 
591 	return B_OK;
592 }
593 
594 
595 void
596 VMCache::Delete()
597 {
598 	if (areas != NULL)
599 		panic("cache %p to be deleted still has areas", this);
600 	if (!list_is_empty(&consumers))
601 		panic("cache %p to be deleted still has consumers", this);
602 
603 	T(Delete(this));
604 
605 	// free all of the pages in the cache
606 	while (vm_page* page = pages.Root()) {
607 		if (!page->mappings.IsEmpty() || page->wired_count != 0) {
608 			panic("remove page %p from cache %p: page still has mappings!\n",
609 				page, this);
610 		}
611 
612 		// remove it
613 		pages.Remove(page);
614 		page->cache = NULL;
615 		// TODO: we also need to remove all of the page's mappings!
616 
617 		TRACE(("VMCache::Delete(): freeing page 0x%lx\n",
618 			page->physical_page_number));
619 		vm_page_free(this, page);
620 	}
621 
622 	// remove the ref to the source
623 	if (source)
624 		source->_RemoveConsumer(this);
625 
626 	// We lock and unlock the sCacheListLock, even if DEBUG_CACHE_LIST is
627 	// not enabled. This synchronization point is needed for
628 	// vm_cache_acquire_locked_page_cache().
629 	mutex_lock(&sCacheListLock);
630 
631 #if DEBUG_CACHE_LIST
632 	if (debug_previous)
633 		debug_previous->debug_next = debug_next;
634 	if (debug_next)
635 		debug_next->debug_previous = debug_previous;
636 	if (this == gDebugCacheList)
637 		gDebugCacheList = debug_next;
638 #endif
639 
640 	mutex_destroy(&fLock);
641 
642 	mutex_unlock(&sCacheListLock);
643 
644 	delete this;
645 }
646 
647 
648 void
649 VMCache::Unlock()
650 {
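	// While we hold the only reference and the cache is mergeable, fold it
	// into its single consumer to keep the cache chain short.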
651 	while (fRefCount == 1 && _IsMergeable()) {
652 		VMCache* consumer = (VMCache*)list_get_first_item(&consumers);
653 		if (consumer->TryLock()) {
654 			_MergeWithOnlyConsumer();
655 		} else {
656 			// Someone else has locked the consumer ATM. Unlock this cache and
657 			// wait for the consumer lock. Increment the cache's ref count
658 			// temporarily, so that no one else will try what we are doing or
659 			// delete the cache.
660 			fRefCount++;
661 			bool consumerLocked = consumer->SwitchLock(&fLock);
662 			Lock();
663 			fRefCount--;
664 
665 			if (consumerLocked) {
666 				if (fRefCount == 1 && _IsMergeable()
667 						&& consumer == list_get_first_item(&consumers)) {
668 					_MergeWithOnlyConsumer();
669 				} else {
670 					// something changed, get rid of the consumer lock
671 					consumer->Unlock();
672 				}
673 			}
674 		}
675 	}
676 
677 	if (fRefCount == 0) {
678 		// delete this cache
679 		Delete();
680 	} else
681 		mutex_unlock(&fLock);
682 }
683 
684 
685 void
686 VMCache::AcquireRefLocked()
687 {
688 // TODO: Inline!
689 	ASSERT_LOCKED_MUTEX(&fLock);
690 
691 	fRefCount++;
692 }
693 
694 
695 void
696 VMCache::AcquireRef()
697 {
698 	Lock();
699 	fRefCount++;
700 	Unlock();
701 }
702 
703 
704 void
705 VMCache::ReleaseRefLocked()
706 {
707 // TODO: Inline!
708 	ASSERT_LOCKED_MUTEX(&fLock);
709 
710 	fRefCount--;
711 }
712 
713 
714 void
715 VMCache::ReleaseRef()
716 {
717 	Lock();
718 	fRefCount--;
719 	Unlock();
720 }
721 
722 
723 vm_page*
724 VMCache::LookupPage(off_t offset)
725 {
726 	AssertLocked();
727 
728 	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
729 
730 #if KDEBUG
731 	if (page != NULL && page->cache != this)
732 		panic("page %p not in cache %p\n", page, this);
733 #endif
734 
735 	return page;
736 }
737 
738 
739 void
740 VMCache::InsertPage(vm_page* page, off_t offset)
741 {
742 	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %Ld\n",
743 		this, page, offset));
744 	AssertLocked();
745 
746 	if (page->cache != NULL) {
747 		panic("insert page %p into cache %p: page cache is set to %p\n",
748 			page, this, page->cache);
749 	}
750 
751 	T2(InsertPage(this, page, offset));
752 
753 	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
754 	page_count++;
755 	page->usage_count = 2;
756 	page->cache = this;
757 
758 #if KDEBUG
759 	vm_page* otherPage = pages.Lookup(page->cache_offset);
760 	if (otherPage != NULL) {
761 		panic("VMCache::InsertPage(): there's already page %p with cache "
762 			"offset %lu in cache %p; inserting page %p", otherPage,
763 			page->cache_offset, this, page);
764 	}
765 #endif	// KDEBUG
766 
767 	pages.Insert(page);
768 }
769 
770 
771 /*!	Removes the vm_page from this cache. Of course, the page must
772 	really be in this cache or evil things will happen.
773 	The cache lock must be held.
774 */
775 void
776 VMCache::RemovePage(vm_page* page)
777 {
778 	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
779 	AssertLocked();
780 
781 	if (page->cache != this) {
782 		panic("remove page %p from cache %p: page cache is set to %p\n", page,
783 			this, page->cache);
784 	}
785 
786 	T2(RemovePage(this, page));
787 
788 	pages.Remove(page);
789 	page->cache = NULL;
790 	page_count--;
791 }
792 
793 
794 /*!	Moves the given page from its current cache and inserts it into this cache.
795 	Both caches must be locked.
796 */
797 void
798 VMCache::MovePage(vm_page* page)
799 {
800 	VMCache* oldCache = page->cache;
801 
802 	AssertLocked();
803 	oldCache->AssertLocked();
804 
805 	// remove from old cache
806 	oldCache->pages.Remove(page);
807 	oldCache->page_count--;
808 	T2(RemovePage(oldCache, page));
809 
810 	// insert here
811 	pages.Insert(page);
812 	page_count++;
813 	page->cache = this;
814 	T2(InsertPage(this, page, (off_t)page->cache_offset << PAGE_SHIFT));
815 }
816 
817 
818 /*!	Moves all pages from the given cache to this one.
819 	Both caches must be locked. This cache must be empty.
820 */
821 void
822 VMCache::MoveAllPages(VMCache* fromCache)
823 {
824 	AssertLocked();
825 	fromCache->AssertLocked();
826 	ASSERT(page_count == 0);
827 
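	// Swapping the page trees transfers all pages at once; since this cache
	// is empty, fromCache is left with the empty tree. The loop below only
	// fixes up each page's cache pointer (and emits tracing entries).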
828 	std::swap(fromCache->pages, pages);
829 	page_count = fromCache->page_count;
830 	fromCache->page_count = 0;
831 
832 	for (VMCachePagesTree::Iterator it = pages.GetIterator();
833 			vm_page* page = it.Next();) {
834 		page->cache = this;
835 		T2(RemovePage(fromCache, page));
836 		T2(InsertPage(this, page, (off_t)page->cache_offset << PAGE_SHIFT));
837 	}
838 }
839 
840 
841 /*!	Waits until one or more of the given events have happened for the given
842 	page, which must belong to this cache.
843 	The cache must be locked. It will be unlocked by the method. \a relock
844 	specifies whether the method shall re-lock the cache before returning.
845 	\param page The page for which to wait.
846 	\param events The mask of events the caller is interested in.
847 	\param relock If \c true, the cache will be locked when returning,
848 		otherwise it won't be locked.
849 */
850 void
851 VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
852 {
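	// The waiter lives on this thread's stack; _NotifyPageEvents() unlinks it
	// from fPageEventWaiters before unblocking us, so it is never accessed
	// after this method returns.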
853 	PageEventWaiter waiter;
854 	waiter.thread = thread_get_current_thread();
855 	waiter.next = fPageEventWaiters;
856 	waiter.page = page;
857 	waiter.events = events;
858 
859 	fPageEventWaiters = &waiter;
860 
861 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER,
862 		"cache page events");
863 
864 	Unlock();
865 	thread_block();
866 
867 	if (relock)
868 		Lock();
869 }
870 
871 
872 /*!	Makes this cache the source of the \a consumer cache,
873 	and adds the \a consumer to its list.
874 	This also grabs a reference to the source cache.
875 	Assumes you have both the cache's and the consumer's lock held.
876 */
877 void
878 VMCache::AddConsumer(VMCache* consumer)
879 {
880 	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
881 	AssertLocked();
882 	consumer->AssertLocked();
883 
884 	T(AddConsumer(this, consumer));
885 
886 	consumer->source = this;
887 	list_add_item(&consumers, consumer);
888 
889 	AcquireRefLocked();
890 	AcquireStoreRef();
891 }
892 
893 
894 /*!	Adds the \a area to this cache.
895 	Assumes you have the cache locked.
896 */
897 status_t
898 VMCache::InsertAreaLocked(VMArea* area)
899 {
900 	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
901 	AssertLocked();
902 
903 	T(InsertArea(this, area));
904 
905 	area->cache_next = areas;
906 	if (area->cache_next)
907 		area->cache_next->cache_prev = area;
908 	area->cache_prev = NULL;
909 	areas = area;
910 
911 	AcquireStoreRef();
912 
913 	return B_OK;
914 }
915 
916 
917 status_t
918 VMCache::RemoveArea(VMArea* area)
919 {
920 	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));
921 
922 	T(RemoveArea(this, area));
923 
924 	// We release the store reference first, since otherwise we would reverse
925 	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
926 	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
927 	// Also cf. _RemoveConsumer().
928 	ReleaseStoreRef();
929 
930 	AutoLocker<VMCache> locker(this);
931 
932 	if (area->cache_prev)
933 		area->cache_prev->cache_next = area->cache_next;
934 	if (area->cache_next)
935 		area->cache_next->cache_prev = area->cache_prev;
936 	if (areas == area)
937 		areas = area->cache_next;
938 
939 	return B_OK;
940 }
941 
942 
943 /*!	Transfers the areas from \a fromCache to this cache. This cache must not
944 	have areas yet. Both caches must be locked.
945 */
946 void
947 VMCache::TransferAreas(VMCache* fromCache)
948 {
949 	AssertLocked();
950 	fromCache->AssertLocked();
951 	ASSERT(areas == NULL);
952 
953 	areas = fromCache->areas;
954 	fromCache->areas = NULL;
955 
956 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
957 		area->cache = this;
958 		AcquireRefLocked();
959 		fromCache->ReleaseRefLocked();
960 
961 		T(RemoveArea(fromCache, area));
962 		T(InsertArea(this, area));
963 	}
964 }
965 
966 
967 uint32
968 VMCache::CountWritableAreas(VMArea* ignoreArea) const
969 {
970 	uint32 count = 0;
971 
972 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
973 		if (area != ignoreArea
974 			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
975 			count++;
976 		}
977 	}
978 
979 	return count;
980 }
981 
982 
983 status_t
984 VMCache::WriteModified()
985 {
986 	TRACE(("VMCache::WriteModified(cache = %p)\n", this));
987 
988 	if (temporary)
989 		return B_OK;
990 
991 	Lock();
992 	status_t status = vm_page_write_modified_pages(this);
993 	Unlock();
994 
995 	return status;
996 }
997 
998 
999 /*!	Commits the memory to the store if the \a commitment is larger than
1000 	what's committed already.
1001 	Assumes you have the cache's lock held.
1002 */
1003 status_t
1004 VMCache::SetMinimalCommitment(off_t commitment)
1005 {
1006 	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %Ld)\n",
1007 		this, commitment));
1008 	AssertLocked();
1009 
1010 	T(SetMinimalCommitment(this, commitment));
1011 
1012 	status_t status = B_OK;
1013 
1014 	// If we don't have enough committed space to cover through to the new end
1015 	// of the area...
1016 	if (committed_size < commitment) {
1017 		// ToDo: should we check if the cache's virtual size is large
1018 		//	enough for a commitment of that size?
1019 
1020 		// try to commit more memory
1021 		status = Commit(commitment);
1022 	}
1023 
1024 	return status;
1025 }
1026 
1027 
1028 /*!	This function updates the size field of the cache.
1029 	If the cache shrinks, all pages that no longer fall within the new size
1030 	are freed. The cache lock must be held when you call it.
1031 	Since the removed pages no longer belong to the cache, they are not
1032 	written back before being removed.
1033 
1034 	Note that this function may temporarily release the cache lock if it has
1035 	to wait for busy pages.
1036 */
1037 status_t
1038 VMCache::Resize(off_t newSize)
1039 {
1040 	TRACE(("VMCache::Resize(cache %p, newSize %Ld) old size %Ld\n",
1041 		this, newSize, this->virtual_end));
1042 	this->AssertLocked();
1043 
1044 	T(Resize(this, newSize));
1045 
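	// Adjust the commitment for the new size first; if that fails, the cache
	// is left unchanged.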
1046 	status_t status = Commit(newSize - virtual_base);
1047 	if (status != B_OK)
1048 		return status;
1049 
1050 	uint32 oldPageCount = (uint32)((virtual_end + B_PAGE_SIZE - 1)
1051 		>> PAGE_SHIFT);
1052 	uint32 newPageCount = (uint32)((newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT);
1053 
1054 	if (newPageCount < oldPageCount) {
1055 		// we need to remove all pages in the cache outside of the new virtual
1056 		// size
1057 		for (VMCachePagesTree::Iterator it
1058 					= pages.GetIterator(newPageCount, true, true);
1059 				vm_page* page = it.Next();) {
1060 			if (page->state == PAGE_STATE_BUSY) {
1061 				if (page->busy_writing) {
1062 					// We cannot wait for the page to become available
1063 					// as we might cause a deadlock this way
1064 					page->busy_writing = false;
1065 						// this will notify the writer to free the page
1066 				} else {
1067 					// wait for page to become unbusy
1068 					WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1069 
1070 					// restart from the start of the list
1071 					it = pages.GetIterator(newPageCount, true, true);
1072 				}
1073 				continue;
1074 			}
1075 
1076 			// remove the page and put it into the free queue
1077 			vm_remove_all_page_mappings(page, NULL);
1078 			ASSERT(page->wired_count == 0);
1079 				// TODO: Find a real solution! Unmapping is probably fine, but
1080 				// we have no way of unmapping wired pages here.
1081 			RemovePage(page);
1082 			vm_page_free(this, page);
1083 				// Note: When iterating through an IteratableSplayTree
1084 				// removing the current node is safe.
1085 		}
1086 	}
1087 
1088 	virtual_end = newSize;
1089 	return B_OK;
1090 }
1091 
1092 
1093 /*!	You have to call this function with the VMCache lock held. */
1094 status_t
1095 VMCache::FlushAndRemoveAllPages()
1096 {
1097 	ASSERT_LOCKED_MUTEX(&fLock);
1098 
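	// Each pass first writes back the modified pages and then removes all
	// clean, unmapped pages; pages that were modified again in the meantime
	// are picked up by the next pass.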
1099 	while (page_count > 0) {
1100 		// write back modified pages
1101 		status_t status = vm_page_write_modified_pages(this);
1102 		if (status != B_OK)
1103 			return status;
1104 
1105 		// remove pages
1106 		for (VMCachePagesTree::Iterator it = pages.GetIterator();
1107 				vm_page* page = it.Next();) {
1108 			if (page->state == PAGE_STATE_BUSY) {
1109 				// wait for page to become unbusy
1110 				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
1111 
1112 				// restart from the start of the list
1113 				it = pages.GetIterator();
1114 				continue;
1115 			}
1116 
1117 			// skip modified pages -- they will be written back in the next
1118 			// iteration
1119 			if (page->state == PAGE_STATE_MODIFIED)
1120 				continue;
1121 
1122 			// We can't remove mapped pages.
1123 			if (page->wired_count > 0 || !page->mappings.IsEmpty())
1124 				return B_BUSY;
1125 
1126 			RemovePage(page);
1127 			vm_page_free(this, page);
1128 				// Note: When iterating through an IteratableSplayTree
1129 				// removing the current node is safe.
1130 		}
1131 	}
1132 
1133 	return B_OK;
1134 }
1135 
1136 
1137 status_t
1138 VMCache::Commit(off_t size)
1139 {
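	// The generic cache has no backing store to commit against; just remember
	// the new commitment.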
1140 	committed_size = size;
1141 	return B_OK;
1142 }
1143 
1144 
1145 bool
1146 VMCache::HasPage(off_t offset)
1147 {
1148 	return offset >= virtual_base && offset <= virtual_end;
1149 }
1150 
1151 
1152 status_t
1153 VMCache::Read(off_t offset, const iovec *vecs, size_t count, uint32 flags,
1154 	size_t *_numBytes)
1155 {
1156 	return B_ERROR;
1157 }
1158 
1159 
1160 status_t
1161 VMCache::Write(off_t offset, const iovec *vecs, size_t count, uint32 flags,
1162 	size_t *_numBytes)
1163 {
1164 	return B_ERROR;
1165 }
1166 
1167 
1168 status_t
1169 VMCache::WriteAsync(off_t offset, const iovec* vecs, size_t count,
1170 	size_t numBytes, uint32 flags, AsyncIOCallback* callback)
1171 {
1172 	// Not supported, fall back to the synchronous hook.
1173 	size_t transferred = numBytes;
1174 	status_t error = Write(offset, vecs, count, flags, &transferred);
1175 
1176 	if (callback != NULL)
1177 		callback->IOFinished(error, transferred != numBytes, transferred);
1178 
1179 	return error;
1180 }
1181 
1182 
1183 /*!	\brief Returns whether the cache can write the page at the given offset.
1184 
1185 	The cache must be locked when this function is invoked.
1186 
1187 	@param offset The page offset.
1188 	@return \c true, if the page can be written, \c false otherwise.
1189 */
1190 bool
1191 VMCache::CanWritePage(off_t offset)
1192 {
1193 	return false;
1194 }
1195 
1196 
1197 status_t
1198 VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
1199 {
1200 	return B_BAD_ADDRESS;
1201 }
1202 
1203 
1204 void
1205 VMCache::Merge(VMCache* source)
1206 {
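	// Pages that already exist in this (consumer) cache shadow their
	// counterparts in the source; those remain in the source and are freed
	// when the source cache is deleted.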
1207 	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
1208 			vm_page* page = it.Next();) {
1209 		// Note: Removing the current node while iterating through an
1210 		// IteratableSplayTree is safe.
1211 		vm_page* consumerPage = LookupPage(
1212 			(off_t)page->cache_offset << PAGE_SHIFT);
1213 		if (consumerPage == NULL) {
1214 			// the page is not yet in the consumer cache - move it upwards
1215 			source->RemovePage(page);
1216 			InsertPage(page, (off_t)page->cache_offset << PAGE_SHIFT);
1217 		}
1218 	}
1219 }
1220 
1221 
1222 status_t
1223 VMCache::AcquireUnreferencedStoreRef()
1224 {
1225 	return B_OK;
1226 }
1227 
1228 
1229 void
1230 VMCache::AcquireStoreRef()
1231 {
1232 }
1233 
1234 
1235 void
1236 VMCache::ReleaseStoreRef()
1237 {
1238 }
1239 
1240 
1241 /*!	Wakes up threads waiting for page events.
1242 	\param page The page for which events occurred.
1243 	\param events The mask of events that occurred.
1244 */
1245 void
1246 VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
1247 {
1248 	PageEventWaiter** it = &fPageEventWaiters;
1249 	while (PageEventWaiter* waiter = *it) {
1250 		if (waiter->page == page && (waiter->events & events) != 0) {
1251 			// remove from list and unblock
1252 			*it = waiter->next;
1253 			InterruptsSpinLocker threadsLocker(gThreadSpinlock);
1254 			thread_unblock_locked(waiter->thread, B_OK);
1255 		} else
1256 			it = &waiter->next;
1257 	}
1258 }
1259 
1260 
1261 /*!	Merges the given cache with its only consumer.
1262 	The caller must hold both the cache's and the consumer's lock. The method
1263 	will unlock the consumer lock.
1264 */
1265 void
1266 VMCache::_MergeWithOnlyConsumer()
1267 {
1268 	VMCache* consumer = (VMCache*)list_remove_head_item(&consumers);
1269 
1270 	TRACE(("merge vm cache %p (ref == %ld) with vm cache %p\n",
1271 		this, this->fRefCount, consumer));
1272 
1273 	T(Merge(this, consumer));
1274 
1275 	// merge the cache
1276 	consumer->Merge(this);
1277 
1278 	// The remaining consumer has got a new source.
1279 	if (source != NULL) {
1280 		VMCache* newSource = source;
1281 
1282 		newSource->Lock();
1283 
1284 		list_remove_item(&newSource->consumers, this);
1285 		list_add_item(&newSource->consumers, consumer);
1286 		consumer->source = newSource;
1287 		source = NULL;
1288 
1289 		newSource->Unlock();
1290 	} else
1291 		consumer->source = NULL;
1292 
1293 	// Release the reference the cache's consumer owned. The consumer takes
1294 	// over the cache's ref to its source (if any) instead.
1295 	ReleaseRefLocked();
1296 
1297 	consumer->Unlock();
1298 }
1299 
1300 
1301 /*!	Removes the \a consumer from this cache.
1302 	It will also release the reference to the cache owned by the consumer.
1303 	Assumes you have the consumer's cache lock held. This cache must not be
1304 	locked.
1305 */
1306 void
1307 VMCache::_RemoveConsumer(VMCache* consumer)
1308 {
1309 	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
1310 	consumer->AssertLocked();
1311 
1312 	T(RemoveConsumer(this, consumer));
1313 
1314 	// Remove the store ref before locking the cache. Otherwise we'd call into
1315 	// the VFS while holding the cache lock, which would reverse the usual
1316 	// locking order.
1317 	ReleaseStoreRef();
1318 
1319 	// remove the consumer from the cache, but keep its reference until later
1320 	Lock();
1321 	list_remove_item(&consumers, consumer);
1322 	consumer->source = NULL;
1323 
1324 	ReleaseRefAndUnlock();
1325 }
1326 
1327 
1328 // #pragma mark - VMCacheFactory
1329 	// TODO: Move to own source file!
1330 
1331 
1332 #include <heap.h>
1333 
1334 #include "VMAnonymousCache.h"
1335 #include "VMAnonymousNoSwapCache.h"
1336 #include "VMDeviceCache.h"
1337 #include "VMNullCache.h"
1338 #include "../cache/vnode_store.h"
1339 
1340 
1341 /*static*/ status_t
1342 VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
1343 	int32 numPrecommittedPages, int32 numGuardPages, bool swappable)
1344 {
1345 #if ENABLE_SWAP_SUPPORT
1346 	if (swappable) {
1347 		VMAnonymousCache* cache = new(nogrow) VMAnonymousCache;
1348 		if (cache == NULL)
1349 			return B_NO_MEMORY;
1350 
1351 		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1352 			numGuardPages);
1353 		if (error != B_OK) {
1354 			cache->Delete();
1355 			return error;
1356 		}
1357 
1358 		T(Create(cache));
1359 
1360 		_cache = cache;
1361 		return B_OK;
1362 	}
1363 #endif
1364 
1365 	VMAnonymousNoSwapCache* cache = new(nogrow) VMAnonymousNoSwapCache;
1366 	if (cache == NULL)
1367 		return B_NO_MEMORY;
1368 
1369 	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
1370 		numGuardPages);
1371 	if (error != B_OK) {
1372 		cache->Delete();
1373 		return error;
1374 	}
1375 
1376 	T(Create(cache));
1377 
1378 	_cache = cache;
1379 	return B_OK;
1380 }
1381 
1382 
1383 /*static*/ status_t
1384 VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
1385 {
1386 	VMVnodeCache* cache = new(nogrow) VMVnodeCache;
1387 	if (cache == NULL)
1388 		return B_NO_MEMORY;
1389 
1390 	status_t error = cache->Init(vnode);
1391 	if (error != B_OK) {
1392 		cache->Delete();
1393 		return error;
1394 	}
1395 
1396 	T(Create(cache));
1397 
1398 	_cache = cache;
1399 	return B_OK;
1400 }
1401 
1402 
1403 /*static*/ status_t
1404 VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
1405 {
1406 	VMDeviceCache* cache = new(nogrow) VMDeviceCache;
1407 	if (cache == NULL)
1408 		return B_NO_MEMORY;
1409 
1410 	status_t error = cache->Init(baseAddress);
1411 	if (error != B_OK) {
1412 		cache->Delete();
1413 		return error;
1414 	}
1415 
1416 	T(Create(cache));
1417 
1418 	_cache = cache;
1419 	return B_OK;
1420 }
1421 
1422 
1423 /*static*/ status_t
1424 VMCacheFactory::CreateNullCache(VMCache*& _cache)
1425 {
1426 	VMNullCache* cache = new(nogrow) VMNullCache;
1427 	if (cache == NULL)
1428 		return B_NO_MEMORY;
1429 
1430 	status_t error = cache->Init();
1431 	if (error != B_OK) {
1432 		cache->Delete();
1433 		return error;
1434 	}
1435 
1436 	T(Create(cache));
1437 
1438 	_cache = cache;
1439 	return B_OK;
1440 }
1441