xref: /haiku/src/system/kernel/vm/VMTranslationMap.cpp (revision 97901ec593ec4dd50ac115c1c35a6d72f6e489a5)
/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <vm/VMTranslationMap.h>

#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMArea.h>


// #pragma mark - VMTranslationMap


VMTranslationMap::VMTranslationMap()
	:
	fMapCount(0)
{
	recursive_lock_init(&fLock, "translation map");
}


VMTranslationMap::~VMTranslationMap()
{
	recursive_lock_destroy(&fLock);
}
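

/*	Illustration only, not part of the original file: the base class just owns
	the recursive lock; the architecture-specific subclasses typically
	implement Lock()/Unlock() on top of fLock. A hypothetical subclass sketch
	(the class name is made up) could look like this:

		bool
		MyArchVMTranslationMap::Lock()
		{
			recursive_lock_lock(&fLock);
			return true;
		}

		void
		MyArchVMTranslationMap::Unlock()
		{
			recursive_lock_unlock(&fLock);
		}
*/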


/*!	Unmaps a range of pages of an area.

	The default implementation just iterates over all virtual pages of the
	range and calls UnmapPage(). This is obviously not particularly efficient.
*/
void
VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	ASSERT(base % B_PAGE_SIZE == 0);
	ASSERT(size % B_PAGE_SIZE == 0);

	addr_t address = base;
	addr_t end = address + size;
#if DEBUG_PAGE_ACCESS
	// With page access debugging, look up the page behind each present
	// mapping and bracket the unmap with access markers, so that concurrent
	// accessors of the page are detected.
	for (; address != end; address += B_PAGE_SIZE) {
		addr_t physicalAddress;
		uint32 flags;
		if (Query(address, &physicalAddress, &flags) == B_OK
			&& (flags & PAGE_PRESENT) != 0) {
			vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
			if (page != NULL) {
				DEBUG_PAGE_ACCESS_START(page);
				UnmapPage(area, address, updatePageQueue);
				DEBUG_PAGE_ACCESS_END(page);
			} else
				UnmapPage(area, address, updatePageQueue);
		}
	}
#else
	for (; address != end; address += B_PAGE_SIZE)
		UnmapPage(area, address, updatePageQueue);
#endif
}
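

/*	Caller-side sketch, illustrative rather than taken from this file: a
	page-aligned sub-range of a mapped area could be unmapped roughly like
	this, assuming `area` is a valid, mapped VMArea and the translation map is
	obtained from its address space in the usual way:

		VMTranslationMap* map = area->address_space->TranslationMap();
		map->Lock();
			// base and size must be multiples of B_PAGE_SIZE; passing true
			// lets UnmapPage() move no longer mapped pages to the proper
			// page queue
		map->UnmapPages(area, area->Base(), 4 * B_PAGE_SIZE, true);
		map->Unlock();
*/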


/*!	Unmaps all of an area's pages.
	If \a deletingAddressSpace is \c true, the address space the area belongs to
	is in the process of being destroyed and isn't used by anyone anymore. For
	some architectures this can be used for optimizations (e.g. not unmapping
	pages or at least not needing to invalidate TLB entries).
	If \a ignoreTopCachePageFlags is \c true, the area is in the process of
	being destroyed and its top cache is otherwise unreferenced. I.e. all mapped
	pages that live in the top cache are going to be freed and the page
	accessed and modified flags don't need to be propagated.

	The default implementation just iterates over all virtual pages of the
	area and calls UnmapPage(). This is obviously not particularly efficient.
*/
void
VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	// The default implementation ignores both flags and simply unmaps the
	// area page by page, updating the page queues.
	addr_t address = area->Base();
	addr_t end = address + area->Size();
#if DEBUG_PAGE_ACCESS
	for (; address != end; address += B_PAGE_SIZE) {
		addr_t physicalAddress;
		uint32 flags;
		if (Query(address, &physicalAddress, &flags) == B_OK
			&& (flags & PAGE_PRESENT) != 0) {
			vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
			if (page != NULL) {
				DEBUG_PAGE_ACCESS_START(page);
				UnmapPage(area, address, true);
				DEBUG_PAGE_ACCESS_END(page);
			} else
				UnmapPage(area, address, true);
		}
	}
#else
	for (; address != end; address += B_PAGE_SIZE)
		UnmapPage(area, address, true);
#endif
}
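

/*	Illustrative sketch of how the two flags are typically combined, not taken
	from this file: when a single area is deleted from a live address space,
	neither optimization applies; when a whole address space is torn down and
	the area's top cache is about to be discarded anyway, both may be enabled.
	Assuming `map` has been obtained and locked as in the sketch above:

		// deleting just this area: full TLB handling and flag propagation
		map->UnmapArea(area, false, false);

		// tearing down the address space, top cache otherwise unreferenced
		map->UnmapArea(area, true, true);
*/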


// #pragma mark - VMPhysicalPageMapper


VMPhysicalPageMapper::VMPhysicalPageMapper()
{
}


VMPhysicalPageMapper::~VMPhysicalPageMapper()
{
}