/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <vm/VMTranslationMap.h>

#include <slab/Slab.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>


// #pragma mark - VMTranslationMap


VMTranslationMap::VMTranslationMap()
	:
	fMapCount(0)
{
	recursive_lock_init(&fLock, "translation map");
}


VMTranslationMap::~VMTranslationMap()
{
	recursive_lock_destroy(&fLock);
}


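/*!	Marks a range of pages as present/non-present for debugging purposes
	(cf. the \a markPresent parameter). The default implementation doesn't
	support the feature and returns \c B_NOT_SUPPORTED; architecture specific
	subclasses may override it.
*/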
status_t
VMTranslationMap::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	return B_NOT_SUPPORTED;
}


/*!	Unmaps a range of pages of an area.

	The default implementation just iterates over all virtual pages of the
	range and calls UnmapPage(). This is obviously not particularly efficient.
*/
void
VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	ASSERT(base % B_PAGE_SIZE == 0);
	ASSERT(size % B_PAGE_SIZE == 0);

	addr_t address = base;
	addr_t end = address + size;
#if DEBUG_PAGE_ACCESS
	for (; address != end; address += B_PAGE_SIZE) {
		phys_addr_t physicalAddress;
		uint32 flags;
		if (Query(address, &physicalAddress, &flags) == B_OK
			&& (flags & PAGE_PRESENT) != 0) {
			vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
			if (page != NULL) {
				DEBUG_PAGE_ACCESS_START(page);
				UnmapPage(area, address, updatePageQueue);
				DEBUG_PAGE_ACCESS_END(page);
			} else
				UnmapPage(area, address, updatePageQueue);
		}
	}
#else
	for (; address != end; address += B_PAGE_SIZE)
		UnmapPage(area, address, updatePageQueue);
#endif
}


/*!	Unmaps all of an area's pages.
	If \a deletingAddressSpace is \c true, the address space the area belongs
	to is in the process of being destroyed and isn't used by anyone anymore.
	For some architectures this can be used for optimizations (e.g. not
	unmapping pages or at least not needing to invalidate TLB entries).
	If \a ignoreTopCachePageFlags is \c true, the area is in the process of
	being destroyed and its top cache is otherwise unreferenced. I.e. all
	mapped pages that live in the top cache are going to be freed and the
	page accessed and modified flags don't need to be propagated.

	The default implementation just iterates over all virtual pages of the
	area and calls UnmapPage(). This is obviously not particularly efficient.
*/
void
VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	addr_t address = area->Base();
	addr_t end = address + area->Size();
#if DEBUG_PAGE_ACCESS
	for (; address != end; address += B_PAGE_SIZE) {
		phys_addr_t physicalAddress;
		uint32 flags;
		if (Query(address, &physicalAddress, &flags) == B_OK
			&& (flags & PAGE_PRESENT) != 0) {
			vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
			if (page != NULL) {
				DEBUG_PAGE_ACCESS_START(page);
				UnmapPage(area, address, true);
				DEBUG_PAGE_ACCESS_END(page);
			} else
				UnmapPage(area, address, true);
		}
	}
#else
	for (; address != end; address += B_PAGE_SIZE)
		UnmapPage(area, address, true);
#endif
}


/*!	Prints mapping information for a virtual address.
	The method navigates the paging structures and prints all relevant
	information on the way.
	It is invoked from a KDL command. The default implementation is a no-op.
	\param virtualAddress The virtual address to look up.
*/
void
VMTranslationMap::DebugPrintMappingInfo(addr_t virtualAddress)
{
}


/*!	Finds the virtual addresses mapped to the given physical address.
	For each virtual address the method finds, it invokes the callback
	object's HandleVirtualAddress() method. When that method returns \c true,
	the search is terminated and \c true is returned.
	The method is invoked from a KDL command. The default implementation is a
	no-op.
	\param physicalAddress The physical address to search for.
	\param callback Callback object to be notified of each found virtual
		address.
	\return \c true if the callback's HandleVirtualAddress() returned \c true
		for a found virtual address, \c false otherwise.
*/
bool
VMTranslationMap::DebugGetReverseMappingInfo(phys_addr_t physicalAddress,
	ReverseMappingInfoCallback& callback)
{
	return false;
}
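

// A minimal sketch of how a KDL command might drive the reverse mapping
// lookup above. The callback subclass and its kprintf() output are
// illustrative only; HandleVirtualAddress() is the hook declared for
// ReverseMappingInfoCallback in <vm/VMTranslationMap.h>.
//
//	struct PrintAddressCallback
//			: VMTranslationMap::ReverseMappingInfoCallback {
//		virtual bool HandleVirtualAddress(addr_t virtualAddress)
//		{
//			kprintf("  %#" B_PRIxADDR "\n", virtualAddress);
//			return false;
//				// keep searching; returning true would stop the search
//		}
//	};
//
//	PrintAddressCallback callback;
//	map->DebugGetReverseMappingInfo(physicalAddress, callback);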


/*!	Called by UnmapPage() after performing the architecture specific part.
	Looks up the page, updates its flags, removes the page-area mapping, and
	requeues the page, if necessary. The translation map's lock must be held
	when this is called; the method releases it in all paths.
*/
void
VMTranslationMap::PageUnmapped(VMArea* area, page_num_t pageNumber,
	bool accessed, bool modified, bool updatePageQueue)
{
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		recursive_lock_unlock(&fLock);
		return;
	}

	// get the page
	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR
		", accessed: %d, modified: %d", pageNumber, accessed, modified);

	// transfer the accessed/dirty flags to the page
	page->accessed |= accessed;
	page->modified |= modified;

	// remove the mapping object/decrement the wired_count of the page
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR ", accessed: %d, modified: %d", page,
			pageNumber, accessed, modified);
	} else
		page->DecrementWiredCount();

	recursive_lock_unlock(&fLock);

	if (!page->IsMapped()) {
		atomic_add(&gMappedPagesCount, -1);

		if (updatePageQueue) {
			if (page->Cache()->temporary)
				vm_page_set_state(page, PAGE_STATE_INACTIVE);
			else if (page->modified)
				vm_page_set_state(page, PAGE_STATE_MODIFIED);
			else
				vm_page_set_state(page, PAGE_STATE_CACHED);
		}
	}

	if (mapping != NULL) {
		bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
		vm_free_page_mapping(pageNumber, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY
				| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0));
	}
}
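

// A rough sketch of how an architecture specific UnmapPage() implementation
// typically hands over to PageUnmapped(). The page table walk and the helper
// names (page_table_entry, LookupEntry(), ClearEntry(), InvalidatePage()) are
// hypothetical and vary per architecture; only fLock, fMapCount, and
// PageUnmapped() belong to this class. The same hand-over pattern applies to
// ClearAccessedAndModified() and UnaccessedPageUnmapped() below.
//
//	status_t
//	ArchVMTranslationMap::UnmapPage(VMArea* area, addr_t address,
//		bool updatePageQueue)
//	{
//		RecursiveLocker locker(fLock);
//
//		page_table_entry* entry = LookupEntry(address);
//		if (entry == NULL || !entry->present)
//			return B_ENTRY_NOT_FOUND;
//
//		// atomically clear the entry, remembering the flags it carried
//		page_table_entry oldEntry = ClearEntry(entry);
//		fMapCount--;
//		InvalidatePage(address);
//
//		// PageUnmapped() releases the map's lock, so detach the locker
//		locker.Detach();
//		PageUnmapped(area, oldEntry.physicalAddress / B_PAGE_SIZE,
//			oldEntry.accessed, oldEntry.modified, updatePageQueue);
//		return B_OK;
//	}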


/*!	Called by ClearAccessedAndModified() after performing the architecture
	specific part.
	Looks up the page and removes the page-area mapping. Like PageUnmapped(),
	this expects the translation map's lock to be held and releases it.
*/
void
VMTranslationMap::UnaccessedPageUnmapped(VMArea* area, page_num_t pageNumber)
{
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		recursive_lock_unlock(&fLock);
		return;
	}

	// get the page
	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR, pageNumber);

	// remove the mapping object/decrement the wired_count of the page
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR, page, pageNumber);
	} else
		page->DecrementWiredCount();

	recursive_lock_unlock(&fLock);

	if (!page->IsMapped())
		atomic_add(&gMappedPagesCount, -1);

	if (mapping != NULL) {
		vm_free_page_mapping(pageNumber, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
			// Since this is called by the page daemon, we never want to lock
			// the kernel address space.
	}
}


// #pragma mark - ReverseMappingInfoCallback


VMTranslationMap::ReverseMappingInfoCallback::~ReverseMappingInfoCallback()
{
}


// #pragma mark - VMPhysicalPageMapper


VMPhysicalPageMapper::VMPhysicalPageMapper()
{
}


VMPhysicalPageMapper::~VMPhysicalPageMapper()
{
}