/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <vm/VMTranslationMap.h>

#include <slab/Slab.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>


// #pragma mark - VMTranslationMap


VMTranslationMap::VMTranslationMap()
	:
	fMapCount(0)
{
	recursive_lock_init(&fLock, "translation map");
}


VMTranslationMap::~VMTranslationMap()
{
	recursive_lock_destroy(&fLock);
}


status_t
VMTranslationMap::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	return B_NOT_SUPPORTED;
}


/*!	Unmaps a range of pages of an area.

	The default implementation just iterates over all virtual pages of the
	range and calls UnmapPage(). This is obviously not particularly efficient.
*/
void
VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	ASSERT(base % B_PAGE_SIZE == 0);
	ASSERT(size % B_PAGE_SIZE == 0);

	addr_t address = base;
	addr_t end = address + size;
#if DEBUG_PAGE_ACCESS
	for (; address != end; address += B_PAGE_SIZE) {
		phys_addr_t physicalAddress;
		uint32 flags;
		if (Query(address, &physicalAddress, &flags) == B_OK
			&& (flags & PAGE_PRESENT) != 0) {
			vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
			if (page != NULL) {
				DEBUG_PAGE_ACCESS_START(page);
				UnmapPage(area, address, updatePageQueue);
				DEBUG_PAGE_ACCESS_END(page);
			} else
				UnmapPage(area, address, updatePageQueue);
		}
	}
#else
	for (; address != end; address += B_PAGE_SIZE)
		UnmapPage(area, address, updatePageQueue);
#endif
}
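

// Example (illustrative sketch, not code from this file): a caller unmapping
// the first 16 pages of an area. The Lock()/Unlock() bracketing assumed here
// follows the calling convention of the kernel's unmap helpers:
//
//	VMTranslationMap* map = area->address_space->TranslationMap();
//	map->Lock();
//	map->UnmapPages(area, area->Base(), 16 * B_PAGE_SIZE, true);
//	map->Unlock();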


/*!	Unmaps all of an area's pages.
	If \a deletingAddressSpace is \c true, the address space the area belongs
	to is in the process of being destroyed and isn't used by anyone anymore.
	For some architectures this can be used for optimizations (e.g. not
	unmapping pages or at least not needing to invalidate TLB entries).
	If \a ignoreTopCachePageFlags is \c true, the area is in the process of
	being destroyed and its top cache is otherwise unreferenced. I.e. all
	mapped pages that live in the top cache are going to be freed and the page
	accessed and modified flags don't need to be propagated.

	The default implementation just iterates over all virtual pages of the
	area and calls UnmapPage(). This is obviously not particularly efficient.
*/
void
VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	addr_t address = area->Base();
	addr_t end = address + area->Size();
#if DEBUG_PAGE_ACCESS
	for (; address != end; address += B_PAGE_SIZE) {
		phys_addr_t physicalAddress;
		uint32 flags;
		if (Query(address, &physicalAddress, &flags) == B_OK
			&& (flags & PAGE_PRESENT) != 0) {
			vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
			if (page != NULL) {
				DEBUG_PAGE_ACCESS_START(page);
				UnmapPage(area, address, true);
				DEBUG_PAGE_ACCESS_END(page);
			} else
				UnmapPage(area, address, true);
		}
	}
#else
	for (; address != end; address += B_PAGE_SIZE)
		UnmapPage(area, address, true);
#endif
}
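

// Example (illustrative sketch): tearing down an area while its address
// space is being deleted. With both flags true an architecture-specific
// implementation may skip TLB invalidation and ignore the top cache's page
// flags, as described above; locking follows the same pattern as before:
//
//	map->UnmapArea(area, true, true);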


/*!	Print mapping information for a virtual address.
	The method navigates the paging structures and prints all relevant
	information on the way.
	The method is invoked from a KDL command. The default implementation is a
	no-op.
	\param virtualAddress The virtual address to look up.
*/
void
VMTranslationMap::DebugPrintMappingInfo(addr_t virtualAddress)
{
#if KDEBUG
	kprintf("VMTranslationMap::DebugPrintMappingInfo not implemented\n");
#endif
}


/*!	Find virtual addresses mapped to the given physical address.
	For each virtual address the method finds, it invokes the callback
	object's HandleVirtualAddress() method. When that method returns \c true,
	the search is terminated and \c true is returned.
	The method is invoked from a KDL command. The default implementation is a
	no-op.
	\param physicalAddress The physical address to search for.
	\param callback Callback object to be notified of each found virtual
		address.
	\return \c true if the callback's HandleVirtualAddress() returned \c true
		for one of the found virtual addresses, \c false otherwise.
*/
bool
VMTranslationMap::DebugGetReverseMappingInfo(phys_addr_t physicalAddress,
	ReverseMappingInfoCallback& callback)
{
#if KDEBUG
	kprintf("VMTranslationMap::DebugGetReverseMappingInfo not implemented\n");
#endif
	return false;
}
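

// Example (illustrative sketch): a ReverseMappingInfoCallback subclass as a
// KDL command might use it. HandleVirtualAddress() is assumed to be the hook
// declared in <vm/VMTranslationMap.h>; returning true stops the search at
// the first hit:
//
//	struct PrintFirstMappingCallback
//			: VMTranslationMap::ReverseMappingInfoCallback {
//		virtual bool HandleVirtualAddress(addr_t virtualAddress)
//		{
//			kprintf("mapped at %#" B_PRIxADDR "\n", virtualAddress);
//			return true;
//		}
//	};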


/*!	Called by UnmapPage() after performing the architecture-specific part.
	Looks up the page, updates its flags, removes the page-area mapping, and
	requeues the page, if necessary.
	Expects the translation map's lock to be held by the caller and releases
	it before returning.
*/
void
VMTranslationMap::PageUnmapped(VMArea* area, page_num_t pageNumber,
	bool accessed, bool modified, bool updatePageQueue)
{
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		recursive_lock_unlock(&fLock);
		return;
	}

	// get the page
	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR
		", accessed: %d, modified: %d", pageNumber, accessed, modified);

	// transfer the accessed/dirty flags to the page
	page->accessed |= accessed;
	page->modified |= modified;

	// remove the mapping object/decrement the wired_count of the page
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR ", accessed: %d, modified: %d", page,
			pageNumber, accessed, modified);
	} else
		page->DecrementWiredCount();

	recursive_lock_unlock(&fLock);

	if (!page->IsMapped()) {
		atomic_add(&gMappedPagesCount, -1);

		if (updatePageQueue) {
			if (page->Cache()->temporary)
				vm_page_set_state(page, PAGE_STATE_INACTIVE);
			else if (page->modified)
				vm_page_set_state(page, PAGE_STATE_MODIFIED);
			else
				vm_page_set_state(page, PAGE_STATE_CACHED);
		}
	}

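	// Finally free the mapping object, now that fLock has been released.
	// CACHE_DONT_WAIT_FOR_MEMORY keeps the slab allocator from blocking for
	// memory here, and for kernel areas CACHE_DONT_LOCK_KERNEL_SPACE keeps
	// it from touching the kernel address space, which the caller may have
	// locked.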
	if (mapping != NULL) {
		bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
		vm_free_page_mapping(pageNumber, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY
				| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0));
	}
}
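

// Sketch (illustrative only, not any architecture's actual implementation):
// an architecture-specific UnmapPage() is expected to clear the hardware
// mapping with fLock held, collect the accessed/modified bits, and leave the
// unlocking to PageUnmapped():
//
//	void
//	ArchVMTranslationMap::UnmapPage(VMArea* area, addr_t address,
//		bool updatePageQueue)
//	{
//		RecursiveLocker locker(fLock);
//		// ... clear the page table entry and fetch its accessed/modified
//		// bits into `accessed`/`modified` ...
//		locker.Detach();
//			// PageUnmapped() unlocks fLock for us
//		PageUnmapped(area, physicalAddress / B_PAGE_SIZE, accessed, modified,
//			updatePageQueue);
//	}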


/*!	Called by ClearAccessedAndModified() after performing the
	architecture-specific part.
	Looks up the page and removes the page-area mapping.
	Expects the translation map's lock to be held by the caller and releases
	it before returning.
*/
void
VMTranslationMap::UnaccessedPageUnmapped(VMArea* area, page_num_t pageNumber)
{
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		recursive_lock_unlock(&fLock);
		return;
	}

	// get the page
	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR, pageNumber);

	// remove the mapping object/decrement the wired_count of the page
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR, page, pageNumber);
	} else
		page->DecrementWiredCount();

	recursive_lock_unlock(&fLock);

	if (!page->IsMapped())
		atomic_add(&gMappedPagesCount, -1);

	if (mapping != NULL) {
		vm_free_page_mapping(pageNumber, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
			// Since this is called by the page daemon, we never want to lock
			// the kernel address space.
	}
}


// #pragma mark - ReverseMappingInfoCallback


VMTranslationMap::ReverseMappingInfoCallback::~ReverseMappingInfoCallback()
{
}


// #pragma mark - VMPhysicalPageMapper


VMPhysicalPageMapper::VMPhysicalPageMapper()
{
}


VMPhysicalPageMapper::~VMPhysicalPageMapper()
{
}
288