/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <vm/VMTranslationMap.h>

#include <slab/Slab.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>


// #pragma mark - VMTranslationMap


VMTranslationMap::VMTranslationMap()
	:
	fMapCount(0)
{
	recursive_lock_init(&fLock, "translation map");
}


VMTranslationMap::~VMTranslationMap()
{
	recursive_lock_destroy(&fLock);
}


status_t
VMTranslationMap::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	return B_NOT_SUPPORTED;
}


/*!	Unmaps a range of pages of an area.

	The default implementation just iterates over all virtual pages of the
	range and calls UnmapPage(). This is obviously not particularly efficient.
*/
void
VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	ASSERT(base % B_PAGE_SIZE == 0);
	ASSERT(size % B_PAGE_SIZE == 0);

	addr_t address = base;
	addr_t end = address + size;
#if DEBUG_PAGE_ACCESS
	for (; address != end; address += B_PAGE_SIZE) {
		phys_addr_t physicalAddress;
		uint32 flags;
		if (Query(address, &physicalAddress, &flags) == B_OK
			&& (flags & PAGE_PRESENT) != 0) {
			vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
			if (page != NULL) {
				DEBUG_PAGE_ACCESS_START(page);
				UnmapPage(area, address, updatePageQueue);
				DEBUG_PAGE_ACCESS_END(page);
			} else
				UnmapPage(area, address, updatePageQueue);
		}
	}
#else
	for (; address != end; address += B_PAGE_SIZE)
		UnmapPage(area, address, updatePageQueue);
#endif
}
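

/*	Usage sketch (illustrative, not authoritative): callers typically hold
	the translation map's (recursive) lock around the call so the whole range
	is unmapped atomically with respect to other map operations:

		VMTranslationMap* map = area->address_space->TranslationMap();
		map->Lock();
		map->UnmapPages(area, base, size, true);
		map->Unlock();
*/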


/*!	Unmaps all of an area's pages.
	If \a deletingAddressSpace is \c true, the address space the area belongs
	to is in the process of being destroyed and isn't used by anyone anymore.
	For some architectures this can be used for optimizations (e.g. not
	unmapping pages or at least not needing to invalidate TLB entries).
	If \a ignoreTopCachePageFlags is \c true, the area is in the process of
	being destroyed and its top cache is otherwise unreferenced. I.e. all
	mapped pages that live in the top cache are going to be freed, so the
	pages' accessed and modified flags don't need to be propagated.

	The default implementation just iterates over all virtual pages of the
	area and calls UnmapPage(). This is obviously not particularly efficient.
*/
void
VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	addr_t address = area->Base();
	addr_t end = address + area->Size();
#if DEBUG_PAGE_ACCESS
	for (; address != end; address += B_PAGE_SIZE) {
		phys_addr_t physicalAddress;
		uint32 flags;
		if (Query(address, &physicalAddress, &flags) == B_OK
			&& (flags & PAGE_PRESENT) != 0) {
			vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
			if (page != NULL) {
				DEBUG_PAGE_ACCESS_START(page);
				UnmapPage(area, address, true);
				DEBUG_PAGE_ACCESS_END(page);
			} else
				UnmapPage(area, address, true);
		}
	}
#else
	for (; address != end; address += B_PAGE_SIZE)
		UnmapPage(area, address, true);
#endif
}
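

/*	Override sketch (hypothetical architecture backend, for illustration
	only): the \a deletingAddressSpace flag lets a subclass skip per-page
	TLB invalidation, since nobody can use the dying address space anymore:

		void
		MyArchTranslationMap::UnmapArea(VMArea* area,
			bool deletingAddressSpace, bool ignoreTopCachePageFlags)
		{
			if (deletingAddressSpace) {
				// tear down the page tables wholesale, without issuing
				// TLB invalidations for the individual entries
			} else {
				VMTranslationMap::UnmapArea(area, deletingAddressSpace,
					ignoreTopCachePageFlags);
			}
		}
*/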


/*!	Print mapping information for a virtual address.
	The method navigates the paging structures and prints all relevant
	information along the way.
	The method is invoked from a KDL command. The default implementation is a
	no-op.
	\param virtualAddress The virtual address to look up.
*/
void
VMTranslationMap::DebugPrintMappingInfo(addr_t virtualAddress)
{
	kprintf("VMTranslationMap::DebugPrintMappingInfo not implemented\n");
}


/*!	Find virtual addresses mapped to the given physical address.
	For each virtual address the method finds, it invokes the callback
	object's HandleVirtualAddress() method. When that method returns \c true,
	the search is terminated and \c true is returned.
	The method is invoked from a KDL command. The default implementation is a
	no-op.
	\param physicalAddress The physical address to search for.
	\param callback Callback object to be notified of each found virtual
		address.
	\return \c true if the callback's HandleVirtualAddress() returned \c true
		for one of the found virtual addresses, \c false otherwise.
*/
bool
VMTranslationMap::DebugGetReverseMappingInfo(phys_addr_t physicalAddress,
	ReverseMappingInfoCallback& callback)
{
	kprintf("VMTranslationMap::DebugGetReverseMappingInfo not implemented\n");
	return false;
}
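

/*	Callback sketch (hypothetical subclass, for illustration): an
	implementation invokes HandleVirtualAddress() for every match; returning
	\c false continues the search, returning \c true stops it:

		struct PrintAddressCallback
			: VMTranslationMap::ReverseMappingInfoCallback {
			virtual bool HandleVirtualAddress(addr_t virtualAddress)
			{
				kprintf("  mapped at %#" B_PRIxADDR "\n", virtualAddress);
				return false;
					// keep searching for further mappings
			}
		};
*/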


/*!	Called by UnmapPage() after performing the architecture-specific part.
	Looks up the page, updates its flags, removes the page-area mapping, and
	requeues the page, if necessary.
	The method expects the translation map lock to be held and releases it
	before returning.
*/
void
VMTranslationMap::PageUnmapped(VMArea* area, page_num_t pageNumber,
	bool accessed, bool modified, bool updatePageQueue)
{
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		recursive_lock_unlock(&fLock);
		return;
	}

	// get the page
	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR
		", accessed: %d, modified: %d", pageNumber, accessed, modified);

	// transfer the accessed/dirty flags to the page
	page->accessed |= accessed;
	page->modified |= modified;

	// remove the mapping object/decrement the wired_count of the page
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR ", accessed: %d, modified: %d", page,
			pageNumber, accessed, modified);
	} else
		page->DecrementWiredCount();

	recursive_lock_unlock(&fLock);

	if (!page->IsMapped()) {
		atomic_add(&gMappedPagesCount, -1);

		if (updatePageQueue) {
			if (page->Cache()->temporary)
				vm_page_set_state(page, PAGE_STATE_INACTIVE);
			else if (page->modified)
				vm_page_set_state(page, PAGE_STATE_MODIFIED);
			else
				vm_page_set_state(page, PAGE_STATE_CACHED);
		}
	}

	if (mapping != NULL) {
		bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
		vm_free_page_mapping(pageNumber, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY
				| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0));
	}
}
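

/*	Protocol sketch (hypothetical architecture backend): an UnmapPage()
	implementation clears the page table entry with the map lock held, then
	hands the harvested accessed/modified bits to PageUnmapped(), which takes
	over and releases the lock. The helper and flag names below are made up:

		status_t
		MyArchTranslationMap::UnmapPage(VMArea* area, addr_t address,
			bool updatePageQueue)
		{
			RecursiveLocker locker(fLock);

			uint64 oldEntry = ClearPageTableEntry(address);
				// hypothetical helper: atomically clears the entry and
				// returns its previous contents
			if ((oldEntry & kPresentFlag) == 0)
				return B_ENTRY_NOT_FOUND;

			fMapCount--;

			locker.Detach();
				// PageUnmapped() releases the lock for us
			PageUnmapped(area, (oldEntry & kAddressMask) / B_PAGE_SIZE,
				(oldEntry & kAccessedFlag) != 0,
				(oldEntry & kModifiedFlag) != 0, updatePageQueue);
			return B_OK;
		}
*/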


/*!	Called by ClearAccessedAndModified() after performing the
	architecture-specific part.
	Looks up the page and removes the page-area mapping.
	Like PageUnmapped(), the method expects the translation map lock to be
	held and releases it before returning.
*/
void
VMTranslationMap::UnaccessedPageUnmapped(VMArea* area, page_num_t pageNumber)
{
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		recursive_lock_unlock(&fLock);
		return;
	}

	// get the page
	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR, pageNumber);

	// remove the mapping object/decrement the wired_count of the page
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR, page, pageNumber);
	} else
		page->DecrementWiredCount();

	recursive_lock_unlock(&fLock);

	if (!page->IsMapped())
		atomic_add(&gMappedPagesCount, -1);

	if (mapping != NULL) {
		vm_free_page_mapping(pageNumber, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
			// Since this is called by the page daemon, we never want to lock
			// the kernel address space.
	}
}
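

/*	Caller sketch: the page daemon is expected to reach this path via
	ClearAccessedAndModified(). Assuming the declaration in
	VMTranslationMap.h, roughly:

		bool modified;
		if (!map->ClearAccessedAndModified(area, address, true, modified)) {
			// the page was not accessed; since unmapIfUnaccessed was
			// \c true, it has been unmapped and UnaccessedPageUnmapped()
			// has been invoked
		}
*/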


// #pragma mark - ReverseMappingInfoCallback


VMTranslationMap::ReverseMappingInfoCallback::~ReverseMappingInfoCallback()
{
}


// #pragma mark - VMPhysicalPageMapper


VMPhysicalPageMapper::VMPhysicalPageMapper()
{
}


VMPhysicalPageMapper::~VMPhysicalPageMapper()
{
}