xref: /haiku/src/system/kernel/vm/VMTranslationMap.cpp (revision 1026b0a1a76dc88927bb8175c470f638dc5464ee)
/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <vm/VMTranslationMap.h>

#include <slab/Slab.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>


// #pragma mark - VMTranslationMap


VMTranslationMap::VMTranslationMap()
	:
	fMapCount(0)
{
	recursive_lock_init(&fLock, "translation map");
}


VMTranslationMap::~VMTranslationMap()
{
	recursive_lock_destroy(&fLock);
}


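/*!	Marks the pages in the given range present/non-present for debugging
	purposes. The base class version doesn't support the feature and just
	returns \c B_NOT_SUPPORTED; architecture specific implementations may
	override it.
*/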
status_t
VMTranslationMap::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	return B_NOT_SUPPORTED;
}


/*!	Unmaps a range of pages of an area. Both \a base and \a size must be
	page aligned.

	The default implementation just iterates over all virtual pages of the
	range and calls UnmapPage(). This is obviously not particularly efficient.
*/
void
VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	ASSERT(base % B_PAGE_SIZE == 0);
	ASSERT(size % B_PAGE_SIZE == 0);

	addr_t address = base;
	addr_t end = address + size;
#if DEBUG_PAGE_ACCESS
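	// With page access debugging enabled, the mapped page is looked up first,
	// so it can be marked via DEBUG_PAGE_ACCESS_START()/DEBUG_PAGE_ACCESS_END()
	// while it is being unmapped.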
	for (; address != end; address += B_PAGE_SIZE) {
		phys_addr_t physicalAddress;
		uint32 flags;
		if (Query(address, &physicalAddress, &flags) == B_OK
			&& (flags & PAGE_PRESENT) != 0) {
			vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
			if (page != NULL) {
				DEBUG_PAGE_ACCESS_START(page);
				UnmapPage(area, address, updatePageQueue);
				DEBUG_PAGE_ACCESS_END(page);
			} else
				UnmapPage(area, address, updatePageQueue);
		}
	}
#else
	for (; address != end; address += B_PAGE_SIZE)
		UnmapPage(area, address, updatePageQueue);
#endif
}


/*!	Unmaps all of an area's pages.
	If \a deletingAddressSpace is \c true, the address space the area belongs to
	is in the process of being destroyed and isn't used by anyone anymore. For
	some architectures this can be used for optimizations (e.g. not unmapping
	pages or at least not needing to invalidate TLB entries).
	If \a ignoreTopCachePageFlags is \c true, the area is in the process of
	being destroyed and its top cache is otherwise unreferenced. I.e. all mapped
	pages that live in the top cache are going to be freed and the page
	accessed and modified flags don't need to be propagated.

	The default implementation just iterates over all virtual pages of the
	area and calls UnmapPage(). This is obviously not particularly efficient.
*/
void
VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	addr_t address = area->Base();
	addr_t end = address + area->Size();
#if DEBUG_PAGE_ACCESS
	for (; address != end; address += B_PAGE_SIZE) {
		phys_addr_t physicalAddress;
		uint32 flags;
		if (Query(address, &physicalAddress, &flags) == B_OK
			&& (flags & PAGE_PRESENT) != 0) {
			vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
			if (page != NULL) {
				DEBUG_PAGE_ACCESS_START(page);
				UnmapPage(area, address, true);
				DEBUG_PAGE_ACCESS_END(page);
			} else
				UnmapPage(area, address, true);
		}
	}
#else
	for (; address != end; address += B_PAGE_SIZE)
		UnmapPage(area, address, true);
#endif
}


/*!	Called by UnmapPage() after performing the architecture specific part.
	Looks up the page, updates its flags, removes the page-area mapping, and
	requeues the page, if necessary. The translation map's lock must be held
	by the caller; it is released before this method returns.
*/
void
VMTranslationMap::PageUnmapped(VMArea* area, page_num_t pageNumber,
	bool accessed, bool modified, bool updatePageQueue)
{
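	// Device mappings aren't backed by vm_page objects, so there is nothing
	// to update here; just release the lock and return.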
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		recursive_lock_unlock(&fLock);
		return;
	}

	// get the page
	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR
		", accessed: %d, modified: %d", pageNumber, accessed, modified);

	// transfer the accessed/dirty flags to the page
	page->accessed |= accessed;
	page->modified |= modified;

	// remove the mapping object/decrement the wired_count of the page
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR ", accessed: %d, modified: %d", page,
			pageNumber, accessed, modified);
	} else
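		// Wired mappings aren't tracked with vm_page_mapping objects; only
		// the page's wired count needs to be adjusted.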
		page->DecrementWiredCount();

	recursive_lock_unlock(&fLock);

	if (!page->IsMapped()) {
		atomic_add(&gMappedPagesCount, -1);

		if (updatePageQueue) {
			if (page->Cache()->temporary)
				vm_page_set_state(page, PAGE_STATE_INACTIVE);
			else if (page->modified)
				vm_page_set_state(page, PAGE_STATE_MODIFIED);
			else
				vm_page_set_state(page, PAGE_STATE_CACHED);
		}
	}

	if (mapping != NULL) {
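		// The object cache must neither wait for memory nor, for kernel
		// areas, try to lock the kernel address space, which the caller
		// presumably holds.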
		bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
		object_cache_free(gPageMappingsObjectCache, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY
				| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0));
	}
}


/*!	Called by ClearAccessedAndModified() after performing the architecture
	specific part.
	Looks up the page and removes the page-area mapping.
*/
void
VMTranslationMap::UnaccessedPageUnmapped(VMArea* area, page_num_t pageNumber)
{
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		recursive_lock_unlock(&fLock);
		return;
	}

	// get the page
	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR, pageNumber);

	// remove the mapping object/decrement the wired_count of the page
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR, page, pageNumber);
	} else
		page->DecrementWiredCount();

	recursive_lock_unlock(&fLock);

	if (!page->IsMapped())
		atomic_add(&gMappedPagesCount, -1);

	if (mapping != NULL) {
		object_cache_free(gPageMappingsObjectCache, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
			// Since this is called by the page daemon, we never want to lock
			// the kernel address space.
	}
}


// #pragma mark - VMPhysicalPageMapper


VMPhysicalPageMapper::VMPhysicalPageMapper()
{
}


VMPhysicalPageMapper::~VMPhysicalPageMapper()
{
}