/*
 * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/32bit/ARMVMTranslationMap32Bit.h"

#include <stdlib.h>
#include <string.h>

#include <int.h>
#include <thread.h>
#include <slab/Slab.h>
#include <smp.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>
#include <util/queue.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/32bit/ARMPagingMethod32Bit.h"
#include "paging/32bit/ARMPagingStructures32Bit.h"
#include "paging/arm_physical_page_mapper.h"


//#define TRACE_ARM_VM_TRANSLATION_MAP_32_BIT
#ifdef TRACE_ARM_VM_TRANSLATION_MAP_32_BIT
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


#define PAGEDIR_SIZE	ARM_MMU_L1_TABLE_SIZE
#define PAGEDIR_ALIGN	(4 * B_PAGE_SIZE)
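// With the ARM short-descriptor format used here, the first-level (L1) table
// holds 4096 32-bit entries covering 1 MB each, i.e. 16 KB in total, which is
// why the page directory is allocated with PAGEDIR_SIZE bytes and 16 KB
// alignment. Each second-level (coarse) page table covers 1 MB with 256
// entries of 4 KB (cf. the "index < 256" loops below). In effect a virtual
// address is split up as VADDR_TO_PDENT/VADDR_TO_PTENT are expected to
// compute it, e.g.:
//   va 0x80304000 -> L1 index 0x803, L2 index 0x04, page offset 0x000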


ARMVMTranslationMap32Bit::ARMVMTranslationMap32Bit()
	:
	fPagingStructures(NULL)
{
}


ARMVMTranslationMap32Bit::~ARMVMTranslationMap32Bit()
{
	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL)
		fPageMapper->Delete();

	if (fPagingStructures->pgdir_virt != NULL) {
		// cycle through and free all of the user space pgtables
		for (uint32 i = VADDR_TO_PDENT(USER_BASE);
				i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
			if ((fPagingStructures->pgdir_virt[i] & ARM_PDE_TYPE_MASK) != 0) {
				addr_t address = fPagingStructures->pgdir_virt[i]
					& ARM_PDE_ADDRESS_MASK;
				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
				if (!page)
					panic("destroy_tmap: didn't find pgtable page\n");
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
		}
	}

	fPagingStructures->RemoveReference();
}


status_t
ARMVMTranslationMap32Bit::Init(bool kernel)
{
	TRACE("ARMVMTranslationMap32Bit::Init()\n");

	ARMVMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) ARMPagingStructures32Bit;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();

	if (!kernel) {
		// user
		// allocate a physical page mapper
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// allocate the page directory
		page_directory_entry *virtualPageDir = NULL;

		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;

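		// The first-level table is walked by the MMU itself, so it has to be
		// physically contiguous and -- with the full 4 GB table, i.e.
		// TTBCR.N == 0 -- aligned to its 16 KB size before its physical
		// address can be loaded into TTBR0.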
		physical_address_restrictions physicalRestrictions = {};
		physicalRestrictions.alignment = PAGEDIR_ALIGN;

		area_id pgdir_area = create_area_etc(B_SYSTEM_TEAM, "pgdir",
			PAGEDIR_SIZE, B_CONTIGUOUS,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, 0,
			&virtualRestrictions, &physicalRestrictions, (void **)&virtualPageDir);

		if (pgdir_area < 0) {
			return B_NO_MEMORY;
		}

		// look up the page directory's physical address
		phys_addr_t physicalPageDir;
		vm_get_page_mapping(VMAddressSpace::KernelID(),
			(addr_t)virtualPageDir, &physicalPageDir);

		fPagingStructures->Init(virtualPageDir, physicalPageDir,
			method->KernelVirtualPageDirectory());
	} else {
		// kernel
		// get the physical page mapper
		fPageMapper = method->KernelPhysicalPageMapper();

		// we already know the kernel pgdir mapping
		fPagingStructures->Init(method->KernelVirtualPageDirectory(),
			method->KernelPhysicalPageDirectory(), NULL);
	}

	return B_OK;
}


size_t
ARMVMTranslationMap32Bit::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// If start == 0, the actual base address is not yet known to the caller and
	// we shall assume the worst case.
	if (start == 0) {
		// offset the range so it has the worst possible alignment
		start = 1023 * B_PAGE_SIZE;
		end += 1023 * B_PAGE_SIZE;
	}

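	// One page table covers 1 MB (a single page directory entry), so the
	// result is the number of 1 MB regions the range touches. The offset
	// above is congruent to 255 * B_PAGE_SIZE modulo 1 MB, i.e. the range
	// starts on the last page covered by a page table -- the worst case.
	// Example: a 1 MB range starting there straddles two page tables, so two
	// pages are reported.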
	return VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start);
}


status_t
ARMVMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
	uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);

/*
	dprintf("pgdir at 0x%x\n", pgdir);
	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
*/
	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	// check to see if a page table exists for this range
	uint32 index = VADDR_TO_PDENT(va);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		phys_addr_t pgtable;
		vm_page *page;

		// we need to allocate a pgtable
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);

		// put it in the pgdir
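		// (page tables that map user space addresses get the PXN bit, so the
		// kernel can never execute code from user pages)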
		ARMPagingMethod32Bit::PutPageTableInPageDir(&pd[index], pgtable,
			(va < KERNEL_BASE) ? ARM_MMU_L1_FLAG_PXN : 0);

		// update any other page directories, if it maps kernel space
		if (index >= FIRST_KERNEL_PGDIR_ENT
			&& index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
			ARMPagingStructures32Bit::UpdateAllPageDirs(index, pd[index]);
		}

		fMapCount++;
	}

	// now, fill in the page table entry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	ASSERT_PRINT((pt[index] & ARM_PTE_TYPE_MASK) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
		pt[index]);

	ARMPagingMethod32Bit::PutPageTableEntryInTable(&pt[index], pa, attributes,
		memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if ((pt[index] & ARM_PTE_TYPE_MASK) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("unmap_tmap: removing page 0x%lx\n", start);

			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
					ARM_PTE_TYPE_MASK);
			fMapCount--;

			if ((oldEntry & ARM_MMU_L2_FLAG_AP0) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then could the entry have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
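	// TODO: Not yet implemented for ARM -- the disabled body below is still
	// the x86 version (note the X86_* constants) and would need to be adapted
	// to the ARM page table format before it can be enabled.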
#if 0
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & X86_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & X86_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if ((pt[index] & X86_PTE_PRESENT) == 0) {
				if (!markPresent)
					continue;

				X86PagingMethod32Bit::SetPageTableEntryFlags(&pt[index],
					X86_PTE_PRESENT);
			} else {
				if (markPresent)
					continue;

				page_table_entry oldEntry
					= X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
						X86_PTE_PRESENT);

				if ((oldEntry & ARM_MMU_L2_FLAG_AP0) != 0) {
					// Note that we only need to invalidate the address if the
					// accessed flag was set, since only then could the entry
					// have been in any TLB.
					InvalidatePage(start);
				}
			}
		}
	} while (start != 0 && start < end);
#endif
	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/
status_t
ARMVMTranslationMap32Bit::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	TRACE("ARMVMTranslationMap32Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);

	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
		return B_ENTRY_NOT_FOUND;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);
	page_table_entry oldEntry = ARMPagingMethod32Bit::ClearPageTableEntry(
		&pt[index]);

	pinner.Unlock();

	if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
		// page mapping not valid
		return B_ENTRY_NOT_FOUND;
	}

	fMapCount--;

	if ((oldEntry & ARM_MMU_L2_FLAG_AP0) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then could the entry have been in any TLB.
		InvalidatePage(address);
		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of the
		// same team) could still access the page in question via their cached
		// entry. We can obviously lose a modified flag in this case, with the
		// effect that the page looks unmodified (and might thus be recycled),
		// but is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// this is actually a problem.
		// Interestingly FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		(oldEntry & ARM_MMU_L2_FLAG_AP0) != 0, false /*(oldEntry & ARM_PTE_DIRTY) != 0*/,
		updatePageQueue);

	return B_OK;
}


void
ARMVMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("ARMVMTranslationMap32Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	VMAreaMappings queue;

	RecursiveLocker locker(fLock);

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
				index++, start += B_PAGE_SIZE) {
			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntry(&pt[index]);
			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0)
				continue;

			fMapCount--;

			if ((oldEntry & ARM_MMU_L2_FLAG_AP0) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then could the entry have
				// been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if ((oldEntry & ARM_MMU_L2_FLAG_AP0) != 0)
					page->accessed = true;
				if ((oldEntry & ARM_MMU_L2_FLAG_AP2) == 0)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM this
	// isn't really critical, since in all cases this method is used the
	// unmapped range is gone for good (the area was resized or cut) and the
	// pages will likely be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		vm_free_page_mapping(mapping->page->physical_page_number, mapping, freeFlags);
}


void
ARMVMTranslationMap32Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		ARMVMTranslationMap32Bit::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
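		// When the whole address space is being deleted and the caller
		// doesn't care about the top cache's page flags, unmapPages is false
		// and the page table entries of pages belonging to the area's top
		// cache are not cleared individually (cf. the cache check below) --
		// the paging structures are discarded as a whole anyway.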

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	RecursiveLocker locker(fLock);

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			int index = VADDR_TO_PDENT(address);
			if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page dir entry", page, area, address);
				continue;
			}

			ThreadCPUPinner pinner(thread_get_current_thread());

			page_table_entry* pt
				= (page_table_entry*)fPageMapper->GetPageTableAt(
					pd[index] & ARM_PDE_ADDRESS_MASK);
			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntry(
					&pt[VADDR_TO_PTENT(address)]);

			pinner.Unlock();

			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if ((oldEntry & ARM_MMU_L2_FLAG_AP0) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if (false /*(oldEntry & ARM_PTE_DIRTY) != 0*/)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		vm_free_page_mapping(mapping->page->physical_page_number, mapping, freeFlags);
}


status_t
ARMVMTranslationMap32Bit::Query(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry *pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	if ((entry & ARM_PTE_TYPE_MASK) != 0)
		*_physical = (entry & ARM_PTE_ADDRESS_MASK);

	*_flags = ARMPagingMethod32Bit::PageTableEntryFlagsToAttributes(entry);
	if (*_physical != 0)
		*_flags |= PAGE_PRESENT;

	pinner.Unlock();

	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);

	return B_OK;
}
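// A minimal (hypothetical) caller of Query(), for illustration only:
//
//	phys_addr_t physical;
//	uint32 flags;
//	if (map->Query(address, &physical, &flags) == B_OK
//			&& (flags & PAGE_PRESENT) != 0) {
//		// "physical" holds the frame address; PAGE_ACCESSED/PAGE_MODIFIED
//		// reflect the AP bits of the page table entry.
//	}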


status_t
ARMVMTranslationMap32Bit::QueryInterrupt(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}

	// map page table entry
	page_table_entry* pt = (page_table_entry*)ARMPagingMethod32Bit::Method()
		->PhysicalPageMapper()->InterruptGetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	if ((entry & ARM_PTE_TYPE_MASK) != 0)
		*_physical = (entry & ARM_PTE_ADDRESS_MASK);

	*_flags = ARMPagingMethod32Bit::PageTableEntryFlagsToAttributes(entry);
	if (*_physical != 0)
		*_flags |= PAGE_PRESENT;

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
		attributes);

	uint32 newProtectionFlags = ARMPagingMethod32Bit::AttributesToPageTableEntryFlags(attributes);
	uint32 newMemoryTypeFlags = ARMPagingMethod32Bit::MemoryTypeToPageTableEntryFlags(memoryType);

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); index < 256 && start < end;
				index++, start += B_PAGE_SIZE) {
			page_table_entry entry = pt[index];
			if ((entry & ARM_PTE_TYPE_MASK) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("protect_tmap: protect page 0x%lx\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			page_table_entry oldEntry;
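			// Another CPU may modify the entry concurrently (e.g. when the
			// fault handler sets the accessed flag via SetFlags()), hence the
			// compare-and-swap retry loop below.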
			while (true) {
				oldEntry = ARMPagingMethod32Bit::TestAndSetPageTableEntry(
					&pt[index],
					(entry & ~(ARM_PTE_PROTECTION_MASK
							| ARM_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags | newMemoryTypeFlags,
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			if ((oldEntry & ARM_MMU_L2_FLAG_AP0) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then could the entry have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


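// Accessed/modified tracking: this map uses the AP[0] bit
// (ARM_MMU_L2_FLAG_AP0) of a page table entry as the "accessed" flag and the
// AP[2] read-only bit (ARM_MMU_L2_FLAG_AP2) as an inverted "modified" flag --
// an entry whose AP[2] bit is clear is writable and therefore counts as
// modified. SetFlags()/ClearFlags() and ClearAccessedAndModified() below
// manipulate these bits accordingly.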
status_t
ARMVMTranslationMap32Bit::SetFlags(addr_t virtualAddress, uint32 flags)
{
	int index = VADDR_TO_PDENT(virtualAddress);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}

	uint32 flagsToSet = (flags & PAGE_ACCESSED) ? ARM_MMU_L2_FLAG_AP0 : 0;
	uint32 flagsToClear = (flags & PAGE_MODIFIED) ? ARM_MMU_L2_FLAG_AP2 : 0;

	page_table_entry* pt = (page_table_entry*)ARMPagingMethod32Bit::Method()
		->PhysicalPageMapper()->InterruptGetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(virtualAddress);

	ARMPagingMethod32Bit::SetAndClearPageTableEntryFlags(&pt[index], flagsToSet, flagsToClear);

	// Normally we would call InvalidatePage() here and Flush() later, once all
	// updates are done. However, since this only happens when handling the
	// modified flag, we can invalidate the single affected TLB entry
	// (TLBIMVAIS) directly from here.
	if (flagsToClear)
		arch_cpu_invalidate_TLB_page(virtualAddress);

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::ClearFlags(addr_t va, uint32 flags)
{
	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}

	uint32 flagsToClear = (flags & PAGE_ACCESSED) ? ARM_MMU_L2_FLAG_AP0 : 0;
	uint32 flagsToSet = (flags & PAGE_MODIFIED) ? ARM_MMU_L2_FLAG_AP2 : 0;

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	// adjust the flags we've been requested to set/clear
	page_table_entry oldEntry
		= ARMPagingMethod32Bit::SetAndClearPageTableEntryFlags(&pt[index],
			flagsToSet, flagsToClear);

	pinner.Unlock();

	if (((oldEntry & flagsToClear) != 0) || ((oldEntry & flagsToSet) == 0))
		InvalidatePage(va);

	return B_OK;
}


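/*!	Clears the page's accessed flag and sets the entry read-only again, so
	that the next write fault re-marks it modified. If \a unmapIfUnaccessed is
	\c true and the accessed flag wasn't set, the page is unmapped instead.
	\a _modified is set to whether the entry indicated a modified page.
	Returns \c true if the page had been accessed and its mapping is still in
	place, \c false otherwise.
*/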
bool
ARMVMTranslationMap32Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	TRACE("ARMVMTranslationMap32Bit::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);

	// perform the deed
	page_table_entry oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = pt[index];
			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
				// page mapping not valid
				return false;
			}
			if (oldEntry & ARM_MMU_L2_FLAG_AP0) {
				// page was accessed -- just clear the flags
				oldEntry = ARMPagingMethod32Bit::SetAndClearPageTableEntryFlags(
					&pt[index], ARM_MMU_L2_FLAG_AP2, ARM_MMU_L2_FLAG_AP0);
				break;
			}

			// page hasn't been accessed -- unmap it
			if (ARMPagingMethod32Bit::TestAndSetPageTableEntry(&pt[index], 0,
					oldEntry) == oldEntry) {
				break;
			}

			// something changed -- check again
		}
	} else {
		oldEntry = ARMPagingMethod32Bit::SetAndClearPageTableEntryFlags(&pt[index],
			ARM_MMU_L2_FLAG_AP2, ARM_MMU_L2_FLAG_AP0);
	}

	pinner.Unlock();

	_modified = (oldEntry & ARM_MMU_L2_FLAG_AP2) == 0;

	if ((oldEntry & ARM_MMU_L2_FLAG_AP0) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then could the entry have been in any TLB.
		InvalidatePage(address);

		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

	return false;
}


ARMPagingStructures*
ARMVMTranslationMap32Bit::PagingStructures() const
{
	return fPagingStructures;
}
918