/*
 * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/32bit/ARMVMTranslationMap32Bit.h"

#include <stdlib.h>
#include <string.h>

#include <int.h>
#include <thread.h>
#include <slab/Slab.h>
#include <smp.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>
#include <util/queue.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/32bit/ARMPagingMethod32Bit.h"
#include "paging/32bit/ARMPagingStructures32Bit.h"
#include "paging/arm_physical_page_mapper.h"


//#define TRACE_ARM_VM_TRANSLATION_MAP_32_BIT
#ifdef TRACE_ARM_VM_TRANSLATION_MAP_32_BIT
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


#define PAGEDIR_SIZE	ARM_MMU_L1_TABLE_SIZE
#define PAGEDIR_ALIGN	(4 * B_PAGE_SIZE)
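
// Geometry notes (this file assumes the ARM short-descriptor format): the
// L1 translation table holds 4096 word-sized entries, each covering a 1MB
// section, so it is 16KB large (ARM_MMU_L1_TABLE_SIZE) and must be 16KB
// aligned to be usable in TTBR -- hence PAGEDIR_ALIGN of 4 * B_PAGE_SIZE.
// A coarse L2 table holds 256 small-page entries, which is why the
// per-table loops below run up to 256: VADDR_TO_PDENT() selects address
// bits [31:20] and VADDR_TO_PTENT() bits [19:12].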


ARMVMTranslationMap32Bit::ARMVMTranslationMap32Bit()
	:
	fPagingStructures(NULL)
{
}


ARMVMTranslationMap32Bit::~ARMVMTranslationMap32Bit()
{
	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL)
		fPageMapper->Delete();

	if (fPagingStructures->pgdir_virt != NULL) {
		// cycle through and free all of the user space pgtables
		for (uint32 i = VADDR_TO_PDENT(USER_BASE);
				i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
			if ((fPagingStructures->pgdir_virt[i] & ARM_PDE_TYPE_MASK) != 0) {
				addr_t address = fPagingStructures->pgdir_virt[i]
					& ARM_PDE_ADDRESS_MASK;
				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
				if (page == NULL)
					panic("destroy_tmap: didn't find pgtable page\n");
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
		}
	}

	fPagingStructures->RemoveReference();
}


status_t
ARMVMTranslationMap32Bit::Init(bool kernel)
{
	TRACE("ARMVMTranslationMap32Bit::Init()\n");

	ARMVMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) ARMPagingStructures32Bit;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();

	if (!kernel) {
		// user
		// allocate a physical page mapper
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// allocate the page directory
		page_directory_entry* virtualPageDir = (page_directory_entry*)memalign(
			PAGEDIR_ALIGN, PAGEDIR_SIZE);
		if (virtualPageDir == NULL)
			return B_NO_MEMORY;

		// look up the page directory's physical address
		phys_addr_t physicalPageDir;
		vm_get_page_mapping(VMAddressSpace::KernelID(),
			(addr_t)virtualPageDir, &physicalPageDir);

		fPagingStructures->Init(virtualPageDir, physicalPageDir,
			method->KernelVirtualPageDirectory());
	} else {
		// kernel
		// get the physical page mapper
		fPageMapper = method->KernelPhysicalPageMapper();

		// we already know the kernel pgdir mapping
		fPagingStructures->Init(method->KernelVirtualPageDirectory(),
			method->KernelPhysicalPageDirectory(), NULL);
	}

	return B_OK;
}


size_t
ARMVMTranslationMap32Bit::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// If start == 0, the actual base address is not yet known to the caller and
	// we shall assume the worst case.
	if (start == 0) {
		// offset the range so it has the worst possible alignment
		start = 1023 * B_PAGE_SIZE;
		end += 1023 * B_PAGE_SIZE;
	}
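
	// Each page directory entry covers a 1MB section, i.e. 256 pages; an
	// offset of 1023 pages is congruent to 255 mod 256 and thus puts "start"
	// on the last page slot of a section -- the worst case for the entry
	// count computed below.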

	return VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start);
}


status_t
ARMVMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
	uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	// check to see if a page table exists for this range
	uint32 index = VADDR_TO_PDENT(va);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		phys_addr_t pgtable;
		vm_page *page;

		// we need to allocate a pgtable
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);

		// put it in the pgdir
		ARMPagingMethod32Bit::PutPageTableInPageDir(&pd[index], pgtable,
			attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

		// update any other page directories, if it maps kernel space
		if (index >= FIRST_KERNEL_PGDIR_ENT
			&& index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
			ARMPagingStructures32Bit::UpdateAllPageDirs(index, pd[index]);
		}

		fMapCount++;
	}

	// now, fill in the page table entry
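	// Pin the thread to this CPU while the page table is accessed: the
	// physical page mapper may hand out a per-CPU mapping for it, which
	// must not be used from another CPU after a migration.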
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	ASSERT_PRINT((pt[index] & ARM_PTE_TYPE_MASK) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
		pt[index]);

	ARMPagingMethod32Bit::PutPageTableEntryInTable(&pt[index], pa, attributes,
		memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as the
	// entry was not present before and the TLB doesn't cache such entries.

	fMapCount++;

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if ((pt[index] & ARM_PTE_TYPE_MASK) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("unmap_tmap: removing page 0x%lx\n", start);

			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
					ARM_PTE_TYPE_MASK);
			fMapCount--;

			if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) {
				// We would only need to invalidate the address if the
				// accessed flag was set, since only then could the entry
				// have been in any TLB; the ARM port doesn't track that
				// flag yet, so we conservatively always invalidate.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
#if 0
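	// Disabled: this is still the unported x86 version (note the X86_*
	// constants); it needs to be adapted to the ARM PTE layout before it
	// can be enabled.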
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & X86_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & X86_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if ((pt[index] & X86_PTE_PRESENT) == 0) {
				if (!markPresent)
					continue;

				X86PagingMethod32Bit::SetPageTableEntryFlags(&pt[index],
					X86_PTE_PRESENT);
			} else {
				if (markPresent)
					continue;

				page_table_entry oldEntry
					= X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
						X86_PTE_PRESENT);

				if ((oldEntry & X86_PTE_ACCESSED) != 0) {
					// We only need to invalidate the address if the accessed
					// flag was set, since only then could the entry have
					// been in any TLB.
					InvalidatePage(start);
				}
			}
		}
	} while (start != 0 && start < end);
#endif
	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/
status_t
ARMVMTranslationMap32Bit::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	TRACE("ARMVMTranslationMap32Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);

	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
		return B_ENTRY_NOT_FOUND;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);
	page_table_entry oldEntry = ARMPagingMethod32Bit::ClearPageTableEntry(
		&pt[index]);

	pinner.Unlock();

	if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
		// page mapping not valid
		return B_ENTRY_NOT_FOUND;
	}

	fMapCount--;

	if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
		// We would only need to invalidate the address if the accessed flag
		// was set, since only then could the entry have been in any TLB.
		InvalidatePage(address);
		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of the
		// same team) could still access the page in question via their cached
		// entry. We can obviously lose a modified flag in this case, with the
		// effect that the page looks unmodified (and might thus be recycled),
		// but is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// this is actually a problem.
		// Interestingly FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/,
		true /*(oldEntry & ARM_PTE_DIRTY) != 0*/, updatePageQueue);

	return B_OK;
}


void
ARMVMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("ARMVMTranslationMap32Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	VMAreaMappings queue;
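	// Removed vm_page_mapping objects are collected in this queue and only
	// freed at the end of this method, after the lock has been released,
	// since freeing them through the object cache while holding the
	// translation map lock could cause lock order problems.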

	RecursiveLocker locker(fLock);

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
				index++, start += B_PAGE_SIZE) {
			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntry(&pt[index]);
			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0)
				continue;

			fMapCount--;

			if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
				// We would only need to invalidate the address if the
				// accessed flag was set, since only then could the entry
				// have been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) // XXX IRA
					page->accessed = true;
				if (true /*(oldEntry & ARM_PTE_DIRTY) != 0*/)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical, as in all cases where this method is used, the
	// unmapped range is unmapped for good (resized/cut) and the pages will
	// likely be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


void
ARMVMTranslationMap32Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		ARMVMTranslationMap32Bit::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
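		// unmapPages is false only when the whole address space is being
		// deleted and the top cache's page flags may be ignored; in that
		// case the hardware table doesn't need to be touched for pages of
		// the area's own cache, but pages belonging to other (lower) caches
		// are still unmapped below so their accessed/dirty flags aren't
		// lost.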

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	RecursiveLocker locker(fLock);

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			int index = VADDR_TO_PDENT(address);
			if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page dir entry", page, area, address);
				continue;
			}

			ThreadCPUPinner pinner(thread_get_current_thread());

			page_table_entry* pt
				= (page_table_entry*)fPageMapper->GetPageTableAt(
					pd[index] & ARM_PDE_ADDRESS_MASK);
			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntry(
					&pt[VADDR_TO_PTENT(address)]);

			pinner.Unlock();

			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if (true /*(oldEntry & ARM_PTE_DIRTY) != 0*/)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


status_t
ARMVMTranslationMap32Bit::Query(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry *pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	if ((entry & ARM_PTE_TYPE_MASK) != 0)
		*_physical = (entry & ARM_PTE_ADDRESS_MASK);

	// TODO: read in the page state flags
	*_flags = ARMPagingMethod32Bit::PageTableEntryFlagsToAttributes(entry);
	if (*_physical != 0)
		*_flags |= PAGE_PRESENT;

	pinner.Unlock();

	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::QueryInterrupt(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}

	// map page table entry
	page_table_entry* pt = (page_table_entry*)ARMPagingMethod32Bit::Method()
		->PhysicalPageMapper()->InterruptGetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	if ((entry & ARM_PTE_TYPE_MASK) != 0)
		*_physical = (entry & ARM_PTE_ADDRESS_MASK);

	// TODO: read in the page state flags
	*_flags = ARMPagingMethod32Bit::PageTableEntryFlagsToAttributes(entry);
	if (*_physical != 0)
		*_flags |= PAGE_PRESENT;

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
		attributes);

	uint32 newProtectionFlags
		= ARMPagingMethod32Bit::AttributesToPageTableEntryFlags(attributes);
	uint32 newMemoryTypeFlags
		= ARMPagingMethod32Bit::MemoryTypeToPageTableEntryFlags(memoryType);

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); index < 256 && start < end;
				index++, start += B_PAGE_SIZE) {
			page_table_entry entry = pt[index];
			if ((entry & ARM_PTE_TYPE_MASK) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("protect_tmap: protect page 0x%lx\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
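			// The loop below is a compare-and-swap retry: if the entry
			// changed between our read and the swap (e.g. another CPU
			// modified it concurrently), re-read and try again with the
			// updated value.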
			page_table_entry oldEntry;
			while (true) {
				oldEntry = ARMPagingMethod32Bit::TestAndSetPageTableEntry(
					&pt[index],
					(entry & ~(ARM_PTE_PROTECTION_MASK
							| ARM_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags | newMemoryTypeFlags,
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			// TODO: invalidate only if the accessed flag is set
			InvalidatePage(start);
		}
	} while (start != 0 && start < end);

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::ClearFlags(addr_t va, uint32 flags)
{
	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}

#if 0 // IRA: the ARM port doesn't maintain accessed/dirty PTE flags yet
	uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? X86_PTE_ACCESSED : 0);
#else
	uint32 flagsToClear = 0;
#endif

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	// clear out the flags we've been requested to clear
	page_table_entry oldEntry
		= ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
			flagsToClear);

	pinner.Unlock();

	// XXX IRA: should be: if ((oldEntry & flagsToClear) != 0)
	InvalidatePage(va);

	return B_OK;
}


bool
ARMVMTranslationMap32Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	TRACE("ARMVMTranslationMap32Bit::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);

	// perform the deed
	page_table_entry oldEntry;
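
	// Note that without hardware-maintained accessed flags (see the #if 0
	// block below), the unmapIfUnaccessed path always treats the page as
	// unaccessed and unmaps it, using a compare-and-swap that retries if
	// the entry changes concurrently.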

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = pt[index];
			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
				// page mapping not valid
				return false;
			}
#if 0 // IRA: accessed flag not maintained yet
			if (oldEntry & ARM_PTE_ACCESSED) {
				// page was accessed -- just clear the flags
				oldEntry = ARMPagingMethod32Bit::ClearPageTableEntryFlags(
					&pt[index], ARM_PTE_ACCESSED | ARM_PTE_DIRTY);
				break;
			}
#endif
			// page hasn't been accessed -- unmap it
			if (ARMPagingMethod32Bit::TestAndSetPageTableEntry(&pt[index], 0,
					oldEntry) == oldEntry) {
				break;
			}

			// something changed -- check again
		}
	} else {
#if 0 // IRA: accessed/dirty flags not maintained yet
		oldEntry = ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
			ARM_PTE_ACCESSED | ARM_PTE_DIRTY);
#else
		oldEntry = pt[index];
#endif
	}

	pinner.Unlock();

	_modified = true /*(oldEntry & ARM_PTE_DIRTY) != 0*/; // XXX IRA

	if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) {
		// We would only need to invalidate the address if the accessed flag
		// was set, since only then could the entry have been in any TLB.
		InvalidatePage(address);

		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

	return false;
}


ARMPagingStructures*
ARMVMTranslationMap32Bit::PagingStructures() const
{
	return fPagingStructures;
}