/*
 * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/32bit/ARMVMTranslationMap32Bit.h"

#include <stdlib.h>
#include <string.h>

#include <int.h>
#include <thread.h>
#include <slab/Slab.h>
#include <smp.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>
#include <util/queue.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/32bit/ARMPagingMethod32Bit.h"
#include "paging/32bit/ARMPagingStructures32Bit.h"
#include "paging/arm_physical_page_mapper.h"


//#define TRACE_ARM_VM_TRANSLATION_MAP_32_BIT
#ifdef TRACE_ARM_VM_TRANSLATION_MAP_32_BIT
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


ARMVMTranslationMap32Bit::ARMVMTranslationMap32Bit()
	:
	fPagingStructures(NULL)
{
}


ARMVMTranslationMap32Bit::~ARMVMTranslationMap32Bit()
{
	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL)
		fPageMapper->Delete();

	if (fPagingStructures->pgdir_virt != NULL) {
		// cycle through and free all of the user space pgtables
		for (uint32 i = VADDR_TO_PDENT(USER_BASE);
				i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
			if ((fPagingStructures->pgdir_virt[i] & ARM_PDE_TYPE_MASK) != 0) {
				addr_t address = fPagingStructures->pgdir_virt[i]
					& ARM_PDE_ADDRESS_MASK;
				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
				if (page == NULL)
					panic("destroy_tmap: didn't find pgtable page\n");
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
		}
	}

	fPagingStructures->RemoveReference();
}


status_t
ARMVMTranslationMap32Bit::Init(bool kernel)
{
	TRACE("ARMVMTranslationMap32Bit::Init()\n");

	ARMVMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) ARMPagingStructures32Bit;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();

	if (!kernel) {
		// user
		// allocate a physical page mapper
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// allocate the page directory
		page_directory_entry* virtualPageDir = (page_directory_entry*)memalign(
			B_PAGE_SIZE, B_PAGE_SIZE);
		if (virtualPageDir == NULL)
			return B_NO_MEMORY;

		// look up the page directory's physical address
		phys_addr_t physicalPageDir;
		vm_get_page_mapping(VMAddressSpace::KernelID(),
			(addr_t)virtualPageDir, &physicalPageDir);
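			// (the page directory has just been allocated from the kernel
			// heap, which is wired memory, so the lookup is expected to
			// succeed)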

		fPagingStructures->Init(virtualPageDir, physicalPageDir,
			method->KernelVirtualPageDirectory());
	} else {
		// kernel
		// get the physical page mapper
		fPageMapper = method->KernelPhysicalPageMapper();

		// we already know the kernel pgdir mapping
		fPagingStructures->Init(method->KernelVirtualPageDirectory(),
			method->KernelPhysicalPageDirectory(), NULL);
	}

	return B_OK;
}


size_t
ARMVMTranslationMap32Bit::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// If start == 0, the actual base address is not yet known to the caller and
	// we shall assume the worst case.
	if (start == 0) {
		// offset the range so it has the worst possible alignment
		start = 1023 * B_PAGE_SIZE;
		end += 1023 * B_PAGE_SIZE;
	}

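	// One page table may be needed for each page directory entry covered by
	// the range, i.e. the result is the difference between the last and the
	// first page directory index plus one.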
	return VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start);
}


status_t
ARMVMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
	uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);

/*
	dprintf("pgdir at 0x%x\n", pgdir);
	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
*/
	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	// check to see if a page table exists for this range
	uint32 index = VADDR_TO_PDENT(va);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		phys_addr_t pgtable;
		vm_page *page;

		// we need to allocate a pgtable
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
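			// (the page comes out of the caller's reservation, so the
			// allocation cannot fail at this point)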

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);

		// put it in the pgdir
		ARMPagingMethod32Bit::PutPageTableInPageDir(&pd[index], pgtable,
			attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

		// update any other page directories, if it maps kernel space
		if (index >= FIRST_KERNEL_PGDIR_ENT
			&& index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
			ARMPagingStructures32Bit::UpdateAllPageDirs(index, pd[index]);
		}

		fMapCount++;
	}

	// now, fill in the page table entry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);
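		// pinning the thread to this CPU makes sure the page table mapping
		// handed out by the physical page mapper below stays valid while we
		// use it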

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	ASSERT_PRINT((pt[index] & ARM_PTE_TYPE_MASK) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
		pt[index]);

	ARMPagingMethod32Bit::PutPageTableEntryInTable(&pt[index], pa, attributes,
		memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if ((pt[index] & ARM_PTE_TYPE_MASK) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("unmap_tmap: removing page 0x%lx\n", start);

			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
					ARM_PTE_TYPE_MASK);
			fMapCount--;

			if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);
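		// ("start != 0" catches the case that "start" wrapped around to 0,
		// i.e. when the range extends up to the end of the address space)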

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
#if 0
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & X86_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & X86_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if ((pt[index] & X86_PTE_PRESENT) == 0) {
				if (!markPresent)
					continue;

				X86PagingMethod32Bit::SetPageTableEntryFlags(&pt[index],
					X86_PTE_PRESENT);
			} else {
				if (markPresent)
					continue;

				page_table_entry oldEntry
					= X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
						X86_PTE_PRESENT);

				if ((oldEntry & X86_PTE_ACCESSED) != 0) {
					// Note that we only need to invalidate the address if the
					// accessed flag was set, since only then the entry could
					// have been in any TLB.
					InvalidatePage(start);
				}
			}
		}
	} while (start != 0 && start < end);
#endif
	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/
status_t
ARMVMTranslationMap32Bit::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	TRACE("ARMVMTranslationMap32Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);

	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
		return B_ENTRY_NOT_FOUND;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);
	page_table_entry oldEntry = ARMPagingMethod32Bit::ClearPageTableEntry(
		&pt[index]);

	pinner.Unlock();

	if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
		// page mapping not valid
		return B_ENTRY_NOT_FOUND;
	}

	fMapCount--;

	if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any TLB.
		InvalidatePage(address);
		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of the
		// same team) could still access the page in question via their cached
		// entry. We can obviously lose a modified flag in this case, with the
		// effect that the page looks unmodified (and might thus be recycled),
		// but is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// this is actually a problem.
		// Interestingly FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/,
		true /*(oldEntry & ARM_PTE_DIRTY) != 0*/,
		updatePageQueue);

	return B_OK;
}


void
ARMVMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("ARMVMTranslationMap32Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	VMAreaMappings queue;
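		// mapping objects removed below are collected in this queue and
		// freed only after the lock has been released (see end of method)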

	RecursiveLocker locker(fLock);

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
				index++, start += B_PAGE_SIZE) {
			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntry(&pt[index]);
			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0)
				continue;

			fMapCount--;

			if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) // XXX IRA
					page->accessed = true;
				if (true /*(oldEntry & ARM_PTE_DIRTY) != 0*/)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical, since in all cases in which this method is used, the
	// unmapped area range is unmapped for good (resized/cut) and the pages
	// will likely be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
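		// (CACHE_DONT_LOCK_KERNEL_SPACE keeps the object cache from locking
		// the kernel address space, which the caller might already hold
		// locked)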
	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


void
ARMVMTranslationMap32Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		ARMVMTranslationMap32Bit::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
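	// If the whole address space is being deleted and the top cache's page
	// flags don't matter, the entries of pages belonging to the top cache
	// need not be cleared individually; pages of lower caches still are (see
	// the "unmapPages || cache != area->cache" check below).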

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	RecursiveLocker locker(fLock);

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			int index = VADDR_TO_PDENT(address);
			if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page dir entry", page, area, address);
				continue;
			}

			ThreadCPUPinner pinner(thread_get_current_thread());

			page_table_entry* pt
				= (page_table_entry*)fPageMapper->GetPageTableAt(
					pd[index] & ARM_PDE_ADDRESS_MASK);
			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntry(
					&pt[VADDR_TO_PTENT(address)]);

			pinner.Unlock();

			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if (true /*(oldEntry & ARM_PTE_DIRTY) != 0*/)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


status_t
ARMVMTranslationMap32Bit::Query(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry *pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	if ((entry & ARM_PTE_TYPE_MASK) != 0)
		*_physical = (entry & ARM_PTE_ADDRESS_MASK) | VADDR_TO_PGOFF(va);

#if 0 //IRA
	// read in the page state flags
	if ((entry & X86_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & ARM_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & ARM_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & ARM_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
#else
	*_flags = B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA;
	if (*_physical != 0)
		*_flags |= PAGE_PRESENT;
#endif
	pinner.Unlock();

	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::QueryInterrupt(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}

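	// Note: no ThreadCPUPinner is needed here -- this method is meant to be
	// called with interrupts disabled, so the thread cannot be migrated to
	// another CPU anyway.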
	// map page table entry
	page_table_entry* pt = (page_table_entry*)ARMPagingMethod32Bit::Method()
		->PhysicalPageMapper()->InterruptGetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	if ((entry & ARM_PTE_TYPE_MASK) != 0)
		*_physical = (entry & ARM_PTE_ADDRESS_MASK) | VADDR_TO_PGOFF(va);

#if 0
	// read in the page state flags
	if ((entry & X86_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
#else
	*_flags = B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA;
	if (*_physical != 0)
		*_flags |= PAGE_PRESENT;
#endif
	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
		attributes);
#if 0 //IRA
	// compute protection flags
	uint32 newProtectionFlags = 0;
	if ((attributes & B_USER_PROTECTION) != 0) {
		newProtectionFlags = ARM_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newProtectionFlags |= ARM_PTE_WRITABLE;
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newProtectionFlags = ARM_PTE_WRITABLE;

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); index < 256 && start < end;
				index++, start += B_PAGE_SIZE) {
			page_table_entry entry = pt[index];
			if ((entry & ARM_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("protect_tmap: protect page 0x%lx\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			page_table_entry oldEntry;
			while (true) {
				oldEntry = ARMPagingMethod32Bit::TestAndSetPageTableEntry(
					&pt[index],
					(entry & ~(ARM_PTE_PROTECTION_MASK
							| ARM_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags
						| ARMPagingMethod32Bit::MemoryTypeToPageTableEntryFlags(
							memoryType),
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);
#endif
	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::ClearFlags(addr_t va, uint32 flags)
{
	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}
#if 0 //IRA
	uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? X86_PTE_ACCESSED : 0);
#else
	uint32 flagsToClear = 0;
#endif
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	// clear out the flags we've been requested to clear
	page_table_entry oldEntry
		= ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
			flagsToClear);

	pinner.Unlock();

	//XXX IRA if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(va);

	return B_OK;
}


bool
ARMVMTranslationMap32Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	TRACE("ARMVMTranslationMap32Bit::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);

	// perform the deed
	page_table_entry oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = pt[index];
			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
				// page mapping not valid
				return false;
			}
#if 0 //IRA
			if (oldEntry & ARM_PTE_ACCESSED) {
				// page was accessed -- just clear the flags
				oldEntry = ARMPagingMethod32Bit::ClearPageTableEntryFlags(
					&pt[index], ARM_PTE_ACCESSED | ARM_PTE_DIRTY);
				break;
			}
#endif
			// page hasn't been accessed -- unmap it
			if (ARMPagingMethod32Bit::TestAndSetPageTableEntry(&pt[index], 0,
					oldEntry) == oldEntry) {
				break;
			}

			// something changed -- check again
		}
	} else {
#if 0 //IRA
		oldEntry = ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
			ARM_PTE_ACCESSED | ARM_PTE_DIRTY);
#else
		oldEntry = pt[index];
#endif
	}

	pinner.Unlock();

	_modified = true /*(oldEntry & ARM_PTE_DIRTY) != 0*/; // XXX IRA

	if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any TLB.
		InvalidatePage(address);

		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

	return false;
}


ARMPagingStructures*
ARMVMTranslationMap32Bit::PagingStructures() const
{
	return fPagingStructures;
}