xref: /haiku/src/system/kernel/arch/arm/paging/32bit/ARMVMTranslationMap32Bit.cpp (revision 1e60bdeab63fa7a57bc9a55b032052e95a18bd2c)
/*
 * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/32bit/ARMVMTranslationMap32Bit.h"

#include <stdlib.h>
#include <string.h>

#include <int.h>
#include <thread.h>
#include <slab/Slab.h>
#include <smp.h>
#include <util/AutoLock.h>
#include <util/queue.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/32bit/ARMPagingMethod32Bit.h"
#include "paging/32bit/ARMPagingStructures32Bit.h"
#include "paging/arm_physical_page_mapper.h"


//#define TRACE_ARM_VM_TRANSLATION_MAP_32_BIT
#ifdef TRACE_ARM_VM_TRANSLATION_MAP_32_BIT
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


ARMVMTranslationMap32Bit::ARMVMTranslationMap32Bit()
	:
	fPagingStructures(NULL)
{
}


ARMVMTranslationMap32Bit::~ARMVMTranslationMap32Bit()
{
	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL)
		fPageMapper->Delete();

	if (fPagingStructures->pgdir_virt != NULL) {
		// cycle through and free all of the user space pgtables
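		// (only the user portion of the directory is scanned here; kernel
		// page tables are shared with the kernel translation map and must not
		// be freed)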
		for (uint32 i = VADDR_TO_PDENT(USER_BASE);
				i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
			if ((fPagingStructures->pgdir_virt[i] & ARM_PDE_TYPE_MASK) != 0) {
				addr_t address = fPagingStructures->pgdir_virt[i]
					& ARM_PDE_ADDRESS_MASK;
				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
				if (!page)
					panic("destroy_tmap: didn't find pgtable page\n");
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
		}
	}

	fPagingStructures->RemoveReference();
}


status_t
ARMVMTranslationMap32Bit::Init(bool kernel)
{
	TRACE("ARMVMTranslationMap32Bit::Init()\n");

	ARMVMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) ARMPagingStructures32Bit;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();

	if (!kernel) {
		// user
		// allocate a physical page mapper
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// allocate the page directory
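		// (page-aligned, so that its physical address can be looked up below
		// and handed to the paging structures)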
		page_directory_entry* virtualPageDir = (page_directory_entry*)memalign(
			B_PAGE_SIZE, B_PAGE_SIZE);
		if (virtualPageDir == NULL)
			return B_NO_MEMORY;

		// look up the page directory's physical address
		phys_addr_t physicalPageDir;
		vm_get_page_mapping(VMAddressSpace::KernelID(),
			(addr_t)virtualPageDir, &physicalPageDir);

		fPagingStructures->Init(virtualPageDir, physicalPageDir,
			method->KernelVirtualPageDirectory());
	} else {
		// kernel
		// get the physical page mapper
		fPageMapper = method->KernelPhysicalPageMapper();

		// we already know the kernel pgdir mapping
		fPagingStructures->Init(method->KernelVirtualPageDirectory(),
			method->KernelPhysicalPageDirectory(), NULL);
	}

	return B_OK;
}


size_t
ARMVMTranslationMap32Bit::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// If start == 0, the actual base address is not yet known to the caller and
	// we shall assume the worst case.
	if (start == 0) {
		// offset the range so it has the worst possible alignment
		start = 1023 * B_PAGE_SIZE;
		end += 1023 * B_PAGE_SIZE;
	}

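	// Each first-level entry covers a 1 MB region served by one 256-entry
	// page table, so this is the number of such regions [start, end] touches.
	// (The 1023-page offset above matches the x86 worst case; for ARM's
	// 256-entry tables a 255-page offset would already suffice, so the
	// estimate is merely conservative.)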
	return VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start);
}


status_t
ARMVMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
	uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);

/*
	dprintf("pgdir at 0x%x\n", pgdir);
	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
*/
	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	// check to see if a page table exists for this range
	uint32 index = VADDR_TO_PDENT(va);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		phys_addr_t pgtable;
		vm_page *page;

		// we need to allocate a pgtable
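		// It is allocated wired (page tables must stay resident) and cleared,
		// so all entries of the new table start out invalid.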
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);

		// put it in the pgdir
		ARMPagingMethod32Bit::PutPageTableInPageDir(&pd[index], pgtable,
			attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

		// update any other page directories, if it maps kernel space
		if (index >= FIRST_KERNEL_PGDIR_ENT
			&& index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
			ARMPagingStructures32Bit::UpdateAllPageDirs(index, pd[index]);
		}

		fMapCount++;
	}

	// now, fill in the pentry
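	// Pin the thread to this CPU while the physical page mapper's view of the
	// page table is in use; the returned mapping is generally per-CPU, so we
	// must not be migrated while accessing it.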
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	ASSERT_PRINT((pt[index] & ARM_PTE_TYPE_MASK) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
		pt[index]);

	ARMPagingMethod32Bit::PutPageTableEntryInTable(&pt[index], pa, attributes,
		memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if ((pt[index] & ARM_PTE_TYPE_MASK) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("unmap_tmap: removing page 0x%lx\n", start);

			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
					ARM_PTE_TYPE_MASK);
			fMapCount--;

			if (true /* (oldEntry & ARM_PTE_ACCESSED) != 0*/) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);
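		// (start != 0 guards against address wrap-around when the range
		// reaches the top of the address space)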

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
#if 0
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & X86_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & X86_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if ((pt[index] & X86_PTE_PRESENT) == 0) {
				if (!markPresent)
					continue;

				X86PagingMethod32Bit::SetPageTableEntryFlags(&pt[index],
					X86_PTE_PRESENT);
			} else {
				if (markPresent)
					continue;

				page_table_entry oldEntry
					= X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
						X86_PTE_PRESENT);

				if ((oldEntry & X86_PTE_ACCESSED) != 0) {
					// Note that we only need to invalidate the address if the
					// accessed flag was set, since only then the entry could
					// have been in any TLB.
					InvalidatePage(start);
				}
			}
		}
	} while (start != 0 && start < end);
#endif
	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/
status_t
ARMVMTranslationMap32Bit::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	TRACE("ARMVMTranslationMap32Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);

	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
		return B_ENTRY_NOT_FOUND;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);
	page_table_entry oldEntry = ARMPagingMethod32Bit::ClearPageTableEntry(
		&pt[index]);

	pinner.Unlock();

	if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
		// page mapping not valid
		return B_ENTRY_NOT_FOUND;
	}

	fMapCount--;
	if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
		// Note that we only need to invalidate the address if the
		// accessed flag was set, since only then the entry could have been
		// in any TLB.
		InvalidatePage(address);
		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of the
		// same team) could still access the page in question via their cached
		// entry. We can obviously lose a modified flag in this case, with the
		// effect that the page looks unmodified (and might thus be recycled),
		// but is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// this is actually a problem.
		// Interestingly FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/, true /*(oldEntry & ARM_PTE_DIRTY) != 0*/,
		updatePageQueue);

	return B_OK;
}


void
ARMVMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("ARMVMTranslationMap32Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

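	// The removed mappings are collected here and only freed to the object
	// cache after the translation map lock has been released (see the
	// freeFlags handling at the end of this method).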
	VMAreaMappings queue;

	RecursiveLocker locker(fLock);

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
				index++, start += B_PAGE_SIZE) {
			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntry(&pt[index]);
			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0)
				continue;

			fMapCount--;

			if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if (/*(oldEntry & ARM_PTE_ACCESSED) != 0*/ true) // XXX IRA
					page->accessed = true;
				if (/*(oldEntry & ARM_PTE_DIRTY) != 0 */ true)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical, as in all cases where this method is used, the unmapped
	// area range is unmapped for good (resized/cut) and the pages will likely
	// be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


void
ARMVMTranslationMap32Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		ARMVMTranslationMap32Bit::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

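	// If the whole address space is going away and the top cache's page flags
	// may be ignored, the individual page table entries only need to be
	// cleared for pages living in a lower (shared) cache, so that their
	// accessed/modified state is not lost.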
	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	RecursiveLocker locker(fLock);

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			int index = VADDR_TO_PDENT(address);
			if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page dir entry", page, area, address);
				continue;
			}

			ThreadCPUPinner pinner(thread_get_current_thread());

			page_table_entry* pt
				= (page_table_entry*)fPageMapper->GetPageTableAt(
					pd[index] & ARM_PDE_ADDRESS_MASK);
			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntry(
					&pt[VADDR_TO_PTENT(address)]);

			pinner.Unlock();

			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if (true /*(oldEntry & ARM_PTE_DIRTY) != 0*/)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


status_t
ARMVMTranslationMap32Bit::Query(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry *pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	if ((entry & ARM_PTE_TYPE_MASK) != 0)
		*_physical = (entry & ARM_PTE_ADDRESS_MASK) | VADDR_TO_PGOFF(va);

#if 0 //IRA
	// read in the page state flags
	if ((entry & X86_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & ARM_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & ARM_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & ARM_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
#else
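	// Accessed/dirty tracking is not implemented yet, so report any existing
	// mapping as kernel-readable/writable and present.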
	*_flags = B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA;
	if (*_physical != 0)
		*_flags |= PAGE_PRESENT;
#endif
	pinner.Unlock();

	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::QueryInterrupt(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}

	// map page table entry
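	// (unlike Query(), this uses InterruptGetPageTableAt(), which is meant to
	// be usable without pinning or locking, e.g. with interrupts disabled)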
	page_table_entry* pt = (page_table_entry*)ARMPagingMethod32Bit::Method()
		->PhysicalPageMapper()->InterruptGetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	if ((entry & ARM_PTE_TYPE_MASK) != 0)
		*_physical = (entry & ARM_PTE_ADDRESS_MASK) | VADDR_TO_PGOFF(va);

#if 0
	// read in the page state flags
	if ((entry & X86_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
#else
	*_flags = B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA;
	if (*_physical != 0)
		*_flags |= PAGE_PRESENT;
#endif
	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
		attributes);
#if 0 //IRA
	// compute protection flags
	uint32 newProtectionFlags = 0;
	if ((attributes & B_USER_PROTECTION) != 0) {
		newProtectionFlags = ARM_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newProtectionFlags |= ARM_PTE_WRITABLE;
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newProtectionFlags = ARM_PTE_WRITABLE;

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); index < 256 && start < end;
				index++, start += B_PAGE_SIZE) {
			page_table_entry entry = pt[index];
			if ((entry & ARM_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("protect_tmap: protect page 0x%lx\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			page_table_entry oldEntry;
			while (true) {
				oldEntry = ARMPagingMethod32Bit::TestAndSetPageTableEntry(
					&pt[index],
					(entry & ~(ARM_PTE_PROTECTION_MASK
							| ARM_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags
						| ARMPagingMethod32Bit::MemoryTypeToPageTableEntryFlags(
							memoryType),
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);
#endif
	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::ClearFlags(addr_t va, uint32 flags)
{
	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}
#if 0 //IRA
	uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? X86_PTE_ACCESSED : 0);
#else
	uint32 flagsToClear = 0;
#endif
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	// clear out the flags we've been requested to clear
	page_table_entry oldEntry
		= ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
			flagsToClear);

	pinner.Unlock();

	//XXX IRA if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(va);

	return B_OK;
}


bool
ARMVMTranslationMap32Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	TRACE("ARMVMTranslationMap32Bit::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);

	// perform the deed
	page_table_entry oldEntry;

	if (unmapIfUnaccessed) {
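		// Clear the entry with a compare-and-swap; if it was changed
		// concurrently (e.g. by another CPU), re-read and retry.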
		while (true) {
			oldEntry = pt[index];
			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
				// page mapping not valid
				return false;
			}
#if 0 //IRA
			if (oldEntry & ARM_PTE_ACCESSED) {
				// page was accessed -- just clear the flags
				oldEntry = ARMPagingMethod32Bit::ClearPageTableEntryFlags(
					&pt[index], ARM_PTE_ACCESSED | ARM_PTE_DIRTY);
				break;
			}
#endif
			// page hasn't been accessed -- unmap it
			if (ARMPagingMethod32Bit::TestAndSetPageTableEntry(&pt[index], 0,
					oldEntry) == oldEntry) {
				break;
			}

			// something changed -- check again
		}
	} else {
#if 0 //IRA
		oldEntry = ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
			ARM_PTE_ACCESSED | ARM_PTE_DIRTY);
#else
		oldEntry = pt[index];
#endif
	}

	pinner.Unlock();

	_modified = true /* (oldEntry & X86_PTE_DIRTY) != 0 */; // XXX IRA

	if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) {
		// Note that we only need to invalidate the address if the
		// accessed flag was set, since only then the entry could have been
		// in any TLB.
		InvalidatePage(address);

		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

	return false;
}


ARMPagingStructures*
ARMVMTranslationMap32Bit::PagingStructures() const
{
	return fPagingStructures;
}
914