xref: /haiku/src/system/kernel/arch/arm/paging/32bit/ARMVMTranslationMap32Bit.cpp (revision 4a55cc230cf7566cadcbb23b1928eefff8aea9a2)
1 /*
2  * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl
3  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 #include "paging/32bit/ARMVMTranslationMap32Bit.h"
13 
14 #include <stdlib.h>
15 #include <string.h>
16 
17 #include <int.h>
18 #include <thread.h>
19 #include <slab/Slab.h>
20 #include <smp.h>
21 #include <util/AutoLock.h>
22 #include <util/ThreadAutoLock.h>
23 #include <util/queue.h>
24 #include <vm/vm_page.h>
25 #include <vm/vm_priv.h>
26 #include <vm/VMAddressSpace.h>
27 #include <vm/VMCache.h>
28 
29 #include "paging/32bit/ARMPagingMethod32Bit.h"
30 #include "paging/32bit/ARMPagingStructures32Bit.h"
31 #include "paging/arm_physical_page_mapper.h"
32 
33 
34 //#define TRACE_ARM_VM_TRANSLATION_MAP_32_BIT
35 #ifdef TRACE_ARM_VM_TRANSLATION_MAP_32_BIT
36 #	define TRACE(x...) dprintf(x)
37 #else
38 #	define TRACE(x...) ;
39 #endif
40 
41 
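// The ARM short-descriptor L1 translation table holds 4096 32-bit entries
// (16 KB) and must be aligned on a 16 KB boundary, hence the values below.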
42 #define PAGEDIR_SIZE	ARM_MMU_L1_TABLE_SIZE
43 #define PAGEDIR_ALIGN	(4 * B_PAGE_SIZE)
44 
45 
46 ARMVMTranslationMap32Bit::ARMVMTranslationMap32Bit()
47 	:
48 	fPagingStructures(NULL)
49 {
50 }
51 
52 
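/*!	Frees all user space page tables still referenced by this map's page
	directory and drops the reference to the paging structures. The kernel
	part of the page directory is shared and left untouched.
*/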
53 ARMVMTranslationMap32Bit::~ARMVMTranslationMap32Bit()
54 {
55 	if (fPagingStructures == NULL)
56 		return;
57 
58 	if (fPageMapper != NULL)
59 		fPageMapper->Delete();
60 
61 	if (fPagingStructures->pgdir_virt != NULL) {
62 		// cycle through and free all of the user space pgtables
63 		for (uint32 i = VADDR_TO_PDENT(USER_BASE);
64 				i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
65 			if ((fPagingStructures->pgdir_virt[i] & ARM_PDE_TYPE_MASK) != 0) {
66 				addr_t address = fPagingStructures->pgdir_virt[i]
67 					& ARM_PDE_ADDRESS_MASK;
68 				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
69 				if (!page)
70 					panic("destroy_tmap: didn't find pgtable page\n");
71 				DEBUG_PAGE_ACCESS_START(page);
72 				vm_page_set_state(page, PAGE_STATE_FREE);
73 			}
74 		}
75 	}
76 
77 	fPagingStructures->RemoveReference();
78 }
79 
80 
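/*!	Initializes the translation map. A kernel map reuses the existing kernel
	page directory and physical page mapper. A user map gets its own
	translation map physical page mapper and a newly allocated page directory
	in a contiguous, PAGEDIR_ALIGN (16 KB) aligned kernel area.
*/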
81 status_t
82 ARMVMTranslationMap32Bit::Init(bool kernel)
83 {
84 	TRACE("ARMVMTranslationMap32Bit::Init()\n");
85 
86 	ARMVMTranslationMap::Init(kernel);
87 
88 	fPagingStructures = new(std::nothrow) ARMPagingStructures32Bit;
89 	if (fPagingStructures == NULL)
90 		return B_NO_MEMORY;
91 
92 	ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();
93 
94 	if (!kernel) {
95 		// user
96 		// allocate a physical page mapper
97 		status_t error = method->PhysicalPageMapper()
98 			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
99 		if (error != B_OK)
100 			return error;
101 
102 		// allocate the page directory
103 		page_directory_entry *virtualPageDir = NULL;
104 
105 		virtual_address_restrictions virtualRestrictions = {};
106 		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
107 
108 		physical_address_restrictions physicalRestrictions = {};
109 		physicalRestrictions.alignment = PAGEDIR_ALIGN;
110 
111 		area_id pgdir_area = create_area_etc(B_SYSTEM_TEAM, "pgdir",
112 			PAGEDIR_SIZE, B_CONTIGUOUS,
113 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, 0,
114 			&virtualRestrictions, &physicalRestrictions, (void **)&virtualPageDir);
115 
116 		if (pgdir_area < 0) {
117 			return pgdir_area;
118 		}
119 
120 		// look up the page directory's physical address
121 		phys_addr_t physicalPageDir;
122 		vm_get_page_mapping(VMAddressSpace::KernelID(),
123 			(addr_t)virtualPageDir, &physicalPageDir);
124 
125 		fPagingStructures->Init(virtualPageDir, physicalPageDir,
126 			method->KernelVirtualPageDirectory());
127 	} else {
128 		// kernel
129 		// get the physical page mapper
130 		fPageMapper = method->KernelPhysicalPageMapper();
131 
132 		// we already know the kernel pgdir mapping
133 		fPagingStructures->Init(method->KernelVirtualPageDirectory(),
134 			method->KernelPhysicalPageDirectory(), NULL);
135 	}
136 
137 	return B_OK;
138 }
139 
140 
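/*!	Returns the maximum number of page tables (and thus pages) that might have
	to be allocated in order to map the given virtual address range. Callers
	reserve that many pages before mapping, roughly like this (a sketch only;
	see vm_page.h for the reservation API):

		vm_page_reservation reservation;
		vm_page_reserve_pages(&reservation,
			map->MaxPagesNeededToMap(base, base + size - 1),
			VM_PRIORITY_USER);
		map->Map(base, physicalAddress, attributes, memoryType, &reservation);
		vm_page_unreserve_pages(&reservation);
*/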
141 size_t
142 ARMVMTranslationMap32Bit::MaxPagesNeededToMap(addr_t start, addr_t end) const
143 {
144 	// If start == 0, the actual base address is not yet known to the caller and
145 	// we shall assume the worst case.
146 	if (start == 0) {
147 		// offset the range so it has the worst possible alignment
148 		start = 1023 * B_PAGE_SIZE;
149 		end += 1023 * B_PAGE_SIZE;
150 	}
151 
152 	return VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start);
153 }
154 
155 
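/*!	Maps the page at physical address \a pa to virtual address \a va. If the
	1 MB section containing \a va has no page table yet, a page is taken from
	\a reservation, entered into the page directory and, for kernel addresses,
	mirrored into all other page directories. No TLB invalidation is needed
	afterwards, since the entry was not present before.
*/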
156 status_t
157 ARMVMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
158 	uint32 memoryType, vm_page_reservation* reservation)
159 {
160 	TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
161 
170 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
171 
172 	// check to see if a page table exists for this range
173 	uint32 index = VADDR_TO_PDENT(va);
174 	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
175 		phys_addr_t pgtable;
176 		vm_page *page;
177 
178 		// we need to allocate a pgtable
179 		page = vm_page_allocate_page(reservation,
180 			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
181 
182 		DEBUG_PAGE_ACCESS_END(page);
183 
184 		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
185 
186 		TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);
187 
188 		// put it in the pgdir
189 		ARMPagingMethod32Bit::PutPageTableInPageDir(&pd[index], pgtable,
190 			(va < KERNEL_BASE) ? ARM_MMU_L1_FLAG_PXN : 0);
191 
192 		// update any other page directories, if it maps kernel space
193 		if (index >= FIRST_KERNEL_PGDIR_ENT
194 			&& index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
195 			ARMPagingStructures32Bit::UpdateAllPageDirs(index, pd[index]);
196 		}
197 
198 		fMapCount++;
199 	}
200 
201 	// now, fill in the pentry
202 	Thread* thread = thread_get_current_thread();
203 	ThreadCPUPinner pinner(thread);
204 
205 	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
206 		pd[index] & ARM_PDE_ADDRESS_MASK);
207 	index = VADDR_TO_PTENT(va);
208 
209 	ASSERT_PRINT((pt[index] & ARM_PTE_TYPE_MASK) == 0,
210 		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
211 		pt[index]);
212 
213 	ARMPagingMethod32Bit::PutPageTableEntryInTable(&pt[index], pa, attributes,
214 		memoryType, fIsKernelMap);
215 
216 	pinner.Unlock();
217 
218 	// Note: We don't need to invalidate the TLB for this address, as previously
219 	// the entry was not present and the TLB doesn't cache those entries.
220 
221 	fMapCount++;
222 
223 	return B_OK;
224 }
225 
226 
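/*!	Clears all valid page table entries in the given address range and
	invalidates the corresponding TLB entries. The page tables themselves are
	only freed when the map is destroyed.
*/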
227 status_t
228 ARMVMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
229 {
230 	start = ROUNDDOWN(start, B_PAGE_SIZE);
231 	if (start >= end)
232 		return B_OK;
233 
234 	TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);
235 
236 	page_directory_entry *pd = fPagingStructures->pgdir_virt;
237 
238 	do {
239 		int index = VADDR_TO_PDENT(start);
240 		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
241 			// no page table here, move the start up to access the next page
242 			// table
243 			start = ROUNDUP(start + 1, kPageTableAlignment);
244 			continue;
245 		}
246 
247 		Thread* thread = thread_get_current_thread();
248 		ThreadCPUPinner pinner(thread);
249 
250 		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
251 			pd[index] & ARM_PDE_ADDRESS_MASK);
252 
253 		for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
254 				index++, start += B_PAGE_SIZE) {
255 			if ((pt[index] & ARM_PTE_TYPE_MASK) == 0) {
256 				// page mapping not valid
257 				continue;
258 			}
259 
260 			TRACE("unmap_tmap: removing page 0x%lx\n", start);
261 
262 			page_table_entry oldEntry
263 				= ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
264 					ARM_PTE_TYPE_MASK);
265 			fMapCount--;
266 
267 			if (true /* (oldEntry & ARM_PTE_ACCESSED) != 0*/) {
268 				// Note that we only need to invalidate the address if the
269 				// accessed flag was set, since only then could the entry have
270 				// been in any TLB.
271 				InvalidatePage(start);
272 			}
273 		}
274 	} while (start != 0 && start < end);
275 
276 	return B_OK;
277 }
278 
279 
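/*!	Not implemented for ARM yet; the disabled block below is the x86 version,
	kept as a reference.
*/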
280 status_t
281 ARMVMTranslationMap32Bit::DebugMarkRangePresent(addr_t start, addr_t end,
282 	bool markPresent)
283 {
284 #if 0
285 	start = ROUNDDOWN(start, B_PAGE_SIZE);
286 	if (start >= end)
287 		return B_OK;
288 
289 	page_directory_entry *pd = fPagingStructures->pgdir_virt;
290 
291 	do {
292 		int index = VADDR_TO_PDENT(start);
293 		if ((pd[index] & X86_PDE_PRESENT) == 0) {
294 			// no page table here, move the start up to access the next page
295 			// table
296 			start = ROUNDUP(start + 1, kPageTableAlignment);
297 			continue;
298 		}
299 
300 		Thread* thread = thread_get_current_thread();
301 		ThreadCPUPinner pinner(thread);
302 
303 		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
304 			pd[index] & X86_PDE_ADDRESS_MASK);
305 
306 		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
307 				index++, start += B_PAGE_SIZE) {
308 			if ((pt[index] & X86_PTE_PRESENT) == 0) {
309 				if (!markPresent)
310 					continue;
311 
312 				X86PagingMethod32Bit::SetPageTableEntryFlags(&pt[index],
313 					X86_PTE_PRESENT);
314 			} else {
315 				if (markPresent)
316 					continue;
317 
318 				page_table_entry oldEntry
319 					= X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
320 						X86_PTE_PRESENT);
321 
322 				if ((oldEntry & X86_PTE_ACCESSED) != 0) {
323 					// Note that we only need to invalidate the address if the
324 					// accessed flag was set, since only then could the entry
325 					// have been in any TLB.
326 					InvalidatePage(start);
327 				}
328 			}
329 		}
330 	} while (start != 0 && start < end);
331 #endif
332 	return B_OK;
333 }
334 
335 
336 /*!	Caller must have locked the cache of the page to be unmapped.
337 	This object shouldn't be locked.
338 */
339 status_t
340 ARMVMTranslationMap32Bit::UnmapPage(VMArea* area, addr_t address,
341 	bool updatePageQueue)
342 {
343 	ASSERT(address % B_PAGE_SIZE == 0);
344 
345 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
346 
347 	TRACE("ARMVMTranslationMap32Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);
348 
349 	RecursiveLocker locker(fLock);
350 
351 	int index = VADDR_TO_PDENT(address);
352 	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
353 		return B_ENTRY_NOT_FOUND;
354 
355 	ThreadCPUPinner pinner(thread_get_current_thread());
356 
357 	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
358 		pd[index] & ARM_PDE_ADDRESS_MASK);
359 
360 	index = VADDR_TO_PTENT(address);
361 	page_table_entry oldEntry = ARMPagingMethod32Bit::ClearPageTableEntry(
362 		&pt[index]);
363 
364 	pinner.Unlock();
365 
366 	if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
367 		// page mapping not valid
368 		return B_ENTRY_NOT_FOUND;
369 	}
370 
371 	fMapCount--;
372 
373 
374 	if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
375 		// Note that we only need to invalidate the address if the
376 		// accessed flag was set, since only then could the entry have been
377 		// in any TLB.
378 		InvalidatePage(address);
379 		Flush();
380 
381 		// NOTE: Between clearing the page table entry and Flush() other
382 		// processors (actually even this processor with another thread of the
383 		// same team) could still access the page in question via their cached
384 		// entry. We can obviously lose a modified flag in this case, with the
385 		// effect that the page looks unmodified (and might thus be recycled),
386 		// but is actually modified.
387 		// In most cases this is harmless, but for vm_remove_all_page_mappings()
388 		// this is actually a problem.
389 		// Interestingly, FreeBSD seems to ignore this problem as well
390 		// (cf. pmap_remove_all()), unless I've missed something.
391 	}
392 
393 	locker.Detach();
394 		// PageUnmapped() will unlock for us
395 
396 	PageUnmapped(area, (oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
397 		true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/, true /*(oldEntry & ARM_PTE_DIRTY) != 0*/,
398 		updatePageQueue);
399 
400 	return B_OK;
401 }
402 
403 
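/*!	Unmaps all pages in the given range of \a area, transfers their (currently
	always assumed) accessed/modified state to the vm_page objects and removes
	the corresponding mapping objects. The mappings are freed only after the
	map lock has been released.
*/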
404 void
405 ARMVMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
406 	bool updatePageQueue)
407 {
408 	if (size == 0)
409 		return;
410 
411 	addr_t start = base;
412 	addr_t end = base + size - 1;
413 
414 	TRACE("ARMVMTranslationMap32Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
415 		B_PRIxADDR ")\n", area, start, end);
416 
417 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
418 
419 	VMAreaMappings queue;
420 
421 	RecursiveLocker locker(fLock);
422 
423 	do {
424 		int index = VADDR_TO_PDENT(start);
425 		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
426 			// no page table here, move the start up to access the next page
427 			// table
428 			start = ROUNDUP(start + 1, kPageTableAlignment);
429 			continue;
430 		}
431 
432 		Thread* thread = thread_get_current_thread();
433 		ThreadCPUPinner pinner(thread);
434 
435 		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
436 			pd[index] & ARM_PDE_ADDRESS_MASK);
437 
438 		for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
439 				index++, start += B_PAGE_SIZE) {
440 			page_table_entry oldEntry
441 				= ARMPagingMethod32Bit::ClearPageTableEntry(&pt[index]);
442 			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0)
443 				continue;
444 
445 			fMapCount--;
446 
447 			if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
448 				// Note that we only need to invalidate the address if the
449 				// accessed flag was set, since only then could the entry have
450 				// been in any TLB.
451 				InvalidatePage(start);
452 			}
453 
454 			if (area->cache_type != CACHE_TYPE_DEVICE) {
455 				// get the page
456 				vm_page* page = vm_lookup_page(
457 					(oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
458 				ASSERT(page != NULL);
459 
460 				DEBUG_PAGE_ACCESS_START(page);
461 
462 				// transfer the accessed/dirty flags to the page
463 				if (/*(oldEntry & ARM_PTE_ACCESSED) != 0*/ true) // XXX IRA
464 					page->accessed = true;
465 				if (/*(oldEntry & ARM_PTE_DIRTY) != 0 */ true)
466 					page->modified = true;
467 
468 				// remove the mapping object/decrement the wired_count of the
469 				// page
470 				if (area->wiring == B_NO_LOCK) {
471 					vm_page_mapping* mapping = NULL;
472 					vm_page_mappings::Iterator iterator
473 						= page->mappings.GetIterator();
474 					while ((mapping = iterator.Next()) != NULL) {
475 						if (mapping->area == area)
476 							break;
477 					}
478 
479 					ASSERT(mapping != NULL);
480 
481 					area->mappings.Remove(mapping);
482 					page->mappings.Remove(mapping);
483 					queue.Add(mapping);
484 				} else
485 					page->DecrementWiredCount();
486 
487 				if (!page->IsMapped()) {
488 					atomic_add(&gMappedPagesCount, -1);
489 
490 					if (updatePageQueue) {
491 						if (page->Cache()->temporary)
492 							vm_page_set_state(page, PAGE_STATE_INACTIVE);
493 						else if (page->modified)
494 							vm_page_set_state(page, PAGE_STATE_MODIFIED);
495 						else
496 							vm_page_set_state(page, PAGE_STATE_CACHED);
497 					}
498 				}
499 
500 				DEBUG_PAGE_ACCESS_END(page);
501 			}
502 		}
503 
504 		Flush();
505 			// flush explicitly, since we directly use the lock
506 	} while (start != 0 && start < end);
507 
508 	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM this is
509 	// not really critical, since whenever this method is used the unmapped
510 	// range goes away for good (the area is resized or cut) and the pages
511 	// will likely be freed.
512 
513 	locker.Unlock();
514 
515 	// free removed mappings
516 	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
517 	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
518 		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
519 	while (vm_page_mapping* mapping = queue.RemoveHead())
520 		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
521 }
522 
523 
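/*!	Unmaps the entire \a area. Device and wired areas are simply passed to
	UnmapPages(); for all other areas the area's mapping list is walked
	directly, clearing each page's table entry and updating the page's state.
*/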
524 void
525 ARMVMTranslationMap32Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
526 	bool ignoreTopCachePageFlags)
527 {
528 	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
529 		ARMVMTranslationMap32Bit::UnmapPages(area, area->Base(), area->Size(),
530 			true);
531 		return;
532 	}
533 
534 	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
535 
536 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
537 
538 	RecursiveLocker locker(fLock);
539 
540 	VMAreaMappings mappings;
541 	mappings.MoveFrom(&area->mappings);
542 
543 	for (VMAreaMappings::Iterator it = mappings.GetIterator();
544 			vm_page_mapping* mapping = it.Next();) {
545 		vm_page* page = mapping->page;
546 		page->mappings.Remove(mapping);
547 
548 		VMCache* cache = page->Cache();
549 
550 		bool pageFullyUnmapped = false;
551 		if (!page->IsMapped()) {
552 			atomic_add(&gMappedPagesCount, -1);
553 			pageFullyUnmapped = true;
554 		}
555 
556 		if (unmapPages || cache != area->cache) {
557 			addr_t address = area->Base()
558 				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);
559 
560 			int index = VADDR_TO_PDENT(address);
561 			if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
562 				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
563 					"has no page dir entry", page, area, address);
564 				continue;
565 			}
566 
567 			ThreadCPUPinner pinner(thread_get_current_thread());
568 
569 			page_table_entry* pt
570 				= (page_table_entry*)fPageMapper->GetPageTableAt(
571 					pd[index] & ARM_PDE_ADDRESS_MASK);
572 			page_table_entry oldEntry
573 				= ARMPagingMethod32Bit::ClearPageTableEntry(
574 					&pt[VADDR_TO_PTENT(address)]);
575 
576 			pinner.Unlock();
577 
578 			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
579 				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
580 					"has no page table entry", page, area, address);
581 				continue;
582 			}
583 
584 			// transfer the accessed/dirty flags to the page and invalidate
585 			// the mapping, if necessary
586 			if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
587 				page->accessed = true;
588 
589 				if (!deletingAddressSpace)
590 					InvalidatePage(address);
591 			}
592 
593 			if (true /*(oldEntry & ARM_PTE_DIRTY) != 0*/)
594 				page->modified = true;
595 
596 			if (pageFullyUnmapped) {
597 				DEBUG_PAGE_ACCESS_START(page);
598 
599 				if (cache->temporary)
600 					vm_page_set_state(page, PAGE_STATE_INACTIVE);
601 				else if (page->modified)
602 					vm_page_set_state(page, PAGE_STATE_MODIFIED);
603 				else
604 					vm_page_set_state(page, PAGE_STATE_CACHED);
605 
606 				DEBUG_PAGE_ACCESS_END(page);
607 			}
608 		}
609 
610 		fMapCount--;
611 	}
612 
613 	Flush();
614 		// flush explicitly, since we directly use the lock
615 
616 	locker.Unlock();
617 
618 	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
619 	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
620 		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
621 	while (vm_page_mapping* mapping = mappings.RemoveHead())
622 		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
623 }
624 
625 
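/*!	Returns the physical address and the protection/state flags the page at
	\a va is mapped with. The calling thread is pinned to its CPU while the
	page table is accessed through the physical page mapper.
*/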
626 status_t
627 ARMVMTranslationMap32Bit::Query(addr_t va, phys_addr_t *_physical,
628 	uint32 *_flags)
629 {
630 	// default the flags to not present
631 	*_flags = 0;
632 	*_physical = 0;
633 
634 	int index = VADDR_TO_PDENT(va);
635 	page_directory_entry *pd = fPagingStructures->pgdir_virt;
636 	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
637 		// no pagetable here
638 		return B_OK;
639 	}
640 
641 	Thread* thread = thread_get_current_thread();
642 	ThreadCPUPinner pinner(thread);
643 
644 	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
645 		pd[index] & ARM_PDE_ADDRESS_MASK);
646 	page_table_entry entry = pt[VADDR_TO_PTENT(va)];
647 
648 	if ((entry & ARM_PTE_TYPE_MASK) != 0)
649 		*_physical = (entry & ARM_PTE_ADDRESS_MASK);
650 
651 	//TODO: read in the page state flags
652 	*_flags = ARMPagingMethod32Bit::PageTableEntryFlagsToAttributes(entry);
653 	if (*_physical != 0)
654 		*_flags |= PAGE_PRESENT;
655 
656 	pinner.Unlock();
657 
658 	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);
659 
660 	return B_OK;
661 }
662 
663 
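/*!	Same as Query(), but safe to call in interrupt context: the page table is
	accessed via InterruptGetPageTableAt() and no locking or pinning is done.
*/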
664 status_t
665 ARMVMTranslationMap32Bit::QueryInterrupt(addr_t va, phys_addr_t *_physical,
666 	uint32 *_flags)
667 {
668 	*_flags = 0;
669 	*_physical = 0;
670 
671 	int index = VADDR_TO_PDENT(va);
672 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
673 	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
674 		// no pagetable here
675 		return B_OK;
676 	}
677 
678 	// map page table entry
679 	page_table_entry* pt = (page_table_entry*)ARMPagingMethod32Bit::Method()
680 		->PhysicalPageMapper()->InterruptGetPageTableAt(
681 			pd[index] & ARM_PDE_ADDRESS_MASK);
682 	page_table_entry entry = pt[VADDR_TO_PTENT(va)];
683 
684 	if ((entry & ARM_PTE_TYPE_MASK) != 0)
685 		*_physical = (entry & ARM_PTE_ADDRESS_MASK);
686 
687 	//TODO: read in the page state flags
688 	*_flags = ARMPagingMethod32Bit::PageTableEntryFlagsToAttributes(entry);
689 	if (*_physical != 0)
690 		*_flags |= PAGE_PRESENT;
691 
692 	return B_OK;
693 }
694 
695 
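/*!	Changes the protection and memory type of all mapped pages in the given
	range. Each entry is updated with a compare-and-swap loop so that the
	accessed and dirty flags are not lost, and the page's TLB entry is
	invalidated.
*/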
696 status_t
697 ARMVMTranslationMap32Bit::Protect(addr_t start, addr_t end, uint32 attributes,
698 	uint32 memoryType)
699 {
700 	start = ROUNDDOWN(start, B_PAGE_SIZE);
701 	if (start >= end)
702 		return B_OK;
703 
704 	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
705 		attributes);
706 
707 	uint32 newProtectionFlags = ARMPagingMethod32Bit::AttributesToPageTableEntryFlags(attributes);
708 	uint32 newMemoryTypeFlags = ARMPagingMethod32Bit::MemoryTypeToPageTableEntryFlags(memoryType);
709 
710 	page_directory_entry *pd = fPagingStructures->pgdir_virt;
711 
712 	do {
713 		int index = VADDR_TO_PDENT(start);
714 		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
715 			// no page table here, move the start up to access the next page
716 			// table
717 			start = ROUNDUP(start + 1, kPageTableAlignment);
718 			continue;
719 		}
720 
721 		Thread* thread = thread_get_current_thread();
722 		ThreadCPUPinner pinner(thread);
723 
724 		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
725 			pd[index] & ARM_PDE_ADDRESS_MASK);
726 
727 		for (index = VADDR_TO_PTENT(start); index < 256 && start < end;
728 				index++, start += B_PAGE_SIZE) {
729 			page_table_entry entry = pt[index];
730 			if ((entry & ARM_PTE_TYPE_MASK) == 0) {
731 				// page mapping not valid
732 				continue;
733 			}
734 
735 			TRACE("protect_tmap: protect page 0x%lx\n", start);
736 
737 			// set the new protection flags -- we want to do that atomically,
738 			// without changing the accessed or dirty flag
739 			page_table_entry oldEntry;
740 			while (true) {
741 				oldEntry = ARMPagingMethod32Bit::TestAndSetPageTableEntry(
742 					&pt[index],
743 					(entry & ~(ARM_PTE_PROTECTION_MASK
744 							| ARM_PTE_MEMORY_TYPE_MASK))
745 						| newProtectionFlags | newMemoryTypeFlags,
746 					entry);
747 				if (oldEntry == entry)
748 					break;
749 				entry = oldEntry;
750 			}
751 
752 			//TODO: invalidate only if the Accessed flag is set
753 			InvalidatePage(start);
754 		}
755 	} while (start != 0 && start < end);
756 
757 	return B_OK;
758 }
759 
760 
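/*!	Supposed to clear the requested PAGE_ACCESSED/PAGE_MODIFIED flags for
	\a va. Since this port does not maintain the hardware flags yet, nothing
	is cleared (see the disabled block below); the TLB entry is invalidated
	unconditionally instead.
*/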
761 status_t
762 ARMVMTranslationMap32Bit::ClearFlags(addr_t va, uint32 flags)
763 {
764 	int index = VADDR_TO_PDENT(va);
765 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
766 	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
767 		// no pagetable here
768 		return B_OK;
769 	}
770 #if 0 //IRA
771 	uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PTE_DIRTY : 0)
772 		| ((flags & PAGE_ACCESSED) ? X86_PTE_ACCESSED : 0);
773 #else
774 	uint32 flagsToClear = 0;
775 #endif
776 	Thread* thread = thread_get_current_thread();
777 	ThreadCPUPinner pinner(thread);
778 
779 	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
780 		pd[index] & ARM_PDE_ADDRESS_MASK);
781 	index = VADDR_TO_PTENT(va);
782 
783 	// clear out the flags we've been requested to clear
784 	page_table_entry oldEntry
785 		= ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
786 			flagsToClear);
787 
788 	pinner.Unlock();
789 
790 	//XXX IRA if ((oldEntry & flagsToClear) != 0)
791 		InvalidatePage(va);
792 
793 	return B_OK;
794 }
795 
796 
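/*!	Meant to clear the accessed and modified flags of the page at \a address,
	returning whether it was accessed and, via \a _modified, whether it was
	modified. If \a unmapIfUnaccessed is set, an unaccessed page is supposed
	to be unmapped instead. As long as the port does not track the hardware
	flags, every mapped page is reported as both accessed and modified.
*/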
797 bool
798 ARMVMTranslationMap32Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
799 	bool unmapIfUnaccessed, bool& _modified)
800 {
801 	ASSERT(address % B_PAGE_SIZE == 0);
802 
803 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
804 
805 	TRACE("ARMVMTranslationMap32Bit::ClearAccessedAndModified(%#" B_PRIxADDR
806 		")\n", address);
807 
808 	RecursiveLocker locker(fLock);
809 
810 	int index = VADDR_TO_PDENT(address);
811 	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
812 		return false;
813 
814 	ThreadCPUPinner pinner(thread_get_current_thread());
815 
816 	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
817 		pd[index] & ARM_PDE_ADDRESS_MASK);
818 
819 	index = VADDR_TO_PTENT(address);
820 
821 	// perform the deed
822 	page_table_entry oldEntry;
823 
824 	if (unmapIfUnaccessed) {
825 		while (true) {
826 			oldEntry = pt[index];
827 			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
828 				// page mapping not valid
829 				return false;
830 			}
831 #if 0 //IRA
832 			if (oldEntry & ARM_PTE_ACCESSED) {
833 				// page was accessed -- just clear the flags
834 				oldEntry = ARMPagingMethod32Bit::ClearPageTableEntryFlags(
835 					&pt[index], ARM_PTE_ACCESSED | ARM_PTE_DIRTY);
836 				break;
837 			}
838 #endif
839 			// page hasn't been accessed -- unmap it
840 			if (ARMPagingMethod32Bit::TestAndSetPageTableEntry(&pt[index], 0,
841 					oldEntry) == oldEntry) {
842 				break;
843 			}
844 
845 			// something changed -- check again
846 		}
847 	} else {
848 #if 0 //IRA
849 		oldEntry = ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
850 			ARM_PTE_ACCESSED | ARM_PTE_DIRTY);
851 #else
852 		oldEntry = pt[index];
853 #endif
854 	}
855 
856 	pinner.Unlock();
857 
858 	_modified = true /* (oldEntry & ARM_PTE_DIRTY) != 0 */; // XXX IRA
859 
860 	if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) {
861 		// Note that we only need to invalidate the address if the
862 		// accessed flag was set, since only then could the entry have been
863 		// in any TLB.
864 		InvalidatePage(address);
865 
866 		Flush();
867 
868 		return true;
869 	}
870 
871 	if (!unmapIfUnaccessed)
872 		return false;
873 
874 	// We have unmapped the address. Do the "high level" stuff.
875 
876 	fMapCount--;
877 
878 	locker.Detach();
879 		// UnaccessedPageUnmapped() will unlock for us
880 
881 	UnaccessedPageUnmapped(area,
882 		(oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
883 
884 	return false;
885 }
886 
887 
888 ARMPagingStructures*
889 ARMVMTranslationMap32Bit::PagingStructures() const
890 {
891 	return fPagingStructures;
892 }
893