xref: /haiku/src/system/kernel/arch/arm64/VMSAv8TranslationMap.cpp (revision 8cb8c3d75679dfb6852dfd150f1bd5bc883cce8e)
1a25542e7Smilek7 /*
2a25542e7Smilek7  * Copyright 2022 Haiku, Inc. All Rights Reserved.
3a25542e7Smilek7  * Distributed under the terms of the MIT License.
4a25542e7Smilek7  */
5a25542e7Smilek7 #include "VMSAv8TranslationMap.h"
6a25542e7Smilek7 
7baf574c9SOwen Anderson #include <algorithm>
8a25542e7Smilek7 #include <util/AutoLock.h>
9a25542e7Smilek7 #include <util/ThreadAutoLock.h>
10a25542e7Smilek7 #include <vm/vm_page.h>
11a25542e7Smilek7 #include <vm/vm_priv.h>
12a25542e7Smilek7 
// Definitions for the static members declared in VMSAv8TranslationMap.h.
uint32_t VMSAv8TranslationMap::fHwFeature;
uint64_t VMSAv8TranslationMap::fMair;

// ASID Management
static constexpr size_t kAsidBits = 8;
static constexpr size_t kNumAsids = (1 << kAsidBits);
// Protects sAsidBitMap and sAsidMapping below, as well as fASID/fRefcount
// of the individual maps.
static spinlock sAsidLock = B_SPINLOCK_INITIALIZER;
// A bitmap to track which ASIDs are in use.
static uint64 sAsidBitMap[kNumAsids / 64] = {};
// A mapping from ASID to translation map.
static VMSAv8TranslationMap* sAsidMapping[kNumAsids] = {};
249fad0a5cSOwen Anderson 
259fad0a5cSOwen Anderson 
269fad0a5cSOwen Anderson static void
279fad0a5cSOwen Anderson free_asid(size_t asid)
289fad0a5cSOwen Anderson {
299fad0a5cSOwen Anderson 	for (size_t i = 0; i < B_COUNT_OF(sAsidBitMap); ++i) {
309fad0a5cSOwen Anderson 		if (asid < 64) {
319fad0a5cSOwen Anderson 			sAsidBitMap[i] &= ~(uint64_t{1} << asid);
329fad0a5cSOwen Anderson 			return;
339fad0a5cSOwen Anderson 		}
349fad0a5cSOwen Anderson 		asid -= 64;
359fad0a5cSOwen Anderson 	}
369fad0a5cSOwen Anderson 
379fad0a5cSOwen Anderson 	panic("Could not free ASID!");
389fad0a5cSOwen Anderson }
399fad0a5cSOwen Anderson 
409fad0a5cSOwen Anderson 
// Invalidate every TLB entry tagged with `asid` on all CPUs in the
// inner-shareable domain, then synchronize.
static void
flush_tlb_whole_asid(uint64_t asid)
{
	asm("dsb ishst"); // make prior page-table writes visible first
	asm("tlbi aside1is, %0" ::"r"(asid << 48)); // ASID goes in bits [63:48]
	asm("dsb ish"); // wait for the invalidation to complete everywhere
	asm("isb");
}
499406d2a4SOwen Anderson 
509406d2a4SOwen Anderson 
519fad0a5cSOwen Anderson static size_t
529fad0a5cSOwen Anderson alloc_first_free_asid(void)
539fad0a5cSOwen Anderson {
549fad0a5cSOwen Anderson 	int asid = 0;
559fad0a5cSOwen Anderson 	for (size_t i = 0; i < B_COUNT_OF(sAsidBitMap); ++i) {
569fad0a5cSOwen Anderson 		int avail = __builtin_ffsll(~sAsidBitMap[i]);
579fad0a5cSOwen Anderson 		if (avail != 0) {
589fad0a5cSOwen Anderson 			sAsidBitMap[i] |= (uint64_t{1} << (avail-1));
599fad0a5cSOwen Anderson 			asid += (avail - 1);
609fad0a5cSOwen Anderson 			return asid;
619fad0a5cSOwen Anderson 		}
629fad0a5cSOwen Anderson 		asid += 64;
639fad0a5cSOwen Anderson 	}
649fad0a5cSOwen Anderson 
659fad0a5cSOwen Anderson 	return kNumAsids;
669fad0a5cSOwen Anderson }
677908993dSOwen Anderson 
68a25542e7Smilek7 
// Create a translation map rooted at `pageTable` (may be 0; the root
// table is then allocated lazily on first Map()). `pageBits` is the
// page size in bits, `vaBits` the virtual address space size in bits.
VMSAv8TranslationMap::VMSAv8TranslationMap(
	bool kernel, phys_addr_t pageTable, int pageBits, int vaBits, int minBlockLevel)
	:
	fIsKernel(kernel),
	fPageTable(pageTable),
	fPageBits(pageBits),
	fVaBits(vaBits),
	fMinBlockLevel(minBlockLevel),
	fASID(-1), // no ASID yet; one is assigned in SwitchUserMap()
	fRefcount(0)
{
	dprintf("VMSAv8TranslationMap\n");

	fInitialLevel = CalcStartLevel(fVaBits, fPageBits);
}
84a25542e7Smilek7 
85a25542e7Smilek7 
VMSAv8TranslationMap::~VMSAv8TranslationMap()
{
	// Only user maps are ever torn down.
	ASSERT(!fIsKernel);
	// No CPU may still have this map active in TTBR0.
	ASSERT(fRefcount == 0);
	{
		ThreadCPUPinner pinner(thread_get_current_thread());
		// Recursively free all page-table pages; no per-entry callback needed.
		FreeTable(fPageTable, 0, fInitialLevel, [](int level, uint64_t oldPte) {});
	}

	{
		InterruptsSpinLocker locker(sAsidLock);

		// Release our ASID, if SwitchUserMap() ever assigned one.
		if (fASID != -1) {
			sAsidMapping[fASID] = NULL;
			free_asid(fASID);
		}
	}
}
1049fad0a5cSOwen Anderson 
1059fad0a5cSOwen Anderson 
1069fad0a5cSOwen Anderson // Switch user map into TTBR0.
1079fad0a5cSOwen Anderson // Passing kernel map here configures empty page table.
// Switch user map into TTBR0.
// Passing kernel map here configures empty page table.
void
VMSAv8TranslationMap::SwitchUserMap(VMSAv8TranslationMap *from, VMSAv8TranslationMap *to)
{
	InterruptsSpinLocker locker(sAsidLock);

	// fRefcount counts the CPUs currently running each user map, so the
	// ASID-stealing loop below only takes ASIDs from inactive maps.
	if (!from->fIsKernel) {
		from->fRefcount--;
	}

	if (!to->fIsKernel) {
		to->fRefcount++;
	} else {
		// Switching to the kernel map: TTBR0 gets an empty table instead.
		arch_vm_install_empty_table_ttbr0();
		return;
	}

	ASSERT(to->fPageTable != 0);
	// Bit 0 of TTBR0 is CnP when the feature is supported.
	uint64_t ttbr = to->fPageTable | ((fHwFeature & HW_COMMON_NOT_PRIVATE) != 0 ? 1 : 0);

	// Fast path: the map already owns an ASID; just install it.
	if (to->fASID != -1) {
		WRITE_SPECIALREG(TTBR0_EL1, ((uint64_t)to->fASID << 48) | ttbr);
		asm("isb");
		return;
	}

	// Try a fresh ASID; stale TLB entries under it must be flushed.
	size_t allocatedAsid = alloc_first_free_asid();
	if (allocatedAsid != kNumAsids) {
		to->fASID = allocatedAsid;
		sAsidMapping[allocatedAsid] = to;

		WRITE_SPECIALREG(TTBR0_EL1, (allocatedAsid << 48) | ttbr);
		flush_tlb_whole_asid(allocatedAsid);
		return;
	}

	// Pool exhausted: steal an ASID from a map not active on any CPU.
	for (size_t i = 0; i < kNumAsids; ++i) {
		if (sAsidMapping[i]->fRefcount == 0) {
			sAsidMapping[i]->fASID = -1;
			to->fASID = i;
			sAsidMapping[i] = to;

			WRITE_SPECIALREG(TTBR0_EL1, (i << 48) | ttbr);
			flush_tlb_whole_asid(i);
			return;
		}
	}

	panic("cannot assign ASID");
}
157a25542e7Smilek7 
158a25542e7Smilek7 
159a25542e7Smilek7 int
160a25542e7Smilek7 VMSAv8TranslationMap::CalcStartLevel(int vaBits, int pageBits)
161a25542e7Smilek7 {
162a25542e7Smilek7 	int level = 4;
163a25542e7Smilek7 
164a25542e7Smilek7 	int bitsLeft = vaBits - pageBits;
165a25542e7Smilek7 	while (bitsLeft > 0) {
166a25542e7Smilek7 		int tableBits = pageBits - 3;
167a25542e7Smilek7 		bitsLeft -= tableBits;
168a25542e7Smilek7 		level--;
169a25542e7Smilek7 	}
170a25542e7Smilek7 
171a25542e7Smilek7 	ASSERT(level >= 0);
172a25542e7Smilek7 
173a25542e7Smilek7 	return level;
174a25542e7Smilek7 }
175a25542e7Smilek7 
176a25542e7Smilek7 
// Acquire the map's lock. The lock is recursive; always returns true.
bool
VMSAv8TranslationMap::Lock()
{
	recursive_lock_lock(&fLock);
	return true;
}
183a25542e7Smilek7 
184a25542e7Smilek7 
// Release the map's lock, flushing pending invalidations once the
// outermost recursion level is dropped.
void
VMSAv8TranslationMap::Unlock()
{
	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we're about to release it for the last time
		Flush();
	}
	recursive_lock_unlock(&fLock);
}
194a25542e7Smilek7 
195a25542e7Smilek7 
// Number of bytes currently mapped by this map. Not implemented yet;
// always panics.
addr_t
VMSAv8TranslationMap::MappedSize() const
{
	panic("VMSAv8TranslationMap::MappedSize not implemented");
	return 0;
}
202a25542e7Smilek7 
203a25542e7Smilek7 
204a25542e7Smilek7 size_t
205a25542e7Smilek7 VMSAv8TranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
206a25542e7Smilek7 {
207a25542e7Smilek7 	size_t result = 0;
208a25542e7Smilek7 	size_t size = end - start + 1;
209a25542e7Smilek7 
210a25542e7Smilek7 	for (int i = fInitialLevel; i < 3; i++) {
211a25542e7Smilek7 		int tableBits = fPageBits - 3;
212a25542e7Smilek7 		int shift = tableBits * (3 - i) + fPageBits;
213a25542e7Smilek7 		uint64_t entrySize = 1UL << shift;
214a25542e7Smilek7 
215a25542e7Smilek7 		result += size / entrySize + 2;
216a25542e7Smilek7 	}
217a25542e7Smilek7 
218a25542e7Smilek7 	return result;
219a25542e7Smilek7 }
220a25542e7Smilek7 
221a25542e7Smilek7 
// Return the kernel virtual address of the page-table page at physical
// address `pa`, via the kernel's physical-memory mapping.
uint64_t*
VMSAv8TranslationMap::TableFromPa(phys_addr_t pa)
{
	return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
}
227a25542e7Smilek7 
228a25542e7Smilek7 
// Recursively free the page-table page at physical address `ptPa` — a
// table on `level` covering virtual addresses starting at `va` — and
// every table it references. Each removed mapping is flushed from the
// TLB and reported through `entryRemoved(level, oldPte)`; finally the
// table page itself is returned to the free list.
template<typename EntryRemoved>
void
VMSAv8TranslationMap::FreeTable(phys_addr_t ptPa, uint64_t va, int level,
	EntryRemoved &&entryRemoved)
{
	ASSERT(level < 4);

	int tableBits = fPageBits - 3;
	uint64_t tableSize = 1UL << tableBits;
	uint64_t vaMask = (1UL << fVaBits) - 1;

	int shift = tableBits * (3 - level) + fPageBits;
	uint64_t entrySize = 1UL << shift;

	uint64_t nextVa = va;
	uint64_t* pt = TableFromPa(ptPa);
	for (uint64_t i = 0; i < tableSize; i++) {
		// Atomically clear the entry so concurrent walkers see it removed.
		uint64_t oldPte = (uint64_t) atomic_get_and_set64((int64*) &pt[i], 0);

		if (level < 3 && (oldPte & kPteTypeMask) == kPteTypeL012Table) {
			FreeTable(oldPte & kPteAddrMask, nextVa, level + 1, entryRemoved);
		} else if ((oldPte & kPteTypeMask) != 0) {
			// Rebuild the full VA (kernel addresses are sign-extended)
			// for the TLBI operand.
			uint64_t fullVa = (fIsKernel ? ~vaMask : 0) | nextVa;
			asm("dsb ishst");
			asm("tlbi vaae1is, %0" :: "r" ((fullVa >> 12) & kTLBIMask));
			// Does it correctly flush block entries at level < 3? We don't use them anyway though.
			// TODO: Flush only currently used ASID (using vae1is)
			entryRemoved(level, oldPte);
		}

		nextVa += entrySize;
	}

	asm("dsb ish");

	// Release the page backing this table itself.
	vm_page* page = vm_lookup_page(ptPa >> fPageBits);
	DEBUG_PAGE_ACCESS_START(page);
	vm_page_set_state(page, PAGE_STATE_FREE);
}
268a25542e7Smilek7 
269a25542e7Smilek7 
// Make a new page sub-table.
// The parent table is `ptPa`, and the new sub-table's PTE will be at `index`
// in it.
// Returns the physical address of the new table, or the address of the existing
// one if the PTE is already filled. Returns 0 when the entry is empty and
// `reservation` is null, since no table can be allocated then.
phys_addr_t
VMSAv8TranslationMap::GetOrMakeTable(phys_addr_t ptPa, int level, int index,
	vm_page_reservation* reservation)
{
	ASSERT(level < 3);

	uint64_t* ptePtr = TableFromPa(ptPa) + index;
	uint64_t oldPte = atomic_get64((int64*) ptePtr);

	int type = oldPte & kPteTypeMask;
	if (type == kPteTypeL012Table) {
		// This is table entry already, just return it
		return oldPte & kPteAddrMask;
	} else if (reservation != nullptr) {
		// Create new table there
		vm_page* page = vm_page_allocate_page(reservation, PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
		phys_addr_t newTablePa = page->physical_page_number << fPageBits;
		DEBUG_PAGE_ACCESS_END(page);

		// We only create mappings at the final level so we don't need to handle
		// splitting block mappings
		ASSERT(type != kPteTypeL12Block);

		// Ensure that writes to page being attached have completed
		asm("dsb ishst");

		uint64_t oldPteRefetch = (uint64_t)atomic_test_and_set64((int64*) ptePtr,
			newTablePa | kPteTypeL012Table, oldPte);
		if (oldPteRefetch != oldPte) {
			// If the old PTE has mutated, it must be because another thread has allocated the
			// sub-table at the same time as us. If that has happened, deallocate the page we
			// setup and use the one they installed instead.
			ASSERT((oldPteRefetch & kPteTypeMask) == kPteTypeL012Table);
			DEBUG_PAGE_ACCESS_START(page);
			vm_page_set_state(page, PAGE_STATE_FREE);
			return oldPteRefetch & kPteAddrMask;
		}

		return newTablePa;
	}

	// There's no existing table and we have no reservation
	return 0;
}
319a25542e7Smilek7 
320a25542e7Smilek7 
321a25542e7Smilek7 void
322baf574c9SOwen Anderson VMSAv8TranslationMap::FlushVAFromTLBByASID(addr_t va)
323baf574c9SOwen Anderson {
3244b9a9eabSOwen Anderson 	InterruptsSpinLocker locker(sAsidLock);
3259406d2a4SOwen Anderson 	if (fASID != -1) {
326baf574c9SOwen Anderson         asm("tlbi vae1is, %0" ::"r"(((va >> 12) & kTLBIMask) | (uint64_t(fASID) << 48)));
327baf574c9SOwen Anderson 		asm("dsb ish"); // Wait for TLB flush to complete
328baf574c9SOwen Anderson 	}
329baf574c9SOwen Anderson }
330baf574c9SOwen Anderson 
331baf574c9SOwen Anderson 
// First half of the ARM64 "break-before-make" sequence: invalidate the
// PTE and flush `va` from the TLB so no CPU can hold a stale
// translation before the caller installs the replacement entry.
void
VMSAv8TranslationMap::PerformPteBreakBeforeMake(uint64_t* ptePtr, addr_t va)
{
	atomic_set64((int64*)ptePtr, 0);
	asm("dsb ishst"); // Ensure PTE write completed
	FlushVAFromTLBByASID(va);
}
339baf574c9SOwen Anderson 
340baf574c9SOwen Anderson 
// Walk the page tables for the virtual range [va, va + size), starting
// from the table at `ptPa` on `level`, and invoke
// `updatePte(ptePtr, effectiveVa)` for every level-3 entry covering the
// range. Missing intermediate tables are allocated from `reservation`;
// when `reservation` is null, such holes are skipped instead.
template<typename UpdatePte>
void
VMSAv8TranslationMap::ProcessRange(phys_addr_t ptPa, int level, addr_t va, size_t size,
    vm_page_reservation* reservation, UpdatePte&& updatePte)
{
	ASSERT(level < 4);
	ASSERT(ptPa != 0);

	int tableBits = fPageBits - 3;
	uint64_t tableMask = (1UL << tableBits) - 1;

	int shift = tableBits * (3 - level) + fPageBits;
	uint64_t entrySize = 1UL << shift;
	uint64_t entryMask = entrySize - 1;

	// Widen the range to entry boundaries; only at level 3 must the
	// input already be exactly page-aligned.
	uint64_t alignedDownVa = va & ~entryMask;
	uint64_t alignedUpEnd = (va + size + (entrySize - 1)) & ~entryMask;
	if (level == 3)
		ASSERT(alignedDownVa == va);

    for (uint64_t effectiveVa = alignedDownVa; effectiveVa < alignedUpEnd;
        effectiveVa += entrySize) {
		int index = (effectiveVa >> shift) & tableMask;
		uint64_t* ptePtr = TableFromPa(ptPa) + index;

		if (level == 3) {
			updatePte(ptePtr, effectiveVa);
		} else {
			phys_addr_t subTable = GetOrMakeTable(ptPa, level, index, reservation);

			// When reservation is null, we can't create a new subtable. This can be intentional,
			// for example when called from Unmap().
			if (subTable == 0)
				continue;

			// Clip [va, va + size) to the part that falls into this entry.
			uint64_t subVa = std::max(effectiveVa, va);
			size_t subSize = std::min(size_t(entrySize - (subVa & entryMask)), size);
            ProcessRange(subTable, level + 1, subVa, subSize, reservation, updatePte);

			size -= subSize;
		}
	}
}
384baf574c9SOwen Anderson 
385baf574c9SOwen Anderson 
386a25542e7Smilek7 uint8_t
387a25542e7Smilek7 VMSAv8TranslationMap::MairIndex(uint8_t type)
388a25542e7Smilek7 {
389a25542e7Smilek7 	for (int i = 0; i < 8; i++)
390a25542e7Smilek7 		if (((fMair >> (i * 8)) & 0xff) == type)
391a25542e7Smilek7 			return i;
392a25542e7Smilek7 
393a25542e7Smilek7 	panic("MAIR entry not found");
394a25542e7Smilek7 	return 0;
395a25542e7Smilek7 }
396a25542e7Smilek7 
397a25542e7Smilek7 
// Build the attribute bits of a VMSAv8 PTE from Haiku's B_*_AREA
// protection flags, the B_MTR_* memory type, and whether the mapping
// belongs to the kernel address space.
uint64_t
VMSAv8TranslationMap::GetMemoryAttr(uint32 attributes, uint32 memoryType, bool isKernel)
{
	uint64_t attr = 0;

	if (!isKernel)
		attr |= kAttrNG; // user mappings are not-global, i.e. tagged by ASID

	if ((attributes & B_EXECUTE_AREA) == 0)
		attr |= kAttrUXN;
	if ((attributes & B_KERNEL_EXECUTE_AREA) == 0)
		attr |= kAttrPXN;

	// SWDBM is software reserved bit that we use to mark that
	// writes are allowed, and fault handler should clear kAttrAPReadOnly.
	// In that case kAttrAPReadOnly doubles as not-dirty bit.
	// Additionally dirty state can be stored in SWDIRTY, in order not to lose
	// dirty state when changing protection from RW to RO.

	// All page permissions begin life in RO state.
	attr |= kAttrAPReadOnly;

	// User-Execute implies User-Read, because it would break PAN otherwise
	if ((attributes & B_READ_AREA) != 0 || (attributes & B_EXECUTE_AREA) != 0)
		attr |= kAttrAPUserAccess; // Allow user reads

	if ((attributes & B_WRITE_AREA) != 0 || (attributes & B_KERNEL_WRITE_AREA) != 0)
		attr |= kAttrSWDBM; // Mark as writeable

	// When supported by hardware copy our SWDBM bit into DBM,
	// so that kAttrAPReadOnly is cleared on write attempt automatically
	// without going through fault handler.
	if ((fHwFeature & HW_DIRTY) != 0 && (attr & kAttrSWDBM) != 0)
		attr |= kAttrDBM;

	attr |= kAttrSHInnerShareable; // Inner Shareable

	uint8_t type = MAIR_NORMAL_WB;

	switch (memoryType & B_MTR_MASK) {
		case B_MTR_UC:
			// TODO: This probably should be nGnRE for PCI
			type = MAIR_DEVICE_nGnRnE;
			break;
		case B_MTR_WC:
			type = MAIR_NORMAL_NC;
			break;
		case B_MTR_WT:
			type = MAIR_NORMAL_WT;
			break;
		case B_MTR_WP:
			// NOTE(review): write-protected is approximated with
			// write-through here (same as B_MTR_WT) — confirm intentional.
			type = MAIR_NORMAL_WT;
			break;
		default:
		case B_MTR_WB:
			type = MAIR_NORMAL_WB;
			break;
	}

	attr |= MairIndex(type) << 2;

	return attr;
}
461a25542e7Smilek7 
462a25542e7Smilek7 
// Map the single page at physical address `pa` to virtual address `va`
// with the given protection and memory type, drawing pages from
// `reservation` for any page tables that must be created.
status_t
VMSAv8TranslationMap::Map(addr_t va, phys_addr_t pa, uint32 attributes, uint32 memoryType,
	vm_page_reservation* reservation)
{
	ThreadCPUPinner pinner(thread_get_current_thread());

	uint64_t pageMask = (1UL << fPageBits) - 1;
	uint64_t vaMask = (1UL << fVaBits) - 1;

	ASSERT((va & pageMask) == 0);
	ASSERT((pa & pageMask) == 0);
	ASSERT(ValidateVa(va));

	uint64_t attr = GetMemoryAttr(attributes, memoryType, fIsKernel);

	// During first mapping we need to allocate root table
	if (fPageTable == 0) {
		vm_page* page = vm_page_allocate_page(reservation, PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
		DEBUG_PAGE_ACCESS_END(page);
		fPageTable = page->physical_page_number << fPageBits;
	}

	ProcessRange(fPageTable, 0, va & vaMask, B_PAGE_SIZE, reservation,
		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
			phys_addr_t effectivePa = effectiveVa - (va & vaMask) + pa;
			uint64_t oldPte = atomic_get64((int64*)ptePtr);
			uint64_t newPte = effectivePa | attr | kPteTypeL3Page;

			// Identical mapping already present; nothing to do.
			if (newPte == oldPte)
				return;

            if ((newPte & kPteValidMask) != 0 && (oldPte & kPteValidMask) != 0) {
				// ARM64 requires "break-before-make". We must set the PTE to an invalid
				// entry and flush the TLB as appropriate before we can write the new PTE.
				PerformPteBreakBeforeMake(ptePtr, effectiveVa);
			}

			// Install the new PTE
            atomic_set64((int64*)ptePtr, newPte);
			asm("dsb ishst"); // Ensure PTE write completed
		});

	return B_OK;
}
507a25542e7Smilek7 
508a25542e7Smilek7 
// Remove all mappings in the inclusive range [start, end]: each present
// PTE has its valid bit cleared and is flushed from the TLB. Page
// tables themselves are left in place.
status_t
VMSAv8TranslationMap::Unmap(addr_t start, addr_t end)
{
	ThreadCPUPinner pinner(thread_get_current_thread());

	size_t size = end - start + 1;

	uint64_t pageMask = (1UL << fPageBits) - 1;
	uint64_t vaMask = (1UL << fVaBits) - 1;

	ASSERT((start & pageMask) == 0);
	ASSERT((size & pageMask) == 0);
	ASSERT(ValidateVa(start));

	// Nothing was ever mapped, so there is nothing to remove.
	if (fPageTable == 0)
		return B_OK;

	ProcessRange(fPageTable, 0, start & vaMask, size, nullptr,
		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
			uint64_t oldPte = atomic_and64((int64_t*)ptePtr, ~kPteValidMask);
			if ((oldPte & kPteValidMask) != 0) {
				asm("dsb ishst"); // Ensure PTE write completed
				FlushVAFromTLBByASID(effectiveVa);
			}
		});

	return B_OK;
}
537a25542e7Smilek7 
538a25542e7Smilek7 
// Remove the mapping for `address` from `area`, reporting the page's
// accessed/modified state to PageUnmapped(). Returns B_ENTRY_NOT_FOUND
// when no valid mapping was present.
status_t
VMSAv8TranslationMap::UnmapPage(VMArea* area, addr_t address, bool updatePageQueue)
{
	uint64_t pageMask = (1UL << fPageBits) - 1;
	uint64_t vaMask = (1UL << fVaBits) - 1;

	ASSERT((address & pageMask) == 0);
	ASSERT(ValidateVa(address));

	ThreadCPUPinner pinner(thread_get_current_thread());
	RecursiveLocker locker(fLock);

	uint64_t oldPte = 0;
	ProcessRange(fPageTable, 0, address & vaMask, B_PAGE_SIZE, nullptr,
		[=, &oldPte](uint64_t* ptePtr, uint64_t effectiveVa) {
			oldPte = atomic_get_and_set64((int64_t*)ptePtr, 0);
			asm("dsb ishst");
			// Only a PTE whose access flag was set can be in the TLB.
			if ((oldPte & kAttrAF) != 0)
				FlushVAFromTLBByASID(effectiveVa);
		});

	if ((oldPte & kPteValidMask) == 0)
		return B_ENTRY_NOT_FOUND;

	pinner.Unlock();
	locker.Detach();
	// AF set = accessed; read-only clear = written (see GetMemoryAttr()).
	PageUnmapped(area, (oldPte & kPteAddrMask) >> fPageBits, (oldPte & kAttrAF) != 0,
		(oldPte & kAttrAPReadOnly) == 0, updatePageQueue);

	return B_OK;
}
570a25542e7Smilek7 
571a25542e7Smilek7 
572a25542e7Smilek7 bool
573a25542e7Smilek7 VMSAv8TranslationMap::ValidateVa(addr_t va)
574a25542e7Smilek7 {
575a25542e7Smilek7 	uint64_t vaMask = (1UL << fVaBits) - 1;
576a25542e7Smilek7 	bool kernelAddr = (va & (1UL << 63)) != 0;
577a25542e7Smilek7 	if (kernelAddr != fIsKernel)
578a25542e7Smilek7 		return false;
579a25542e7Smilek7 	if ((va & ~vaMask) != (fIsKernel ? ~vaMask : 0))
580a25542e7Smilek7 		return false;
581a25542e7Smilek7 	return true;
582a25542e7Smilek7 }
583a25542e7Smilek7 
584a25542e7Smilek7 
// Look up the mapping for `va`, returning its physical address in `*pa`
// and its state in `*flags` (PAGE_PRESENT/ACCESSED/MODIFIED plus the
// B_*_AREA protection bits). Both outputs are zeroed first.
status_t
VMSAv8TranslationMap::Query(addr_t va, phys_addr_t* pa, uint32* flags)
{
	*flags = 0;
	*pa = 0;

	ThreadCPUPinner pinner(thread_get_current_thread());

	uint64_t pageMask = (1UL << fPageBits) - 1;
	uint64_t vaMask = (1UL << fVaBits) - 1;

	ASSERT((va & pageMask) == 0);
	ASSERT(ValidateVa(va));

	ProcessRange(fPageTable, 0, va & vaMask, B_PAGE_SIZE, nullptr,
		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
			uint64_t pte = atomic_get64((int64_t*)ptePtr);
			// NOTE(review): the flags below are set without checking the
			// PTE's valid bit — confirm an invalid entry should really
			// report PAGE_PRESENT.
			*pa = pte & kPteAddrMask;
			*flags |= PAGE_PRESENT | B_KERNEL_READ_AREA;
			if ((pte & kAttrAF) != 0)
				*flags |= PAGE_ACCESSED;
			// In the SWDBM scheme a clear read-only bit means the page
			// was written to (see GetMemoryAttr()).
			if ((pte & kAttrAPReadOnly) == 0)
				*flags |= PAGE_MODIFIED;

			if ((pte & kAttrUXN) == 0)
				*flags |= B_EXECUTE_AREA;
			if ((pte & kAttrPXN) == 0)
				*flags |= B_KERNEL_EXECUTE_AREA;

			if ((pte & kAttrAPUserAccess) != 0)
				*flags |= B_READ_AREA;

			// Writable if already dirty (RO clear) or marked writable (SWDBM).
			if ((pte & kAttrAPReadOnly) == 0 || (pte & kAttrSWDBM) != 0) {
				*flags |= B_KERNEL_WRITE_AREA;
				if ((pte & kAttrAPUserAccess) != 0)
					*flags |= B_WRITE_AREA;
			}
		});

	return B_OK;
}
626a25542e7Smilek7 
627a25542e7Smilek7 
// Query() variant that must be callable from interrupt context; the
// regular implementation is reused directly.
status_t
VMSAv8TranslationMap::QueryInterrupt(
	addr_t virtualAddress, phys_addr_t* _physicalAddress, uint32* _flags)
{
	return Query(virtualAddress, _physicalAddress, _flags);
}
634a25542e7Smilek7 
635a25542e7Smilek7 
// Change the protection and memory type of every page in the inclusive
// range [start, end] in place, without remapping.
status_t
VMSAv8TranslationMap::Protect(addr_t start, addr_t end, uint32 attributes, uint32 memoryType)
{
	ThreadCPUPinner pinner(thread_get_current_thread());
	uint64_t attr = GetMemoryAttr(attributes, memoryType, fIsKernel);
	size_t size = end - start + 1;

	uint64_t pageMask = (1UL << fPageBits) - 1;
	uint64_t vaMask = (1UL << fVaBits) - 1;

	ASSERT((start & pageMask) == 0);
	ASSERT((size & pageMask) == 0);
	ASSERT(ValidateVa(start));

	ProcessRange(fPageTable, 0, start & vaMask, size, nullptr,
		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
			// We need to use an atomic compare-swap loop because we
			// need to clear some bits while setting others.
			while (true) {
				uint64_t oldPte = atomic_get64((int64_t*)ptePtr);
				uint64_t newPte = oldPte & ~kPteAttrMask;
				newPte |= attr;

                if ((uint64_t)atomic_test_and_set64((int64_t*)ptePtr, newPte, oldPte) == oldPte) {
					asm("dsb ishst"); // Ensure PTE write completed
					// Only a PTE whose access flag was set can be in the TLB.
					if ((oldPte & kAttrAF) != 0)
						FlushVAFromTLBByASID(effectiveVa);
					break;
				}
			}
		});

	return B_OK;
}
670a25542e7Smilek7 
671a25542e7Smilek7 
672a25542e7Smilek7 status_t
673a25542e7Smilek7 VMSAv8TranslationMap::ClearFlags(addr_t va, uint32 flags)
674a25542e7Smilek7 {
675a25542e7Smilek7 	ThreadCPUPinner pinner(thread_get_current_thread());
676a25542e7Smilek7 
677a25542e7Smilek7 	uint64_t pageMask = (1UL << fPageBits) - 1;
678a25542e7Smilek7 	uint64_t vaMask = (1UL << fVaBits) - 1;
679a25542e7Smilek7 
680a25542e7Smilek7 	ASSERT((va & pageMask) == 0);
681a25542e7Smilek7 	ASSERT(ValidateVa(va));
682a25542e7Smilek7 
683744bdd73SOwen Anderson 	bool clearAF = flags & kAttrAF;
684744bdd73SOwen Anderson 	bool setRO = flags & kAttrAPReadOnly;
685a25542e7Smilek7 
686744bdd73SOwen Anderson 	if (!clearAF && !setRO)
687744bdd73SOwen Anderson 		return B_OK;
688744bdd73SOwen Anderson 
689744bdd73SOwen Anderson 	ProcessRange(fPageTable, 0, va & vaMask, B_PAGE_SIZE, nullptr,
690744bdd73SOwen Anderson 		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
691744bdd73SOwen Anderson 			if (clearAF && setRO) {
692744bdd73SOwen Anderson 				// We need to use an atomic compare-swap loop because we must
693744bdd73SOwen Anderson 				// need to clear one bit while setting the other.
694744bdd73SOwen Anderson 				while (true) {
695744bdd73SOwen Anderson 					uint64_t oldPte = atomic_get64((int64_t*)ptePtr);
696744bdd73SOwen Anderson 					uint64_t newPte = oldPte & ~kAttrAF;
697744bdd73SOwen Anderson 					newPte |= kAttrAPReadOnly;
698744bdd73SOwen Anderson 
699744bdd73SOwen Anderson                     if ((uint64_t)atomic_test_and_set64((int64_t*)ptePtr, newPte, oldPte) == oldPte)
700744bdd73SOwen Anderson 						break;
701744bdd73SOwen Anderson 				}
702744bdd73SOwen Anderson 			} else if (clearAF) {
703744bdd73SOwen Anderson 				atomic_and64((int64_t*)ptePtr, ~kAttrAPReadOnly);
704744bdd73SOwen Anderson 			} else {
705744bdd73SOwen Anderson 				atomic_or64((int64_t*)ptePtr, kAttrAPReadOnly);
706744bdd73SOwen Anderson 			}
707744bdd73SOwen Anderson 			asm("dsb ishst"); // Ensure PTE write completed
708744bdd73SOwen Anderson 		});
709744bdd73SOwen Anderson 
710744bdd73SOwen Anderson 	FlushVAFromTLBByASID(va);
711a25542e7Smilek7 	return B_OK;
712a25542e7Smilek7 }
713a25542e7Smilek7 
714a25542e7Smilek7 
bool
VMSAv8TranslationMap::ClearAccessedAndModified(
	VMArea* area, addr_t address, bool unmapIfUnaccessed, bool& _modified)
{
	// Atomically clears the accessed (AF) and modified (write-permitted)
	// state of the page mapped at `address`, optionally unmapping the page
	// when it had not been accessed. Returns whether the page had been
	// accessed; _modified reports whether it had been modified (i.e. was
	// still mapped writable).
	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	uint64_t pageMask = (1UL << fPageBits) - 1;
	uint64_t vaMask = (1UL << fVaBits) - 1;

	ASSERT((address & pageMask) == 0);
	ASSERT(ValidateVa(address));

	uint64_t oldPte = 0;
	ProcessRange(fPageTable, 0, address & vaMask, B_PAGE_SIZE, nullptr,
		[=, &_modified, &oldPte](uint64_t* ptePtr, uint64_t effectiveVa) {
			// We need to use an atomic compare-swap loop because we must
			// first read the old PTE and make decisions based on the AF
			// bit to proceed.
			while (true) {
				oldPte = atomic_get64((int64_t*)ptePtr);
				uint64_t newPte = oldPte & ~kAttrAF;
				newPte |= kAttrAPReadOnly;

				// If the page has not been accessed and the caller asked
				// for it, unmap the page by writing an invalid (zero) PTE.
				if (unmapIfUnaccessed && (oldPte & kAttrAF) == 0)
					newPte = 0;

				if ((uint64_t)atomic_test_and_set64((int64_t*)ptePtr, newPte, oldPte) == oldPte)
					break;
			}
			asm("dsb ishst"); // Ensure PTE write completed
		});

	pinner.Unlock();
	// The page counts as modified iff the read-only bit was clear (i.e. it
	// was still mapped writable) when we examined it.
	_modified = (oldPte & kAttrAPReadOnly) == 0;
	if ((oldPte & kAttrAF) != 0) {
		// The page had been accessed: drop the stale TLB entry and report it.
		FlushVAFromTLBByASID(address);
		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// The unaccessed page was unmapped in the loop above; update the
	// mapping count and notify the VM about the removed mapping.
	fMapCount--;

	locker.Detach(); // UnaccessedPageUnmapped takes ownership of the lock
	phys_addr_t oldPa = oldPte & kPteAddrMask;
	UnaccessedPageUnmapped(area, oldPa >> fPageBits);
	return false;
}
766a25542e7Smilek7 
767a25542e7Smilek7 
768a25542e7Smilek7 void
769a25542e7Smilek7 VMSAv8TranslationMap::Flush()
770a25542e7Smilek7 {
771a25542e7Smilek7 	ThreadCPUPinner pinner(thread_get_current_thread());
772a25542e7Smilek7 
773a25542e7Smilek7 	arch_cpu_global_TLB_invalidate();
774a25542e7Smilek7 }
775