xref: /haiku/src/system/kernel/arch/arm64/VMSAv8TranslationMap.cpp (revision 1c408b7298815e53176d1d6c89c87d5c76351afa)
1a25542e7Smilek7 /*
2a25542e7Smilek7  * Copyright 2022 Haiku, Inc. All Rights Reserved.
3a25542e7Smilek7  * Distributed under the terms of the MIT License.
4a25542e7Smilek7  */
5a25542e7Smilek7 #include "VMSAv8TranslationMap.h"
6a25542e7Smilek7 
7baf574c9SOwen Anderson #include <algorithm>
83b098011SOwen Anderson #include <slab/Slab.h>
9a25542e7Smilek7 #include <util/AutoLock.h>
10a25542e7Smilek7 #include <util/ThreadAutoLock.h>
113b098011SOwen Anderson #include <vm/VMAddressSpace.h>
123b098011SOwen Anderson #include <vm/VMCache.h>
13a25542e7Smilek7 #include <vm/vm_page.h>
14a25542e7Smilek7 #include <vm/vm_priv.h>
15a25542e7Smilek7 
1632c542bdSOwen Anderson 
// Define DO_TRACE to get verbose translation-map tracing via dprintf().
//#define DO_TRACE
#ifdef DO_TRACE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif
2332c542bdSOwen Anderson 
2432c542bdSOwen Anderson 
// Detected hardware features (HW_* flags) and the MAIR_EL1 value in use,
// shared by all maps; set up during early architecture initialization.
uint32_t VMSAv8TranslationMap::fHwFeature;
uint64_t VMSAv8TranslationMap::fMair;

// ASID Management
static constexpr size_t kAsidBits = 8;
static constexpr size_t kNumAsids = (1 << kAsidBits);
// Protects all ASID state below (bitmap, mapping table, per-map fASID/fRefcount).
static spinlock sAsidLock = B_SPINLOCK_INITIALIZER;
// A bitmap to track which ASIDs are in use.
static uint64 sAsidBitMap[kNumAsids / 64] = {};
// A mapping from ASID to translation map.
static VMSAv8TranslationMap* sAsidMapping[kNumAsids] = {};
369fad0a5cSOwen Anderson 
379fad0a5cSOwen Anderson 
389fad0a5cSOwen Anderson static void
399fad0a5cSOwen Anderson free_asid(size_t asid)
409fad0a5cSOwen Anderson {
419fad0a5cSOwen Anderson 	for (size_t i = 0; i < B_COUNT_OF(sAsidBitMap); ++i) {
429fad0a5cSOwen Anderson 		if (asid < 64) {
439fad0a5cSOwen Anderson 			sAsidBitMap[i] &= ~(uint64_t{1} << asid);
449fad0a5cSOwen Anderson 			return;
459fad0a5cSOwen Anderson 		}
469fad0a5cSOwen Anderson 		asid -= 64;
479fad0a5cSOwen Anderson 	}
489fad0a5cSOwen Anderson 
499fad0a5cSOwen Anderson 	panic("Could not free ASID!");
509fad0a5cSOwen Anderson }
519fad0a5cSOwen Anderson 
529fad0a5cSOwen Anderson 
// Invalidate every TLB entry tagged with `asid`, on all CPUs in the Inner
// Shareable domain (TLBI ASIDE1IS takes the ASID in bits [63:48]).
static void
flush_tlb_whole_asid(uint64_t asid)
{
	asm("dsb ishst"); // ensure prior PTE writes are visible before the invalidate
	asm("tlbi aside1is, %0" ::"r"(asid << 48));
	asm("dsb ish"); // wait for the broadcast invalidate to complete
	asm("isb");
}
619406d2a4SOwen Anderson 
629406d2a4SOwen Anderson 
639fad0a5cSOwen Anderson static size_t
649fad0a5cSOwen Anderson alloc_first_free_asid(void)
659fad0a5cSOwen Anderson {
669fad0a5cSOwen Anderson 	int asid = 0;
679fad0a5cSOwen Anderson 	for (size_t i = 0; i < B_COUNT_OF(sAsidBitMap); ++i) {
689fad0a5cSOwen Anderson 		int avail = __builtin_ffsll(~sAsidBitMap[i]);
699fad0a5cSOwen Anderson 		if (avail != 0) {
709fad0a5cSOwen Anderson 			sAsidBitMap[i] |= (uint64_t{1} << (avail-1));
719fad0a5cSOwen Anderson 			asid += (avail - 1);
729fad0a5cSOwen Anderson 			return asid;
739fad0a5cSOwen Anderson 		}
749fad0a5cSOwen Anderson 		asid += 64;
759fad0a5cSOwen Anderson 	}
769fad0a5cSOwen Anderson 
779fad0a5cSOwen Anderson 	return kNumAsids;
789fad0a5cSOwen Anderson }
797908993dSOwen Anderson 
80a25542e7Smilek7 
816a2e4f41SOwen Anderson static bool
826a2e4f41SOwen Anderson is_pte_dirty(uint64_t pte)
836a2e4f41SOwen Anderson {
84bb43aaacSOwen Anderson 	if ((pte & kAttrSWDIRTY) != 0)
85bb43aaacSOwen Anderson 		return true;
86bb43aaacSOwen Anderson 
876a2e4f41SOwen Anderson 	return (pte & kAttrAPReadOnly) == 0;
886a2e4f41SOwen Anderson }
896a2e4f41SOwen Anderson 
906a2e4f41SOwen Anderson 
916a2e4f41SOwen Anderson static uint64_t
926a2e4f41SOwen Anderson set_pte_dirty(uint64_t pte)
936a2e4f41SOwen Anderson {
946a2e4f41SOwen Anderson 	if ((pte & kAttrSWDBM) != 0)
956a2e4f41SOwen Anderson 		return pte & ~kAttrAPReadOnly;
966a2e4f41SOwen Anderson 
97bb43aaacSOwen Anderson 	return pte | kAttrSWDIRTY;
986a2e4f41SOwen Anderson }
996a2e4f41SOwen Anderson 
1006a2e4f41SOwen Anderson 
1016a2e4f41SOwen Anderson static uint64_t
1026a2e4f41SOwen Anderson set_pte_clean(uint64_t pte)
1036a2e4f41SOwen Anderson {
104bb43aaacSOwen Anderson 	pte &= ~kAttrSWDIRTY;
1056a2e4f41SOwen Anderson 	return pte | kAttrAPReadOnly;
1066a2e4f41SOwen Anderson }
1076a2e4f41SOwen Anderson 
1086a2e4f41SOwen Anderson 
109129bc12bSOwen Anderson static bool
110129bc12bSOwen Anderson is_pte_accessed(uint64_t pte)
111129bc12bSOwen Anderson {
112129bc12bSOwen Anderson 	return (pte & kPteValidMask) != 0 && (pte & kAttrAF) != 0;
113129bc12bSOwen Anderson }
114129bc12bSOwen Anderson 
115129bc12bSOwen Anderson 
// Create a translation map.
// `kernel` selects kernel (TTBR1) vs user (TTBR0) semantics; `pageTable` is
// the physical address of the root table (0 is allowed — Map() allocates it
// lazily); `pageBits`/`vaBits` describe the granule and VA width;
// `minBlockLevel` is stored for block-mapping decisions (its use is not
// visible in this file).
VMSAv8TranslationMap::VMSAv8TranslationMap(
	bool kernel, phys_addr_t pageTable, int pageBits, int vaBits, int minBlockLevel)
	:
	fIsKernel(kernel),
	fPageTable(pageTable),
	fPageBits(pageBits),
	fVaBits(vaBits),
	fMinBlockLevel(minBlockLevel),
	fASID(kernel ? 0 : -1), // ASID 0 is the kernel's; user maps get one on first switch-in
	fRefcount(0)
{
	TRACE("+VMSAv8TranslationMap(%p, %d, 0x%" B_PRIxADDR ", %d, %d, %d)\n", this,
		kernel, pageTable, pageBits, vaBits, minBlockLevel);

	// The number of table levels depends on VA width and granule size.
	fInitialLevel = CalcStartLevel(fVaBits, fPageBits);
}
132a25542e7Smilek7 
133a25542e7Smilek7 
// Destroy a user translation map: free the entire page-table tree, then
// release the map's ASID (if one was ever assigned) under sAsidLock.
VMSAv8TranslationMap::~VMSAv8TranslationMap()
{
	TRACE("-VMSAv8TranslationMap(%p)\n", this);
	TRACE("  fIsKernel: %d, fPageTable: 0x%" B_PRIxADDR ", fASID: %d, fRefcount: %d\n",
		fIsKernel, fPageTable, fASID, fRefcount);

	// The kernel map is never destroyed, and this map must not be active on
	// any CPU (refcount is maintained by SwitchUserMap()).
	ASSERT(!fIsKernel);
	ASSERT(fRefcount == 0);
	{
		// Pin to this CPU while the table tree is walked and freed.
		ThreadCPUPinner pinner(thread_get_current_thread());
		FreeTable(fPageTable, 0, fInitialLevel);
	}

	{
		InterruptsSpinLocker locker(sAsidLock);

		// fASID == -1 means the map was never switched in.
		if (fASID != -1) {
			sAsidMapping[fASID] = NULL;
			free_asid(fASID);
		}
	}
}
1569fad0a5cSOwen Anderson 
1579fad0a5cSOwen Anderson 
// Switch user map into TTBR0.
// Passing kernel map here configures empty page table.
void
VMSAv8TranslationMap::SwitchUserMap(VMSAv8TranslationMap *from, VMSAv8TranslationMap *to)
{
	InterruptsSpinLocker locker(sAsidLock);

	// fRefcount counts CPUs currently running with the map active; maps with
	// refcount 0 are candidates for ASID stealing below.
	if (!from->fIsKernel) {
		from->fRefcount--;
	}

	if (!to->fIsKernel) {
		to->fRefcount++;
	} else {
		// Switching to the kernel "map": install an empty TTBR0 table so no
		// user addresses translate.
		arch_vm_install_empty_table_ttbr0();
		return;
	}

	ASSERT(to->fPageTable != 0);
	// Bit 0 of TTBR0 is CnP (common-not-private) when the hardware supports it.
	uint64_t ttbr = to->fPageTable | ((fHwFeature & HW_COMMON_NOT_PRIVATE) != 0 ? 1 : 0);

	// Fast path: the map already owns an ASID — just program TTBR0.
	if (to->fASID != -1) {
		WRITE_SPECIALREG(TTBR0_EL1, ((uint64_t)to->fASID << 48) | ttbr);
		asm("isb");
		return;
	}

	// Try to allocate a fresh ASID.
	size_t allocatedAsid = alloc_first_free_asid();
	if (allocatedAsid != kNumAsids) {
		to->fASID = allocatedAsid;
		sAsidMapping[allocatedAsid] = to;

		WRITE_SPECIALREG(TTBR0_EL1, (allocatedAsid << 48) | ttbr);
		// The ASID number may have been used before; drop any stale entries.
		flush_tlb_whole_asid(allocatedAsid);
		return;
	}

	// All ASIDs are taken: steal one from a map not active on any CPU.
	// ASID 0 is reserved for the kernel.
	for (size_t i = 1; i < kNumAsids; ++i) {
		if (sAsidMapping[i]->fRefcount == 0) {
			sAsidMapping[i]->fASID = -1;
			to->fASID = i;
			sAsidMapping[i] = to;

			WRITE_SPECIALREG(TTBR0_EL1, (i << 48) | ttbr);
			flush_tlb_whole_asid(i);
			return;
		}
	}

	// Unreachable unless more maps are simultaneously active than ASIDs exist.
	panic("cannot assign ASID");
}
210a25542e7Smilek7 
211a25542e7Smilek7 
212a25542e7Smilek7 int
213a25542e7Smilek7 VMSAv8TranslationMap::CalcStartLevel(int vaBits, int pageBits)
214a25542e7Smilek7 {
215a25542e7Smilek7 	int level = 4;
216a25542e7Smilek7 
217a25542e7Smilek7 	int bitsLeft = vaBits - pageBits;
218a25542e7Smilek7 	while (bitsLeft > 0) {
219a25542e7Smilek7 		int tableBits = pageBits - 3;
220a25542e7Smilek7 		bitsLeft -= tableBits;
221a25542e7Smilek7 		level--;
222a25542e7Smilek7 	}
223a25542e7Smilek7 
224a25542e7Smilek7 	ASSERT(level >= 0);
225a25542e7Smilek7 
226a25542e7Smilek7 	return level;
227a25542e7Smilek7 }
228a25542e7Smilek7 
229a25542e7Smilek7 
// Acquire the map's recursive lock. Always succeeds and returns true.
bool
VMSAv8TranslationMap::Lock()
{
	TRACE("VMSAv8TranslationMap::Lock()\n");
	recursive_lock_lock(&fLock);
	return true;
}
237a25542e7Smilek7 
238a25542e7Smilek7 
// Release the map's recursive lock (one level of nesting).
void
VMSAv8TranslationMap::Unlock()
{
	TRACE("VMSAv8TranslationMap::Unlock()\n");
	recursive_lock_unlock(&fLock);
}
245a25542e7Smilek7 
246a25542e7Smilek7 
// Total size of all current mappings — not implemented on ARM64; panics.
addr_t
VMSAv8TranslationMap::MappedSize() const
{
	panic("VMSAv8TranslationMap::MappedSize not implemented");
	return 0;
}
253a25542e7Smilek7 
254a25542e7Smilek7 
255a25542e7Smilek7 size_t
256a25542e7Smilek7 VMSAv8TranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
257a25542e7Smilek7 {
258a25542e7Smilek7 	size_t result = 0;
259a25542e7Smilek7 	size_t size = end - start + 1;
260a25542e7Smilek7 
261a25542e7Smilek7 	for (int i = fInitialLevel; i < 3; i++) {
262a25542e7Smilek7 		int tableBits = fPageBits - 3;
263a25542e7Smilek7 		int shift = tableBits * (3 - i) + fPageBits;
264a25542e7Smilek7 		uint64_t entrySize = 1UL << shift;
265a25542e7Smilek7 
266a25542e7Smilek7 		result += size / entrySize + 2;
267a25542e7Smilek7 	}
268a25542e7Smilek7 
269a25542e7Smilek7 	return result;
270a25542e7Smilek7 }
271a25542e7Smilek7 
272a25542e7Smilek7 
// Return the kernel-virtual address of the page table located at physical
// address `pa`, using the linear physical-map window at KERNEL_PMAP_BASE.
uint64_t*
VMSAv8TranslationMap::TableFromPa(phys_addr_t pa)
{
	return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
}
278a25542e7Smilek7 
279a25542e7Smilek7 
// Recursively free the table page at `ptPa` (level `level`, covering virtual
// addresses starting at `va`), including all of its sub-tables, flushing TLB
// entries for any still-accessed leaf mappings, and finally freeing the
// table page itself.
void
VMSAv8TranslationMap::FreeTable(phys_addr_t ptPa, uint64_t va, int level)
{
	ASSERT(level < 4);
	// Taken once for the whole subtree so the flush below doesn't have to
	// reacquire it per entry.
	InterruptsSpinLocker locker(sAsidLock);

	int tableBits = fPageBits - 3;
	uint64_t tableSize = 1UL << tableBits;
	uint64_t vaMask = (1UL << fVaBits) - 1;

	// VA span covered by one entry at this level.
	int shift = tableBits * (3 - level) + fPageBits;
	uint64_t entrySize = 1UL << shift;

	uint64_t nextVa = va;
	uint64_t* pt = TableFromPa(ptPa);
	for (uint64_t i = 0; i < tableSize; i++) {
		// Atomically invalidate the entry so concurrent walkers never see a
		// freed table.
		uint64_t oldPte = (uint64_t) atomic_get_and_set64((int64*) &pt[i], 0);

		if (level < 3 && (oldPte & kPteTypeMask) == kPteTypeL012Table) {
			// Recurse into the sub-table before this level's page is freed.
			FreeTable(oldPte & kPteAddrMask, nextVa, level + 1);
		} else if ((oldPte & kPteTypeMask) != 0) {
			// NOTE(review): fullVa is computed but unused — the flush below
			// passes nextVa. Harmless today (only user maps are freed, where
			// fullVa == nextVa), but confirm the sign-extended address wasn't
			// intended.
			uint64_t fullVa = (fIsKernel ? ~vaMask : 0) | nextVa;

			// Use this rather than FlushVAIfAccessed so that we don't have to
			// acquire sAsidLock for every entry.
			flush_va_if_accessed(oldPte, nextVa, fASID);
		}

		nextVa += entrySize;
	}

	// Release the page backing this table.
	vm_page* page = vm_lookup_page(ptPa >> fPageBits);
	DEBUG_PAGE_ACCESS_START(page);
	vm_page_set_state(page, PAGE_STATE_FREE);
}
315a25542e7Smilek7 
316a25542e7Smilek7 
// Make a new page sub-table.
// The parent table is `ptPa`, and the new sub-table's PTE will be at `index`
// in it.
// Returns the physical address of the new table, or the address of the existing
// one if the PTE is already filled. Returns 0 when the slot is empty and
// `reservation` is NULL, so no table can be allocated.
phys_addr_t
VMSAv8TranslationMap::GetOrMakeTable(phys_addr_t ptPa, int level, int index,
	vm_page_reservation* reservation)
{
	ASSERT(level < 3);

	uint64_t* ptePtr = TableFromPa(ptPa) + index;
	uint64_t oldPte = atomic_get64((int64*) ptePtr);

	int type = oldPte & kPteTypeMask;
	// Mappings are only ever created at level 3, so a block entry here would
	// be a bug.
	ASSERT(type != kPteTypeL12Block);

	if (type == kPteTypeL012Table) {
		// This is table entry already, just return it
		return oldPte & kPteAddrMask;
	} else if (reservation != nullptr) {
		// Create new table there
		vm_page* page = vm_page_allocate_page(reservation, PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
		phys_addr_t newTablePa = page->physical_page_number << fPageBits;
		DEBUG_PAGE_ACCESS_END(page);

		// We only create mappings at the final level so we don't need to handle
		// splitting block mappings
		ASSERT(type != kPteTypeL12Block);

		// Ensure that writes to page being attached have completed
		asm("dsb ishst");

		// Publish the new table with a CAS so two threads racing on the same
		// slot can't both install a table.
		uint64_t oldPteRefetch = (uint64_t)atomic_test_and_set64((int64*) ptePtr,
			newTablePa | kPteTypeL012Table, oldPte);
		if (oldPteRefetch != oldPte) {
			// If the old PTE has mutated, it must be because another thread has allocated the
			// sub-table at the same time as us. If that has happened, deallocate the page we
			// setup and use the one they installed instead.
			ASSERT((oldPteRefetch & kPteTypeMask) == kPteTypeL012Table);
			DEBUG_PAGE_ACCESS_START(page);
			vm_page_set_state(page, PAGE_STATE_FREE);
			return oldPteRefetch & kPteAddrMask;
		}

		return newTablePa;
	}

	// There's no existing table and we have no reservation
	return 0;
}
368a25542e7Smilek7 
369a25542e7Smilek7 
370129bc12bSOwen Anderson bool
3711fa60a5cSOwen Anderson flush_va_if_accessed(uint64_t pte, addr_t va, int asid)
372baf574c9SOwen Anderson {
373129bc12bSOwen Anderson 	if (!is_pte_accessed(pte))
374129bc12bSOwen Anderson 		return false;
375129bc12bSOwen Anderson 
3769473fe5eSOwen Anderson 	if ((pte & kAttrNG) == 0) {
3779473fe5eSOwen Anderson 		// Flush from all address spaces
378af5e461fSOwen Anderson 		asm("dsb ishst"); // Ensure PTE write completed
379af5e461fSOwen Anderson 		asm("tlbi vaae1is, %0" ::"r"(((va >> 12) & kTLBIMask)));
380af5e461fSOwen Anderson 		asm("dsb ish");
381af5e461fSOwen Anderson 		asm("isb");
3821fa60a5cSOwen Anderson 	} else if (asid != -1) {
383129bc12bSOwen Anderson 		asm("dsb ishst"); // Ensure PTE write completed
3841fa60a5cSOwen Anderson         asm("tlbi vae1is, %0" ::"r"(((va >> 12) & kTLBIMask) | (uint64_t(asid) << 48)));
385baf574c9SOwen Anderson 		asm("dsb ish"); // Wait for TLB flush to complete
386129bc12bSOwen Anderson 		asm("isb");
387129bc12bSOwen Anderson 		return true;
388baf574c9SOwen Anderson 	}
389129bc12bSOwen Anderson 
390129bc12bSOwen Anderson 	return false;
391baf574c9SOwen Anderson }
392baf574c9SOwen Anderson 
3931fa60a5cSOwen Anderson bool
3941fa60a5cSOwen Anderson VMSAv8TranslationMap::FlushVAIfAccessed(uint64_t pte, addr_t va) {
3951fa60a5cSOwen Anderson 	InterruptsSpinLocker locker(sAsidLock);
3961fa60a5cSOwen Anderson 	return flush_va_if_accessed(pte, va, fASID);
3971fa60a5cSOwen Anderson }
3981fa60a5cSOwen Anderson 
399baf574c9SOwen Anderson 
400129bc12bSOwen Anderson bool
4014bb796cfSOwen Anderson VMSAv8TranslationMap::AttemptPteBreakBeforeMake(uint64_t* ptePtr, uint64_t oldPte, addr_t va)
402baf574c9SOwen Anderson {
4034bb796cfSOwen Anderson 	uint64_t loadedPte = atomic_test_and_set64((int64_t*)ptePtr, 0, oldPte);
4044bb796cfSOwen Anderson 	if (loadedPte != oldPte)
405129bc12bSOwen Anderson 		return false;
4064bb796cfSOwen Anderson 
407129bc12bSOwen Anderson 	FlushVAIfAccessed(oldPte, va);
408129bc12bSOwen Anderson 
409129bc12bSOwen Anderson 	return true;
410baf574c9SOwen Anderson }
411baf574c9SOwen Anderson 
412baf574c9SOwen Anderson 
413baf574c9SOwen Anderson template<typename UpdatePte>
414baf574c9SOwen Anderson void
415baf574c9SOwen Anderson VMSAv8TranslationMap::ProcessRange(phys_addr_t ptPa, int level, addr_t va, size_t size,
416baf574c9SOwen Anderson     vm_page_reservation* reservation, UpdatePte&& updatePte)
417baf574c9SOwen Anderson {
418baf574c9SOwen Anderson 	ASSERT(level < 4);
419baf574c9SOwen Anderson 	ASSERT(ptPa != 0);
420baf574c9SOwen Anderson 
421af5e461fSOwen Anderson 	uint64_t pageMask = (1UL << fPageBits) - 1;
422af5e461fSOwen Anderson 	uint64_t vaMask = (1UL << fVaBits) - 1;
423af5e461fSOwen Anderson 
424af5e461fSOwen Anderson 	ASSERT((va & pageMask) == 0);
425af5e461fSOwen Anderson 
426baf574c9SOwen Anderson 	int tableBits = fPageBits - 3;
427baf574c9SOwen Anderson 	uint64_t tableMask = (1UL << tableBits) - 1;
428baf574c9SOwen Anderson 
429baf574c9SOwen Anderson 	int shift = tableBits * (3 - level) + fPageBits;
430baf574c9SOwen Anderson 	uint64_t entrySize = 1UL << shift;
431baf574c9SOwen Anderson 	uint64_t entryMask = entrySize - 1;
432baf574c9SOwen Anderson 
433baf574c9SOwen Anderson 	uint64_t alignedDownVa = va & ~entryMask;
434d2397007SOwen Anderson 	uint64_t end = va + size - 1;
435baf574c9SOwen Anderson 	if (level == 3)
436baf574c9SOwen Anderson 		ASSERT(alignedDownVa == va);
437baf574c9SOwen Anderson 
438d2397007SOwen Anderson     for (uint64_t effectiveVa = alignedDownVa; effectiveVa < end; effectiveVa += entrySize) {
439af5e461fSOwen Anderson 		int index = ((effectiveVa & vaMask) >> shift) & tableMask;
440baf574c9SOwen Anderson 		uint64_t* ptePtr = TableFromPa(ptPa) + index;
441baf574c9SOwen Anderson 
442baf574c9SOwen Anderson 		if (level == 3) {
443baf574c9SOwen Anderson 			updatePte(ptePtr, effectiveVa);
444baf574c9SOwen Anderson 		} else {
445baf574c9SOwen Anderson 			phys_addr_t subTable = GetOrMakeTable(ptPa, level, index, reservation);
446baf574c9SOwen Anderson 
447baf574c9SOwen Anderson 			// When reservation is null, we can't create a new subtable. This can be intentional,
448baf574c9SOwen Anderson 			// for example when called from Unmap().
449baf574c9SOwen Anderson 			if (subTable == 0)
450baf574c9SOwen Anderson 				continue;
451baf574c9SOwen Anderson 
452d2397007SOwen Anderson 			if (effectiveVa < va) {
453d2397007SOwen Anderson 				// The range begins inside the slot.
454d2397007SOwen Anderson 				if (effectiveVa + entrySize - 1 > end) {
455d2397007SOwen Anderson 					// The range ends within the slot.
456d2397007SOwen Anderson 					ProcessRange(subTable, level + 1, va, size, reservation, updatePte);
457d2397007SOwen Anderson 				} else {
458d2397007SOwen Anderson 					// The range extends past the end of the slot.
459d2397007SOwen Anderson 					ProcessRange(subTable, level + 1, va, effectiveVa + entrySize - va, reservation, updatePte);
460d2397007SOwen Anderson 				}
461d2397007SOwen Anderson 			} else {
462d2397007SOwen Anderson 				// The range beginning is aligned to the slot.
463d2397007SOwen Anderson 				if (effectiveVa + entrySize - 1 > end) {
464d2397007SOwen Anderson 					// The range ends within the slot.
465d2397007SOwen Anderson 					ProcessRange(subTable, level + 1, effectiveVa, end - effectiveVa + 1,
466d2397007SOwen Anderson 						reservation, updatePte);
467d2397007SOwen Anderson 				} else {
468d2397007SOwen Anderson 					// The range extends past the end of the slot.
469d2397007SOwen Anderson 					ProcessRange(subTable, level + 1, effectiveVa, entrySize, reservation, updatePte);
470d2397007SOwen Anderson 				}
471d2397007SOwen Anderson 			}
472baf574c9SOwen Anderson 		}
473baf574c9SOwen Anderson 	}
474baf574c9SOwen Anderson }
475baf574c9SOwen Anderson 
476baf574c9SOwen Anderson 
477a25542e7Smilek7 uint8_t
478a25542e7Smilek7 VMSAv8TranslationMap::MairIndex(uint8_t type)
479a25542e7Smilek7 {
480a25542e7Smilek7 	for (int i = 0; i < 8; i++)
481a25542e7Smilek7 		if (((fMair >> (i * 8)) & 0xff) == type)
482a25542e7Smilek7 			return i;
483a25542e7Smilek7 
484a25542e7Smilek7 	panic("MAIR entry not found");
485a25542e7Smilek7 	return 0;
486a25542e7Smilek7 }
487a25542e7Smilek7 
488a25542e7Smilek7 
489a25542e7Smilek7 uint64_t
490a25542e7Smilek7 VMSAv8TranslationMap::GetMemoryAttr(uint32 attributes, uint32 memoryType, bool isKernel)
491a25542e7Smilek7 {
492a25542e7Smilek7 	uint64_t attr = 0;
493a25542e7Smilek7 
494a25542e7Smilek7 	if (!isKernel)
495a25542e7Smilek7 		attr |= kAttrNG;
496a25542e7Smilek7 
497a25542e7Smilek7 	if ((attributes & B_EXECUTE_AREA) == 0)
498a25542e7Smilek7 		attr |= kAttrUXN;
499a25542e7Smilek7 	if ((attributes & B_KERNEL_EXECUTE_AREA) == 0)
500a25542e7Smilek7 		attr |= kAttrPXN;
501a25542e7Smilek7 
502108f6fdcSOwen Anderson 	// SWDBM is software reserved bit that we use to mark that
503108f6fdcSOwen Anderson 	// writes are allowed, and fault handler should clear kAttrAPReadOnly.
504108f6fdcSOwen Anderson 	// In that case kAttrAPReadOnly doubles as not-dirty bit.
505108f6fdcSOwen Anderson 	// Additionally dirty state can be stored in SWDIRTY, in order not to lose
506108f6fdcSOwen Anderson 	// dirty state when changing protection from RW to RO.
507a25542e7Smilek7 
508108f6fdcSOwen Anderson 	// All page permissions begin life in RO state.
509108f6fdcSOwen Anderson 	attr |= kAttrAPReadOnly;
510108f6fdcSOwen Anderson 
511108f6fdcSOwen Anderson 	// User-Execute implies User-Read, because it would break PAN otherwise
512108f6fdcSOwen Anderson 	if ((attributes & B_READ_AREA) != 0 || (attributes & B_EXECUTE_AREA) != 0)
513108f6fdcSOwen Anderson 		attr |= kAttrAPUserAccess; // Allow user reads
514108f6fdcSOwen Anderson 
515108f6fdcSOwen Anderson 	if ((attributes & B_WRITE_AREA) != 0 || (attributes & B_KERNEL_WRITE_AREA) != 0)
516108f6fdcSOwen Anderson 		attr |= kAttrSWDBM; // Mark as writeable
517108f6fdcSOwen Anderson 
518108f6fdcSOwen Anderson 	// When supported by hardware copy our SWDBM bit into DBM,
519108f6fdcSOwen Anderson 	// so that kAttrAPReadOnly is cleared on write attempt automatically
520108f6fdcSOwen Anderson 	// without going through fault handler.
521108f6fdcSOwen Anderson 	if ((fHwFeature & HW_DIRTY) != 0 && (attr & kAttrSWDBM) != 0)
522a25542e7Smilek7 		attr |= kAttrDBM;
523a25542e7Smilek7 
524108f6fdcSOwen Anderson 	attr |= kAttrSHInnerShareable; // Inner Shareable
525a25542e7Smilek7 
526108f6fdcSOwen Anderson 	uint8_t type = MAIR_NORMAL_WB;
527108f6fdcSOwen Anderson 
5285c1f2319SAugustin Cavalier 	switch (memoryType & B_MEMORY_TYPE_MASK) {
5295c1f2319SAugustin Cavalier 		case B_UNCACHED_MEMORY:
5308cb8c3d7SOwen Anderson 			// TODO: This probably should be nGnRE for PCI
5318cb8c3d7SOwen Anderson 			type = MAIR_DEVICE_nGnRnE;
5328cb8c3d7SOwen Anderson 			break;
5335c1f2319SAugustin Cavalier 		case B_WRITE_COMBINING_MEMORY:
534edb17c54SOwen Anderson 			type = MAIR_NORMAL_NC;
5358cb8c3d7SOwen Anderson 			break;
5365c1f2319SAugustin Cavalier 		case B_WRITE_THROUGH_MEMORY:
537108f6fdcSOwen Anderson 			type = MAIR_NORMAL_WT;
5388cb8c3d7SOwen Anderson 			break;
5395c1f2319SAugustin Cavalier 		case B_WRITE_PROTECTED_MEMORY:
540108f6fdcSOwen Anderson 			type = MAIR_NORMAL_WT;
5418cb8c3d7SOwen Anderson 			break;
5428cb8c3d7SOwen Anderson 		default:
5435c1f2319SAugustin Cavalier 		case B_WRITE_BACK_MEMORY:
544108f6fdcSOwen Anderson 			type = MAIR_NORMAL_WB;
5458cb8c3d7SOwen Anderson 			break;
5468cb8c3d7SOwen Anderson 	}
547108f6fdcSOwen Anderson 
548108f6fdcSOwen Anderson 	attr |= MairIndex(type) << 2;
549a25542e7Smilek7 
550a25542e7Smilek7 	return attr;
551a25542e7Smilek7 }
552a25542e7Smilek7 
553a25542e7Smilek7 
// Establish a single level-3 page mapping from `va` to `pa` with the given
// protection and memory type. `reservation` supplies pages for intermediate
// tables (and, on first use, the root table). Always returns B_OK.
status_t
VMSAv8TranslationMap::Map(addr_t va, phys_addr_t pa, uint32 attributes, uint32 memoryType,
	vm_page_reservation* reservation)
{
	TRACE("VMSAv8TranslationMap::Map(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
		", 0x%x, 0x%x)\n", va, pa, attributes, memoryType);

	ThreadCPUPinner pinner(thread_get_current_thread());

	ASSERT(ValidateVa(va));
	uint64_t attr = GetMemoryAttr(attributes, memoryType, fIsKernel);

	// During first mapping we need to allocate root table
	if (fPageTable == 0) {
		vm_page* page = vm_page_allocate_page(reservation, PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
		DEBUG_PAGE_ACCESS_END(page);
		fPageTable = page->physical_page_number << fPageBits;
	}

	ProcessRange(fPageTable, fInitialLevel, va, B_PAGE_SIZE, reservation,
		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
			// Retry loop: AttemptPteBreakBeforeMake's CAS can lose a race with
			// a concurrent update of the same PTE.
			while (true) {
				phys_addr_t effectivePa = effectiveVa - va + pa;
				uint64_t oldPte = atomic_get64((int64*)ptePtr);
				uint64_t newPte = effectivePa | attr | kPteTypeL3Page;

				// Mapping already exists exactly as requested — nothing to do.
				if (newPte == oldPte)
					return;

				if ((oldPte & kPteValidMask) != 0) {
					// ARM64 requires "break-before-make". We must set the PTE to an invalid
					// entry and flush the TLB as appropriate before we can write the new PTE.
					if (!AttemptPteBreakBeforeMake(ptePtr, oldPte, effectiveVa))
						continue;
				}

				// Install the new PTE
				atomic_set64((int64*)ptePtr, newPte);
				asm("dsb ishst"); // Ensure PTE write completed
				asm("isb");
				break;
			}
		});

	return B_OK;
}
600a25542e7Smilek7 
601a25542e7Smilek7 
// Remove all leaf mappings in [start, end] (inclusive), flushing still-cached
// translations. Missing sub-tables are skipped (NULL reservation), so
// unmapping never allocates. Always returns B_OK.
status_t
VMSAv8TranslationMap::Unmap(addr_t start, addr_t end)
{
	TRACE("VMSAv8TranslationMap::Unmap(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
		")\n", start, end);
	ThreadCPUPinner pinner(thread_get_current_thread());

	size_t size = end - start + 1;
	ASSERT(ValidateVa(start));

	// Nothing was ever mapped.
	if (fPageTable == 0)
		return B_OK;

	ProcessRange(fPageTable, fInitialLevel, start, size, nullptr,
		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
			ASSERT(effectiveVa <= end);
			// Atomically invalidate, then flush if the entry was accessed.
			uint64_t oldPte = atomic_get_and_set64((int64_t*)ptePtr, 0);
			FlushVAIfAccessed(oldPte, effectiveVa);
		});

	return B_OK;
}
624a25542e7Smilek7 
625a25542e7Smilek7 
// Unmap the single page at `address` in `area`, transferring the PTE's
// accessed/dirty state to the vm_page bookkeeping via PageUnmapped().
// Returns B_ENTRY_NOT_FOUND when no valid mapping exists there.
status_t
VMSAv8TranslationMap::UnmapPage(VMArea* area, addr_t address, bool updatePageQueue)
{
	TRACE("VMSAv8TranslationMap::UnmapPage(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", %d)\n", (addr_t)area, area->name, address,
		updatePageQueue);

	ASSERT(ValidateVa(address));
	ThreadCPUPinner pinner(thread_get_current_thread());
	RecursiveLocker locker(fLock);

	// The single-page range visits at most one PTE; capture it by reference.
	uint64_t oldPte = 0;
	ProcessRange(fPageTable, fInitialLevel, address, B_PAGE_SIZE, nullptr,
		[=, &oldPte](uint64_t* ptePtr, uint64_t effectiveVa) {
			oldPte = atomic_get_and_set64((int64_t*)ptePtr, 0);
			FlushVAIfAccessed(oldPte, effectiveVa);
		});

	if ((oldPte & kPteValidMask) == 0)
		return B_ENTRY_NOT_FOUND;

	// PageUnmapped() takes over the locks; release ours without unlocking.
	pinner.Unlock();
	locker.Detach();
	PageUnmapped(area, (oldPte & kPteAddrMask) >> fPageBits, (oldPte & kAttrAF) != 0,
		is_pte_dirty(oldPte), updatePageQueue);

	return B_OK;
}
654a25542e7Smilek7 
655a25542e7Smilek7 
// Unmaps the range [address, address + size) from `area`, updating the
// backing vm_pages' accessed/modified state and mapping lists. Removed
// vm_page_mapping objects are freed only after fLock has been released.
void
VMSAv8TranslationMap::UnmapPages(VMArea* area, addr_t address, size_t size, bool updatePageQueue)
{
	TRACE("VMSAv8TranslationMap::UnmapPages(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d)\n", (addr_t)area,
		area->name, address, size, updatePageQueue);

	ASSERT(ValidateVa(address));
	// Mappings removed below are collected here and freed after unlocking.
	VMAreaMappings queue;
	ThreadCPUPinner pinner(thread_get_current_thread());
	RecursiveLocker locker(fLock);

	ProcessRange(fPageTable, fInitialLevel, address, size, nullptr,
		[=, &queue](uint64_t* ptePtr, uint64_t effectiveVa) {
			// Atomically clear the entry; flush the TLB only if needed.
			uint64_t oldPte = atomic_get_and_set64((int64_t*)ptePtr, 0);
			FlushVAIfAccessed(oldPte, effectiveVa);
			if ((oldPte & kPteValidMask) == 0)
				return;

			// Device mappings have no vm_page backing them.
			if (area->cache_type == CACHE_TYPE_DEVICE)
				return;

			// get the page
			vm_page* page = vm_lookup_page((oldPte & kPteAddrMask) >> fPageBits);
			ASSERT(page != NULL);

			DEBUG_PAGE_ACCESS_START(page);

			// transfer the accessed/dirty flags to the page
			page->accessed = (oldPte & kAttrAF) != 0;
			page->modified = is_pte_dirty(oldPte);

			// remove the mapping object/decrement the wired_count of the
			// page
			if (area->wiring == B_NO_LOCK) {
				vm_page_mapping* mapping = NULL;
				vm_page_mappings::Iterator iterator
					= page->mappings.GetIterator();
				while ((mapping = iterator.Next()) != NULL) {
					if (mapping->area == area)
						break;
				}

				ASSERT(mapping != NULL);

				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				queue.Add(mapping);
			} else
				page->DecrementWiredCount();

			// If that was the last mapping, update global count and,
			// if requested, requeue the page according to its new state.
			if (!page->IsMapped()) {
				atomic_add(&gMappedPagesCount, -1);

				if (updatePageQueue) {
					if (page->Cache()->temporary)
						vm_page_set_state(page, PAGE_STATE_INACTIVE);
					else if (page->modified)
						vm_page_set_state(page, PAGE_STATE_MODIFIED);
					else
						vm_page_set_state(page, PAGE_STATE_CACHED);
				}
			}

			DEBUG_PAGE_ACCESS_END(page);
		});

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical here, as in all cases this method is used, the unmapped
	// area range is unmapped for good (resized/cut) and the pages will likely
	// be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);

	while (vm_page_mapping* mapping = queue.RemoveHead())
		vm_free_page_mapping(mapping->page->physical_page_number, mapping, freeFlags);
}
7383b098011SOwen Anderson 
7393b098011SOwen Anderson 
// Unmaps an entire area. For device or wired areas this simply delegates to
// UnmapPages(); otherwise it walks the area's mapping list directly, which
// avoids scanning page tables for sparsely-mapped areas.
void
VMSAv8TranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	TRACE("VMSAv8TranslationMap::UnmapArea(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d, %d)\n", (addr_t)area,
		area->name, area->Base(), area->Size(), deletingAddressSpace,
		ignoreTopCachePageFlags);

	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		UnmapPages(area, area->Base(), area->Size(), true);
		return;
	}

	// When the whole address space is going away and top-cache page flags
	// can be ignored, we may skip clearing PTEs belonging to the top cache.
	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	// Steal the whole mapping list; the objects are freed after unlocking.
	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {

		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			// Reconstruct the page's virtual address from its cache offset.
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE)
				- area->cache_offset);

			uint64_t oldPte = 0;
			ProcessRange(fPageTable, fInitialLevel, address, B_PAGE_SIZE, nullptr,
				[=, &oldPte](uint64_t* ptePtr, uint64_t effectiveVa) {
					oldPte = atomic_get_and_set64((int64_t*)ptePtr, 0);
					// No per-page TLB flush when deleting the whole address
					// space; presumably the whole context is invalidated
					// elsewhere -- TODO confirm.
					if (!deletingAddressSpace)
						FlushVAIfAccessed(oldPte, effectiveVa);
				});

			if ((oldPte & kPteValidMask) == 0) {
				panic("page %p has mapping for area %p "
					"(%#" B_PRIxADDR "), but has no "
					"page table", page, area, address);
				continue;
			}

			// transfer the accessed/dirty flags to the page and
			// invalidate the mapping, if necessary
			if (is_pte_dirty(oldPte))
				page->modified = true;
			if (oldPte & kAttrAF)
				page->accessed = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				// Requeue the page according to its new state.
				if (cache->temporary) {
					vm_page_set_state(page,
						PAGE_STATE_INACTIVE);
				} else if (page->modified) {
					vm_page_set_state(page,
						PAGE_STATE_MODIFIED);
				} else {
					vm_page_set_state(page,
						PAGE_STATE_CACHED);
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}
	}

	locker.Unlock();

	// Free the collected mapping objects outside the lock.
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);

	while (vm_page_mapping* mapping = mappings.RemoveHead())
		vm_free_page_mapping(mapping->page->physical_page_number, mapping, freeFlags);
}
8310a367809SOwen Anderson 
8320a367809SOwen Anderson 
833a25542e7Smilek7 bool
834a25542e7Smilek7 VMSAv8TranslationMap::ValidateVa(addr_t va)
835a25542e7Smilek7 {
836a25542e7Smilek7 	uint64_t vaMask = (1UL << fVaBits) - 1;
837a25542e7Smilek7 	bool kernelAddr = (va & (1UL << 63)) != 0;
838a25542e7Smilek7 	if (kernelAddr != fIsKernel)
839a25542e7Smilek7 		return false;
840a25542e7Smilek7 	if ((va & ~vaMask) != (fIsKernel ? ~vaMask : 0))
841a25542e7Smilek7 		return false;
842a25542e7Smilek7 	return true;
843a25542e7Smilek7 }
844a25542e7Smilek7 
845a25542e7Smilek7 
846a25542e7Smilek7 status_t
847a25542e7Smilek7 VMSAv8TranslationMap::Query(addr_t va, phys_addr_t* pa, uint32* flags)
848a25542e7Smilek7 {
84973c51743SOwen Anderson 	*flags = 0;
85073c51743SOwen Anderson 	*pa = 0;
85173c51743SOwen Anderson 
85273c51743SOwen Anderson 	uint64_t pageMask = (1UL << fPageBits) - 1;
853088b72e7SOwen Anderson 	va &= ~pageMask;
854af5e461fSOwen Anderson 
855af5e461fSOwen Anderson 	ThreadCPUPinner pinner(thread_get_current_thread());
856a25542e7Smilek7 	ASSERT(ValidateVa(va));
857a25542e7Smilek7 
858af5e461fSOwen Anderson 	ProcessRange(fPageTable, fInitialLevel, va, B_PAGE_SIZE, nullptr,
85973c51743SOwen Anderson 		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
86073c51743SOwen Anderson 			uint64_t pte = atomic_get64((int64_t*)ptePtr);
86173c51743SOwen Anderson 			*pa = pte & kPteAddrMask;
86273c51743SOwen Anderson 			*flags |= PAGE_PRESENT | B_KERNEL_READ_AREA;
863a25542e7Smilek7 			if ((pte & kAttrAF) != 0)
86473c51743SOwen Anderson 				*flags |= PAGE_ACCESSED;
8656a2e4f41SOwen Anderson 			if (is_pte_dirty(pte))
86673c51743SOwen Anderson 				*flags |= PAGE_MODIFIED;
867a25542e7Smilek7 
868a25542e7Smilek7 			if ((pte & kAttrUXN) == 0)
86973c51743SOwen Anderson 				*flags |= B_EXECUTE_AREA;
870a25542e7Smilek7 			if ((pte & kAttrPXN) == 0)
87173c51743SOwen Anderson 				*flags |= B_KERNEL_EXECUTE_AREA;
872a25542e7Smilek7 
873108f6fdcSOwen Anderson 			if ((pte & kAttrAPUserAccess) != 0)
87473c51743SOwen Anderson 				*flags |= B_READ_AREA;
875a25542e7Smilek7 
8766a2e4f41SOwen Anderson 			if ((pte & kAttrSWDBM) != 0) {
87773c51743SOwen Anderson 				*flags |= B_KERNEL_WRITE_AREA;
878108f6fdcSOwen Anderson 				if ((pte & kAttrAPUserAccess) != 0)
87973c51743SOwen Anderson 					*flags |= B_WRITE_AREA;
880a25542e7Smilek7 			}
88173c51743SOwen Anderson 		});
882a25542e7Smilek7 
883a25542e7Smilek7 	return B_OK;
884a25542e7Smilek7 }
885a25542e7Smilek7 
886a25542e7Smilek7 
// Interrupt-context variant of Query(). NOTE(review): this assumes Query()
// is safe at interrupt time (it only pins the CPU and performs atomic
// page-table reads here; it takes no lock) -- confirm against ProcessRange.
status_t
VMSAv8TranslationMap::QueryInterrupt(
	addr_t virtualAddress, phys_addr_t* _physicalAddress, uint32* _flags)
{
	return Query(virtualAddress, _physicalAddress, _flags);
}
893a25542e7Smilek7 
894a25542e7Smilek7 
895a25542e7Smilek7 status_t
896a25542e7Smilek7 VMSAv8TranslationMap::Protect(addr_t start, addr_t end, uint32 attributes, uint32 memoryType)
897a25542e7Smilek7 {
89832c542bdSOwen Anderson 	TRACE("VMSAv8TranslationMap::Protect(0x%" B_PRIxADDR ", 0x%"
89932c542bdSOwen Anderson 		B_PRIxADDR ", 0x%x, 0x%x)\n", start, end, attributes, memoryType);
90032c542bdSOwen Anderson 
901f73ff202SOwen Anderson 	uint64_t attr = GetMemoryAttr(attributes, memoryType, fIsKernel);
902a25542e7Smilek7 	size_t size = end - start + 1;
903a25542e7Smilek7 	ASSERT(ValidateVa(start));
904a25542e7Smilek7 
905af5e461fSOwen Anderson 	ThreadCPUPinner pinner(thread_get_current_thread());
906af5e461fSOwen Anderson 
907af5e461fSOwen Anderson 	ProcessRange(fPageTable, fInitialLevel, start, size, nullptr,
908f73ff202SOwen Anderson 		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
909da8c631eSOwen Anderson 			ASSERT(effectiveVa <= end);
910da8c631eSOwen Anderson 
911f73ff202SOwen Anderson 			// We need to use an atomic compare-swap loop because we must
912f73ff202SOwen Anderson 			// need to clear somes bits while setting others.
913f73ff202SOwen Anderson 			while (true) {
914f73ff202SOwen Anderson 				uint64_t oldPte = atomic_get64((int64_t*)ptePtr);
915f73ff202SOwen Anderson 				uint64_t newPte = oldPte & ~kPteAttrMask;
916f73ff202SOwen Anderson 				newPte |= attr;
917f73ff202SOwen Anderson 
9184bb796cfSOwen Anderson 				// Preserve access bit.
9194bb796cfSOwen Anderson 				newPte |= oldPte & kAttrAF;
9204bb796cfSOwen Anderson 
9216a2e4f41SOwen Anderson 				// Preserve the dirty bit.
9226a2e4f41SOwen Anderson 				if (is_pte_dirty(oldPte))
9236a2e4f41SOwen Anderson 					newPte = set_pte_dirty(newPte);
9244bb796cfSOwen Anderson 
9254bb796cfSOwen Anderson 				uint64_t oldMemoryType = oldPte & (kAttrShareability | kAttrMemoryAttrIdx);
9264bb796cfSOwen Anderson 				uint64_t newMemoryType = newPte & (kAttrShareability | kAttrMemoryAttrIdx);
9274bb796cfSOwen Anderson 				if (oldMemoryType != newMemoryType) {
9284bb796cfSOwen Anderson 					// ARM64 requires "break-before-make". We must set the PTE to an invalid
9294bb796cfSOwen Anderson 					// entry and flush the TLB as appropriate before we can write the new PTE.
9304bb796cfSOwen Anderson 					// In this case specifically, it applies any time we change cacheability or
9314bb796cfSOwen Anderson 					// shareability.
932129bc12bSOwen Anderson 					if (!AttemptPteBreakBeforeMake(ptePtr, oldPte, effectiveVa))
9334bb796cfSOwen Anderson 						continue;
9344bb796cfSOwen Anderson 
9354bb796cfSOwen Anderson 					atomic_set64((int64_t*)ptePtr, newPte);
9364bb796cfSOwen Anderson 					asm("dsb ishst"); // Ensure PTE write completed
937129bc12bSOwen Anderson 					asm("isb");
9384bb796cfSOwen Anderson 
9394bb796cfSOwen Anderson 					// No compare-exchange loop required in this case.
9404bb796cfSOwen Anderson 					break;
9414bb796cfSOwen Anderson 				} else {
942f73ff202SOwen Anderson 					if ((uint64_t)atomic_test_and_set64((int64_t*)ptePtr, newPte, oldPte) == oldPte) {
943129bc12bSOwen Anderson 						FlushVAIfAccessed(oldPte, effectiveVa);
944f73ff202SOwen Anderson 						break;
945f73ff202SOwen Anderson 					}
946f73ff202SOwen Anderson 				}
9474bb796cfSOwen Anderson 			}
948f73ff202SOwen Anderson 		});
949a25542e7Smilek7 
950a25542e7Smilek7 	return B_OK;
951a25542e7Smilek7 }
952a25542e7Smilek7 
953a25542e7Smilek7 
954a25542e7Smilek7 status_t
955a25542e7Smilek7 VMSAv8TranslationMap::ClearFlags(addr_t va, uint32 flags)
956a25542e7Smilek7 {
957a25542e7Smilek7 	ASSERT(ValidateVa(va));
958a25542e7Smilek7 
9594bb796cfSOwen Anderson 	bool clearAF = flags & PAGE_ACCESSED;
9604bb796cfSOwen Anderson 	bool setRO = flags & PAGE_MODIFIED;
961a25542e7Smilek7 
962744bdd73SOwen Anderson 	if (!clearAF && !setRO)
963744bdd73SOwen Anderson 		return B_OK;
964744bdd73SOwen Anderson 
965af5e461fSOwen Anderson 	ThreadCPUPinner pinner(thread_get_current_thread());
966af5e461fSOwen Anderson 
967129bc12bSOwen Anderson 	uint64_t oldPte = 0;
968af5e461fSOwen Anderson 	ProcessRange(fPageTable, fInitialLevel, va, B_PAGE_SIZE, nullptr,
969129bc12bSOwen Anderson 		[=, &oldPte](uint64_t* ptePtr, uint64_t effectiveVa) {
970744bdd73SOwen Anderson 			if (clearAF && setRO) {
971744bdd73SOwen Anderson 				// We need to use an atomic compare-swap loop because we must
972744bdd73SOwen Anderson 				// need to clear one bit while setting the other.
973744bdd73SOwen Anderson 				while (true) {
974129bc12bSOwen Anderson 					oldPte = atomic_get64((int64_t*)ptePtr);
975744bdd73SOwen Anderson 					uint64_t newPte = oldPte & ~kAttrAF;
9766a2e4f41SOwen Anderson 					newPte = set_pte_clean(newPte);
977744bdd73SOwen Anderson 
978744bdd73SOwen Anderson                     if ((uint64_t)atomic_test_and_set64((int64_t*)ptePtr, newPte, oldPte) == oldPte)
979744bdd73SOwen Anderson 						break;
980744bdd73SOwen Anderson 				}
981744bdd73SOwen Anderson 			} else if (clearAF) {
982129bc12bSOwen Anderson 				oldPte = atomic_and64((int64_t*)ptePtr, ~kAttrAF);
983744bdd73SOwen Anderson 			} else {
9846a2e4f41SOwen Anderson 				while (true) {
985129bc12bSOwen Anderson 					oldPte = atomic_get64((int64_t*)ptePtr);
986129bc12bSOwen Anderson 					if (!is_pte_dirty(oldPte)) {
987129bc12bSOwen Anderson 						// Avoid a TLB flush
988129bc12bSOwen Anderson 						oldPte = 0;
9896a2e4f41SOwen Anderson 						return;
990129bc12bSOwen Anderson 					}
9916a2e4f41SOwen Anderson 					uint64_t newPte = set_pte_clean(oldPte);
9926a2e4f41SOwen Anderson                     if ((uint64_t)atomic_test_and_set64((int64_t*)ptePtr, newPte, oldPte) == oldPte)
9936a2e4f41SOwen Anderson 						break;
9946a2e4f41SOwen Anderson 				}
995744bdd73SOwen Anderson 			}
996744bdd73SOwen Anderson 		});
997744bdd73SOwen Anderson 
998129bc12bSOwen Anderson 	FlushVAIfAccessed(oldPte, va);
999129bc12bSOwen Anderson 
1000a25542e7Smilek7 	return B_OK;
1001a25542e7Smilek7 }
1002a25542e7Smilek7 
1003a25542e7Smilek7 
// Clears the accessed (AF) and dirty state of the page mapped at `address`
// in `area`, optionally unmapping the page when it had not been accessed.
// The previous modified state is returned through `_modified`; the return
// value reflects FlushVAIfAccessed(), presumably whether the page had been
// accessed -- verify against its definition.
bool
VMSAv8TranslationMap::ClearAccessedAndModified(
	VMArea* area, addr_t address, bool unmapIfUnaccessed, bool& _modified)
{
	TRACE("VMSAv8TranslationMap::ClearAccessedAndModified(0x%"
		B_PRIxADDR "(%s), 0x%" B_PRIxADDR ", %d)\n", (addr_t)area,
		area->name, address, unmapIfUnaccessed);
	ASSERT(ValidateVa(address));

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	uint64_t oldPte = 0;
	ProcessRange(fPageTable, fInitialLevel, address, B_PAGE_SIZE, nullptr,
		[=, &oldPte](uint64_t* ptePtr, uint64_t effectiveVa) {
			// We need to use an atomic compare-swap loop because we must
			// first read the old PTE and make decisions based on the AF
			// bit to proceed.
			while (true) {
				oldPte = atomic_get64((int64_t*)ptePtr);
				uint64_t newPte = oldPte & ~kAttrAF;
				newPte = set_pte_clean(newPte);

				// If the page has not been accessed, unmap it.
				if (unmapIfUnaccessed && (oldPte & kAttrAF) == 0)
					newPte = 0;

				if ((uint64_t)atomic_test_and_set64((int64_t*)ptePtr, newPte, oldPte) == oldPte)
					break;
			}
			asm("dsb ishst"); // Ensure PTE write completed
		});

	pinner.Unlock();
	_modified = is_pte_dirty(oldPte);

	// If the page had been accessed we only cleared the flags; report it.
	if (FlushVAIfAccessed(oldPte, address))
		return true;

	if (!unmapIfUnaccessed)
		return false;

	locker.Detach(); // UnaccessedPageUnmapped takes ownership
	phys_addr_t oldPa = oldPte & kPteAddrMask;
	UnaccessedPageUnmapped(area, oldPa >> fPageBits);
	return false;
}
1051a25542e7Smilek7 
1052a25542e7Smilek7 
// Deferred-flush hook of the VMTranslationMap interface: intentionally a
// no-op in this implementation.
void
VMSAv8TranslationMap::Flush()
{
	// Necessary invalidation is performed during mapping,
	// no need to do anything more here.
}
1059