1a25542e7Smilek7 /*
2a25542e7Smilek7 * Copyright 2022 Haiku, Inc. All Rights Reserved.
3a25542e7Smilek7 * Distributed under the terms of the MIT License.
4a25542e7Smilek7 */
5a25542e7Smilek7 #include "VMSAv8TranslationMap.h"
6a25542e7Smilek7
7baf574c9SOwen Anderson #include <algorithm>
83b098011SOwen Anderson #include <slab/Slab.h>
9a25542e7Smilek7 #include <util/AutoLock.h>
10a25542e7Smilek7 #include <util/ThreadAutoLock.h>
113b098011SOwen Anderson #include <vm/VMAddressSpace.h>
123b098011SOwen Anderson #include <vm/VMCache.h>
13a25542e7Smilek7 #include <vm/vm_page.h>
14a25542e7Smilek7 #include <vm/vm_priv.h>
15a25542e7Smilek7
1632c542bdSOwen Anderson
1732c542bdSOwen Anderson //#define DO_TRACE
1832c542bdSOwen Anderson #ifdef DO_TRACE
1932c542bdSOwen Anderson # define TRACE(x...) dprintf(x)
2032c542bdSOwen Anderson #else
2132c542bdSOwen Anderson # define TRACE(x...) ;
2232c542bdSOwen Anderson #endif
2332c542bdSOwen Anderson
2432c542bdSOwen Anderson
// Per-class state shared by all translation maps (definitions for the
// statics declared in VMSAv8TranslationMap.h).
uint32_t VMSAv8TranslationMap::fHwFeature;
uint64_t VMSAv8TranslationMap::fMair;

// ASID Management
static constexpr size_t kAsidBits = 8;
static constexpr size_t kNumAsids = (1 << kAsidBits);
// Protects sAsidBitMap and sAsidMapping below; also held around TTBR0
// writes and ASID-wide TLB flushes in this file.
static spinlock sAsidLock = B_SPINLOCK_INITIALIZER;
// A bitmap to track which ASIDs are in use.
static uint64 sAsidBitMap[kNumAsids / 64] = {};
// A mapping from ASID to translation map.
static VMSAv8TranslationMap* sAsidMapping[kNumAsids] = {};
369fad0a5cSOwen Anderson
379fad0a5cSOwen Anderson
389fad0a5cSOwen Anderson static void
free_asid(size_t asid)399fad0a5cSOwen Anderson free_asid(size_t asid)
409fad0a5cSOwen Anderson {
419fad0a5cSOwen Anderson for (size_t i = 0; i < B_COUNT_OF(sAsidBitMap); ++i) {
429fad0a5cSOwen Anderson if (asid < 64) {
439fad0a5cSOwen Anderson sAsidBitMap[i] &= ~(uint64_t{1} << asid);
449fad0a5cSOwen Anderson return;
459fad0a5cSOwen Anderson }
469fad0a5cSOwen Anderson asid -= 64;
479fad0a5cSOwen Anderson }
489fad0a5cSOwen Anderson
499fad0a5cSOwen Anderson panic("Could not free ASID!");
509fad0a5cSOwen Anderson }
519fad0a5cSOwen Anderson
529fad0a5cSOwen Anderson
// Invalidate every TLB entry tagged with `asid`, on all CPUs in the
// inner-shareable domain, and wait until the invalidation has completed.
static void
flush_tlb_whole_asid(uint64_t asid)
{
	asm("dsb ishst"); // make preceding page-table writes visible first
	asm("tlbi aside1is, %0" ::"r"(asid << 48)); // ASID goes in bits [63:48]
	asm("dsb ish"); // wait for the broadcast invalidation to finish
	asm("isb"); // resynchronize the instruction stream
}
619406d2a4SOwen Anderson
629406d2a4SOwen Anderson
639fad0a5cSOwen Anderson static size_t
alloc_first_free_asid(void)649fad0a5cSOwen Anderson alloc_first_free_asid(void)
659fad0a5cSOwen Anderson {
669fad0a5cSOwen Anderson int asid = 0;
679fad0a5cSOwen Anderson for (size_t i = 0; i < B_COUNT_OF(sAsidBitMap); ++i) {
689fad0a5cSOwen Anderson int avail = __builtin_ffsll(~sAsidBitMap[i]);
699fad0a5cSOwen Anderson if (avail != 0) {
709fad0a5cSOwen Anderson sAsidBitMap[i] |= (uint64_t{1} << (avail-1));
719fad0a5cSOwen Anderson asid += (avail - 1);
729fad0a5cSOwen Anderson return asid;
739fad0a5cSOwen Anderson }
749fad0a5cSOwen Anderson asid += 64;
759fad0a5cSOwen Anderson }
769fad0a5cSOwen Anderson
779fad0a5cSOwen Anderson return kNumAsids;
789fad0a5cSOwen Anderson }
797908993dSOwen Anderson
80a25542e7Smilek7
816a2e4f41SOwen Anderson static bool
is_pte_dirty(uint64_t pte)826a2e4f41SOwen Anderson is_pte_dirty(uint64_t pte)
836a2e4f41SOwen Anderson {
84bb43aaacSOwen Anderson if ((pte & kAttrSWDIRTY) != 0)
85bb43aaacSOwen Anderson return true;
86bb43aaacSOwen Anderson
876a2e4f41SOwen Anderson return (pte & kAttrAPReadOnly) == 0;
886a2e4f41SOwen Anderson }
896a2e4f41SOwen Anderson
906a2e4f41SOwen Anderson
916a2e4f41SOwen Anderson static uint64_t
set_pte_dirty(uint64_t pte)926a2e4f41SOwen Anderson set_pte_dirty(uint64_t pte)
936a2e4f41SOwen Anderson {
946a2e4f41SOwen Anderson if ((pte & kAttrSWDBM) != 0)
956a2e4f41SOwen Anderson return pte & ~kAttrAPReadOnly;
966a2e4f41SOwen Anderson
97bb43aaacSOwen Anderson return pte | kAttrSWDIRTY;
986a2e4f41SOwen Anderson }
996a2e4f41SOwen Anderson
1006a2e4f41SOwen Anderson
1016a2e4f41SOwen Anderson static uint64_t
set_pte_clean(uint64_t pte)1026a2e4f41SOwen Anderson set_pte_clean(uint64_t pte)
1036a2e4f41SOwen Anderson {
104bb43aaacSOwen Anderson pte &= ~kAttrSWDIRTY;
1056a2e4f41SOwen Anderson return pte | kAttrAPReadOnly;
1066a2e4f41SOwen Anderson }
1076a2e4f41SOwen Anderson
1086a2e4f41SOwen Anderson
109129bc12bSOwen Anderson static bool
is_pte_accessed(uint64_t pte)110129bc12bSOwen Anderson is_pte_accessed(uint64_t pte)
111129bc12bSOwen Anderson {
112129bc12bSOwen Anderson return (pte & kPteValidMask) != 0 && (pte & kAttrAF) != 0;
113129bc12bSOwen Anderson }
114129bc12bSOwen Anderson
115129bc12bSOwen Anderson
// Construct a translation map over the page table rooted at `pageTable`
// (may be 0; Map() allocates the root lazily). The kernel map owns ASID 0
// permanently; user maps start without an ASID (-1) until first switched in.
VMSAv8TranslationMap::VMSAv8TranslationMap(
	bool kernel, phys_addr_t pageTable, int pageBits, int vaBits, int minBlockLevel)
	:
	fIsKernel(kernel),
	fPageTable(pageTable),
	fPageBits(pageBits),
	fVaBits(vaBits),
	fMinBlockLevel(minBlockLevel),
	fASID(kernel ? 0 : -1),
	fRefcount(0)
{
	TRACE("+VMSAv8TranslationMap(%p, %d, 0x%" B_PRIxADDR ", %d, %d, %d)\n", this,
		kernel, pageTable, pageBits, vaBits, minBlockLevel);

	// The top translation level depends on how many VA bits the page
	// granule leaves to resolve.
	fInitialLevel = CalcStartLevel(fVaBits, fPageBits);
}
132a25542e7Smilek7
133a25542e7Smilek7
VMSAv8TranslationMap::~VMSAv8TranslationMap()
{
	TRACE("-VMSAv8TranslationMap(%p)\n", this);
	TRACE(" fIsKernel: %d, fPageTable: 0x%" B_PRIxADDR ", fASID: %d, fRefcount: %d\n",
		fIsKernel, fPageTable, fASID, fRefcount);

	// The kernel map is never destroyed, and no CPU may still have this
	// map active (SwitchUserMap tracks active users via fRefcount).
	ASSERT(!fIsKernel);
	ASSERT(fRefcount == 0);

	ThreadCPUPinner pinner(thread_get_current_thread());
	InterruptsSpinLocker locker(sAsidLock);

	// Tear down the whole table tree and flush any stale TLB entries.
	FreeTable(fPageTable, 0, fInitialLevel);

	// Return our ASID to the pool so it can be reused.
	if (fASID != -1) {
		sAsidMapping[fASID] = NULL;
		free_asid(fASID);
	}
}
1539fad0a5cSOwen Anderson
1549fad0a5cSOwen Anderson
// Switch user map into TTBR0.
// Passing kernel map here configures empty page table.
void
VMSAv8TranslationMap::SwitchUserMap(VMSAv8TranslationMap *from, VMSAv8TranslationMap *to)
{
	InterruptsSpinLocker locker(sAsidLock);

	// fRefcount counts CPUs currently running each user map; only maps
	// with a zero refcount may have their ASID stolen below.
	if (!from->fIsKernel) {
		from->fRefcount--;
	}

	if (!to->fIsKernel) {
		to->fRefcount++;
	} else {
		// "Switching to the kernel map" means no user address space:
		// point TTBR0 at an empty table instead.
		arch_vm_install_empty_table_ttbr0();
		return;
	}

	ASSERT(to->fPageTable != 0);
	uint64_t ttbr = to->fPageTable | ((fHwFeature & HW_COMMON_NOT_PRIVATE) != 0 ? 1 : 0);

	// Fast path: the map already owns an ASID, just program TTBR0.
	if (to->fASID != -1) {
		WRITE_SPECIALREG(TTBR0_EL1, ((uint64_t)to->fASID << 48) | ttbr);
		asm("isb");
		return;
	}

	// Try to allocate a fresh ASID from the bitmap.
	size_t allocatedAsid = alloc_first_free_asid();
	if (allocatedAsid != kNumAsids) {
		to->fASID = allocatedAsid;
		sAsidMapping[allocatedAsid] = to;

		WRITE_SPECIALREG(TTBR0_EL1, (allocatedAsid << 48) | ttbr);
		// The ASID may have carried entries for a previous owner.
		flush_tlb_whole_asid(allocatedAsid);
		return;
	}

	// All ASIDs are in use: steal one from a map no CPU is running.
	// NOTE(review): this assumes every sAsidMapping slot is non-NULL once
	// the bitmap is exhausted (including slot 0, nominally the kernel's) —
	// confirm that against the ASID setup code.
	for (size_t i = 0; i < kNumAsids; ++i) {
		if (sAsidMapping[i]->fRefcount == 0) {
			sAsidMapping[i]->fASID = -1;
			to->fASID = i;
			sAsidMapping[i] = to;

			WRITE_SPECIALREG(TTBR0_EL1, (i << 48) | ttbr);
			flush_tlb_whole_asid(i);
			return;
		}
	}

	panic("cannot assign ASID");
}
206a25542e7Smilek7
207a25542e7Smilek7
208a25542e7Smilek7 int
CalcStartLevel(int vaBits,int pageBits)209a25542e7Smilek7 VMSAv8TranslationMap::CalcStartLevel(int vaBits, int pageBits)
210a25542e7Smilek7 {
211a25542e7Smilek7 int level = 4;
212a25542e7Smilek7
213a25542e7Smilek7 int bitsLeft = vaBits - pageBits;
214a25542e7Smilek7 while (bitsLeft > 0) {
215a25542e7Smilek7 int tableBits = pageBits - 3;
216a25542e7Smilek7 bitsLeft -= tableBits;
217a25542e7Smilek7 level--;
218a25542e7Smilek7 }
219a25542e7Smilek7
220a25542e7Smilek7 ASSERT(level >= 0);
221a25542e7Smilek7
222a25542e7Smilek7 return level;
223a25542e7Smilek7 }
224a25542e7Smilek7
225a25542e7Smilek7
// Acquire the map's recursive lock; always succeeds.
bool
VMSAv8TranslationMap::Lock()
{
	TRACE("VMSAv8TranslationMap::Lock()\n");
	recursive_lock_lock(&fLock);
	return true;
}
233a25542e7Smilek7
234a25542e7Smilek7
// Release the map's recursive lock.
void
VMSAv8TranslationMap::Unlock()
{
	TRACE("VMSAv8TranslationMap::Unlock()\n");
	recursive_lock_unlock(&fLock);
}
241a25542e7Smilek7
242a25542e7Smilek7
// Not implemented for VMSAv8; panics if ever called.
addr_t
VMSAv8TranslationMap::MappedSize() const
{
	panic("VMSAv8TranslationMap::MappedSize not implemented");
	return 0;
}
249a25542e7Smilek7
250a25542e7Smilek7
// Worst-case number of page-table pages needed to map [start, end].
size_t
VMSAv8TranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// VA range covered by a single entry at each level: each level up
	// multiplies the coverage by 512 (4K granule, 512 entries per table).
	constexpr uint64_t level3Range = B_PAGE_SIZE * 512;
	constexpr uint64_t level2Range = level3Range * 512;
	constexpr uint64_t level1Range = level2Range * 512;
	constexpr uint64_t level0Range = level1Range * 512;

	// start == 0 means "placement unknown": shift the range to the least
	// favourable alignment so the estimate is a true upper bound.
	if (start == 0) {
		start = level3Range - B_PAGE_SIZE;
		end += start;
	}

	// Number of distinct tables the range can touch at each level.
	size_t requiredPages[] = {
		end / level0Range + 1 - start / level0Range,
		end / level1Range + 1 - start / level1Range,
		end / level2Range + 1 - start / level2Range,
		end / level3Range + 1 - start / level3Range
	};

	// Sum over the levels this map actually uses.
	size_t ret = 0;
	for (int i = fInitialLevel; i < 4; ++i) {
		ret += requiredPages[i];
	}

	return ret;
}
278a25542e7Smilek7
279a25542e7Smilek7
// Page tables are accessed through the kernel's physical-memory mapping;
// translate a table's physical address into that virtual window.
uint64_t*
VMSAv8TranslationMap::TableFromPa(phys_addr_t pa)
{
	return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
}
285a25542e7Smilek7
286a25542e7Smilek7
287a25542e7Smilek7 void
FreeTable(phys_addr_t ptPa,uint64_t va,int level)2881c408b72SOwen Anderson VMSAv8TranslationMap::FreeTable(phys_addr_t ptPa, uint64_t va, int level)
289a25542e7Smilek7 {
2907908993dSOwen Anderson ASSERT(level < 4);
291a25542e7Smilek7
292a25542e7Smilek7 int tableBits = fPageBits - 3;
293a25542e7Smilek7 uint64_t tableSize = 1UL << tableBits;
2947908993dSOwen Anderson uint64_t vaMask = (1UL << fVaBits) - 1;
295a25542e7Smilek7
2967908993dSOwen Anderson int shift = tableBits * (3 - level) + fPageBits;
2977908993dSOwen Anderson uint64_t entrySize = 1UL << shift;
2987908993dSOwen Anderson
2997908993dSOwen Anderson uint64_t nextVa = va;
300a25542e7Smilek7 uint64_t* pt = TableFromPa(ptPa);
301a25542e7Smilek7 for (uint64_t i = 0; i < tableSize; i++) {
3027908993dSOwen Anderson uint64_t oldPte = (uint64_t) atomic_get_and_set64((int64*) &pt[i], 0);
3037908993dSOwen Anderson
30418a27fe0SOwen Anderson if (level < 3 && (oldPte & kPteTypeMask) == kPteTypeL012Table) {
3051c408b72SOwen Anderson FreeTable(oldPte & kPteAddrMask, nextVa, level + 1);
30618a27fe0SOwen Anderson } else if ((oldPte & kPteTypeMask) != 0) {
3077908993dSOwen Anderson uint64_t fullVa = (fIsKernel ? ~vaMask : 0) | nextVa;
3081c408b72SOwen Anderson
3091c408b72SOwen Anderson // Use this rather than FlushVAIfAccessed so that we don't have to
3101c408b72SOwen Anderson // acquire sAsidLock for every entry.
3111c408b72SOwen Anderson flush_va_if_accessed(oldPte, nextVa, fASID);
312a25542e7Smilek7 }
313a25542e7Smilek7
3147908993dSOwen Anderson nextVa += entrySize;
3157908993dSOwen Anderson }
3167908993dSOwen Anderson
317a25542e7Smilek7 vm_page* page = vm_lookup_page(ptPa >> fPageBits);
3187908993dSOwen Anderson DEBUG_PAGE_ACCESS_START(page);
319a25542e7Smilek7 vm_page_set_state(page, PAGE_STATE_FREE);
320a25542e7Smilek7 }
321a25542e7Smilek7
322a25542e7Smilek7
// Make a new page sub-table.
// The parent table is `ptPa`, and the new sub-table's PTE will be at `index`
// in it.
// Returns the physical address of the new table, or the address of the existing
// one if the PTE is already filled. Returns 0 when there is no table and no
// reservation to allocate one from.
phys_addr_t
VMSAv8TranslationMap::GetOrMakeTable(phys_addr_t ptPa, int level, int index,
	vm_page_reservation* reservation)
{
	ASSERT(level < 3);

	uint64_t* ptePtr = TableFromPa(ptPa) + index;
	uint64_t oldPte = atomic_get64((int64*) ptePtr);

	int type = oldPte & kPteTypeMask;
	ASSERT(type != kPteTypeL12Block);

	if (type == kPteTypeL012Table) {
		// This is table entry already, just return it
		return oldPte & kPteAddrMask;
	} else if (reservation != nullptr) {
		// Create new table there
		vm_page* page = vm_page_allocate_page(reservation, PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
		phys_addr_t newTablePa = page->physical_page_number << fPageBits;
		DEBUG_PAGE_ACCESS_END(page);

		// We only create mappings at the final level so we don't need to handle
		// splitting block mappings
		ASSERT(type != kPteTypeL12Block);

		// Ensure that writes to page being attached have completed
		asm("dsb ishst");

		// Publish the new table only if the slot is still in the state we
		// observed (lock-free compare-and-swap).
		uint64_t oldPteRefetch = (uint64_t)atomic_test_and_set64((int64*) ptePtr,
			newTablePa | kPteTypeL012Table, oldPte);
		if (oldPteRefetch != oldPte) {
			// If the old PTE has mutated, it must be because another thread has allocated the
			// sub-table at the same time as us. If that has happened, deallocate the page we
			// setup and use the one they installed instead.
			ASSERT((oldPteRefetch & kPteTypeMask) == kPteTypeL012Table);
			DEBUG_PAGE_ACCESS_START(page);
			vm_page_set_state(page, PAGE_STATE_FREE);
			return oldPteRefetch & kPteAddrMask;
		}

		return newTablePa;
	}

	// There's no existing table and we have no reservation
	return 0;
}
374a25542e7Smilek7
375a25542e7Smilek7
376129bc12bSOwen Anderson bool
flush_va_if_accessed(uint64_t pte,addr_t va,int asid)3771fa60a5cSOwen Anderson flush_va_if_accessed(uint64_t pte, addr_t va, int asid)
378baf574c9SOwen Anderson {
379129bc12bSOwen Anderson if (!is_pte_accessed(pte))
380129bc12bSOwen Anderson return false;
381129bc12bSOwen Anderson
3829473fe5eSOwen Anderson if ((pte & kAttrNG) == 0) {
3839473fe5eSOwen Anderson // Flush from all address spaces
384af5e461fSOwen Anderson asm("dsb ishst"); // Ensure PTE write completed
385af5e461fSOwen Anderson asm("tlbi vaae1is, %0" ::"r"(((va >> 12) & kTLBIMask)));
386af5e461fSOwen Anderson asm("dsb ish");
387af5e461fSOwen Anderson asm("isb");
3881fa60a5cSOwen Anderson } else if (asid != -1) {
389129bc12bSOwen Anderson asm("dsb ishst"); // Ensure PTE write completed
3901fa60a5cSOwen Anderson asm("tlbi vae1is, %0" ::"r"(((va >> 12) & kTLBIMask) | (uint64_t(asid) << 48)));
391baf574c9SOwen Anderson asm("dsb ish"); // Wait for TLB flush to complete
392129bc12bSOwen Anderson asm("isb");
393129bc12bSOwen Anderson return true;
394baf574c9SOwen Anderson }
395129bc12bSOwen Anderson
396129bc12bSOwen Anderson return false;
397baf574c9SOwen Anderson }
398baf574c9SOwen Anderson
// Wrapper around flush_va_if_accessed() that holds sAsidLock so fASID
// cannot change (be stolen/assigned) while the flush is issued.
bool
VMSAv8TranslationMap::FlushVAIfAccessed(uint64_t pte, addr_t va) {
	InterruptsSpinLocker locker(sAsidLock);
	return flush_va_if_accessed(pte, va, fASID);
}
4041fa60a5cSOwen Anderson
405baf574c9SOwen Anderson
// ARM64 "break-before-make": before replacing a valid PTE, it must first
// be made invalid and the TLB flushed. Atomically clears *ptePtr if it
// still equals `oldPte`, then flushes `va`. Returns false if another
// thread changed the PTE first (caller should re-read and retry).
bool
VMSAv8TranslationMap::AttemptPteBreakBeforeMake(uint64_t* ptePtr, uint64_t oldPte, addr_t va)
{
	uint64_t loadedPte = atomic_test_and_set64((int64_t*)ptePtr, 0, oldPte);
	if (loadedPte != oldPte)
		return false;

	FlushVAIfAccessed(oldPte, va);

	return true;
}
417baf574c9SOwen Anderson
418baf574c9SOwen Anderson
419baf574c9SOwen Anderson template<typename UpdatePte>
420baf574c9SOwen Anderson void
ProcessRange(phys_addr_t ptPa,int level,addr_t va,size_t size,vm_page_reservation * reservation,UpdatePte && updatePte)421baf574c9SOwen Anderson VMSAv8TranslationMap::ProcessRange(phys_addr_t ptPa, int level, addr_t va, size_t size,
422baf574c9SOwen Anderson vm_page_reservation* reservation, UpdatePte&& updatePte)
423baf574c9SOwen Anderson {
424baf574c9SOwen Anderson ASSERT(level < 4);
425baf574c9SOwen Anderson ASSERT(ptPa != 0);
426baf574c9SOwen Anderson
427af5e461fSOwen Anderson uint64_t pageMask = (1UL << fPageBits) - 1;
428af5e461fSOwen Anderson uint64_t vaMask = (1UL << fVaBits) - 1;
429af5e461fSOwen Anderson
430af5e461fSOwen Anderson ASSERT((va & pageMask) == 0);
431af5e461fSOwen Anderson
432baf574c9SOwen Anderson int tableBits = fPageBits - 3;
433baf574c9SOwen Anderson uint64_t tableMask = (1UL << tableBits) - 1;
434baf574c9SOwen Anderson
435baf574c9SOwen Anderson int shift = tableBits * (3 - level) + fPageBits;
436baf574c9SOwen Anderson uint64_t entrySize = 1UL << shift;
437baf574c9SOwen Anderson uint64_t entryMask = entrySize - 1;
438baf574c9SOwen Anderson
439baf574c9SOwen Anderson uint64_t alignedDownVa = va & ~entryMask;
440d2397007SOwen Anderson uint64_t end = va + size - 1;
441baf574c9SOwen Anderson if (level == 3)
442baf574c9SOwen Anderson ASSERT(alignedDownVa == va);
443baf574c9SOwen Anderson
444d2397007SOwen Anderson for (uint64_t effectiveVa = alignedDownVa; effectiveVa < end; effectiveVa += entrySize) {
445af5e461fSOwen Anderson int index = ((effectiveVa & vaMask) >> shift) & tableMask;
446baf574c9SOwen Anderson uint64_t* ptePtr = TableFromPa(ptPa) + index;
447baf574c9SOwen Anderson
448baf574c9SOwen Anderson if (level == 3) {
449baf574c9SOwen Anderson updatePte(ptePtr, effectiveVa);
450baf574c9SOwen Anderson } else {
451baf574c9SOwen Anderson phys_addr_t subTable = GetOrMakeTable(ptPa, level, index, reservation);
452baf574c9SOwen Anderson
453baf574c9SOwen Anderson // When reservation is null, we can't create a new subtable. This can be intentional,
454baf574c9SOwen Anderson // for example when called from Unmap().
455baf574c9SOwen Anderson if (subTable == 0)
456baf574c9SOwen Anderson continue;
457baf574c9SOwen Anderson
458d2397007SOwen Anderson if (effectiveVa < va) {
459d2397007SOwen Anderson // The range begins inside the slot.
460d2397007SOwen Anderson if (effectiveVa + entrySize - 1 > end) {
461d2397007SOwen Anderson // The range ends within the slot.
462d2397007SOwen Anderson ProcessRange(subTable, level + 1, va, size, reservation, updatePte);
463d2397007SOwen Anderson } else {
464d2397007SOwen Anderson // The range extends past the end of the slot.
465d2397007SOwen Anderson ProcessRange(subTable, level + 1, va, effectiveVa + entrySize - va, reservation, updatePte);
466d2397007SOwen Anderson }
467d2397007SOwen Anderson } else {
468d2397007SOwen Anderson // The range beginning is aligned to the slot.
469d2397007SOwen Anderson if (effectiveVa + entrySize - 1 > end) {
470d2397007SOwen Anderson // The range ends within the slot.
471d2397007SOwen Anderson ProcessRange(subTable, level + 1, effectiveVa, end - effectiveVa + 1,
472d2397007SOwen Anderson reservation, updatePte);
473d2397007SOwen Anderson } else {
474d2397007SOwen Anderson // The range extends past the end of the slot.
475d2397007SOwen Anderson ProcessRange(subTable, level + 1, effectiveVa, entrySize, reservation, updatePte);
476d2397007SOwen Anderson }
477d2397007SOwen Anderson }
478baf574c9SOwen Anderson }
479baf574c9SOwen Anderson }
480baf574c9SOwen Anderson }
481baf574c9SOwen Anderson
482baf574c9SOwen Anderson
483a25542e7Smilek7 uint8_t
MairIndex(uint8_t type)484a25542e7Smilek7 VMSAv8TranslationMap::MairIndex(uint8_t type)
485a25542e7Smilek7 {
486a25542e7Smilek7 for (int i = 0; i < 8; i++)
487a25542e7Smilek7 if (((fMair >> (i * 8)) & 0xff) == type)
488a25542e7Smilek7 return i;
489a25542e7Smilek7
490a25542e7Smilek7 panic("MAIR entry not found");
491a25542e7Smilek7 return 0;
492a25542e7Smilek7 }
493a25542e7Smilek7
494a25542e7Smilek7
// Build the attribute bits of a page descriptor from Haiku protection
// flags (`attributes`) and the area's memory type.
uint64_t
VMSAv8TranslationMap::GetMemoryAttr(uint32 attributes, uint32 memoryType, bool isKernel)
{
	uint64_t attr = 0;

	// User mappings are non-global, so they are tagged with an ASID.
	if (!isKernel)
		attr |= kAttrNG;

	// Execute permission is separate for EL0 (UXN) and EL1 (PXN).
	if ((attributes & B_EXECUTE_AREA) == 0)
		attr |= kAttrUXN;
	if ((attributes & B_KERNEL_EXECUTE_AREA) == 0)
		attr |= kAttrPXN;

	// SWDBM is software reserved bit that we use to mark that
	// writes are allowed, and fault handler should clear kAttrAPReadOnly.
	// In that case kAttrAPReadOnly doubles as not-dirty bit.
	// Additionally dirty state can be stored in SWDIRTY, in order not to lose
	// dirty state when changing protection from RW to RO.

	// All page permissions begin life in RO state.
	attr |= kAttrAPReadOnly;

	// User-Execute implies User-Read, because it would break PAN otherwise
	if ((attributes & B_READ_AREA) != 0 || (attributes & B_EXECUTE_AREA) != 0)
		attr |= kAttrAPUserAccess; // Allow user reads

	if ((attributes & B_WRITE_AREA) != 0 || (attributes & B_KERNEL_WRITE_AREA) != 0)
		attr |= kAttrSWDBM; // Mark as writeable

	// When supported by hardware copy our SWDBM bit into DBM,
	// so that kAttrAPReadOnly is cleared on write attempt automatically
	// without going through fault handler.
	if ((fHwFeature & HW_DIRTY) != 0 && (attr & kAttrSWDBM) != 0)
		attr |= kAttrDBM;

	attr |= kAttrSHInnerShareable; // Inner Shareable

	// Select the MAIR slot matching the requested memory type.
	uint8_t type = MAIR_NORMAL_WB;

	switch (memoryType & B_MEMORY_TYPE_MASK) {
		case B_UNCACHED_MEMORY:
			// TODO: This probably should be nGnRE for PCI
			type = MAIR_DEVICE_nGnRnE;
			break;
		case B_WRITE_COMBINING_MEMORY:
			type = MAIR_NORMAL_NC;
			break;
		case B_WRITE_THROUGH_MEMORY:
			type = MAIR_NORMAL_WT;
			break;
		case B_WRITE_PROTECTED_MEMORY:
			// NOTE(review): same MAIR type as write-through above —
			// confirm this mapping is intentional.
			type = MAIR_NORMAL_WT;
			break;
		default:
		case B_WRITE_BACK_MEMORY:
			type = MAIR_NORMAL_WB;
			break;
	}

	attr |= MairIndex(type) << 2;

	return attr;
}
558a25542e7Smilek7
559a25542e7Smilek7
// Map one page: install a level-3 PTE translating `va` to `pa` with the
// given protection and memory type, allocating intermediate tables from
// `reservation` as needed. Returns B_OK.
status_t
VMSAv8TranslationMap::Map(addr_t va, phys_addr_t pa, uint32 attributes, uint32 memoryType,
	vm_page_reservation* reservation)
{
	TRACE("VMSAv8TranslationMap::Map(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
		", 0x%x, 0x%x)\n", va, pa, attributes, memoryType);

	ThreadCPUPinner pinner(thread_get_current_thread());

	ASSERT(ValidateVa(va));
	uint64_t attr = GetMemoryAttr(attributes, memoryType, fIsKernel);

	// During first mapping we need to allocate root table
	if (fPageTable == 0) {
		vm_page* page = vm_page_allocate_page(reservation, PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
		DEBUG_PAGE_ACCESS_END(page);
		fPageTable = page->physical_page_number << fPageBits;
	}

	ProcessRange(fPageTable, fInitialLevel, va, B_PAGE_SIZE, reservation,
		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
			// Retry until the PTE is already what we want, or we win the
			// break-before-make race against concurrent updaters.
			while (true) {
				phys_addr_t effectivePa = effectiveVa - va + pa;
				uint64_t oldPte = atomic_get64((int64*)ptePtr);
				uint64_t newPte = effectivePa | attr | kPteTypeL3Page;

				if (newPte == oldPte)
					return;

				if ((oldPte & kPteValidMask) != 0) {
					// ARM64 requires "break-before-make". We must set the PTE to an invalid
					// entry and flush the TLB as appropriate before we can write the new PTE.
					if (!AttemptPteBreakBeforeMake(ptePtr, oldPte, effectiveVa))
						continue;
				}

				// Install the new PTE
				atomic_set64((int64*)ptePtr, newPte);
				asm("dsb ishst"); // Ensure PTE write completed
				asm("isb");
				break;
			}
		});

	return B_OK;
}
606a25542e7Smilek7
607a25542e7Smilek7
// Unmap the inclusive virtual range [start, end]: clear every PTE in it
// and flush the TLB for entries that were accessed. No page-state
// bookkeeping is done here (see UnmapPage/UnmapPages for that).
status_t
VMSAv8TranslationMap::Unmap(addr_t start, addr_t end)
{
	TRACE("VMSAv8TranslationMap::Unmap(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
		")\n", start, end);
	ThreadCPUPinner pinner(thread_get_current_thread());

	size_t size = end - start + 1;
	ASSERT(ValidateVa(start));

	// Nothing was ever mapped.
	if (fPageTable == 0)
		return B_OK;

	// Null reservation: missing sub-tables are simply skipped.
	ProcessRange(fPageTable, fInitialLevel, start, size, nullptr,
		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
			ASSERT(effectiveVa <= end);
			uint64_t oldPte = atomic_get_and_set64((int64_t*)ptePtr, 0);
			FlushVAIfAccessed(oldPte, effectiveVa);
		});

	return B_OK;
}
630a25542e7Smilek7
631a25542e7Smilek7
// Unmap a single page of `area` at `address` and update the generic VM
// bookkeeping (accessed/dirty propagation, optional page-queue update).
// Returns B_ENTRY_NOT_FOUND if no valid mapping was present.
status_t
VMSAv8TranslationMap::UnmapPage(VMArea* area, addr_t address, bool updatePageQueue)
{
	TRACE("VMSAv8TranslationMap::UnmapPage(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", %d)\n", (addr_t)area, area->name, address,
		updatePageQueue);

	ASSERT(ValidateVa(address));
	ThreadCPUPinner pinner(thread_get_current_thread());
	RecursiveLocker locker(fLock);

	// Atomically clear the single PTE; null reservation means no tables
	// are created if the page was never mapped.
	uint64_t oldPte = 0;
	ProcessRange(fPageTable, fInitialLevel, address, B_PAGE_SIZE, nullptr,
		[=, &oldPte](uint64_t* ptePtr, uint64_t effectiveVa) {
			oldPte = atomic_get_and_set64((int64_t*)ptePtr, 0);
			FlushVAIfAccessed(oldPte, effectiveVa);
		});

	if ((oldPte & kPteValidMask) == 0)
		return B_ENTRY_NOT_FOUND;

	// Hand the accessed/dirty state over to the generic VM layer; drop our
	// locks first since PageUnmapped takes over.
	pinner.Unlock();
	locker.Detach();
	PageUnmapped(area, (oldPte & kPteAddrMask) >> fPageBits, is_pte_accessed(oldPte),
		is_pte_dirty(oldPte), updatePageQueue);

	return B_OK;
}
660a25542e7Smilek7
661a25542e7Smilek7
void
VMSAv8TranslationMap::UnmapPages(VMArea* area, addr_t address, size_t size, bool updatePageQueue)
{
	TRACE("VMSAv8TranslationMap::UnmapPages(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d)\n", (addr_t)area,
		area->name, address, size, updatePageQueue);

	ASSERT(ValidateVa(address));
	// Mappings removed below are collected here and freed after fLock is
	// released, since vm_free_page_mapping() may need to allocate/lock.
	VMAreaMappings queue;
	ThreadCPUPinner pinner(thread_get_current_thread());
	RecursiveLocker locker(fLock);

	ProcessRange(fPageTable, fInitialLevel, address, size, nullptr,
		[=, &queue](uint64_t* ptePtr, uint64_t effectiveVa) {
			// Atomically clear each PTE; flush the TLB only if the entry
			// could have been cached (access flag set).
			uint64_t oldPte = atomic_get_and_set64((int64_t*)ptePtr, 0);
			FlushVAIfAccessed(oldPte, effectiveVa);
			if ((oldPte & kPteValidMask) == 0)
				return;

			// Device mappings have no vm_page backing to update.
			if (area->cache_type == CACHE_TYPE_DEVICE)
				return;

			// get the page
			vm_page* page = vm_lookup_page((oldPte & kPteAddrMask) >> fPageBits);
			ASSERT(page != NULL);

			DEBUG_PAGE_ACCESS_START(page);

			// transfer the accessed/dirty flags to the page
			page->accessed = is_pte_accessed(oldPte);
			page->modified = is_pte_dirty(oldPte);

			// remove the mapping object/decrement the wired_count of the
			// page
			if (area->wiring == B_NO_LOCK) {
				vm_page_mapping* mapping = NULL;
				vm_page_mappings::Iterator iterator
					= page->mappings.GetIterator();
				while ((mapping = iterator.Next()) != NULL) {
					if (mapping->area == area)
						break;
				}

				ASSERT(mapping != NULL);

				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				queue.Add(mapping);
			} else
				page->DecrementWiredCount();

			// If this was the last mapping of the page, update the global
			// counter and (optionally) move the page to the right queue.
			if (!page->IsMapped()) {
				atomic_add(&gMappedPagesCount, -1);

				if (updatePageQueue) {
					if (page->Cache()->temporary)
						vm_page_set_state(page, PAGE_STATE_INACTIVE);
					else if (page->modified)
						vm_page_set_state(page, PAGE_STATE_MODIFIED);
					else
						vm_page_set_state(page, PAGE_STATE_CACHED);
				}
			}

			DEBUG_PAGE_ACCESS_END(page);
		});

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical here, as in all cases this method is used, the unmapped
	// area range is unmapped for good (resized/cut) and the pages will likely
	// be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);

	while (vm_page_mapping* mapping = queue.RemoveHead())
		vm_free_page_mapping(mapping->page->physical_page_number, mapping, freeFlags);
}
7443b098011SOwen Anderson
7453b098011SOwen Anderson
void
VMSAv8TranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	TRACE("VMSAv8TranslationMap::UnmapArea(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d, %d)\n", (addr_t)area,
		area->name, area->Base(), area->Size(), deletingAddressSpace,
		ignoreTopCachePageFlags);

	// Device and wired areas keep no usable mapping list here; fall back to
	// the range-walking unmap, which handles both cases.
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		UnmapPages(area, area->Base(), area->Size(), true);
		return;
	}

	// If the whole address space is going away and the top cache's page
	// flags may be ignored, PTEs only need clearing for pages whose cache
	// differs from the area's (their accessed/dirty state still matters).
	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	// Take over the area's mapping list; the objects are freed at the end,
	// after fLock has been released.
	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {

		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			// Reconstruct the page's virtual address from its cache offset
			// relative to the area.
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE)
				- area->cache_offset);

			uint64_t oldPte = 0;
			ProcessRange(fPageTable, fInitialLevel, address, B_PAGE_SIZE, nullptr,
				[=, &oldPte](uint64_t* ptePtr, uint64_t effectiveVa) {
					oldPte = atomic_get_and_set64((int64_t*)ptePtr, 0);
					// No per-page TLB flush needed when the entire address
					// space is being deleted.
					if (!deletingAddressSpace)
						FlushVAIfAccessed(oldPte, effectiveVa);
				});

			if ((oldPte & kPteValidMask) == 0) {
				panic("page %p has mapping for area %p "
					"(%#" B_PRIxADDR "), but has no "
					"page table", page, area, address);
				continue;
			}

			// transfer the accessed/dirty flags to the page and
			// invalidate the mapping, if necessary
			if (is_pte_dirty(oldPte))
				page->modified = true;
			if (is_pte_accessed(oldPte))
				page->accessed = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary) {
					vm_page_set_state(page,
						PAGE_STATE_INACTIVE);
				} else if (page->modified) {
					vm_page_set_state(page,
						PAGE_STATE_MODIFIED);
				} else {
					vm_page_set_state(page,
						PAGE_STATE_CACHED);
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}
	}

	locker.Unlock();

	// Free the collected mapping objects outside fLock.
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);

	while (vm_page_mapping* mapping = mappings.RemoveHead())
		vm_free_page_mapping(mapping->page->physical_page_number, mapping, freeFlags);
}
8370a367809SOwen Anderson
8380a367809SOwen Anderson
839a25542e7Smilek7 bool
ValidateVa(addr_t va)840a25542e7Smilek7 VMSAv8TranslationMap::ValidateVa(addr_t va)
841a25542e7Smilek7 {
842a25542e7Smilek7 uint64_t vaMask = (1UL << fVaBits) - 1;
843a25542e7Smilek7 bool kernelAddr = (va & (1UL << 63)) != 0;
844a25542e7Smilek7 if (kernelAddr != fIsKernel)
845a25542e7Smilek7 return false;
846a25542e7Smilek7 if ((va & ~vaMask) != (fIsKernel ? ~vaMask : 0))
847a25542e7Smilek7 return false;
848a25542e7Smilek7 return true;
849a25542e7Smilek7 }
850a25542e7Smilek7
851a25542e7Smilek7
status_t
VMSAv8TranslationMap::Query(addr_t va, phys_addr_t* pa, uint32* flags)
{
	// Default to "not mapped"; the lambda below only runs for entries
	// ProcessRange visits, so these stay zero for unmapped addresses.
	*flags = 0;
	*pa = 0;

	uint64_t pageMask = (1UL << fPageBits) - 1;
	va &= ~pageMask;

	ThreadCPUPinner pinner(thread_get_current_thread());
	ASSERT(ValidateVa(va));

	ProcessRange(fPageTable, fInitialLevel, va, B_PAGE_SIZE, nullptr,
		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
			// Decode the live PTE into the generic VM flag bits.
			uint64_t pte = atomic_get64((int64_t*)ptePtr);
			*pa = pte & kPteAddrMask;
			*flags |= PAGE_PRESENT | B_KERNEL_READ_AREA;
			if (is_pte_accessed(pte))
				*flags |= PAGE_ACCESSED;
			if (is_pte_dirty(pte))
				*flags |= PAGE_MODIFIED;

			// PXN clear => kernel-executable.
			if ((pte & kAttrPXN) == 0)
				*flags |= B_KERNEL_EXECUTE_AREA;

			// User access implies user-readable; UXN clear additionally
			// makes it user-executable.
			if ((pte & kAttrAPUserAccess) != 0) {
				*flags |= B_READ_AREA;
				if ((pte & kAttrUXN) == 0)
					*flags |= B_EXECUTE_AREA;
			}

			// The software-DBM bit marks the page as logically writable
			// (even while the hardware permission may be read-only until
			// the dirty state is set).
			if ((pte & kAttrSWDBM) != 0) {
				*flags |= B_KERNEL_WRITE_AREA;
				if ((pte & kAttrAPUserAccess) != 0)
					*flags |= B_WRITE_AREA;
			}
		});

	return B_OK;
}
892a25542e7Smilek7
893a25542e7Smilek7
status_t
VMSAv8TranslationMap::QueryInterrupt(
	addr_t virtualAddress, phys_addr_t* _physicalAddress, uint32* _flags)
{
	// Query() takes no locks (it only pins the current CPU and reads the
	// PTE atomically), so it can be reused directly for the
	// interrupt-context variant.
	return Query(virtualAddress, _physicalAddress, _flags);
}
900a25542e7Smilek7
901a25542e7Smilek7
status_t
VMSAv8TranslationMap::Protect(addr_t start, addr_t end, uint32 attributes, uint32 memoryType)
{
	TRACE("VMSAv8TranslationMap::Protect(0x%" B_PRIxADDR ", 0x%"
		B_PRIxADDR ", 0x%x, 0x%x)\n", start, end, attributes, memoryType);

	uint64_t attr = GetMemoryAttr(attributes, memoryType, fIsKernel);
	// `end` is inclusive.
	size_t size = end - start + 1;
	ASSERT(ValidateVa(start));

	ThreadCPUPinner pinner(thread_get_current_thread());

	ProcessRange(fPageTable, fInitialLevel, start, size, nullptr,
		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
			ASSERT(effectiveVa <= end);

			// We need to use an atomic compare-swap loop because we must
			// clear some bits while setting others.
			while (true) {
				uint64_t oldPte = atomic_get64((int64_t*)ptePtr);
				// Replace all attribute bits with the newly computed ones...
				uint64_t newPte = oldPte & ~kPteAttrMask;
				newPte |= attr;

				// Preserve access bit.
				newPte |= oldPte & kAttrAF;

				// Preserve the dirty bit.
				if (is_pte_dirty(oldPte))
					newPte = set_pte_dirty(newPte);

				uint64_t oldMemoryType = oldPte & (kAttrShareability | kAttrMemoryAttrIdx);
				uint64_t newMemoryType = newPte & (kAttrShareability | kAttrMemoryAttrIdx);
				if (oldMemoryType != newMemoryType) {
					// ARM64 requires "break-before-make". We must set the PTE to an invalid
					// entry and flush the TLB as appropriate before we can write the new PTE.
					// In this case specifically, it applies any time we change cacheability or
					// shareability.
					if (!AttemptPteBreakBeforeMake(ptePtr, oldPte, effectiveVa))
						continue;	// someone else raced us; re-read and retry

					atomic_set64((int64_t*)ptePtr, newPte);
					asm("dsb ishst"); // Ensure PTE write completed
					asm("isb");

					// No compare-exchange loop required in this case.
					break;
				} else {
					if ((uint64_t)atomic_test_and_set64((int64_t*)ptePtr, newPte, oldPte) == oldPte) {
						FlushVAIfAccessed(oldPte, effectiveVa);
						break;
					}
				}
			}
		});

	return B_OK;
}
959a25542e7Smilek7
960a25542e7Smilek7
status_t
VMSAv8TranslationMap::ClearFlags(addr_t va, uint32 flags)
{
	ASSERT(ValidateVa(va));

	// Clearing PAGE_ACCESSED means clearing the AF bit; clearing
	// PAGE_MODIFIED means marking the PTE clean (which, with software
	// dirty tracking, makes it read-only until the next write fault).
	bool clearAF = flags & PAGE_ACCESSED;
	bool setRO = flags & PAGE_MODIFIED;

	if (!clearAF && !setRO)
		return B_OK;

	ThreadCPUPinner pinner(thread_get_current_thread());

	ProcessRange(fPageTable, fInitialLevel, va, B_PAGE_SIZE, nullptr,
		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
			if (clearAF && setRO) {
				// We need to use an atomic compare-swap loop because we
				// must clear one bit while setting the other.
				while (true) {
					uint64_t oldPte = atomic_get64((int64_t*)ptePtr);
					uint64_t newPte = oldPte & ~kAttrAF;
					newPte = set_pte_clean(newPte);

					if ((uint64_t)atomic_test_and_set64((int64_t*)ptePtr, newPte, oldPte) == oldPte) {
						FlushVAIfAccessed(oldPte, va);
						break;
					}
				}
			} else if (clearAF) {
				// Only one bit to clear: a plain atomic AND suffices.
				atomic_and64((int64_t*)ptePtr, ~kAttrAF);
			} else {
				// Only clearing the dirty state; skip entirely (and skip
				// the TLB flush) if the PTE is already clean.
				while (true) {
					uint64_t oldPte = atomic_get64((int64_t*)ptePtr);
					if (!is_pte_dirty(oldPte))
						return;
					uint64_t newPte = set_pte_clean(oldPte);
					if ((uint64_t)atomic_test_and_set64((int64_t*)ptePtr, newPte, oldPte) == oldPte) {
						FlushVAIfAccessed(oldPte, va);
						break;
					}
				}
			}
		});

	return B_OK;
}
1007a25542e7Smilek7
1008a25542e7Smilek7
bool
VMSAv8TranslationMap::ClearAccessedAndModified(
	VMArea* area, addr_t address, bool unmapIfUnaccessed, bool& _modified)
{
	TRACE("VMSAv8TranslationMap::ClearAccessedAndModified(0x%"
		B_PRIxADDR "(%s), 0x%" B_PRIxADDR ", %d)\n", (addr_t)area,
		area->name, address, unmapIfUnaccessed);
	ASSERT(ValidateVa(address));

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	uint64_t oldPte = 0;
	ProcessRange(fPageTable, fInitialLevel, address, B_PAGE_SIZE, nullptr,
		[=, &oldPte](uint64_t* ptePtr, uint64_t effectiveVa) {
			// We need to use an atomic compare-swap loop because we must
			// first read the old PTE and make decisions based on the AF
			// bit to proceed.
			while (true) {
				oldPte = atomic_get64((int64_t*)ptePtr);
				uint64_t newPte = oldPte & ~kAttrAF;
				newPte = set_pte_clean(newPte);

				// If the page has not been accessed, unmap it.
				if (unmapIfUnaccessed && (oldPte & kAttrAF) == 0)
					newPte = 0;

				if ((uint64_t)atomic_test_and_set64((int64_t*)ptePtr, newPte, oldPte) == oldPte)
					break;
			}
			asm("dsb ishst"); // Ensure PTE write completed
		});

	pinner.Unlock();
	_modified = is_pte_dirty(oldPte);

	// Returns true iff the page was accessed (a TLB flush was performed),
	// mirroring the flush condition.
	if (FlushVAIfAccessed(oldPte, address))
		return true;

	// Not accessed. If it was also unmapped above, hand the page back to
	// the generic VM bookkeeping.
	if (!unmapIfUnaccessed)
		return false;

	locker.Detach(); // UnaccessedPageUnmapped takes ownership
	phys_addr_t oldPa = oldPte & kPteAddrMask;
	UnaccessedPageUnmapped(area, oldPa >> fPageBits);
	return false;
}
1056a25542e7Smilek7
1057a25542e7Smilek7
void
VMSAv8TranslationMap::Flush()
{
	// Intentionally empty: the map flushes TLB entries eagerly as PTEs are
	// modified (see FlushVAIfAccessed() callers above), so there is no
	// deferred invalidation to perform here.
	// Necessary invalidation is performed during mapping,
	// no need to do anything more here.
}
1064