xref: /haiku/src/system/kernel/arch/arm64/VMSAv8TranslationMap.cpp (revision 6a2d53e7237764eab0c7b6d121772f26d636fb60)
1 /*
2  * Copyright 2022 Haiku, Inc. All Rights Reserved.
3  * Distributed under the terms of the MIT License.
4  */
5 #include "VMSAv8TranslationMap.h"
6 
7 #include <algorithm>
8 #include <util/AutoLock.h>
9 #include <util/ThreadAutoLock.h>
10 #include <vm/vm_page.h>
11 #include <vm/vm_priv.h>
12 
13 
14 //#define DO_TRACE
15 #ifdef DO_TRACE
16 #	define TRACE(x...) dprintf(x)
17 #else
18 #	define TRACE(x...) ;
19 #endif
20 
21 
22 uint32_t VMSAv8TranslationMap::fHwFeature;
23 uint64_t VMSAv8TranslationMap::fMair;
24 
25 // ASID Management
26 static constexpr size_t kAsidBits = 8;
27 static constexpr size_t kNumAsids = (1 << kAsidBits);
28 static spinlock sAsidLock = B_SPINLOCK_INITIALIZER;
29 // A bitmap to track which ASIDs are in use.
30 static uint64 sAsidBitMap[kNumAsids / 64] = {};
31 // A mapping from ASID to translation map.
32 static VMSAv8TranslationMap* sAsidMapping[kNumAsids] = {};
33 
34 
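// Mark the given ASID as unused in the allocation bitmap.
// The caller must hold sAsidLock.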
35 static void
36 free_asid(size_t asid)
37 {
38 	for (size_t i = 0; i < B_COUNT_OF(sAsidBitMap); ++i) {
39 		if (asid < 64) {
40 			sAsidBitMap[i] &= ~(uint64_t{1} << asid);
41 			return;
42 		}
43 		asid -= 64;
44 	}
45 
46 	panic("Could not free ASID!");
47 }
48 
49 
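// Invalidate all TLB entries tagged with the given ASID, on every CPU in the
// Inner Shareable domain.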
50 static void
51 flush_tlb_whole_asid(uint64_t asid)
52 {
53 	asm("dsb ishst");
54 	asm("tlbi aside1is, %0" ::"r"(asid << 48));
55 	asm("dsb ish");
56 	asm("isb");
57 }
58 
59 
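// Find the lowest free ASID in the bitmap, mark it as used and return it.
// Returns kNumAsids when every ASID is already taken.
// The caller must hold sAsidLock.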
60 static size_t
61 alloc_first_free_asid(void)
62 {
63 	int asid = 0;
64 	for (size_t i = 0; i < B_COUNT_OF(sAsidBitMap); ++i) {
65 		int avail = __builtin_ffsll(~sAsidBitMap[i]);
66 		if (avail != 0) {
67 			sAsidBitMap[i] |= (uint64_t{1} << (avail-1));
68 			asid += (avail - 1);
69 			return asid;
70 		}
71 		asid += 64;
72 	}
73 
74 	return kNumAsids;
75 }
76 
77 
78 VMSAv8TranslationMap::VMSAv8TranslationMap(
79 	bool kernel, phys_addr_t pageTable, int pageBits, int vaBits, int minBlockLevel)
80 	:
81 	fIsKernel(kernel),
82 	fPageTable(pageTable),
83 	fPageBits(pageBits),
84 	fVaBits(vaBits),
85 	fMinBlockLevel(minBlockLevel),
86 	fASID(kernel ? 0 : -1),
87 	fRefcount(0)
88 {
89 	TRACE("+VMSAv8TranslationMap(%p, %d, 0x%" B_PRIxADDR ", %d, %d, %d)\n", this,
90 		kernel, pageTable, pageBits, vaBits, minBlockLevel);
91 
92 	if (kernel) {
93 		// ASID 0 is reserved for the kernel.
94 		InterruptsSpinLocker locker(sAsidLock);
95 		sAsidMapping[0] = this;
96 		sAsidBitMap[0] |= 1;
97 	}
98 
99 	fInitialLevel = CalcStartLevel(fVaBits, fPageBits);
100 }
101 
102 
103 VMSAv8TranslationMap::~VMSAv8TranslationMap()
104 {
105 	TRACE("-VMSAv8TranslationMap(%p)\n", this);
106 	TRACE("  fIsKernel: %d, fPageTable: 0x%" B_PRIxADDR ", fASID: %d, fRefcount: %d\n",
107 		fIsKernel, fPageTable, fASID, fRefcount);
108 
109 	ASSERT(!fIsKernel);
110 	ASSERT(fRefcount == 0);
111 	{
112 		ThreadCPUPinner pinner(thread_get_current_thread());
113 		FreeTable(fPageTable, 0, fInitialLevel, [](int level, uint64_t oldPte) {});
114 	}
115 
116 	{
117 		InterruptsSpinLocker locker(sAsidLock);
118 
119 		if (fASID != -1) {
120 			sAsidMapping[fASID] = NULL;
121 			free_asid(fASID);
122 		}
123 	}
124 }
125 
126 
127 // Switch the user address space map into TTBR0.
128 // Passing the kernel map here installs an empty page table instead.
129 void
130 VMSAv8TranslationMap::SwitchUserMap(VMSAv8TranslationMap *from, VMSAv8TranslationMap *to)
131 {
132 	InterruptsSpinLocker locker(sAsidLock);
133 
134 	if (!from->fIsKernel) {
135 		from->fRefcount--;
136 	}
137 
138 	if (!to->fIsKernel) {
139 		to->fRefcount++;
140 	} else {
141 		arch_vm_install_empty_table_ttbr0();
142 		return;
143 	}
144 
145 	ASSERT(to->fPageTable != 0);
146 	uint64_t ttbr = to->fPageTable | ((fHwFeature & HW_COMMON_NOT_PRIVATE) != 0 ? 1 : 0);
147 
148 	if (to->fASID != -1) {
149 		WRITE_SPECIALREG(TTBR0_EL1, ((uint64_t)to->fASID << 48) | ttbr);
150 		asm("isb");
151 		return;
152 	}
153 
154 	size_t allocatedAsid = alloc_first_free_asid();
155 	if (allocatedAsid != kNumAsids) {
156 		to->fASID = allocatedAsid;
157 		sAsidMapping[allocatedAsid] = to;
158 
159 		WRITE_SPECIALREG(TTBR0_EL1, (allocatedAsid << 48) | ttbr);
160 		flush_tlb_whole_asid(allocatedAsid);
161 		return;
162 	}
163 
164 	// All ASIDs are taken; steal one from an inactive map (ASID 0 is reserved for the kernel).
165 	for (size_t i = 1; i < kNumAsids; ++i) {
166 		if (sAsidMapping[i]->fRefcount == 0) {
167 			sAsidMapping[i]->fASID = -1;
168 			to->fASID = i;
169 			sAsidMapping[i] = to;
170 
171 			WRITE_SPECIALREG(TTBR0_EL1, (i << 48) | ttbr);
172 			flush_tlb_whole_asid(i);
173 			return;
174 		}
175 	}
176 
177 	panic("cannot assign ASID");
178 }
179 
180 
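// Determine the top level of the page-table walk: each table level resolves
// (pageBits - 3) bits of virtual address above the page offset.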
181 int
182 VMSAv8TranslationMap::CalcStartLevel(int vaBits, int pageBits)
183 {
184 	int level = 4;
185 
186 	int bitsLeft = vaBits - pageBits;
187 	while (bitsLeft > 0) {
188 		int tableBits = pageBits - 3;
189 		bitsLeft -= tableBits;
190 		level--;
191 	}
192 
193 	ASSERT(level >= 0);
194 
195 	return level;
196 }
197 
198 
199 bool
200 VMSAv8TranslationMap::Lock()
201 {
202 	TRACE("VMSAv8TranslationMap::Lock()\n");
203 	recursive_lock_lock(&fLock);
204 	return true;
205 }
206 
207 
208 void
209 VMSAv8TranslationMap::Unlock()
210 {
211 	TRACE("VMSAv8TranslationMap::Unlock()\n");
212 	if (recursive_lock_get_recursion(&fLock) == 1) {
213 		// we're about to release it for the last time
214 		Flush();
215 	}
216 	recursive_lock_unlock(&fLock);
217 }
218 
219 
220 addr_t
221 VMSAv8TranslationMap::MappedSize() const
222 {
223 	panic("VMSAv8TranslationMap::MappedSize not implemented");
224 	return 0;
225 }
226 
227 
228 size_t
229 VMSAv8TranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
230 {
231 	size_t result = 0;
232 	size_t size = end - start + 1;
233 
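	// For every intermediate level, one table may be needed per entry-sized
	// chunk of the range, plus up to two extra tables for the unaligned ends.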
234 	for (int i = fInitialLevel; i < 3; i++) {
235 		int tableBits = fPageBits - 3;
236 		int shift = tableBits * (3 - i) + fPageBits;
237 		uint64_t entrySize = 1UL << shift;
238 
239 		result += size / entrySize + 2;
240 	}
241 
242 	return result;
243 }
244 
245 
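// Page tables are accessed through the kernel's physical memory mapping
// at KERNEL_PMAP_BASE.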
246 uint64_t*
247 VMSAv8TranslationMap::TableFromPa(phys_addr_t pa)
248 {
249 	return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
250 }
251 
252 
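// Recursively clear a translation table: invalidate the TLB for every removed
// leaf entry, call entryRemoved for it, descend into sub-tables, and finally
// free the table page itself.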
253 template<typename EntryRemoved>
254 void
255 VMSAv8TranslationMap::FreeTable(phys_addr_t ptPa, uint64_t va, int level,
256 	EntryRemoved &&entryRemoved)
257 {
258 	ASSERT(level < 4);
259 
260 	int tableBits = fPageBits - 3;
261 	uint64_t tableSize = 1UL << tableBits;
262 	uint64_t vaMask = (1UL << fVaBits) - 1;
263 
264 	int shift = tableBits * (3 - level) + fPageBits;
265 	uint64_t entrySize = 1UL << shift;
266 
267 	uint64_t nextVa = va;
268 	uint64_t* pt = TableFromPa(ptPa);
269 	for (uint64_t i = 0; i < tableSize; i++) {
270 		uint64_t oldPte = (uint64_t) atomic_get_and_set64((int64*) &pt[i], 0);
271 
272 		if (level < 3 && (oldPte & kPteTypeMask) == kPteTypeL012Table) {
273 			FreeTable(oldPte & kPteAddrMask, nextVa, level + 1, entryRemoved);
274 		} else if ((oldPte & kPteTypeMask) != 0) {
275 			uint64_t fullVa = (fIsKernel ? ~vaMask : 0) | nextVa;
276 			asm("dsb ishst");
277 			asm("tlbi vaae1is, %0" :: "r" ((fullVa >> 12) & kTLBIMask));
278 			// Does it correctly flush block entries at level < 3? We don't use them anyway though.
279 			// TODO: Flush only currently used ASID (using vae1is)
280 			entryRemoved(level, oldPte);
281 		}
282 
283 		nextVa += entrySize;
284 	}
285 
286 	asm("dsb ish");
287 
288 	vm_page* page = vm_lookup_page(ptPa >> fPageBits);
289 	DEBUG_PAGE_ACCESS_START(page);
290 	vm_page_set_state(page, PAGE_STATE_FREE);
291 }
292 
293 
294 // Make a new page sub-table.
295 // The parent table is `ptPa`, and the new sub-table's PTE will be at `index`
296 // in it.
297 // Returns the physical address of the new table, or the address of the existing
298 // one if the PTE is already filled.
299 phys_addr_t
300 VMSAv8TranslationMap::GetOrMakeTable(phys_addr_t ptPa, int level, int index,
301 	vm_page_reservation* reservation)
302 {
303 	ASSERT(level < 3);
304 
305 	uint64_t* ptePtr = TableFromPa(ptPa) + index;
306 	uint64_t oldPte = atomic_get64((int64*) ptePtr);
307 
308 	int type = oldPte & kPteTypeMask;
309 	if (type == kPteTypeL012Table) {
310 		// This is already a table entry; just return it
311 		return oldPte & kPteAddrMask;
312 	} else if (reservation != nullptr) {
313 		// Create a new table there
314 		vm_page* page = vm_page_allocate_page(reservation, PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
315 		phys_addr_t newTablePa = page->physical_page_number << fPageBits;
316 		DEBUG_PAGE_ACCESS_END(page);
317 
318 		// We only create mappings at the final level, so we don't need to handle
319 		// splitting block mappings.
320 		ASSERT(type != kPteTypeL12Block);
321 
322 		// Ensure that writes to the page being attached have completed.
323 		asm("dsb ishst");
324 
325 		uint64_t oldPteRefetch = (uint64_t)atomic_test_and_set64((int64*) ptePtr,
326 			newTablePa | kPteTypeL012Table, oldPte);
327 		if (oldPteRefetch != oldPte) {
328 			// If the old PTE has mutated, it must be because another thread allocated the
329 			// sub-table at the same time as us. If so, deallocate the page we set up
330 			// and use the one they installed instead.
331 			ASSERT((oldPteRefetch & kPteTypeMask) == kPteTypeL012Table);
332 			DEBUG_PAGE_ACCESS_START(page);
333 			vm_page_set_state(page, PAGE_STATE_FREE);
334 			return oldPteRefetch & kPteAddrMask;
335 		}
336 
337 		return newTablePa;
338 	}
339 
340 	// There's no existing table and we have no reservation
341 	return 0;
342 }
343 
344 
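// Invalidate the TLB entry for a single page in this map's ASID.
// Does nothing if no ASID has been assigned to this map yet.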
345 void
346 VMSAv8TranslationMap::FlushVAFromTLBByASID(addr_t va)
347 {
348 	InterruptsSpinLocker locker(sAsidLock);
349 	if (fASID != -1) {
350 		asm("tlbi vae1is, %0" ::"r"(((va >> 12) & kTLBIMask) | (uint64_t(fASID) << 48)));
351 		asm("dsb ish"); // Wait for TLB flush to complete
352 	}
353 }
354 
355 
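// First half of the ARM64 "break-before-make" sequence: write an invalid PTE,
// make that write visible, and flush the stale translation from the TLB
// before the new PTE is installed.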
356 void
357 VMSAv8TranslationMap::PerformPteBreakBeforeMake(uint64_t* ptePtr, addr_t va)
358 {
359 	atomic_set64((int64*)ptePtr, 0);
360 	asm("dsb ishst"); // Ensure PTE write completed
361 	FlushVAFromTLBByASID(va);
362 }
363 
364 
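// Walk the page tables covering [va, va + size), starting at `level` in the
// table at ptPa. Intermediate tables are created on demand when a reservation
// is supplied, and updatePte is invoked for every level-3 PTE in the range.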
365 template<typename UpdatePte>
366 void
367 VMSAv8TranslationMap::ProcessRange(phys_addr_t ptPa, int level, addr_t va, size_t size,
368 	vm_page_reservation* reservation, UpdatePte&& updatePte)
369 {
370 	ASSERT(level < 4);
371 	ASSERT(ptPa != 0);
372 
373 	int tableBits = fPageBits - 3;
374 	uint64_t tableMask = (1UL << tableBits) - 1;
375 
376 	int shift = tableBits * (3 - level) + fPageBits;
377 	uint64_t entrySize = 1UL << shift;
378 	uint64_t entryMask = entrySize - 1;
379 
380 	uint64_t alignedDownVa = va & ~entryMask;
381 	uint64_t alignedUpEnd = (va + size + (entrySize - 1)) & ~entryMask;
382 	if (level == 3)
383 		ASSERT(alignedDownVa == va);
384 
385 	for (uint64_t effectiveVa = alignedDownVa; effectiveVa < alignedUpEnd;
386 			effectiveVa += entrySize) {
387 		int index = (effectiveVa >> shift) & tableMask;
388 		uint64_t* ptePtr = TableFromPa(ptPa) + index;
389 
390 		if (level == 3) {
391 			updatePte(ptePtr, effectiveVa);
392 		} else {
393 			phys_addr_t subTable = GetOrMakeTable(ptPa, level, index, reservation);
394 
395 			// When reservation is null, we can't create a new subtable. This can be intentional,
396 			// for example when called from Unmap().
397 			if (subTable == 0)
398 				continue;
399 
400 			uint64_t subVa = std::max(effectiveVa, va);
401 			size_t subSize = std::min(size_t(entrySize - (subVa & entryMask)), size);
402 			ProcessRange(subTable, level + 1, subVa, subSize, reservation, updatePte);
403 
404 			size -= subSize;
405 		}
406 	}
407 }
408 
409 
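// Return the index of the MAIR_EL1 attribute byte matching the given memory type.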
410 uint8_t
411 VMSAv8TranslationMap::MairIndex(uint8_t type)
412 {
413 	for (int i = 0; i < 8; i++)
414 		if (((fMair >> (i * 8)) & 0xff) == type)
415 			return i;
416 
417 	panic("MAIR entry not found");
418 	return 0;
419 }
420 
421 
422 uint64_t
423 VMSAv8TranslationMap::GetMemoryAttr(uint32 attributes, uint32 memoryType, bool isKernel)
424 {
425 	uint64_t attr = 0;
426 
427 	if (!isKernel)
428 		attr |= kAttrNG;
429 
430 	if ((attributes & B_EXECUTE_AREA) == 0)
431 		attr |= kAttrUXN;
432 	if ((attributes & B_KERNEL_EXECUTE_AREA) == 0)
433 		attr |= kAttrPXN;
434 
435 	// SWDBM is a software-reserved bit that we use to mark that writes are allowed,
436 	// and that the fault handler should clear kAttrAPReadOnly on a write fault.
437 	// In that case kAttrAPReadOnly doubles as a not-dirty bit.
438 	// Additionally, the dirty state can be stored in SWDIRTY, in order not to lose
439 	// it when changing protection from RW to RO.
440 
441 	// All page permissions begin life in RO state.
442 	attr |= kAttrAPReadOnly;
443 
444 	// User-Execute implies User-Read, because it would break PAN otherwise
445 	if ((attributes & B_READ_AREA) != 0 || (attributes & B_EXECUTE_AREA) != 0)
446 		attr |= kAttrAPUserAccess; // Allow user reads
447 
448 	if ((attributes & B_WRITE_AREA) != 0 || (attributes & B_KERNEL_WRITE_AREA) != 0)
449 		attr |= kAttrSWDBM; // Mark as writeable
450 
451 	// When supported by the hardware, copy our SWDBM bit into DBM, so that
452 	// kAttrAPReadOnly is cleared automatically on a write attempt, without
453 	// going through the fault handler.
454 	if ((fHwFeature & HW_DIRTY) != 0 && (attr & kAttrSWDBM) != 0)
455 		attr |= kAttrDBM;
456 
457 	attr |= kAttrSHInnerShareable; // Inner Shareable
458 
459 	uint8_t type = MAIR_NORMAL_WB;
460 
461 	switch (memoryType & B_MTR_MASK) {
462 		case B_MTR_UC:
463 			// TODO: This probably should be nGnRE for PCI
464 			type = MAIR_DEVICE_nGnRnE;
465 			break;
466 		case B_MTR_WC:
467 			type = MAIR_DEVICE_GRE;
468 			break;
469 		case B_MTR_WT:
470 			type = MAIR_NORMAL_WT;
471 			break;
472 		case B_MTR_WP:
473 			type = MAIR_NORMAL_WT;
474 			break;
475 		default:
476 		case B_MTR_WB:
477 			type = MAIR_NORMAL_WB;
478 			break;
479 	}
480 
481 	attr |= MairIndex(type) << 2;
482 
483 	return attr;
484 }
485 
486 
487 status_t
488 VMSAv8TranslationMap::Map(addr_t va, phys_addr_t pa, uint32 attributes, uint32 memoryType,
489 	vm_page_reservation* reservation)
490 {
491 	TRACE("VMSAv8TranslationMap::Map(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
492 		", 0x%x, 0x%x)\n", va, pa, attributes, memoryType);
493 
494 	ThreadCPUPinner pinner(thread_get_current_thread());
495 
496 	uint64_t pageMask = (1UL << fPageBits) - 1;
497 	uint64_t vaMask = (1UL << fVaBits) - 1;
498 
499 	ASSERT((va & pageMask) == 0);
500 	ASSERT((pa & pageMask) == 0);
501 	ASSERT(ValidateVa(va));
502 
503 	uint64_t attr = GetMemoryAttr(attributes, memoryType, fIsKernel);
504 
505 	// On the first mapping we need to allocate the root table.
506 	if (fPageTable == 0) {
507 		vm_page* page = vm_page_allocate_page(reservation, PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
508 		DEBUG_PAGE_ACCESS_END(page);
509 		fPageTable = page->physical_page_number << fPageBits;
510 	}
511 
512 	ProcessRange(fPageTable, 0, va & vaMask, B_PAGE_SIZE, reservation,
513 		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
514 			phys_addr_t effectivePa = effectiveVa - (va & vaMask) + pa;
515 			uint64_t oldPte = atomic_get64((int64*)ptePtr);
516 			uint64_t newPte = effectivePa | attr | kPteTypeL3Page;
517 
518 			if (newPte == oldPte)
519 				return;
520 
521 			if ((newPte & kPteValidMask) != 0 && (oldPte & kPteValidMask) != 0) {
522 				// ARM64 requires "break-before-make". We must set the PTE to an invalid
523 				// entry and flush the TLB as appropriate before we can write the new PTE.
524 				PerformPteBreakBeforeMake(ptePtr, effectiveVa);
525 			}
526 
527 			// Install the new PTE
528 			atomic_set64((int64*)ptePtr, newPte);
529 			asm("dsb ishst"); // Ensure PTE write completed
530 		});
531 
532 	return B_OK;
533 }
534 
535 
536 status_t
537 VMSAv8TranslationMap::Unmap(addr_t start, addr_t end)
538 {
539 	TRACE("VMSAv8TranslationMap::Unmap(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
540 		")\n", start, end);
541 	ThreadCPUPinner pinner(thread_get_current_thread());
542 
543 	size_t size = end - start + 1;
544 
545 	uint64_t pageMask = (1UL << fPageBits) - 1;
546 	uint64_t vaMask = (1UL << fVaBits) - 1;
547 
548 	ASSERT((start & pageMask) == 0);
549 	ASSERT((size & pageMask) == 0);
550 	ASSERT(ValidateVa(start));
551 
552 	if (fPageTable == 0)
553 		return B_OK;
554 
555 	ProcessRange(fPageTable, 0, start & vaMask, size, nullptr,
556 		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
557 			uint64_t oldPte = atomic_and64((int64_t*)ptePtr, ~kPteValidMask);
558 			if ((oldPte & kPteValidMask) != 0) {
559 				asm("dsb ishst"); // Ensure PTE write completed
560 				FlushVAFromTLBByASID(effectiveVa);
561 			}
562 		});
563 
564 	return B_OK;
565 }
566 
567 
568 status_t
569 VMSAv8TranslationMap::UnmapPage(VMArea* area, addr_t address, bool updatePageQueue)
570 {
571 	TRACE("VMSAv8TranslationMap::UnmapPage(0x%" B_PRIxADDR "(%s), 0x%"
572 		B_PRIxADDR ", %d)\n", (addr_t)area, area->name, address,
573 		updatePageQueue);
574 
575 	uint64_t pageMask = (1UL << fPageBits) - 1;
576 	uint64_t vaMask = (1UL << fVaBits) - 1;
577 
578 	ASSERT((address & pageMask) == 0);
579 	ASSERT(ValidateVa(address));
580 
581 	ThreadCPUPinner pinner(thread_get_current_thread());
582 	RecursiveLocker locker(fLock);
583 
584 	uint64_t oldPte = 0;
585 	ProcessRange(fPageTable, 0, address & vaMask, B_PAGE_SIZE, nullptr,
586 		[=, &oldPte](uint64_t* ptePtr, uint64_t effectiveVa) {
587 			oldPte = atomic_get_and_set64((int64_t*)ptePtr, 0);
588 			asm("dsb ishst");
589 			if ((oldPte & kAttrAF) != 0)
590 				FlushVAFromTLBByASID(effectiveVa);
591 		});
592 
593 	if ((oldPte & kPteValidMask) == 0)
594 		return B_ENTRY_NOT_FOUND;
595 
596 	pinner.Unlock();
597 	locker.Detach();
598 	PageUnmapped(area, (oldPte & kPteAddrMask) >> fPageBits, (oldPte & kAttrAF) != 0,
599 		(oldPte & kAttrAPReadOnly) == 0, updatePageQueue);
600 
601 	return B_OK;
602 }
603 
604 
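// Check that the address belongs to the half of the address space covered by
// this map: kernel addresses must have all bits above fVaBits set, user
// addresses must have them clear.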
605 bool
606 VMSAv8TranslationMap::ValidateVa(addr_t va)
607 {
608 	uint64_t vaMask = (1UL << fVaBits) - 1;
609 	bool kernelAddr = (va & (1UL << 63)) != 0;
610 	if (kernelAddr != fIsKernel)
611 		return false;
612 	if ((va & ~vaMask) != (fIsKernel ? ~vaMask : 0))
613 		return false;
614 	return true;
615 }
616 
617 
618 status_t
619 VMSAv8TranslationMap::Query(addr_t va, phys_addr_t* pa, uint32* flags)
620 {
621 	*flags = 0;
622 	*pa = 0;
623 
624 	ThreadCPUPinner pinner(thread_get_current_thread());
625 
626 	uint64_t pageMask = (1UL << fPageBits) - 1;
627 	uint64_t vaMask = (1UL << fVaBits) - 1;
628 
629 	va &= ~pageMask;
630 	ASSERT(ValidateVa(va));
631 
632 	ProcessRange(fPageTable, 0, va & vaMask, B_PAGE_SIZE, nullptr,
633 		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
634 			uint64_t pte = atomic_get64((int64_t*)ptePtr);
635 			*pa = pte & kPteAddrMask;
636 			*flags |= PAGE_PRESENT | B_KERNEL_READ_AREA;
637 			if ((pte & kAttrAF) != 0)
638 				*flags |= PAGE_ACCESSED;
639 			if ((pte & kAttrAPReadOnly) == 0)
640 				*flags |= PAGE_MODIFIED;
641 
642 			if ((pte & kAttrUXN) == 0)
643 				*flags |= B_EXECUTE_AREA;
644 			if ((pte & kAttrPXN) == 0)
645 				*flags |= B_KERNEL_EXECUTE_AREA;
646 
647 			if ((pte & kAttrAPUserAccess) != 0)
648 				*flags |= B_READ_AREA;
649 
650 			if ((pte & kAttrAPReadOnly) == 0 || (pte & kAttrSWDBM) != 0) {
651 				*flags |= B_KERNEL_WRITE_AREA;
652 				if ((pte & kAttrAPUserAccess) != 0)
653 					*flags |= B_WRITE_AREA;
654 			}
655 		});
656 
657 	return B_OK;
658 }
659 
660 
661 status_t
662 VMSAv8TranslationMap::QueryInterrupt(
663 	addr_t virtualAddress, phys_addr_t* _physicalAddress, uint32* _flags)
664 {
665 	return Query(virtualAddress, _physicalAddress, _flags);
666 }
667 
668 
669 status_t
670 VMSAv8TranslationMap::Protect(addr_t start, addr_t end, uint32 attributes, uint32 memoryType)
671 {
672 	TRACE("VMSAv8TranslationMap::Protect(0x%" B_PRIxADDR ", 0x%"
673 		B_PRIxADDR ", 0x%x, 0x%x)\n", start, end, attributes, memoryType);
674 
675 	ThreadCPUPinner pinner(thread_get_current_thread());
676 	uint64_t attr = GetMemoryAttr(attributes, memoryType, fIsKernel);
677 	size_t size = end - start + 1;
678 
679 	uint64_t pageMask = (1UL << fPageBits) - 1;
680 	uint64_t vaMask = (1UL << fVaBits) - 1;
681 
682 	ASSERT((start & pageMask) == 0);
683 	ASSERT((size & pageMask) == 0);
684 	ASSERT(ValidateVa(start));
685 
686 	ProcessRange(fPageTable, 0, start & vaMask, size, nullptr,
687 		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
688 			// We need to use an atomic compare-and-swap loop because we need
689 			// to clear some bits while setting others.
690 			while (true) {
691 				uint64_t oldPte = atomic_get64((int64_t*)ptePtr);
692 				uint64_t newPte = oldPte & ~kPteAttrMask;
693 				newPte |= attr;
694 
695 				if ((uint64_t)atomic_test_and_set64((int64_t*)ptePtr, newPte, oldPte) == oldPte) {
696 					asm("dsb ishst"); // Ensure PTE write completed
697 					if ((oldPte & kAttrAF) != 0)
698 						FlushVAFromTLBByASID(effectiveVa);
699 					break;
700 				}
701 			}
702 		});
703 
704 	return B_OK;
705 }
706 
707 
708 status_t
709 VMSAv8TranslationMap::ClearFlags(addr_t va, uint32 flags)
710 {
711 	ThreadCPUPinner pinner(thread_get_current_thread());
712 
713 	uint64_t pageMask = (1UL << fPageBits) - 1;
714 	uint64_t vaMask = (1UL << fVaBits) - 1;
715 
716 	ASSERT((va & pageMask) == 0);
717 	ASSERT(ValidateVa(va));
718 
719 	bool clearAF = flags & kAttrAF;
720 	bool setRO = flags & kAttrAPReadOnly;
721 
722 	if (!clearAF && !setRO)
723 		return B_OK;
724 
725 	ProcessRange(fPageTable, 0, va & vaMask, B_PAGE_SIZE, nullptr,
726 		[=](uint64_t* ptePtr, uint64_t effectiveVa) {
727 			if (clearAF && setRO) {
728 				// We need to use an atomic compare-and-swap loop because we need
729 				// to clear one bit while setting the other.
730 				while (true) {
731 					uint64_t oldPte = atomic_get64((int64_t*)ptePtr);
732 					uint64_t newPte = oldPte & ~kAttrAF;
733 					newPte |= kAttrAPReadOnly;
734 
735 					if ((uint64_t)atomic_test_and_set64((int64_t*)ptePtr, newPte, oldPte) == oldPte)
736 						break;
737 				}
738 			} else if (clearAF) {
739 				atomic_and64((int64_t*)ptePtr, ~kAttrAF);
740 			} else {
741 				atomic_or64((int64_t*)ptePtr, kAttrAPReadOnly);
742 			}
743 			asm("dsb ishst"); // Ensure PTE write completed
744 		});
745 
746 	FlushVAFromTLBByASID(va);
747 	return B_OK;
748 }
749 
750 
751 bool
752 VMSAv8TranslationMap::ClearAccessedAndModified(
753 	VMArea* area, addr_t address, bool unmapIfUnaccessed, bool& _modified)
754 {
755 	TRACE("VMSAv8TranslationMap::ClearAccessedAndModified(0x%"
756 		B_PRIxADDR "(%s), 0x%" B_PRIxADDR ", %d)\n", (addr_t)area,
757 		area->name, address, unmapIfUnaccessed);
758 
759 	RecursiveLocker locker(fLock);
760 	ThreadCPUPinner pinner(thread_get_current_thread());
761 
762 	uint64_t pageMask = (1UL << fPageBits) - 1;
763 	uint64_t vaMask = (1UL << fVaBits) - 1;
764 
765 	ASSERT((address & pageMask) == 0);
766 	ASSERT(ValidateVa(address));
767 
768 	uint64_t oldPte = 0;
769 	ProcessRange(fPageTable, 0, address & vaMask, B_PAGE_SIZE, nullptr,
770 		[=, &_modified, &oldPte](uint64_t* ptePtr, uint64_t effectiveVa) {
771 			// We need to use an atomic compare-and-swap loop because we must
772 			// first read the old PTE and decide, based on its AF bit, what the
773 			// new PTE should be.
774 			while (true) {
775 				oldPte = atomic_get64((int64_t*)ptePtr);
776 				uint64_t newPte = oldPte & ~kAttrAF;
777 				newPte |= kAttrAPReadOnly;
778 
779 				// If the page has not been accessed, unmap it.
780 				if (unmapIfUnaccessed && (oldPte & kAttrAF) == 0)
781 					newPte = 0;
782 
783 				if ((uint64_t)atomic_test_and_set64((int64_t*)ptePtr, newPte, oldPte) == oldPte)
784 					break;
785 			}
786 			asm("dsb ishst"); // Ensure PTE write completed
787 		});
788 
789 	pinner.Unlock();
790 	_modified = (oldPte & kAttrAPReadOnly) == 0;
791 	if ((oldPte & kAttrAF) != 0) {
792 		FlushVAFromTLBByASID(address);
793 		return true;
794 	}
795 
796 	if (!unmapIfUnaccessed)
797 		return false;
798 
799 	fMapCount--;
800 
801 	locker.Detach(); // UnaccessedPageUnmapped takes ownership
802 	phys_addr_t oldPa = oldPte & kPteAddrMask;
803 	UnaccessedPageUnmapped(area, oldPa >> fPageBits);
804 	return false;
805 }
806 
807 
808 void
809 VMSAv8TranslationMap::Flush()
810 {
811 	ThreadCPUPinner pinner(thread_get_current_thread());
812 
813 	arch_cpu_global_TLB_invalidate();
814 }
815