/*
 * Copyright 2022 Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */
#include "VMSAv8TranslationMap.h"

#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>


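// VMSA v8-A descriptor layout (4 KiB granule): bits [47:12] hold the output
// address, bits [1:0] the descriptor type (0b11 = table/page, 0b01 = block),
// and the remaining bits carry attributes.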
static constexpr uint64_t kPteAddrMask = (((1UL << 36) - 1) << 12);
static constexpr uint64_t kPteAttrMask = ~(kPteAddrMask | 0x3);

static constexpr uint64_t kAttrSWDBM = (1UL << 55);
static constexpr uint64_t kAttrUXN = (1UL << 54);
static constexpr uint64_t kAttrPXN = (1UL << 53);
static constexpr uint64_t kAttrDBM = (1UL << 51);
static constexpr uint64_t kAttrNG = (1UL << 11);
static constexpr uint64_t kAttrAF = (1UL << 10);
static constexpr uint64_t kAttrSH1 = (1UL << 9);
static constexpr uint64_t kAttrSH0 = (1UL << 8);
static constexpr uint64_t kAttrAP2 = (1UL << 7);
static constexpr uint64_t kAttrAP1 = (1UL << 6);

// Stash for the PTE removed by an UNMAP pass, read back by UnmapPage().
// Declared here because no definition is visible elsewhere in this file;
// it is required for MapRange() and UnmapPage() to compile.
static uint64_t tmp_pte;

uint32_t VMSAv8TranslationMap::fHwFeature;
uint64_t VMSAv8TranslationMap::fMair;


VMSAv8TranslationMap::VMSAv8TranslationMap(
	bool kernel, phys_addr_t pageTable, int pageBits, int vaBits, int minBlockLevel)
	:
	fIsKernel(kernel),
	fPageTable(pageTable),
	fPageBits(pageBits),
	fVaBits(vaBits),
	fMinBlockLevel(minBlockLevel)
{
	dprintf("VMSAv8TranslationMap\n");

	fInitialLevel = CalcStartLevel(fVaBits, fPageBits);
}


VMSAv8TranslationMap::~VMSAv8TranslationMap()
{
	dprintf("~VMSAv8TranslationMap\n");

	// FreeTable(fPageTable, fInitialLevel);
}


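// Computes the translation level at which the walk starts, by counting how
// many table levels (each resolving pageBits - 3 bits of VA) are needed to
// cover vaBits. For example, with 4 KiB pages (pageBits = 12) each level
// resolves 9 bits, so a 48-bit VA needs 4 levels and starts at level 0,
// while a 39-bit VA needs 3 levels and starts at level 1.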
int
VMSAv8TranslationMap::CalcStartLevel(int vaBits, int pageBits)
{
	int level = 4;

	int bitsLeft = vaBits - pageBits;
	while (bitsLeft > 0) {
		int tableBits = pageBits - 3;
		bitsLeft -= tableBits;
		level--;
	}

	ASSERT(level >= 0);

	return level;
}


bool
VMSAv8TranslationMap::Lock()
{
	recursive_lock_lock(&fLock);
	return true;
}


void
VMSAv8TranslationMap::Unlock()
{
	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we're about to release it for the last time
		Flush();
	}
	recursive_lock_unlock(&fLock);
}


addr_t
VMSAv8TranslationMap::MappedSize() const
{
	panic("VMSAv8TranslationMap::MappedSize not implemented");
	return 0;
}


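// Worst-case number of page-table pages needed to map [start, end]: at every
// level below the initial one, the range can occupy size / entrySize fully
// covered entries plus up to two partially covered entries at either end.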
size_t
VMSAv8TranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	size_t result = 0;
	size_t size = end - start + 1;

	for (int i = fInitialLevel; i < 3; i++) {
		int tableBits = fPageBits - 3;
		int shift = tableBits * (3 - i) + fPageBits;
		uint64_t entrySize = 1UL << shift;

		result += size / entrySize + 2;
	}

	return result;
}


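// Returns a kernel-virtual pointer to a page table, relying on the physical
// map window at KERNEL_PMAP_BASE that maps physical memory linearly.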
uint64_t*
VMSAv8TranslationMap::TableFromPa(phys_addr_t pa)
{
	return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
}


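// Builds a descriptor for a mapping at the given level: type 0b11 (page) at
// level 3, type 0b01 (block) above it.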
uint64_t
VMSAv8TranslationMap::MakeBlock(phys_addr_t pa, int level, uint64_t attr)
{
	ASSERT(level >= fMinBlockLevel && level < 4);

	return pa | attr | (level == 3 ? 0x3 : 0x1);
}


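// Recursively frees a page table and any lower-level tables it references.
// Block and page entries are not touched; only the table pages themselves
// are returned to the free list.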
void
VMSAv8TranslationMap::FreeTable(phys_addr_t ptPa, int level)
{
	ASSERT(level < 3);

	if (level + 1 < 3) {
		int tableBits = fPageBits - 3;
		uint64_t tableSize = 1UL << tableBits;

		uint64_t* pt = TableFromPa(ptPa);
		for (uint64_t i = 0; i < tableSize; i++) {
			uint64_t pte = pt[i];
			if ((pte & 0x3) == 0x3) {
				FreeTable(pte & kPteAddrMask, level + 1);
			}
		}
	}

	vm_page* page = vm_lookup_page(ptPa >> fPageBits);
	vm_page_set_state(page, PAGE_STATE_FREE);
}


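// Returns the physical address of the next-level table behind the given
// entry, allocating it (and, for block entries, splitting the block) if a
// reservation is provided. Returns 0 at level 3, or when no table exists and
// none may be allocated. The entry is installed with an atomic
// compare-and-set so that concurrent updaters retry instead of losing writes.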
phys_addr_t
VMSAv8TranslationMap::MakeTable(
	phys_addr_t ptPa, int level, int index, vm_page_reservation* reservation)
{
	if (level == 3)
		return 0;

	uint64_t* pte = &TableFromPa(ptPa)[index];
	vm_page* page = NULL;

retry:
	uint64_t oldPte = atomic_get64((int64*) pte);

	int type = oldPte & 0x3;
	if (type == 0x3) {
		return oldPte & kPteAddrMask;
	} else if (reservation != NULL) {
		if (page == NULL)
			page = vm_page_allocate_page(reservation, PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
		phys_addr_t newTablePa = page->physical_page_number << fPageBits;

		if (type == 0x1) {
			// If we're replacing an existing block mapping, convert it into a
			// page table of next-level blocks covering the same range
			int tableBits = fPageBits - 3;
			int shift = tableBits * (3 - (level + 1)) + fPageBits;
			uint64_t entrySize = 1UL << shift;
			uint64_t tableSize = 1UL << tableBits;

			uint64_t* newTable = TableFromPa(newTablePa);
			uint64_t addr = oldPte & kPteAddrMask;
			uint64_t attr = oldPte & kPteAttrMask;

			for (uint64_t i = 0; i < tableSize; i++) {
				newTable[i] = MakeBlock(addr + i * entrySize, level + 1, attr);
			}
		}

		asm("dsb ish");

		// FIXME: this is not enough on real hardware with SMP
		if ((uint64_t) atomic_test_and_set64((int64*) pte, newTablePa | 0x3, oldPte) != oldPte)
			goto retry;

		return newTablePa;
	}

	return 0;
}


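// Applies `action` to the VA range [va, va + size) below the table at ptPa,
// in three phases: a misaligned head and tail are delegated to next-level
// tables, while the aligned middle is handled with block/page entries at
// this level whenever the action and alignment allow it.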
void
VMSAv8TranslationMap::MapRange(phys_addr_t ptPa, int level, addr_t va, phys_addr_t pa, size_t size,
	VMSAv8TranslationMap::VMAction action, uint64_t attr, vm_page_reservation* reservation)
{
	ASSERT(level < 4);
	ASSERT(ptPa != 0);
	ASSERT(reservation != NULL || action != VMAction::MAP);

	int tableBits = fPageBits - 3;
	uint64_t tableMask = (1UL << tableBits) - 1;

	int shift = tableBits * (3 - level) + fPageBits;
	uint64_t entrySize = 1UL << shift;

	uint64_t entryMask = entrySize - 1;
	uint64_t nextVa = va;
	uint64_t end = va + size;
	int index;

	// Handle a misaligned head that straddles an entry boundary in the next-level table
	if ((va & entryMask) != 0) {
		uint64_t aligned = (va & ~entryMask) + entrySize;
		if (end > aligned) {
			index = (va >> shift) & tableMask;
			phys_addr_t table = MakeTable(ptPa, level, index, reservation);
			MapRange(table, level + 1, va, pa, aligned - va, action, attr, reservation);
			nextVa = aligned;
		}
	}

	// Handle fully aligned and appropriately sized chunks
	while (nextVa + entrySize <= end) {
		phys_addr_t targetPa = pa + (nextVa - va);
		index = (nextVa >> shift) & tableMask;

		bool blockAllowed = false;
		if (action == VMAction::MAP)
			blockAllowed = (level >= fMinBlockLevel && (targetPa & entryMask) == 0);
		if (action == VMAction::SET_ATTR || action == VMAction::CLEAR_FLAGS)
			blockAllowed = (MakeTable(ptPa, level, index, NULL) == 0);
		if (action == VMAction::UNMAP)
			blockAllowed = true;

		if (blockAllowed) {
			// Everything is aligned; we can install a block mapping here
			uint64_t* pte = &TableFromPa(ptPa)[index];

		retry:
			uint64_t oldPte = atomic_get64((int64*) pte);

			if (action == VMAction::MAP || (oldPte & 0x1) != 0) {
				uint64_t newPte = 0;
				if (action == VMAction::MAP) {
					newPte = MakeBlock(targetPa, level, attr);
				} else if (action == VMAction::SET_ATTR) {
					newPte = MakeBlock(oldPte & kPteAddrMask, level, MoveAttrFlags(attr, oldPte));
				} else if (action == VMAction::CLEAR_FLAGS) {
					newPte = MakeBlock(oldPte & kPteAddrMask, level, ClearAttrFlags(oldPte, attr));
				} else if (action == VMAction::UNMAP) {
					newPte = 0;
					tmp_pte = oldPte;
				}

				// FIXME: this might not be enough on real hardware with SMP for some cases
				if ((uint64_t) atomic_test_and_set64((int64*) pte, newPte, oldPte) != oldPte)
					goto retry;

				if (level < 3 && (oldPte & 0x3) == 0x3) {
					// If we're replacing an existing page table, clean it up
					FreeTable(oldPte & kPteAddrMask, level);
				}
			}
		} else {
			// Otherwise handle the mapping in the next-level table
			phys_addr_t table = MakeTable(ptPa, level, index, reservation);
			MapRange(table, level + 1, nextVa, targetPa, entrySize, action, attr, reservation);
		}
		nextVa += entrySize;
	}

	// Handle a misaligned tail (or the entirety of a small range) in the next-level table
	if (nextVa < end) {
		index = (nextVa >> shift) & tableMask;
		phys_addr_t table = MakeTable(ptPa, level, index, reservation);
		MapRange(
			table, level + 1, nextVa, pa + (nextVa - va), end - nextVa, action, attr, reservation);
	}
}


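// Finds the index of the MAIR_EL1 attribute byte matching `type`; the kernel
// is expected to have preloaded every memory type it uses, so a miss panics.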
uint8_t
VMSAv8TranslationMap::MairIndex(uint8_t type)
{
	for (int i = 0; i < 8; i++)
		if (((fMair >> (i * 8)) & 0xff) == type)
			return i;

	panic("MAIR entry not found");
	return 0;
}


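// Software dirty-bit scheme: a writable-in-principle mapping carries
// kAttrSWDBM (software DBM, bit 55) and is kept read-only (kAttrAP2 set)
// while clean, so the first write faults and marks the page modified by
// clearing kAttrAP2. With the hardware DBM feature (kAttrDBM), the CPU
// clears kAttrAP2 itself instead of faulting. Hence "AP2 clear" is decoded
// as PAGE_MODIFIED in Query() below.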
uint64_t
VMSAv8TranslationMap::ClearAttrFlags(uint64_t attr, uint32 flags)
{
	attr &= kPteAttrMask;

	if ((flags & PAGE_ACCESSED) != 0)
		attr &= ~kAttrAF;

	if ((flags & PAGE_MODIFIED) != 0 && (attr & kAttrSWDBM) != 0)
		attr |= kAttrAP2;

	return attr;
}


uint64_t
VMSAv8TranslationMap::MoveAttrFlags(uint64_t newAttr, uint64_t oldAttr)
{
	if ((oldAttr & kAttrAF) != 0)
		newAttr |= kAttrAF;
	if (((newAttr & oldAttr) & kAttrSWDBM) != 0 && (oldAttr & kAttrAP2) == 0)
		newAttr &= ~kAttrAP2;

	return newAttr;
}


uint64_t
VMSAv8TranslationMap::GetMemoryAttr(uint32 attributes, uint32 memoryType, bool isKernel)
{
	uint64_t attr = 0;

	if (!isKernel)
		attr |= kAttrNG;

	if ((attributes & B_EXECUTE_AREA) == 0)
		attr |= kAttrUXN;
	if ((attributes & B_KERNEL_EXECUTE_AREA) == 0)
		attr |= kAttrPXN;

	if ((attributes & B_READ_AREA) == 0) {
		attr |= kAttrAP2;
		if ((attributes & B_KERNEL_WRITE_AREA) != 0)
			attr |= kAttrSWDBM;
	} else {
		attr |= kAttrAP2 | kAttrAP1;
		if ((attributes & B_WRITE_AREA) != 0)
			attr |= kAttrSWDBM;
	}

	if ((fHwFeature & HW_DIRTY) != 0 && (attr & kAttrSWDBM))
		attr |= kAttrDBM;

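	// Inner shareable, normal write-back cacheable memory. Note that the
	// memoryType argument is not honored yet: every mapping currently gets
	// the MAIR_NORMAL_WB attribute index.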
	attr |= kAttrSH1 | kAttrSH0;

	attr |= MairIndex(MAIR_NORMAL_WB) << 2;

	return attr;
}


status_t
VMSAv8TranslationMap::Map(addr_t va, phys_addr_t pa, uint32 attributes, uint32 memoryType,
	vm_page_reservation* reservation)
{
	ThreadCPUPinner pinner(thread_get_current_thread());

	uint64_t pageMask = (1UL << fPageBits) - 1;
	uint64_t vaMask = (1UL << fVaBits) - 1;

	ASSERT((va & pageMask) == 0);
	ASSERT((pa & pageMask) == 0);
	ASSERT(ValidateVa(va));

	uint64_t attr = GetMemoryAttr(attributes, memoryType, fIsKernel);

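	// The root table is allocated lazily on the first mapping.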
	if (!fPageTable) {
		vm_page* page = vm_page_allocate_page(reservation, PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
		fPageTable = page->physical_page_number << fPageBits;
	}

	MapRange(
		fPageTable, fInitialLevel, va & vaMask, pa, B_PAGE_SIZE, VMAction::MAP, attr, reservation);

	return B_OK;
}


status_t
VMSAv8TranslationMap::Unmap(addr_t start, addr_t end)
{
	ThreadCPUPinner pinner(thread_get_current_thread());

	size_t size = end - start + 1;

	uint64_t pageMask = (1UL << fPageBits) - 1;
	uint64_t vaMask = (1UL << fVaBits) - 1;

	ASSERT((start & pageMask) == 0);
	ASSERT((size & pageMask) == 0);
	ASSERT(ValidateVa(start));

	MapRange(fPageTable, fInitialLevel, start & vaMask, 0, size, VMAction::UNMAP, 0, NULL);

	return B_OK;
}


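// Unmaps a single page and reports its accessed/modified state to the VM.
// The old PTE is smuggled out of MapRange() through the static tmp_pte,
// which is the kludge the TODO below refers to.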
status_t
VMSAv8TranslationMap::UnmapPage(VMArea* area, addr_t address, bool updatePageQueue)
{
	ThreadCPUPinner pinner(thread_get_current_thread());
	RecursiveLocker locker(fLock);

	// TODO: replace this kludge

	phys_addr_t pa;
	uint64_t pte;
	if (!WalkTable(fPageTable, fInitialLevel, address, &pa, &pte))
		return B_ENTRY_NOT_FOUND;

	uint64_t vaMask = (1UL << fVaBits) - 1;
	MapRange(fPageTable, fInitialLevel, address & vaMask, 0, B_PAGE_SIZE, VMAction::UNMAP, 0, NULL);

	pinner.Unlock();
	locker.Detach();
	PageUnmapped(area, pa >> fPageBits, (tmp_pte & kAttrAF) != 0, (tmp_pte & kAttrAP2) == 0,
		updatePageQueue);

	return B_OK;
}


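// Walks the table hierarchy for `va`, returning the mapped physical address
// and the PTE that mapped it. Block entries above level 3 are handled by
// adding the offset within the block.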
bool
VMSAv8TranslationMap::WalkTable(
	phys_addr_t ptPa, int level, addr_t va, phys_addr_t* pa, uint64_t* rpte)
{
	int tableBits = fPageBits - 3;
	uint64_t tableMask = (1UL << tableBits) - 1;

	int shift = tableBits * (3 - level) + fPageBits;
	uint64_t entrySize = 1UL << shift;
	uint64_t entryMask = entrySize - 1;

	int index = (va >> shift) & tableMask;

	uint64_t pte = TableFromPa(ptPa)[index];
	int type = pte & 0x3;

	if ((type & 0x1) == 0)
		return false;

	uint64_t addr = pte & kPteAddrMask;
	if (level < 3) {
		if (type == 0x3) {
			return WalkTable(addr, level + 1, va, pa, rpte);
		} else {
			*pa = addr | (va & entryMask);
			*rpte = pte;
		}
	} else {
		ASSERT(type == 0x3);
		*pa = addr;
		*rpte = pte;
	}

	return true;
}


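// A VA is valid for this map if its untranslated high bits are all ones for
// a kernel map (TTBR1-style addresses) or all zeros for a user map (TTBR0).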
bool
VMSAv8TranslationMap::ValidateVa(addr_t va)
{
	uint64_t vaMask = (1UL << fVaBits) - 1;
	bool kernelAddr = (va & (1UL << 63)) != 0;
	if (kernelAddr != fIsKernel)
		return false;
	if ((va & ~vaMask) != (fIsKernel ? ~vaMask : 0))
		return false;
	return true;
}


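// Translates `va` and decodes the PTE attributes into page flags: AF maps to
// PAGE_ACCESSED, a clear AP2 (writable, hence dirty under the software DBM
// scheme) to PAGE_MODIFIED, and AP1 grants EL0 (user) access.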
status_t
VMSAv8TranslationMap::Query(addr_t va, phys_addr_t* pa, uint32* flags)
{
	ThreadCPUPinner pinner(thread_get_current_thread());

	ASSERT(ValidateVa(va));

	uint64_t pte = 0;
	bool ret = WalkTable(fPageTable, fInitialLevel, va, pa, &pte);

	uint32 result = 0;

	if (ret) {
		result |= PAGE_PRESENT;

		if ((pte & kAttrAF) != 0)
			result |= PAGE_ACCESSED;
		if ((pte & kAttrAP2) == 0)
			result |= PAGE_MODIFIED;

		if ((pte & kAttrUXN) == 0)
			result |= B_EXECUTE_AREA;
		if ((pte & kAttrPXN) == 0)
			result |= B_KERNEL_EXECUTE_AREA;

		result |= B_KERNEL_READ_AREA;

		if ((pte & kAttrAP1) != 0)
			result |= B_READ_AREA;

		if ((pte & kAttrAP2) == 0 || (pte & kAttrSWDBM) != 0) {
			result |= B_KERNEL_WRITE_AREA;

			if ((pte & kAttrAP1) != 0)
				result |= B_WRITE_AREA;
		}
	}

	*flags = result;
	return B_OK;
}


status_t
VMSAv8TranslationMap::QueryInterrupt(
	addr_t virtualAddress, phys_addr_t* _physicalAddress, uint32* _flags)
{
	return Query(virtualAddress, _physicalAddress, _flags);
}


status_t
VMSAv8TranslationMap::Protect(addr_t start, addr_t end, uint32 attributes, uint32 memoryType)
{
	ThreadCPUPinner pinner(thread_get_current_thread());

	size_t size = end - start + 1;

	uint64_t pageMask = (1UL << fPageBits) - 1;
	uint64_t vaMask = (1UL << fVaBits) - 1;

	ASSERT((start & pageMask) == 0);
	ASSERT((size & pageMask) == 0);
	ASSERT(ValidateVa(start));

	uint64_t attr = GetMemoryAttr(attributes, memoryType, fIsKernel);
	MapRange(fPageTable, fInitialLevel, start & vaMask, 0, size, VMAction::SET_ATTR, attr, NULL);

	return B_OK;
}


status_t
VMSAv8TranslationMap::ClearFlags(addr_t va, uint32 flags)
{
	ThreadCPUPinner pinner(thread_get_current_thread());

	uint64_t pageMask = (1UL << fPageBits) - 1;
	uint64_t vaMask = (1UL << fVaBits) - 1;

	ASSERT((va & pageMask) == 0);
	ASSERT(ValidateVa(va));

	MapRange(
		fPageTable, fInitialLevel, va & vaMask, 0, B_PAGE_SIZE, VMAction::CLEAR_FLAGS, flags, NULL);

	return B_OK;
}


bool
VMSAv8TranslationMap::ClearAccessedAndModified(
	VMArea* area, addr_t address, bool unmapIfUnaccessed, bool& _modified)
{
	panic("VMSAv8TranslationMap::ClearAccessedAndModified not implemented\n");
	return false;
}


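// Flushes by invalidating the entire TLB; finer-grained per-ASID or per-VA
// invalidation would be cheaper but is not implemented yet.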
void
VMSAv8TranslationMap::Flush()
{
	ThreadCPUPinner pinner(thread_get_current_thread());

	arch_cpu_global_TLB_invalidate();
}
598