xref: /haiku/src/system/kernel/arch/riscv64/RISCV64VMTranslationMap.cpp (revision ed24eb5ff12640d052171c6a7feba37fab8a75d1)
/*
 * Copyright 2020-2021, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *   X512 <danger_mail@list.ru>
 */


#include "RISCV64VMTranslationMap.h"

#include <kernel.h>
#include <vm/vm_priv.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>
#include <slab/Slab.h>
#include <platform/sbi/sbi_syscalls.h>

#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>


//#define DO_TRACE
#ifdef DO_TRACE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif

#define NOT_IMPLEMENTED_PANIC() \
	panic("not implemented: %s\n", __PRETTY_FUNCTION__)

extern uint32 gPlatform;


// Dump one vm_page to the debug output: physical address, page state letter
// (A/I/M/C/F/L/W/-), the busy/busy_writing/accessed/modified/unused flags,
// usage and wired counts, and the list of areas mapping the page.
static void
WriteVmPage(vm_page* page)
{
	dprintf("0x%08" B_PRIxADDR " ",
		(addr_t)(page->physical_page_number * B_PAGE_SIZE));
	switch (page->State()) {
		case PAGE_STATE_ACTIVE:
			dprintf("A");
			break;
		case PAGE_STATE_INACTIVE:
			dprintf("I");
			break;
		case PAGE_STATE_MODIFIED:
			dprintf("M");
			break;
		case PAGE_STATE_CACHED:
			dprintf("C");
			break;
		case PAGE_STATE_FREE:
			dprintf("F");
			break;
		case PAGE_STATE_CLEAR:
			dprintf("L");
			break;
		case PAGE_STATE_WIRED:
			dprintf("W");
			break;
		case PAGE_STATE_UNUSED:
			dprintf("-");
			break;
	}
	dprintf(" ");
	if (page->busy)
		dprintf("B");
	else
		dprintf("-");

	if (page->busy_writing)
		dprintf("W");
	else
		dprintf("-");

	if (page->accessed)
		dprintf("A");
	else
		dprintf("-");

	if (page->modified)
		dprintf("M");
	else
		dprintf("-");

	if (page->unused)
		dprintf("U");
	else
		dprintf("-");

	dprintf(" usage:%3u", page->usage_count);
	dprintf(" wired:%5u", page->WiredCount());

	bool first = true;
	vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
	vm_page_mapping* mapping;
	while ((mapping = iterator.Next()) != NULL) {
		if (first) {
			dprintf(": ");
			first = false;
		} else
			dprintf(", ");

		dprintf("%" B_PRId32 " (%s)", mapping->area->id, mapping->area->name);
		// (the iterator already advances; no manual link chasing needed)
	}
}


// Free the page table page `ppn` and, recursively, every page table page it
// references. For user maps only the user part of the level-2 table is
// walked; the level-2 entries mirroring the kernel address space are shared
// with the kernel map and must not be freed here.
static void
FreePageTable(page_num_t ppn, bool isKernel, uint32 level = 2)
{
	if (level > 0) {
		Pte* pte = (Pte*)VirtFromPhys(ppn * B_PAGE_SIZE);
		uint64 beg = 0;
		uint64 end = pteCount - 1;
		if (level == 2 && !isKernel) {
			beg = VirtAdrPte(USER_BASE, 2);
			end = VirtAdrPte(USER_TOP, 2);
		}
		for (uint64 i = beg; i <= end; i++) {
			if ((1 << pteValid) & pte[i].flags)
				FreePageTable(pte[i].ppn, isKernel, level - 1);
		}
	}
	vm_page* page = vm_lookup_page(ppn);
	DEBUG_PAGE_ACCESS_START(page);
	vm_page_set_state(page, PAGE_STATE_FREE);
}


// Count the page table pages (including `ppn` itself) reachable from the
// page table page `ppn`, honoring the same user/kernel range rules as
// FreePageTable().
static uint64
GetPageTableSize(page_num_t ppn, bool isKernel, uint32 level = 2)
{
	if (ppn == 0)
		return 0;

	if (level == 0)
		return 1;

	uint64 size = 1;
	Pte* pte = (Pte*)VirtFromPhys(ppn * B_PAGE_SIZE);
	uint64 beg = 0;
	uint64 end = pteCount - 1;
	if (level == 2 && !isKernel) {
		beg = VirtAdrPte(USER_BASE, 2);
		end = VirtAdrPte(USER_TOP, 2);
	}
	for (uint64 i = beg; i <= end; i++) {
		if ((1 << pteValid) & pte[i].flags)
			size += GetPageTableSize(pte[i].ppn, isKernel, level - 1);
	}
	return size;
}


//#pragma mark RISCV64VMTranslationMap


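// Look up (and optionally create) the level-0 PTE for `virtAdr`. A sketch of
// the three-level Sv39 layout this walk assumes (512-entry tables,
// pteCount == 512):
//
//   level 2 index: bits 38..30 of virtAdr (1 GiB per entry)
//   level 1 index: bits 29..21 of virtAdr (2 MiB per entry)
//   level 0 index: bits 20..12 of virtAdr (4 KiB page)
//
// VirtAdrPte(virtAdr, level) is expected to extract these index fields.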
Pte*
RISCV64VMTranslationMap::LookupPte(addr_t virtAdr, bool alloc,
	vm_page_reservation* reservation)
{
	if (fPageTable == 0) {
		if (!alloc)
			return NULL;
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
		fPageTable = page->physical_page_number * B_PAGE_SIZE;
		if (fPageTable == 0)
			return NULL;
		DEBUG_PAGE_ACCESS_END(page);
		fPageTableSize++;
		if (!fIsKernel) {
			// Map kernel address space into user address space. Preallocated
			// kernel level-2 PTEs are reused.
			RISCV64VMTranslationMap* kernelMap = (RISCV64VMTranslationMap*)
				VMAddressSpace::Kernel()->TranslationMap();
			Pte *kernelPageTable = (Pte*)VirtFromPhys(kernelMap->PageTable());
			Pte *userPageTable = (Pte*)VirtFromPhys(fPageTable);
			for (uint64 i = VirtAdrPte(KERNEL_BASE, 2);
				i <= VirtAdrPte(KERNEL_TOP, 2); i++) {
				Pte *pte = &userPageTable[i];
				pte->ppn = kernelPageTable[i].ppn;
				pte->flags |= (1 << pteValid);
			}
		}
	}
	Pte *pte = (Pte*)VirtFromPhys(fPageTable);
	for (int level = 2; level > 0; level--) {
		pte += VirtAdrPte(virtAdr, level);
		if (!((1 << pteValid) & pte->flags)) {
			if (!alloc)
				return NULL;
			vm_page* page = vm_page_allocate_page(reservation,
				PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
			pte->ppn = page->physical_page_number;
			if (pte->ppn == 0)
				return NULL;
			DEBUG_PAGE_ACCESS_END(page);
			fPageTableSize++;
			pte->flags |= (1 << pteValid);
		}
		pte = (Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn);
	}
	pte += VirtAdrPte(virtAdr, 0);
	return pte;
}


phys_addr_t
RISCV64VMTranslationMap::LookupAddr(addr_t virtAdr)
{
	Pte* pte = LookupPte(virtAdr, false, NULL);
	if (pte == NULL || !((1 << pteValid) & pte->flags))
		return 0;
	if (fIsKernel != (((1 << pteUser) & pte->flags) == 0))
		return 0;
	return pte->ppn * B_PAGE_SIZE;
}


RISCV64VMTranslationMap::RISCV64VMTranslationMap(bool kernel,
	phys_addr_t pageTable):
	fIsKernel(kernel),
	fPageTable(pageTable),
	fPageTableSize(GetPageTableSize(pageTable / B_PAGE_SIZE, kernel)),
	fInvalidPagesCount(0),
	fInvalidCode(false)
{
	TRACE("+RISCV64VMTranslationMap(%p, %d, 0x%" B_PRIxADDR ")\n", this,
		kernel, pageTable);
	TRACE("  pageTableSize: %" B_PRIu64 "\n", fPageTableSize);
}


RISCV64VMTranslationMap::~RISCV64VMTranslationMap()
{
	TRACE("-RISCV64VMTranslationMap(%p)\n", this);
	TRACE("  pageTableSize: %" B_PRIu64 "\n", fPageTableSize);
	TRACE("  GetPageTableSize(): %" B_PRIu64 "\n",
		GetPageTableSize(fPageTable / B_PAGE_SIZE, fIsKernel));

	ASSERT_ALWAYS(!fIsKernel);
	// Can't delete currently used page table
	ASSERT_ALWAYS(::Satp() != Satp());

	FreePageTable(fPageTable / B_PAGE_SIZE, fIsKernel);
}


bool
RISCV64VMTranslationMap::Lock()
{
	TRACE("RISCV64VMTranslationMap::Lock()\n");
	recursive_lock_lock(&fLock);
	return true;
}


void
RISCV64VMTranslationMap::Unlock()
{
	TRACE("RISCV64VMTranslationMap::Unlock()\n");
	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we're about to release it for the last time
		Flush();
	}
	recursive_lock_unlock(&fLock);
}


addr_t
RISCV64VMTranslationMap::MappedSize() const
{
	NOT_IMPLEMENTED_PANIC();
	return 0;
}


size_t
RISCV64VMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	enum {
		level0Range = (uint64_t)B_PAGE_SIZE * pteCount,
		level1Range = (uint64_t)level0Range * pteCount,
		level2Range = (uint64_t)level1Range * pteCount,
	};
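	// With 4 KiB pages and pteCount == 512 (Sv39), these ranges are:
	// level0Range = 2 MiB, level1Range = 1 GiB, level2Range = 512 GiB.
	// Worked example (assuming an inclusive `end`, as the "+ 1" below
	// suggests): a 6 MiB range starting on a 2 MiB boundary that stays
	// within one 1 GiB region touches 3 level-0 tables, 1 level-1 table
	// and 1 level-2 table, i.e. 5 page table pages.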

	// If start == 0, the actual base address is not yet known to the caller;
	// assume the worst case of a range crossing the maximum number of table
	// boundaries.
	if (start == 0) {
		start = (level2Range) - B_PAGE_SIZE;
		end += start;
	}

	size_t requiredLevel2 = end / level2Range + 1 - start / level2Range;
	size_t requiredLevel1 = end / level1Range + 1 - start / level1Range;
	size_t requiredLevel0 = end / level0Range + 1 - start / level0Range;

	return requiredLevel2 + requiredLevel1 + requiredLevel0;
}


status_t
RISCV64VMTranslationMap::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
	uint32 attributes, uint32 memoryType,
	vm_page_reservation* reservation)
{
	TRACE("RISCV64VMTranslationMap::Map(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
		")\n", virtualAddress, physicalAddress);

	ThreadCPUPinner pinner(thread_get_current_thread());

	Pte* pte = LookupPte(virtualAddress, true, reservation);
	if (pte == NULL)
		panic("can't allocate page table");

	Pte newPte;
	newPte.ppn = physicalAddress / B_PAGE_SIZE;
	newPte.flags = (1 << pteValid);

	if ((attributes & B_USER_PROTECTION) != 0) {
		newPte.flags |= (1 << pteUser);
		if ((attributes & B_READ_AREA) != 0)
			newPte.flags |= (1 << pteRead);
		if ((attributes & B_WRITE_AREA) != 0)
			newPte.flags |= (1 << pteWrite);
		if ((attributes & B_EXECUTE_AREA) != 0) {
			newPte.flags |= (1 << pteExec);
			fInvalidCode = true;
		}
	} else {
		if ((attributes & B_KERNEL_READ_AREA) != 0)
			newPte.flags |= (1 << pteRead);
		if ((attributes & B_KERNEL_WRITE_AREA) != 0)
			newPte.flags |= (1 << pteWrite);
		if ((attributes & B_KERNEL_EXECUTE_AREA) != 0) {
			newPte.flags |= (1 << pteExec);
			fInvalidCode = true;
		}
	}

	*pte = newPte;

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return B_OK;
}
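
// Example (a sketch, assuming the pte* bit indices defined in
// RISCV64VMTranslationMap.h follow the RISC-V privileged spec: V=0, R=1,
// W=2, X=3, U=4): a user read/write mapping created by Map() has V, R, W
// and U set, i.e. a flag byte of 0x17. The A and D bits start out clear and
// are only set later, on access, by hardware or by the fault handler.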


status_t
RISCV64VMTranslationMap::Unmap(addr_t start, addr_t end)
{
	TRACE("RISCV64VMTranslationMap::Unmap(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
		")\n", start, end);

	ThreadCPUPinner pinner(thread_get_current_thread());

	for (addr_t page = start; page < end; page += B_PAGE_SIZE) {
		Pte* pte = LookupPte(page, false, NULL);
		if (pte != NULL) {
			fMapCount--;
			Pte oldPte{.val = (uint64)atomic_get_and_set64((int64*)&pte->val, 0)};
			if ((oldPte.flags & (1 << pteAccessed)) != 0)
				InvalidatePage(page);
		}
	}
	return B_OK;
}


status_t
RISCV64VMTranslationMap::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


/*
Things that need to be done when unmapping VMArea pages:
	update vm_page::accessed and vm_page::modified
	MMIO pages:
		just unmap
	wired pages:
		decrement the wired count
	non-wired pages:
		remove from the VMArea and vm_page `mappings` lists
	wired and non-wired pages:
		vm_page_set_state
*/

status_t
RISCV64VMTranslationMap::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	TRACE("RISCV64VMTranslationMap::UnmapPage(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", %d)\n", (addr_t)area, area->name, address,
		updatePageQueue);

	ThreadCPUPinner pinner(thread_get_current_thread());

	Pte* pte = LookupPte(address, false, NULL);
	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
		return B_ENTRY_NOT_FOUND;

	RecursiveLocker locker(fLock);

	Pte oldPte{.val = (uint64)atomic_get_and_set64((int64*)&pte->val, 0)};
	fMapCount--;
	pinner.Unlock();

	if ((oldPte.flags & (1 << pteAccessed)) != 0)
		InvalidatePage(address);

	Flush();

	locker.Detach(); // PageUnmapped takes ownership
	PageUnmapped(area, oldPte.ppn, ((1 << pteAccessed) & oldPte.flags) != 0,
		((1 << pteDirty) & oldPte.flags) != 0, updatePageQueue);
	return B_OK;
}


void
RISCV64VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	TRACE("RISCV64VMTranslationMap::UnmapPages(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d)\n", (addr_t)area,
		area->name, base, size, updatePageQueue);

	if (size == 0)
		return;

	addr_t end = base + size - 1;

	VMAreaMappings queue;
	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	for (addr_t start = base; start < end; start += B_PAGE_SIZE) {
		Pte* pte = LookupPte(start, false, NULL);
		if (pte == NULL)
			continue;

		Pte oldPte{.val = (uint64)atomic_get_and_set64((int64*)&pte->val, 0)};
		if ((oldPte.flags & (1 << pteValid)) == 0)
			continue;

		fMapCount--;

		if ((oldPte.flags & (1 << pteAccessed)) != 0)
			InvalidatePage(start);

		if (area->cache_type != CACHE_TYPE_DEVICE) {
			// get the page
			vm_page* page = vm_lookup_page(oldPte.ppn);
			ASSERT(page != NULL);
			if (false) {
				WriteVmPage(page); dprintf("\n");
			}

			DEBUG_PAGE_ACCESS_START(page);

			// transfer the accessed/dirty flags to the page
			if ((oldPte.flags & (1 << pteAccessed)) != 0)
				page->accessed = true;
			if ((oldPte.flags & (1 << pteDirty)) != 0)
				page->modified = true;

			// remove the mapping object/decrement the wired_count of the
			// page
			if (area->wiring == B_NO_LOCK) {
				vm_page_mapping* mapping = NULL;
				vm_page_mappings::Iterator iterator
					= page->mappings.GetIterator();
				while ((mapping = iterator.Next()) != NULL) {
					if (mapping->area == area)
						break;
				}

				ASSERT(mapping != NULL);

				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				queue.Add(mapping);
			} else
				page->DecrementWiredCount();

			if (!page->IsMapped()) {
				atomic_add(&gMappedPagesCount, -1);

				if (updatePageQueue) {
					if (page->Cache()->temporary)
						vm_page_set_state(page, PAGE_STATE_INACTIVE);
					else if (page->modified)
						vm_page_set_state(page, PAGE_STATE_MODIFIED);
					else
						vm_page_set_state(page, PAGE_STATE_CACHED);
				}
			}

			DEBUG_PAGE_ACCESS_END(page);
		}

		// flush explicitly, since we directly use the lock
		Flush();
	}

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical here, as in all cases this method is used, the unmapped
	// area range is unmapped for good (resized/cut) and the pages will likely
	// be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);

	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


void
RISCV64VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	TRACE("RISCV64VMTranslationMap::UnmapArea(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d, %d)\n", (addr_t)area,
		area->name, area->Base(), area->Size(), deletingAddressSpace,
		ignoreTopCachePageFlags);

	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		UnmapPages(area, area->Base(), area->Size(), true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {

		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE)
				- area->cache_offset);

			Pte* pte = LookupPte(address, false, NULL);
			if (pte == NULL
				|| ((1 << pteValid) & pte->flags) == 0) {
				panic("page %p has mapping for area %p "
					"(%#" B_PRIxADDR "), but has no "
					"page table", page, area, address);
				continue;
			}

			Pte oldPte{.val = (uint64)atomic_get_and_set64((int64*)&pte->val, 0)};

			// transfer the accessed/dirty flags to the page and
			// invalidate the mapping, if necessary
			if (((1 << pteAccessed) & oldPte.flags) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if (((1 << pteDirty) & oldPte.flags) != 0)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary) {
					vm_page_set_state(page,
						PAGE_STATE_INACTIVE);
				} else if (page->modified) {
					vm_page_set_state(page,
						PAGE_STATE_MODIFIED);
				} else {
					vm_page_set_state(page,
						PAGE_STATE_CACHED);
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);

	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


status_t
RISCV64VMTranslationMap::Query(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	*_flags = 0;
	*_physicalAddress = 0;

	ThreadCPUPinner pinner(thread_get_current_thread());

	if (fPageTable == 0)
		return B_OK;

	Pte* pte = LookupPte(virtualAddress, false, NULL);
	if (pte == NULL)
		return B_OK;

	Pte pteVal = *pte;
	*_physicalAddress = pteVal.ppn * B_PAGE_SIZE;

	if (((1 << pteValid)    & pteVal.flags) != 0)
		*_flags |= PAGE_PRESENT;
	if (((1 << pteDirty)    & pteVal.flags) != 0)
		*_flags |= PAGE_MODIFIED;
	if (((1 << pteAccessed) & pteVal.flags) != 0)
		*_flags |= PAGE_ACCESSED;
	if (((1 << pteUser) & pteVal.flags) != 0) {
		if (((1 << pteRead)  & pteVal.flags) != 0)
			*_flags |= B_READ_AREA;
		if (((1 << pteWrite) & pteVal.flags) != 0)
			*_flags |= B_WRITE_AREA;
		if (((1 << pteExec)  & pteVal.flags) != 0)
			*_flags |= B_EXECUTE_AREA;
	} else {
		if (((1 << pteRead)  & pteVal.flags) != 0)
			*_flags |= B_KERNEL_READ_AREA;
		if (((1 << pteWrite) & pteVal.flags) != 0)
			*_flags |= B_KERNEL_WRITE_AREA;
		if (((1 << pteExec)  & pteVal.flags) != 0)
			*_flags |= B_KERNEL_EXECUTE_AREA;
	}

	return B_OK;
}


status_t
RISCV64VMTranslationMap::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	return Query(virtualAddress, _physicalAddress, _flags);
}


status_t
RISCV64VMTranslationMap::Protect(addr_t base, addr_t top, uint32 attributes,
	uint32 memoryType)
{
	TRACE("RISCV64VMTranslationMap::Protect(0x%" B_PRIxADDR ", 0x%"
		B_PRIxADDR ")\n", base, top);

	ThreadCPUPinner pinner(thread_get_current_thread());

	for (addr_t page = base; page < top; page += B_PAGE_SIZE) {

		Pte* pte = LookupPte(page, false, NULL);
		if (pte == NULL || ((1 << pteValid) & pte->flags) == 0) {
			TRACE("attempt to protect unmapped page: 0x%"
				B_PRIxADDR "\n", page);
			continue;
		}

		Pte oldPte = *pte;
		Pte newPte = oldPte;
		newPte.flags &= (1 << pteValid)
			| (1 << pteAccessed) | (1 << pteDirty);

		if ((attributes & B_USER_PROTECTION) != 0) {
			newPte.flags |= (1 << pteUser);
			if ((attributes & B_READ_AREA) != 0)
				newPte.flags |= (1 << pteRead);
			if ((attributes & B_WRITE_AREA) != 0)
				newPte.flags |= (1 << pteWrite);
			if ((attributes & B_EXECUTE_AREA) != 0) {
				newPte.flags |= (1 << pteExec);
				fInvalidCode = true;
			}
		} else {
			if ((attributes & B_KERNEL_READ_AREA) != 0)
				newPte.flags |= (1 << pteRead);
			if ((attributes & B_KERNEL_WRITE_AREA) != 0)
				newPte.flags |= (1 << pteWrite);
			if ((attributes & B_KERNEL_EXECUTE_AREA) != 0) {
				newPte.flags |= (1 << pteExec);
				fInvalidCode = true;
			}
		}
		*pte = newPte;

		if ((oldPte.flags & (1 << pteAccessed)) != 0)
			InvalidatePage(page);
	}

	return B_OK;
}


status_t
RISCV64VMTranslationMap::ProtectPage(VMArea* area, addr_t address,
	uint32 attributes)
{
	NOT_IMPLEMENTED_PANIC();
	return B_OK;
}


status_t
RISCV64VMTranslationMap::ProtectArea(VMArea* area, uint32 attributes)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


static inline uint32
ConvertAccessedFlags(uint32 flags)
{
	return ((flags & PAGE_MODIFIED) ? (1 << pteDirty) : 0)
		| ((flags & PAGE_ACCESSED) ? (1 << pteAccessed) : 0);
}
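
// For example, ConvertAccessedFlags(PAGE_ACCESSED | PAGE_MODIFIED) yields
// (1 << pteAccessed) | (1 << pteDirty): the generic VM page flags are
// translated into their RISC-V PTE A and D bit equivalents.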


status_t
RISCV64VMTranslationMap::SetFlags(addr_t address, uint32 flags)
{
	ThreadCPUPinner pinner(thread_get_current_thread());
	Pte* pte = LookupPte(address, false, NULL);
	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
		return B_OK;
	pte->flags |= ConvertAccessedFlags(flags);
	FlushTlbPage(address);
	return B_OK;
}


status_t
RISCV64VMTranslationMap::ClearFlags(addr_t address, uint32 flags)
{
	ThreadCPUPinner pinner(thread_get_current_thread());

	Pte* pte = LookupPte(address, false, NULL);
	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
		return B_OK;

	pte->flags &= ~ConvertAccessedFlags(flags);
	InvalidatePage(address);
	return B_OK;
}


bool
RISCV64VMTranslationMap::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	TRACE("RISCV64VMTranslationMap::ClearAccessedAndModified(0x%"
		B_PRIxADDR "(%s), 0x%" B_PRIxADDR ", %d)\n", (addr_t)area,
		area->name, address, unmapIfUnaccessed);

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	Pte* pte = LookupPte(address, false, NULL);
	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
		return false;

	Pte oldPte;
	if (unmapIfUnaccessed) {
		for (;;) {
			oldPte = *pte;
			if (((1 << pteValid) & oldPte.flags) == 0)
				return false;

			if (((1 << pteAccessed) & oldPte.flags) != 0) {
				// page was accessed -- just clear the flags
				oldPte.val = atomic_and64((int64*)&pte->val,
					~((1 << pteAccessed) | (1 << pteDirty)));
				break;
			}

			// page hasn't been accessed -- try to unmap it atomically
			if (atomic_test_and_set64((int64*)&pte->val, 0, oldPte.val)
					== (int64)oldPte.val) {
				break;
			}
			// the entry changed under us in the meantime -- check again
		}
	} else {
		oldPte.val = atomic_and64((int64*)&pte->val,
			~((1 << pteAccessed) | (1 << pteDirty)));
	}

	pinner.Unlock();
	_modified = ((1 << pteDirty) & oldPte.flags) != 0;
	if (((1 << pteAccessed) & oldPte.flags) != 0) {
		InvalidatePage(address);
		Flush();
		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	fMapCount--;

	locker.Detach(); // UnaccessedPageUnmapped takes ownership
	UnaccessedPageUnmapped(area, oldPte.ppn);
	return false;
}


void
RISCV64VMTranslationMap::Flush()
{
	// copy of X86VMTranslationMap::Flush
	// TODO: move to common VMTranslationMap class

	if (fInvalidPagesCount <= 0)
		return;
/*
	dprintf("+Flush(%p)\n", this);
	struct ScopeExit {
		~ScopeExit()
		{
			dprintf("-Flush(%p)\n", this);
		}
	} scopeExit;
*/
	ThreadCPUPinner pinner(thread_get_current_thread());

	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernel) {
			arch_cpu_global_TLB_invalidate();

			// dprintf("+smp_send_broadcast_ici\n");
			smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
			// dprintf("-smp_send_broadcast_ici\n");

		} else {
			cpu_status state = disable_interrupts();
			arch_cpu_user_TLB_invalidate();
			restore_interrupts(state);

			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = fActiveOnCpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				// dprintf("+smp_send_multicast_ici\n");
				smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
					0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
				// dprintf("-smp_send_multicast_ici\n");
			}
		}
	} else {
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);

		if (fIsKernel) {
			// dprintf("+smp_send_broadcast_ici\n");
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
				SMP_MSG_FLAG_SYNC);
			// dprintf("-smp_send_broadcast_ici\n");
		} else {
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = fActiveOnCpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				// dprintf("+smp_send_multicast_ici\n");
				smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
					(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
					SMP_MSG_FLAG_SYNC);
				// dprintf("-smp_send_multicast_ici\n");
			}
		}
	}
	fInvalidPagesCount = 0;

	if (fInvalidCode) {
		FenceI();

		int cpu = smp_get_current_cpu();
		CPUSet cpuMask = fActiveOnCpus;
		cpuMask.ClearBit(cpu);

		if (!cpuMask.IsEmpty()) {
			switch (gPlatform) {
				case kPlatformSbi: {
					uint64 hartMask = 0;
					int32 cpuCount = smp_get_num_cpus();
					for (int32 i = 0; i < cpuCount; i++) {
						if (cpuMask.GetBit(i))
							hartMask |= (uint64)1 << gCPU[i].arch.hartId;
					}
					// TODO: handle hart ID >= 64
					memory_full_barrier();
					sbi_remote_fence_i(hartMask, 0);
					break;
				}
			}
		}
		fInvalidCode = false;
	}
}
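
// A worked example of the SBI fence path above: if cpuMask contains CPUs 1
// and 3, whose hart IDs are 2 and 5, then hartMask = (1 << 2) | (1 << 5)
// = 0x24, and sbi_remote_fence_i(hartMask, 0) makes exactly those harts
// execute FENCE.I so they observe the newly written code.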


void
RISCV64VMTranslationMap::DebugPrintMappingInfo(addr_t virtualAddress)
{
	NOT_IMPLEMENTED_PANIC();
}


bool
RISCV64VMTranslationMap::DebugGetReverseMappingInfo(phys_addr_t physicalAddress,
	ReverseMappingInfoCallback& callback)
{
	NOT_IMPLEMENTED_PANIC();
	return false;
}


status_t
RISCV64VMTranslationMap::MemcpyToMap(addr_t to, const char *from, size_t size)
{
	TRACE("RISCV64VMTranslationMap::MemcpyToMap(0x%" B_PRIxADDR ", 0x%"
		B_PRIxADDR ", %" B_PRIuSIZE ")\n", to, (addr_t)from, size);

	while (size > 0) {
		uint64 va0 = ROUNDDOWN(to, B_PAGE_SIZE);
		uint64 pa0 = LookupAddr(va0);
		TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%" B_PRIxADDR "\n",
			va0, pa0);

		if (pa0 == 0) {
			TRACE("[!] not mapped: 0x%" B_PRIxADDR "\n", va0);
			return B_BAD_ADDRESS;
		}

		uint64 n = B_PAGE_SIZE - (to - va0);
		if (n > size)
			n = size;

		memcpy(VirtFromPhys(pa0 + (to - va0)), from, n);

		size -= n;
		from += n;
		to = va0 + B_PAGE_SIZE;
	}
	return B_OK;
}


status_t
RISCV64VMTranslationMap::MemcpyFromMap(char *to, addr_t from, size_t size)
{
	TRACE("RISCV64VMTranslationMap::MemcpyFromMap(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n",
		(addr_t)to, from, size);

	while (size > 0) {
		uint64 va0 = ROUNDDOWN(from, B_PAGE_SIZE);
		uint64 pa0 = LookupAddr(va0);
		if (pa0 == 0) {
			TRACE("[!] not mapped: 0x%" B_PRIxADDR
				", calling page fault handler\n", va0);

			addr_t newIP;
			vm_page_fault(va0, Ra(), true, false, true, &newIP);

			pa0 = LookupAddr(va0);
			TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%"
				B_PRIxADDR "\n", va0, pa0);

			if (pa0 == 0)
				return B_BAD_ADDRESS;
		}
		uint64 n = B_PAGE_SIZE - (from - va0);
		if (n > size)
			n = size;

		memcpy(to, VirtFromPhys(pa0 + (from - va0)), n);

		size -= n;
		to += n;
		from = va0 + B_PAGE_SIZE;
	}

	return B_OK;
}


status_t
RISCV64VMTranslationMap::MemsetToMap(addr_t to, char c, size_t count)
{
	TRACE("RISCV64VMTranslationMap::MemsetToMap(0x%" B_PRIxADDR
		", %d, %" B_PRIuSIZE ")\n", to, c, count);

	while (count > 0) {
		uint64 va0 = ROUNDDOWN(to, B_PAGE_SIZE);
		uint64 pa0 = LookupAddr(va0);
		TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%" B_PRIxADDR "\n",
			va0, pa0);

		if (pa0 == 0) {
			TRACE("[!] not mapped: 0x%" B_PRIxADDR
				", calling page fault handler\n", va0);
			addr_t newIP;
			vm_page_fault(va0, Ra(), true, false, true, &newIP);
			pa0 = LookupAddr(va0);
			TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%"
				B_PRIxADDR "\n", va0, pa0);

			if (pa0 == 0)
				return B_BAD_ADDRESS;
		}

		uint64 n = B_PAGE_SIZE - (to - va0);
		if (n > count)
			n = count;

		memset(VirtFromPhys(pa0 + (to - va0)), c, n);

		count -= n;
		to = va0 + B_PAGE_SIZE;
	}
	return B_OK;
}


ssize_t
RISCV64VMTranslationMap::StrlcpyFromMap(char *to, addr_t from, size_t size)
{
	// TODO: Not fault-safe: unlike MemcpyFromMap() this does not handle
	// unmapped source pages.
	return strlcpy(to, (const char*)from, size);
}


ssize_t
RISCV64VMTranslationMap::StrlcpyToMap(addr_t to, const char *from, size_t size)
{
	ssize_t len = strlen(from) + 1;
	if ((size_t)len > size)
		len = size;

	if (MemcpyToMap(to, from, len) < B_OK)
		return 0;

	return len;
}


//#pragma mark RISCV64VMPhysicalPageMapper


RISCV64VMPhysicalPageMapper::RISCV64VMPhysicalPageMapper()
{
	TRACE("+RISCV64VMPhysicalPageMapper\n");
}


RISCV64VMPhysicalPageMapper::~RISCV64VMPhysicalPageMapper()
{
	TRACE("-RISCV64VMPhysicalPageMapper\n");
}


status_t
RISCV64VMPhysicalPageMapper::GetPage(phys_addr_t physicalAddress,
	addr_t* _virtualAddress, void** _handle)
{
	*_virtualAddress = (addr_t)VirtFromPhys(physicalAddress);
	*_handle = (void*)1;
	return B_OK;
}


status_t
RISCV64VMPhysicalPageMapper::PutPage(addr_t virtualAddress, void* handle)
{
	return B_OK;
}


status_t
RISCV64VMPhysicalPageMapper::GetPageCurrentCPU(phys_addr_t physicalAddress,
	addr_t* _virtualAddress, void** _handle)
{
	return GetPage(physicalAddress, _virtualAddress, _handle);
}


status_t
RISCV64VMPhysicalPageMapper::PutPageCurrentCPU(addr_t virtualAddress,
	void* _handle)
{
	return PutPage(virtualAddress, _handle);
}


status_t
RISCV64VMPhysicalPageMapper::GetPageDebug(phys_addr_t physicalAddress,
	addr_t* _virtualAddress, void** _handle)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


status_t
RISCV64VMPhysicalPageMapper::PutPageDebug(addr_t virtualAddress, void* handle)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


status_t
RISCV64VMPhysicalPageMapper::MemsetPhysical(phys_addr_t address, int value,
	phys_size_t length)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemsetPhysical(0x%" B_PRIxADDR
		", 0x%x, 0x%" B_PRIxADDR ")\n", address, value, length);
	return user_memset(VirtFromPhys(address), value, length);
}


status_t
RISCV64VMPhysicalPageMapper::MemcpyFromPhysical(void* to, phys_addr_t from,
	size_t length, bool user)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyFromPhysical(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n", (addr_t)to,
		from, length);
	return user_memcpy(to, VirtFromPhys(from), length);
}


status_t
RISCV64VMPhysicalPageMapper::MemcpyToPhysical(phys_addr_t to, const void* from,
	size_t length, bool user)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyToPhysical(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n", to, (addr_t)from,
		length);
	return user_memcpy(VirtFromPhys(to), from, length);
}


void
RISCV64VMPhysicalPageMapper::MemcpyPhysicalPage(phys_addr_t to,
	phys_addr_t from)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyPhysicalPage(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ")\n", to, from);
	user_memcpy(VirtFromPhys(to), VirtFromPhys(from), B_PAGE_SIZE);
}