xref: /haiku/src/system/kernel/arch/riscv64/RISCV64VMTranslationMap.cpp (revision 4c8e85b316c35a9161f5a1c50ad70bc91c83a76f)
/*
 * Copyright 2020-2021, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *   X512 <danger_mail@list.ru>
 */


#include "RISCV64VMTranslationMap.h"

#include <kernel.h>
#include <vm/vm_priv.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>
#include <slab/Slab.h>
#include <platform/sbi/sbi_syscalls.h>

#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>


//#define DO_TRACE
#ifdef DO_TRACE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif

#define NOT_IMPLEMENTED_PANIC() \
	panic("not implemented: %s\n", __PRETTY_FUNCTION__)

extern uint32 gPlatform;

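// Overview: this map manages a three-level page table (level 2 is the root).
// Every table occupies one B_PAGE_SIZE page holding pteCount entries, and
// VirtAdrPte(address, level) extracts the table index for the given level.
// The constants match the standard RISC-V Sv39 layout (assumed here:
// 512 entries per table, 4 KiB pages).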

static void
WriteVmPage(vm_page* page)
{
	dprintf("0x%08" B_PRIxADDR " ",
		(addr_t)(page->physical_page_number * B_PAGE_SIZE));
	switch (page->State()) {
		case PAGE_STATE_ACTIVE:
			dprintf("A");
			break;
		case PAGE_STATE_INACTIVE:
			dprintf("I");
			break;
		case PAGE_STATE_MODIFIED:
			dprintf("M");
			break;
		case PAGE_STATE_CACHED:
			dprintf("C");
			break;
		case PAGE_STATE_FREE:
			dprintf("F");
			break;
		case PAGE_STATE_CLEAR:
			dprintf("L");
			break;
		case PAGE_STATE_WIRED:
			dprintf("W");
			break;
		case PAGE_STATE_UNUSED:
			dprintf("-");
			break;
	}
	dprintf(" ");
	if (page->busy)
		dprintf("B");
	else
		dprintf("-");

	if (page->busy_writing)
		dprintf("W");
	else
		dprintf("-");

	if (page->accessed)
		dprintf("A");
	else
		dprintf("-");

	if (page->modified)
		dprintf("M");
	else
		dprintf("-");

	if (page->unused)
		dprintf("U");
	else
		dprintf("-");

	dprintf(" usage:%3u", page->usage_count);
	dprintf(" wired:%5u", page->WiredCount());

	bool first = true;
	vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
	vm_page_mapping* mapping;
	while ((mapping = iterator.Next()) != NULL) {
		if (first) {
			dprintf(": ");
			first = false;
		} else
			dprintf(", ");

		dprintf("%" B_PRId32 " (%s)", mapping->area->id, mapping->area->name);
	}
}


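// Frees the page-table page `ppn` and, recursively, all lower-level tables
// reachable from it. For user maps the level-2 walk is limited to the user
// address range: the remaining level-2 entries alias the kernel's
// preallocated tables (see LookupPte()) and must not be freed here.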
static void
FreePageTable(page_num_t ppn, bool isKernel, uint32 level = 2)
{
	if (level > 0) {
		Pte* pte = (Pte*)VirtFromPhys(ppn * B_PAGE_SIZE);
		uint64 beg = 0;
		uint64 end = pteCount - 1;
		if (level == 2 && !isKernel) {
			beg = VirtAdrPte(USER_BASE, 2);
			end = VirtAdrPte(USER_TOP, 2);
		}
		for (uint64 i = beg; i <= end; i++) {
			if ((1 << pteValid) & pte[i].flags)
				FreePageTable(pte[i].ppn, isKernel, level - 1);
		}
	}
	vm_page* page = vm_lookup_page(ppn);
	DEBUG_PAGE_ACCESS_START(page);
	vm_page_set_state(page, PAGE_STATE_FREE);
}


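// Counts the page-table pages reachable from `ppn`, using the same traversal
// as FreePageTable(); used to initialize fPageTableSize.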
static uint64
GetPageTableSize(page_num_t ppn, bool isKernel, uint32 level = 2)
{
	if (ppn == 0)
		return 0;

	if (level == 0)
		return 1;

	uint64 size = 1;
	Pte* pte = (Pte*)VirtFromPhys(ppn * B_PAGE_SIZE);
	uint64 beg = 0;
	uint64 end = pteCount - 1;
	if (level == 2 && !isKernel) {
		beg = VirtAdrPte(USER_BASE, 2);
		end = VirtAdrPte(USER_TOP, 2);
	}
	for (uint64 i = beg; i <= end; i++) {
		if ((1 << pteValid) & pte[i].flags)
			size += GetPageTableSize(pte[i].ppn, isKernel, level - 1);
	}
	return size;
}


//#pragma mark RISCV64VMTranslationMap


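// Returns the level-0 PTE for `virtAdr`, or NULL if a covering table is
// missing and `alloc` is false. With `alloc` set, missing tables (including
// the root) are allocated from `reservation` as wired, cleared pages. A
// freshly created user root also inherits the kernel's level-2 entries, so
// kernel space stays visible in every address space.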
Pte*
RISCV64VMTranslationMap::LookupPte(addr_t virtAdr, bool alloc,
	vm_page_reservation* reservation)
{
	if (fPageTable == 0) {
		if (!alloc)
			return NULL;
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
		fPageTable = page->physical_page_number * B_PAGE_SIZE;
		if (fPageTable == 0)
			return NULL;
		DEBUG_PAGE_ACCESS_END(page);
		fPageTableSize++;
		if (!fIsKernel) {
			// Map kernel address space into user address space. Preallocated
			// kernel level-2 PTEs are reused.
			RISCV64VMTranslationMap* kernelMap = (RISCV64VMTranslationMap*)
				VMAddressSpace::Kernel()->TranslationMap();
			Pte *kernelPageTable = (Pte*)VirtFromPhys(kernelMap->PageTable());
			Pte *userPageTable = (Pte*)VirtFromPhys(fPageTable);
			for (uint64 i = VirtAdrPte(KERNEL_BASE, 2);
				i <= VirtAdrPte(KERNEL_TOP, 2); i++) {
				Pte *pte = &userPageTable[i];
				pte->ppn = kernelPageTable[i].ppn;
				pte->flags |= (1 << pteValid);
			}
		}
	}
	Pte *pte = (Pte*)VirtFromPhys(fPageTable);
	for (int level = 2; level > 0; level--) {
		pte += VirtAdrPte(virtAdr, level);
		if (!((1 << pteValid) & pte->flags)) {
			if (!alloc)
				return NULL;
			vm_page* page = vm_page_allocate_page(reservation,
				PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
			pte->ppn = page->physical_page_number;
			if (pte->ppn == 0)
				return NULL;
			DEBUG_PAGE_ACCESS_END(page);
			fPageTableSize++;
			pte->flags |= (1 << pteValid);
		}
		pte = (Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn);
	}
	pte += VirtAdrPte(virtAdr, 0);
	return pte;
}


phys_addr_t
RISCV64VMTranslationMap::LookupAddr(addr_t virtAdr)
{
	Pte* pte = LookupPte(virtAdr, false, NULL);
	if (pte == NULL || !((1 << pteValid) & pte->flags))
		return 0;
	if (fIsKernel != (((1 << pteUser) & pte->flags) == 0))
		return 0;
	return pte->ppn * B_PAGE_SIZE;
}


RISCV64VMTranslationMap::RISCV64VMTranslationMap(bool kernel,
	phys_addr_t pageTable):
	fIsKernel(kernel),
	fPageTable(pageTable),
	fPageTableSize(GetPageTableSize(pageTable / B_PAGE_SIZE, kernel)),
	fInvalidPagesCount(0),
	fInvalidCode(false)
{
	TRACE("+RISCV64VMTranslationMap(%p, %d, 0x%" B_PRIxADDR ")\n", this,
		kernel, pageTable);
	TRACE("  pageTableSize: %" B_PRIu64 "\n", fPageTableSize);
}


RISCV64VMTranslationMap::~RISCV64VMTranslationMap()
{
	TRACE("-RISCV64VMTranslationMap(%p)\n", this);
	TRACE("  pageTableSize: %" B_PRIu64 "\n", fPageTableSize);
	TRACE("  GetPageTableSize(): %" B_PRIu64 "\n",
		GetPageTableSize(fPageTable / B_PAGE_SIZE, fIsKernel));

	ASSERT_ALWAYS(!fIsKernel);
	// Can't delete currently used page table
	ASSERT_ALWAYS(::Satp() != Satp());

	FreePageTable(fPageTable / B_PAGE_SIZE, fIsKernel);
}


bool
RISCV64VMTranslationMap::Lock()
{
	TRACE("RISCV64VMTranslationMap::Lock()\n");
	recursive_lock_lock(&fLock);
	return true;
}


void
RISCV64VMTranslationMap::Unlock()
{
	TRACE("RISCV64VMTranslationMap::Unlock()\n");
	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we're about to release it for the last time
		Flush();
	}
	recursive_lock_unlock(&fLock);
}


addr_t
RISCV64VMTranslationMap::MappedSize() const
{
	NOT_IMPLEMENTED_PANIC();
	return 0;
}


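// A worked example, assuming Sv39 values (pteCount == 512,
// B_PAGE_SIZE == 4 KiB): level0Range = 2 MiB, level1Range = 1 GiB,
// level2Range = 512 GiB. Per level the code counts how many table ranges
// [start, end] touches; e.g. a 4 MiB range can straddle three 2 MiB level-0
// ranges but typically one level-1 and one level-2 range, so up to
// 3 + 1 + 1 = 5 page-table pages may have to be allocated for it.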
size_t
RISCV64VMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	enum {
		level0Range = (uint64_t)B_PAGE_SIZE * pteCount,
		level1Range = (uint64_t)level0Range * pteCount,
		level2Range = (uint64_t)level1Range * pteCount,
	};

	if (start == 0) {
		start = (level2Range) - B_PAGE_SIZE;
		end += start;
	}

	size_t requiredLevel2 = end / level2Range + 1 - start / level2Range;
	size_t requiredLevel1 = end / level1Range + 1 - start / level1Range;
	size_t requiredLevel0 = end / level0Range + 1 - start / level0Range;

	return requiredLevel2 + requiredLevel1 + requiredLevel0;
}


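// Maps a single page, translating the Haiku protection attributes into the
// RISC-V R/W/X PTE bits (plus U for user mappings). Creating an executable
// kernel mapping sets fInvalidCode so that the next Flush() also performs
// the instruction-fence handshake.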
status_t
RISCV64VMTranslationMap::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
	uint32 attributes, uint32 memoryType,
	vm_page_reservation* reservation)
{
	TRACE("RISCV64VMTranslationMap::Map(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
		")\n", virtualAddress, physicalAddress);

	ThreadCPUPinner pinner(thread_get_current_thread());

	Pte* pte = LookupPte(virtualAddress, true, reservation);
	if (pte == NULL)
		panic("can't allocate page table");

	Pte newPte;
	newPte.ppn = physicalAddress / B_PAGE_SIZE;
	newPte.flags = (1 << pteValid);

	if ((attributes & B_USER_PROTECTION) != 0) {
		newPte.flags |= (1 << pteUser);
		if ((attributes & B_READ_AREA) != 0)
			newPte.flags |= (1 << pteRead);
		if ((attributes & B_WRITE_AREA) != 0)
			newPte.flags |= (1 << pteWrite);
		if ((attributes & B_EXECUTE_AREA) != 0)
			newPte.flags |= (1 << pteExec);
	} else {
		if ((attributes & B_KERNEL_READ_AREA) != 0)
			newPte.flags |= (1 << pteRead);
		if ((attributes & B_KERNEL_WRITE_AREA) != 0)
			newPte.flags |= (1 << pteWrite);
		if ((attributes & B_KERNEL_EXECUTE_AREA) != 0) {
			newPte.flags |= (1 << pteExec);
			fInvalidCode = true;
		}
	}

	*pte = newPte;

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return B_OK;
}


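// Unmaps the range by atomically exchanging each PTE with zero, so
// accessed/dirty bits set concurrently by other CPUs aren't lost. A TLB
// shootdown is only queued for entries whose accessed bit was set: an entry
// that was never used is assumed not to be cached in any TLB.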
status_t
RISCV64VMTranslationMap::Unmap(addr_t start, addr_t end)
{
	TRACE("RISCV64VMTranslationMap::Unmap(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
		")\n", start, end);

	ThreadCPUPinner pinner(thread_get_current_thread());

	for (addr_t page = start; page < end; page += B_PAGE_SIZE) {
		Pte* pte = LookupPte(page, false, NULL);
		if (pte != NULL) {
			fMapCount--;
			Pte oldPte((uint64)atomic_get_and_set64((int64*)&pte->val, 0));
			if ((oldPte.flags & (1 << pteAccessed)) != 0)
				InvalidatePage(page);
		}
	}
	return B_OK;
}


status_t
RISCV64VMTranslationMap::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


/*
Things that need to be done when unmapping VMArea pages:
	update vm_page::accessed, modified
	MMIO pages:
		just unmap
	wired pages:
		decrement wired count
	non-wired pages:
		remove from VMArea and vm_page `mappings` list
	wired and non-wired pages:
		vm_page_set_state
*/

status_t
RISCV64VMTranslationMap::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	TRACE("RISCV64VMTranslationMap::UnmapPage(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", %d)\n", (addr_t)area, area->name, address,
		updatePageQueue);

	ThreadCPUPinner pinner(thread_get_current_thread());

	Pte* pte = LookupPte(address, false, NULL);
	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
		return B_ENTRY_NOT_FOUND;

	RecursiveLocker locker(fLock);

	Pte oldPte((uint64)atomic_get_and_set64((int64*)&pte->val, 0));
	fMapCount--;
	pinner.Unlock();

	if ((oldPte.flags & (1 << pteAccessed)) != 0)
		InvalidatePage(address);

	Flush();

	locker.Detach(); // PageUnmapped takes ownership
	PageUnmapped(area, oldPte.ppn, ((1 << pteAccessed) & oldPte.flags) != 0,
		((1 << pteDirty) & oldPte.flags) != 0, updatePageQueue);
	return B_OK;
}


void
RISCV64VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	TRACE("RISCV64VMTranslationMap::UnmapPages(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d)\n", (addr_t)area,
		area->name, base, size, updatePageQueue);

	if (size == 0)
		return;

	addr_t end = base + size - 1;

	VMAreaMappings queue;
	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	for (addr_t start = base; start < end; start += B_PAGE_SIZE) {
		Pte* pte = LookupPte(start, false, NULL);
		if (pte == NULL)
			continue;

		Pte oldPte((uint64)atomic_get_and_set64((int64*)&pte->val, 0));
		if ((oldPte.flags & (1 << pteValid)) == 0)
			continue;

		fMapCount--;

		if ((oldPte.flags & (1 << pteAccessed)) != 0)
			InvalidatePage(start);

		if (area->cache_type != CACHE_TYPE_DEVICE) {
			// get the page
			vm_page* page = vm_lookup_page(oldPte.ppn);
			ASSERT(page != NULL);
			if (false) {
				// debugging output, disabled by default
				WriteVmPage(page); dprintf("\n");
			}

			DEBUG_PAGE_ACCESS_START(page);

			// transfer the accessed/dirty flags to the page
			if ((oldPte.flags & (1 << pteAccessed)) != 0)
				page->accessed = true;
			if ((oldPte.flags & (1 << pteDirty)) != 0)
				page->modified = true;

			// remove the mapping object/decrement the wired_count of the
			// page
			if (area->wiring == B_NO_LOCK) {
				vm_page_mapping* mapping = NULL;
				vm_page_mappings::Iterator iterator
					= page->mappings.GetIterator();
				while ((mapping = iterator.Next()) != NULL) {
					if (mapping->area == area)
						break;
				}

				ASSERT(mapping != NULL);

				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				queue.Add(mapping);
			} else
				page->DecrementWiredCount();

			if (!page->IsMapped()) {
				atomic_add(&gMappedPagesCount, -1);

				if (updatePageQueue) {
					if (page->Cache()->temporary)
						vm_page_set_state(page, PAGE_STATE_INACTIVE);
					else if (page->modified)
						vm_page_set_state(page, PAGE_STATE_MODIFIED);
					else
						vm_page_set_state(page, PAGE_STATE_CACHED);
				}
			}

			DEBUG_PAGE_ACCESS_END(page);
		}

		// flush explicitly, since we directly use the lock
		Flush();
	}

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical here, as in all cases where this method is used, the
	// unmapped area range is unmapped for good (resized/cut) and the pages
	// will likely be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);

	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


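// Unmaps an entire area. Device and wired areas take the page-walk based
// UnmapPages() path; for ordinary areas it is cheaper to iterate the area's
// own mapping list and look up each mapped page's PTE directly.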
void
RISCV64VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	TRACE("RISCV64VMTranslationMap::UnmapArea(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d, %d)\n", (addr_t)area,
		area->name, area->Base(), area->Size(), deletingAddressSpace,
		ignoreTopCachePageFlags);

	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		UnmapPages(area, area->Base(), area->Size(), true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {

		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			Pte* pte = LookupPte(address, false, NULL);
			if (pte == NULL || ((1 << pteValid) & pte->flags) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), "
					"but has no page table", page, area, address);
				continue;
			}

			Pte oldPte((uint64)atomic_get_and_set64((int64*)&pte->val, 0));

			// transfer the accessed/dirty flags to the page and
			// invalidate the mapping, if necessary
			if (((1 << pteAccessed) & oldPte.flags) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if (((1 << pteDirty) & oldPte.flags) != 0)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);

	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


status_t
RISCV64VMTranslationMap::Query(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	*_flags = 0;
	*_physicalAddress = 0;

	ThreadCPUPinner pinner(thread_get_current_thread());

	if (fPageTable == 0)
		return B_OK;

	Pte* pte = LookupPte(virtualAddress, false, NULL);
	if (pte == NULL)
		return B_OK;

	Pte pteVal = *pte;
	*_physicalAddress = pteVal.ppn * B_PAGE_SIZE;

	if (((1 << pteValid)    & pteVal.flags) != 0)
		*_flags |= PAGE_PRESENT;
	if (((1 << pteDirty)    & pteVal.flags) != 0)
		*_flags |= PAGE_MODIFIED;
	if (((1 << pteAccessed) & pteVal.flags) != 0)
		*_flags |= PAGE_ACCESSED;
	if (((1 << pteUser) & pteVal.flags) != 0) {
		if (((1 << pteRead)  & pteVal.flags) != 0)
			*_flags |= B_READ_AREA;
		if (((1 << pteWrite) & pteVal.flags) != 0)
			*_flags |= B_WRITE_AREA;
		if (((1 << pteExec)  & pteVal.flags) != 0)
			*_flags |= B_EXECUTE_AREA;
	} else {
		if (((1 << pteRead)  & pteVal.flags) != 0)
			*_flags |= B_KERNEL_READ_AREA;
		if (((1 << pteWrite) & pteVal.flags) != 0)
			*_flags |= B_KERNEL_WRITE_AREA;
		if (((1 << pteExec)  & pteVal.flags) != 0)
			*_flags |= B_KERNEL_EXECUTE_AREA;
	}

	return B_OK;
}


status_t
RISCV64VMTranslationMap::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	return Query(virtualAddress, _physicalAddress, _flags);
}


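// Changes the protection of [base, top): the permission bits of every valid
// PTE are rebuilt from `attributes` while the valid, accessed and dirty bits
// are preserved.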
status_t
RISCV64VMTranslationMap::Protect(addr_t base, addr_t top, uint32 attributes,
	uint32 memoryType)
{
	TRACE("RISCV64VMTranslationMap::Protect(0x%" B_PRIxADDR ", 0x%"
		B_PRIxADDR ")\n", base, top);

	ThreadCPUPinner pinner(thread_get_current_thread());

	for (addr_t page = base; page < top; page += B_PAGE_SIZE) {

		Pte* pte = LookupPte(page, false, NULL);
		if (pte == NULL || ((1 << pteValid) & pte->flags) == 0) {
			TRACE("attempt to protect unmapped page: 0x%"
				B_PRIxADDR "\n", page);
			continue;
		}

		Pte oldPte = *pte;
		Pte newPte = oldPte;
		newPte.flags &= (1 << pteValid)
			| (1 << pteAccessed) | (1 << pteDirty);

		if ((attributes & B_USER_PROTECTION) != 0) {
			newPte.flags |= (1 << pteUser);
			if ((attributes & B_READ_AREA)    != 0)
				newPte.flags |= (1 << pteRead);
			if ((attributes & B_WRITE_AREA)   != 0)
				newPte.flags |= (1 << pteWrite);
			if ((attributes & B_EXECUTE_AREA) != 0) {
				newPte.flags |= (1 << pteExec);
				fInvalidCode = true;
			}
		} else {
			if ((attributes & B_KERNEL_READ_AREA)    != 0)
				newPte.flags |= (1 << pteRead);
			if ((attributes & B_KERNEL_WRITE_AREA)   != 0)
				newPte.flags |= (1 << pteWrite);
			if ((attributes & B_KERNEL_EXECUTE_AREA) != 0) {
				newPte.flags |= (1 << pteExec);
				fInvalidCode = true;
			}
		}
		*pte = newPte;

		if ((oldPte.flags & (1 << pteAccessed)) != 0)
			InvalidatePage(page);
	}

	return B_OK;
}


status_t
RISCV64VMTranslationMap::ProtectPage(VMArea* area, addr_t address,
	uint32 attributes)
{
	NOT_IMPLEMENTED_PANIC();
	return B_OK;
}


status_t
RISCV64VMTranslationMap::ProtectArea(VMArea* area, uint32 attributes)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


static inline uint32
ConvertAccessedFlags(uint32 flags)
{
	return ((flags & PAGE_MODIFIED) ? (1 << pteDirty   ) : 0)
		| ((flags & PAGE_ACCESSED) ? (1 << pteAccessed) : 0);
}


status_t
RISCV64VMTranslationMap::SetFlags(addr_t address, uint32 flags)
{
	ThreadCPUPinner pinner(thread_get_current_thread());
	Pte* pte = LookupPte(address, false, NULL);
	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
		return B_OK;
	pte->flags |= ConvertAccessedFlags(flags);
	FlushTlbPage(address);
	return B_OK;
}


status_t
RISCV64VMTranslationMap::ClearFlags(addr_t address, uint32 flags)
{
	ThreadCPUPinner pinner(thread_get_current_thread());

	Pte* pte = LookupPte(address, false, NULL);
	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
		return B_OK;

	pte->flags &= ~ConvertAccessedFlags(flags);
	InvalidatePage(address);
	return B_OK;
}


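// Clears the accessed and modified flags of the page at `address` and
// returns whether it had been accessed. With `unmapIfUnaccessed` the entry
// is removed entirely (atomic swap against the observed value) when its
// accessed bit turns out to be clear; the loop retries if the PTE changed
// concurrently between the read and the swap.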
bool
RISCV64VMTranslationMap::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	TRACE("RISCV64VMTranslationMap::ClearAccessedAndModified(0x%"
		B_PRIxADDR "(%s), 0x%" B_PRIxADDR ", %d)\n", (addr_t)area,
		area->name, address, unmapIfUnaccessed);

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	Pte* pte = LookupPte(address, false, NULL);
	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
		return false;

	Pte oldPte;
	if (unmapIfUnaccessed) {
		for (;;) {
			oldPte = *pte;
			if (((1 << pteValid) & oldPte.flags) == 0)
				return false;

			if (((1 << pteAccessed) & oldPte.flags) != 0) {
				oldPte.val = atomic_and64((int64*)&pte->val,
					~((1 << pteAccessed) | (1 << pteDirty)));
				break;
			}
			if (atomic_test_and_set64((int64*)&pte->val, 0, oldPte.val)
					== (int64)oldPte.val) {
				break;
			}
		}
	} else {
		oldPte.val = atomic_and64((int64*)&pte->val,
			~((1 << pteAccessed) | (1 << pteDirty)));
	}

	pinner.Unlock();
	_modified = ((1 << pteDirty) & oldPte.flags) != 0;
	if (((1 << pteAccessed) & oldPte.flags) != 0) {
		InvalidatePage(address);
		Flush();
		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	fMapCount--;

	locker.Detach(); // UnaccessedPageUnmapped takes ownership
	UnaccessedPageUnmapped(area, oldPte.ppn);
	return false;
}


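// Publishes pending TLB invalidations to all CPUs this map is active on.
// Up to PAGE_INVALIDATE_CACHE_SIZE pages are invalidated individually
// (locally, then via ICI on the other CPUs); beyond that the whole user or
// global TLB is flushed instead. If executable mappings changed, FENCE.I is
// executed locally and, on SBI platforms, requested on the other
// participating harts as well.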
void
RISCV64VMTranslationMap::Flush()
{
	// copy of X86VMTranslationMap::Flush
	// TODO: move to common VMTranslationMap class

	if (fInvalidPagesCount <= 0)
		return;
/*
	dprintf("+Flush(%p)\n", this);
	struct ScopeExit {
		~ScopeExit()
		{
			dprintf("-Flush(%p)\n", this);
		}
	} scopeExit;
*/
	ThreadCPUPinner pinner(thread_get_current_thread());

	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernel) {
			arch_cpu_global_TLB_invalidate();

			// dprintf("+smp_send_broadcast_ici\n");
			smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
			// dprintf("-smp_send_broadcast_ici\n");

		} else {
			cpu_status state = disable_interrupts();
			arch_cpu_user_TLB_invalidate();
			restore_interrupts(state);

			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = fActiveOnCpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				// dprintf("+smp_send_multicast_ici\n");
				smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
					0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
				// dprintf("-smp_send_multicast_ici\n");
			}
		}
	} else {
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);

		if (fIsKernel) {
			// dprintf("+smp_send_broadcast_ici\n");
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
				SMP_MSG_FLAG_SYNC);
			// dprintf("-smp_send_broadcast_ici\n");
		} else {
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = fActiveOnCpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				// dprintf("+smp_send_multicast_ici\n");
				smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
					(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
					SMP_MSG_FLAG_SYNC);
				// dprintf("-smp_send_multicast_ici\n");
			}
		}
	}
	fInvalidPagesCount = 0;

	if (fInvalidCode) {
		FenceI();

		int cpu = smp_get_current_cpu();
		CPUSet cpuMask = fActiveOnCpus;
		cpuMask.ClearBit(cpu);

		if (!cpuMask.IsEmpty()) {
			switch (gPlatform) {
				case kPlatformSbi: {
					uint64 hartMask = 0;
					int32 cpuCount = smp_get_num_cpus();
					for (int32 i = 0; i < cpuCount; i++) {
						if (cpuMask.GetBit(i))
							hartMask |= (uint64)1 << gCPU[i].arch.hartId;
					}
					// TODO: handle hart ID >= 64
					memory_full_barrier();
					sbi_remote_fence_i(hartMask, 0);
					break;
				}
			}
		}
		fInvalidCode = false;
	}
}


void
RISCV64VMTranslationMap::DebugPrintMappingInfo(addr_t virtualAddress)
{
	NOT_IMPLEMENTED_PANIC();
}


bool
RISCV64VMTranslationMap::DebugGetReverseMappingInfo(phys_addr_t physicalAddress,
	ReverseMappingInfoCallback& callback)
{
	NOT_IMPLEMENTED_PANIC();
	return false;
}


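// Copies into this address space without switching to it: every target page
// is translated with LookupAddr() and written through the kernel's physical
// memory mapping (VirtFromPhys()). Unlike MemcpyFromMap() below, no page
// fault is triggered for unmapped pages; B_BAD_ADDRESS is returned instead.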
status_t
RISCV64VMTranslationMap::MemcpyToMap(addr_t to, const char *from, size_t size)
{
	TRACE("RISCV64VMTranslationMap::MemcpyToMap(0x%" B_PRIxADDR ", 0x%"
		B_PRIxADDR ", %" B_PRIuSIZE ")\n", to, (addr_t)from, size);

	while (size > 0) {
		uint64 va0 = ROUNDDOWN(to, B_PAGE_SIZE);
		uint64 pa0 = LookupAddr(va0);
		TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%" B_PRIxADDR "\n",
			va0, pa0);

		if (pa0 == 0) {
			TRACE("[!] not mapped: 0x%" B_PRIxADDR "\n", va0);
			return B_BAD_ADDRESS;
		}

		uint64 n = B_PAGE_SIZE - (to - va0);
		if (n > size)
			n = size;

		memcpy(VirtFromPhys(pa0 + (to - va0)), from, n);

		size -= n;
		from += n;
		to = va0 + B_PAGE_SIZE;
	}
	return B_OK;
}


status_t
RISCV64VMTranslationMap::MemcpyFromMap(char *to, addr_t from, size_t size)
{
	TRACE("RISCV64VMTranslationMap::MemcpyFromMap(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n",
		(addr_t)to, from, size);

	while (size > 0) {
		uint64 va0 = ROUNDDOWN(from, B_PAGE_SIZE);
		uint64 pa0 = LookupAddr(va0);
		if (pa0 == 0) {
			TRACE("[!] not mapped: 0x%" B_PRIxADDR
				", calling page fault handler\n", va0);

			addr_t newIP;
			vm_page_fault(va0, Ra(), true, false, true, &newIP);

			pa0 = LookupAddr(va0);
			TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%"
				B_PRIxADDR "\n", va0, pa0);

			if (pa0 == 0)
				return B_BAD_ADDRESS;
		}
		uint64 n = B_PAGE_SIZE - (from - va0);
		if (n > size)
			n = size;

		memcpy(to, VirtFromPhys(pa0 + (from - va0)), n);

		size -= n;
		to += n;
		from = va0 + B_PAGE_SIZE;
	}

	return B_OK;
}


status_t
RISCV64VMTranslationMap::MemsetToMap(addr_t to, char c, size_t count)
{
	TRACE("RISCV64VMTranslationMap::MemsetToMap(0x%" B_PRIxADDR
		", %d, %" B_PRIuSIZE ")\n", to, c, count);

	while (count > 0) {
		uint64 va0 = ROUNDDOWN(to, B_PAGE_SIZE);
		uint64 pa0 = LookupAddr(va0);
		TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%" B_PRIxADDR "\n",
			va0, pa0);

		if (pa0 == 0) {
			TRACE("[!] not mapped: 0x%" B_PRIxADDR
				", calling page fault handler\n", va0);
			addr_t newIP;
			vm_page_fault(va0, Ra(), true, false, true, &newIP);
			pa0 = LookupAddr(va0);
			TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%"
				B_PRIxADDR "\n", va0, pa0);

			if (pa0 == 0)
				return B_BAD_ADDRESS;
		}

		uint64 n = B_PAGE_SIZE - (to - va0);
		if (n > count)
			n = count;

		memset(VirtFromPhys(pa0 + (to - va0)), c, n);

		count -= n;
		to = va0 + B_PAGE_SIZE;
	}
	return B_OK;
}


ssize_t
RISCV64VMTranslationMap::StrlcpyFromMap(char *to, addr_t from, size_t size)
{
	// Note: reads through the current address space directly instead of
	// translating page by page like MemcpyFromMap().
	return strlcpy(to, (const char*)from, size);
}


ssize_t
RISCV64VMTranslationMap::StrlcpyToMap(addr_t to, const char *from, size_t size)
{
	ssize_t len = strlen(from) + 1;
	if ((size_t)len > size)
		len = size;

	if (MemcpyToMap(to, from, len) < B_OK)
		return 0;

	return len;
}


//#pragma mark RISCV64VMPhysicalPageMapper


RISCV64VMPhysicalPageMapper::RISCV64VMPhysicalPageMapper()
{
	TRACE("+RISCV64VMPhysicalPageMapper\n");
}


RISCV64VMPhysicalPageMapper::~RISCV64VMPhysicalPageMapper()
{
	TRACE("-RISCV64VMPhysicalPageMapper\n");
}


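// All of physical memory is expected to be permanently mapped into the
// kernel address space (see VirtFromPhys()), so mapping a physical page is
// a plain address translation and the handle is a dummy.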
status_t
RISCV64VMPhysicalPageMapper::GetPage(phys_addr_t physicalAddress,
	addr_t* _virtualAddress, void** _handle)
{
	*_virtualAddress = (addr_t)VirtFromPhys(physicalAddress);
	*_handle = (void*)1;
	return B_OK;
}


status_t
RISCV64VMPhysicalPageMapper::PutPage(addr_t virtualAddress, void* handle)
{
	return B_OK;
}


status_t
RISCV64VMPhysicalPageMapper::GetPageCurrentCPU(phys_addr_t physicalAddress,
	addr_t* _virtualAddress, void** _handle)
{
	return GetPage(physicalAddress, _virtualAddress, _handle);
}


status_t
RISCV64VMPhysicalPageMapper::PutPageCurrentCPU(addr_t virtualAddress,
	void* _handle)
{
	return PutPage(virtualAddress, _handle);
}


status_t
RISCV64VMPhysicalPageMapper::GetPageDebug(phys_addr_t physicalAddress,
	addr_t* _virtualAddress, void** _handle)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


status_t
RISCV64VMPhysicalPageMapper::PutPageDebug(addr_t virtualAddress, void* handle)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


status_t
RISCV64VMPhysicalPageMapper::MemsetPhysical(phys_addr_t address, int value,
	phys_size_t length)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemsetPhysical(0x%" B_PRIxADDR
		", 0x%x, 0x%" B_PRIxADDR ")\n", address, value, length);
	return user_memset(VirtFromPhys(address), value, length);
}


status_t
RISCV64VMPhysicalPageMapper::MemcpyFromPhysical(void* to, phys_addr_t from,
	size_t length, bool user)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyFromPhysical(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n", (addr_t)to,
		from, length);
	return user_memcpy(to, VirtFromPhys(from), length);
}


status_t
RISCV64VMPhysicalPageMapper::MemcpyToPhysical(phys_addr_t to, const void* from,
	size_t length, bool user)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyToPhysical(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n", to, (addr_t)from,
		length);
	return user_memcpy(VirtFromPhys(to), from, length);
}


void
RISCV64VMPhysicalPageMapper::MemcpyPhysicalPage(phys_addr_t to,
	phys_addr_t from)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyPhysicalPage(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ")\n", to, from);
	user_memcpy(VirtFromPhys(to), VirtFromPhys(from), B_PAGE_SIZE);
}
1201