xref: /haiku/src/system/kernel/arch/riscv64/RISCV64VMTranslationMap.cpp (revision 4a32f48e70297d9a634646f01e08c2f451ecd6bd)
1 /*
2  * Copyright 2020-2021, Haiku, Inc. All rights reserved.
3  * Distributed under the terms of the MIT License.
4  *
5  * Authors:
6  *   X512 <danger_mail@list.ru>
7  */
8 
9 
10 #include "RISCV64VMTranslationMap.h"
11 
12 #include <kernel.h>
13 #include <vm/vm_priv.h>
14 #include <vm/vm_page.h>
15 #include <vm/VMAddressSpace.h>
16 #include <vm/VMCache.h>
17 #include <slab/Slab.h>
18 
19 #include <util/AutoLock.h>
20 
21 
22 //#define DISABLE_MODIFIED_FLAGS 1
23 
24 //#define DO_TRACE
25 #ifdef DO_TRACE
26 #	define TRACE(x...) dprintf(x)
27 #else
28 #	define TRACE(x...) ;
29 #endif
30 
31 #define NOT_IMPLEMENTED_PANIC() \
32 	panic("not implemented: %s\n", __PRETTY_FUNCTION__)
33 
34 
35 static void
36 FreePageTable(page_num_t ppn, bool isKernel, uint32 level = 2)
37 {
38 	if (level > 0) {
39 		Pte* pte = (Pte*)VirtFromPhys(ppn * B_PAGE_SIZE);
40 		uint64 beg = 0;
41 		uint64 end = pteCount - 1;
42 		if (level == 2 && !isKernel) {
43 			beg = VirtAdrPte(USER_BASE, 2);
44 			end = VirtAdrPte(USER_TOP, 2);
45 		}
46 		for (uint64 i = beg; i <= end; i++) {
47 			if ((1 << pteValid) & pte[i].flags)
48 				FreePageTable(pte[i].ppn, isKernel, level - 1);
49 		}
50 	}
51 	vm_page* page = vm_lookup_page(ppn);
52 	vm_page_set_state(page, PAGE_STATE_FREE);
53 }
54 
55 
56 static uint64
57 GetPageTableSize(page_num_t ppn, bool isKernel, uint32 level = 2)
58 {
59 	if (ppn == 0)
60 		return 0;
61 
62 	if (level == 0)
63 		return 1;
64 
65 	uint64 size = 1;
66 	Pte* pte = (Pte*)VirtFromPhys(ppn * B_PAGE_SIZE);
67 	uint64 beg = 0;
68 	uint64 end = pteCount - 1;
69 	if (level == 2 && !isKernel) {
70 		beg = VirtAdrPte(USER_BASE, 2);
71 		end = VirtAdrPte(USER_TOP, 2);
72 	}
73 	for (uint64 i = beg; i <= end; i++) {
74 		if ((1 << pteValid) & pte[i].flags)
75 			size += GetPageTableSize(pte[i].ppn, isKernel, level - 1);
76 	}
77 	return size;
78 }
79 
80 
81 //#pragma mark RISCV64VMTranslationMap
82 
83 
Pte*
RISCV64VMTranslationMap::LookupPte(addr_t virtAdr, bool alloc,
	vm_page_reservation* reservation)
{
	// Walk the page table tree to the leaf (level 0) PTE of `virtAdr`.
	// When `alloc` is true, missing table pages are allocated from
	// `reservation` (wired and cleared); otherwise NULL is returned at
	// the first missing level.
	if (fPageTable == 0) {
		if (!alloc)
			return NULL;
		// Lazily create the root table page.
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
		fPageTable = page->physical_page_number * B_PAGE_SIZE;
		if (fPageTable == 0)
			return NULL;
		fPageTableSize++;
		if (!fIsKernel) {
			// Map kernel address space into user address space. Preallocated
			// kernel level-2 PTEs are reused.
			RISCV64VMTranslationMap* kernelMap = (RISCV64VMTranslationMap*)
				VMAddressSpace::Kernel()->TranslationMap();
			Pte *kernelPageTable = (Pte*)VirtFromPhys(kernelMap->PageTable());
			Pte *userPageTable = (Pte*)VirtFromPhys(fPageTable);
			for (uint64 i = VirtAdrPte(KERNEL_BASE, 2);
				i <= VirtAdrPte(KERNEL_TOP, 2); i++) {
				Pte *pte = &userPageTable[i];
				pte->ppn = kernelPageTable[i].ppn;
				pte->flags |= (1 << pteValid);
			}
		}
	}
	// Descend from the root (level 2) through level 1; the final
	// level-0 entry is indexed after the loop.
	Pte *pte = (Pte*)VirtFromPhys(fPageTable);
	for (int level = 2; level > 0; level--) {
		pte += VirtAdrPte(virtAdr, level);
		if (!((1 << pteValid) & pte->flags)) {
			if (!alloc)
				return NULL;
			vm_page* page = vm_page_allocate_page(reservation,
				PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
			pte->ppn = page->physical_page_number;
			if (pte->ppn == 0)
				return NULL;
			fPageTableSize++;
			// Intermediate entries carry only the valid bit; leaf
			// permissions are set by the caller (e.g. Map()).
			pte->flags |= (1 << pteValid);
		}
		pte = (Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn);
	}
	pte += VirtAdrPte(virtAdr, 0);
	return pte;
}
131 
132 
133 phys_addr_t
134 RISCV64VMTranslationMap::LookupAddr(addr_t virtAdr)
135 {
136 	Pte* pte = LookupPte(virtAdr, false, NULL);
137 	if (pte == NULL || !((1 << pteValid) & pte->flags))
138 		return 0;
139 	if (fIsKernel != (((1 << pteUser) & pte->flags) == 0))
140 		return 0;
141 	return pte->ppn * B_PAGE_SIZE;
142 }
143 
144 
RISCV64VMTranslationMap::RISCV64VMTranslationMap(bool kernel,
	phys_addr_t pageTable):
	fIsKernel(kernel),
	fPageTable(pageTable),
	fPageTableSize(GetPageTableSize(pageTable / B_PAGE_SIZE, kernel))
{
	// `pageTable` may be 0 (empty map; the root table is then allocated
	// lazily by LookupPte()) or the physical address of an existing root
	// table, whose page count is recomputed here.
	TRACE("+RISCV64VMTranslationMap(%p, %d, 0x%" B_PRIxADDR ")\n", this,
		kernel, pageTable);
	TRACE("  pageTableSize: %" B_PRIu64 "\n", fPageTableSize);
}
155 
156 
RISCV64VMTranslationMap::~RISCV64VMTranslationMap()
{
	TRACE("-RISCV64VMTranslationMap(%p)\n", this);
	TRACE("  pageTableSize: %" B_PRIu64 "\n", fPageTableSize);
	TRACE("  GetPageTableSize(): %" B_PRIu64 "\n",
		GetPageTableSize(fPageTable / B_PAGE_SIZE, fIsKernel));

	// Only user maps are ever destroyed; the kernel map lives forever.
	ASSERT_ALWAYS(!fIsKernel);
	// Can't delete currently used page table
	ASSERT_ALWAYS(::Satp() != Satp());

	FreePageTable(fPageTable / B_PAGE_SIZE, fIsKernel);
}
170 
171 
bool
RISCV64VMTranslationMap::Lock()
{
	// Acquire the map's recursive lock; always reports success.
	TRACE("RISCV64VMTranslationMap::Lock()\n");
	recursive_lock_lock(&fLock);
	return true;
}
179 
180 
181 void
182 RISCV64VMTranslationMap::Unlock()
183 {
184 	TRACE("RISCV64VMTranslationMap::Unlock()\n");
185 	if (recursive_lock_get_recursion(&fLock) == 1) {
186 		// we're about to release it for the last time
187 		Flush();
188 	}
189 	recursive_lock_unlock(&fLock);
190 }
191 
192 
addr_t
RISCV64VMTranslationMap::MappedSize() const
{
	// Not yet implemented for RISC-V; panics when called.
	NOT_IMPLEMENTED_PANIC();
	return 0;
}
199 
200 
size_t
RISCV64VMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// Upper bound on the number of page table pages LookupPte() might
	// have to allocate to map [start, end]: for each table level, count
	// how many ranges of that level's span the interval touches.
	enum {
		level0Range = (uint64_t)B_PAGE_SIZE * pteCount,
		level1Range = (uint64_t)level0Range * pteCount,
		level2Range = (uint64_t)level1Range * pteCount,
	};

	if (start == 0) {
		// Placement unknown: assume the worst case of an interval that
		// begins one page before a level-2 boundary, so it straddles
		// the maximum number of ranges at every level.
		start = (level2Range) - B_PAGE_SIZE;
		end += start;
	}

	size_t requiredLevel2 = end / level2Range + 1 - start / level2Range;
	size_t requiredLevel1 = end / level1Range + 1 - start / level1Range;
	size_t requiredLevel0 = end / level0Range + 1 - start / level0Range;

	return requiredLevel2 + requiredLevel1 + requiredLevel0;
}
221 
222 
223 status_t
224 RISCV64VMTranslationMap::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
225 	uint32 attributes, uint32 memoryType,
226 	vm_page_reservation* reservation)
227 {
228 	TRACE("RISCV64VMTranslationMap::Map(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
229 		")\n", virtualAddress, physicalAddress);
230 
231 	ThreadCPUPinner pinner(thread_get_current_thread());
232 
233 	Pte* pte = LookupPte(virtualAddress, true, reservation);
234 	if (pte == NULL)
235 		panic("can't allocate page table");
236 
237 	pte->ppn = physicalAddress / B_PAGE_SIZE;
238 	pte->flags = 0;
239 	if ((attributes & B_USER_PROTECTION) != 0) {
240 		pte->flags |= (1 << pteUser);
241 		if ((attributes & B_READ_AREA) != 0)
242 			pte->flags |= (1 << pteRead);
243 		if ((attributes & B_WRITE_AREA) != 0)
244 			pte->flags |= (1 << pteWrite);
245 		if ((attributes & B_EXECUTE_AREA) != 0)
246 			pte->flags |= (1 << pteExec);
247 	} else {
248 		if ((attributes & B_KERNEL_READ_AREA) != 0)
249 			pte->flags |= (1 << pteRead);
250 		if ((attributes & B_KERNEL_WRITE_AREA) != 0)
251 			pte->flags |= (1 << pteWrite);
252 		if ((attributes & B_KERNEL_EXECUTE_AREA) != 0)
253 			pte->flags |= (1 << pteExec);
254 	}
255 
256 	pte->flags |= (1 << pteValid)
257 #ifdef DISABLE_MODIFIED_FLAGS
258 		| (1 << pteAccessed) | (1 << pteDirty)
259 #endif
260 	;
261 
262 	FlushTlbPage(virtualAddress);
263 
264 	fMapCount++;
265 
266 	return B_OK;
267 }
268 
269 
270 status_t
271 RISCV64VMTranslationMap::Unmap(addr_t start, addr_t end)
272 {
273 	TRACE("RISCV64VMTranslationMap::Unmap(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
274 		")\n", start, end);
275 
276 	ThreadCPUPinner pinner(thread_get_current_thread());
277 
278 	for (addr_t page = start; page < end; page += B_PAGE_SIZE) {
279 		Pte* pte = LookupPte(page, false, NULL);
280 		if (pte != NULL) {
281 			fMapCount--;
282 			pte->flags = 0;
283 			pte->ppn = 0;
284 			FlushTlbPage(page);
285 		}
286 	}
287 	return B_OK;
288 }
289 
290 
status_t
RISCV64VMTranslationMap::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	// Not yet implemented for RISC-V; panics when called.
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}
298 
299 
/*
Steps that must happen when unmapping VMArea pages:
	update vm_page::accessed, modified
	MMIO pages:
		just unmap
	wired pages:
		decrement wired count
	non-wired pages:
		remove from VMArea and vm_page `mappings` list
	wired and non-wired pages:
		vm_page_set_state
*/
312 
status_t
RISCV64VMTranslationMap::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	// Unmap a single page of `area` and propagate the PTE's
	// accessed/dirty state to the generic VM via PageUnmapped().
	TRACE("RISCV64VMTranslationMap::UnmapPage(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", %d)\n", (addr_t)area, area->name, address,
		updatePageQueue);

	ThreadCPUPinner pinner(thread_get_current_thread());

	Pte* pte = LookupPte(address, false, NULL);
	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
		return B_ENTRY_NOT_FOUND;

	RecursiveLocker locker(fLock);

	// Capture the old entry before clearing it so the accessed/dirty
	// bits can still be reported after the flush.
	Pte oldPte = *pte;
	pte->flags = 0;
	pte->ppn = 0;
	fMapCount--;
	FlushTlbPage(address);
	pinner.Unlock();

	locker.Detach(); // PageUnmapped takes ownership
	PageUnmapped(area, oldPte.ppn, ((1 << pteAccessed) & oldPte.flags) != 0,
		((1 << pteDirty) & oldPte.flags) != 0, updatePageQueue);
	return B_OK;
}
341 
342 
343 void
344 RISCV64VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
345 	bool updatePageQueue)
346 {
347 	TRACE("RISCV64VMTranslationMap::UnmapPages(0x%" B_PRIxADDR "(%s), 0x%"
348 		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d)\n", (addr_t)area,
349 		area->name, base, size, updatePageQueue);
350 
351 	for (addr_t end = base + size; base < end; base += B_PAGE_SIZE)
352 		UnmapPage(area, base, updatePageQueue);
353 }
354 
355 
void
RISCV64VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	// Unmap a whole area at once. Device and wired areas take the
	// per-page path through UnmapPages(); ordinary areas steal the
	// area's mapping list and process it directly under one lock.
	TRACE("RISCV64VMTranslationMap::UnmapArea(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d, %d)\n", (addr_t)area,
		area->name, area->Base(), area->Size(), deletingAddressSpace,
		ignoreTopCachePageFlags);

	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		UnmapPages(area, area->Base(), area->Size(), true);
		return;
	}

	// When the whole address space goes away and the top cache's page
	// flags may be ignored, the PTEs of top-cache pages need not be
	// touched at all.
	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {

		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			// Reconstruct the page's virtual address from its cache
			// offset relative to the area.
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE)
				- area->cache_offset);

			Pte* pte = LookupPte(address, false, NULL);
			if (pte == NULL
				|| ((1 << pteValid) & pte->flags) == 0) {
				panic("page %p has mapping for area %p "
					"(%#" B_PRIxADDR "), but has no "
					"page table", page, area, address);
				continue;
			}

			Pte oldPte = *pte;
			pte->flags = 0;
			pte->ppn = 0;

			// transfer the accessed/dirty flags to the page and
			// invalidate the mapping, if necessary
			if (((1 << pteAccessed) & oldPte.flags) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					FlushTlbPage(address);
			}

			if (((1 << pteDirty) & oldPte.flags) != 0)
				page->modified = true;

			// Move fully unmapped pages to the queue matching their
			// new state.
			if (pageFullyUnmapped) {
				if (cache->temporary) {
					vm_page_set_state(page,
						PAGE_STATE_INACTIVE);
				} else if (page->modified) {
					vm_page_set_state(page,
						PAGE_STATE_MODIFIED);
				} else {
					vm_page_set_state(page,
						PAGE_STATE_CACHED);
				}
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitely, since we directly use the lock

	locker.Unlock();

	// Free the stolen mapping objects outside the map lock.
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);

	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}
451 
452 
status_t
RISCV64VMTranslationMap::Query(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	// Report the physical address and the PAGE_*/B_*_AREA flags for
	// `virtualAddress`. Unmapped addresses yield 0/0 with B_OK.
	*_flags = 0;
	*_physicalAddress = 0;

	ThreadCPUPinner pinner(thread_get_current_thread());

	if (fPageTable == 0)
		return B_OK;

	Pte* pte = LookupPte(virtualAddress, false, NULL);
	if (pte == 0)
		return B_OK;

	// NOTE(review): the physical address is reported even when the
	// valid bit is clear — confirm callers only use it together with
	// PAGE_PRESENT.
	*_physicalAddress = pte->ppn * B_PAGE_SIZE;

	if (((1 << pteValid)    & pte->flags) != 0)
		*_flags |= PAGE_PRESENT;
#ifndef DISABLE_MODIFIED_FLAGS
	if (((1 << pteDirty)    & pte->flags) != 0)
		*_flags |= PAGE_MODIFIED;
	if (((1 << pteAccessed) & pte->flags) != 0)
		*_flags |= PAGE_ACCESSED;
#endif
	// The user bit selects which family of protection flags to report.
	if (((1 << pteUser) & pte->flags) != 0) {
		if (((1 << pteRead)  & pte->flags) != 0)
			*_flags |= B_READ_AREA;
		if (((1 << pteWrite) & pte->flags) != 0)
			*_flags |= B_WRITE_AREA;
		if (((1 << pteExec)  & pte->flags) != 0)
			*_flags |= B_EXECUTE_AREA;
	} else {
		if (((1 << pteRead)  & pte->flags) != 0)
			*_flags |= B_KERNEL_READ_AREA;
		if (((1 << pteWrite) & pte->flags) != 0)
			*_flags |= B_KERNEL_WRITE_AREA;
		if (((1 << pteExec)  & pte->flags) != 0)
			*_flags |= B_KERNEL_EXECUTE_AREA;
	}

	return B_OK;
}
497 
498 
status_t
RISCV64VMTranslationMap::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	// Interrupt-context variant; simply delegates to Query().
	return Query(virtualAddress, _physicalAddress, _flags);
}
505 
506 
status_t RISCV64VMTranslationMap::Protect(addr_t base, addr_t top,
	uint32 attributes, uint32 memoryType)
{
	// Change the protection of all mapped pages in [base, top) to
	// `attributes`, preserving each PTE's valid/accessed/dirty bits.
	// `memoryType` is currently ignored.
	TRACE("RISCV64VMTranslationMap::Protect(0x%" B_PRIxADDR ", 0x%"
		B_PRIxADDR ")\n", base, top);

	ThreadCPUPinner pinner(thread_get_current_thread());

	for (addr_t page = base; page < top; page += B_PAGE_SIZE) {

		Pte* pte = LookupPte(page, false, NULL);
		if (pte == NULL || ((1 << pteValid) & pte->flags) == 0) {
			TRACE("attempt to protect not mapped page: 0x%"
				B_PRIxADDR "\n", page);
			continue;
		}

		// Keep only the state bits; the permission and user bits are
		// rebuilt below from `attributes`.
		Pte newPte = *pte;
		newPte.flags &= (1 << pteValid)
			| (1 << pteAccessed) | (1 << pteDirty);

		if ((attributes & B_USER_PROTECTION) != 0) {
			newPte.flags |= (1 << pteUser);
			if ((attributes & B_READ_AREA)    != 0)
				newPte.flags |= (1 << pteRead);
			if ((attributes & B_WRITE_AREA)   != 0)
				newPte.flags |= (1 << pteWrite);
			if ((attributes & B_EXECUTE_AREA) != 0)
				newPte.flags |= (1 << pteExec);
		} else {
			if ((attributes & B_KERNEL_READ_AREA)    != 0)
				newPte.flags |= (1 << pteRead);
			if ((attributes & B_KERNEL_WRITE_AREA)   != 0)
				newPte.flags |= (1 << pteWrite);
			if ((attributes & B_KERNEL_EXECUTE_AREA) != 0)
				newPte.flags |= (1 << pteExec);
		}
		*pte = newPte;

		FlushTlbPage(page);
	}

	return B_OK;
}
551 
552 
status_t
RISCV64VMTranslationMap::ProtectPage(VMArea* area, addr_t address,
	uint32 attributes)
{
	// Not yet implemented for RISC-V; panics when called.
	NOT_IMPLEMENTED_PANIC();
	return B_OK;
}
560 
561 
status_t
RISCV64VMTranslationMap::ProtectArea(VMArea* area, uint32 attributes)
{
	// Not yet implemented for RISC-V; panics when called.
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}
568 
569 
570 static inline uint32
571 ConvertAccessedFlags(uint32 flags)
572 {
573 	return ((flags & PAGE_MODIFIED) ? (1 << pteDirty   ) : 0)
574 		| ((flags & PAGE_ACCESSED) ? (1 << pteAccessed) : 0);
575 }
576 
577 
578 status_t
579 RISCV64VMTranslationMap::SetFlags(addr_t address, uint32 flags)
580 {
581 	ThreadCPUPinner pinner(thread_get_current_thread());
582 	Pte* pte = LookupPte(address, false, NULL);
583 	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
584 		return B_OK;
585 #ifndef DISABLE_MODIFIED_FLAGS
586 	pte->flags |= ConvertAccessedFlags(flags);
587 #endif
588 	FlushTlbPage(address);
589 	return B_OK;
590 }
591 
592 
593 status_t
594 RISCV64VMTranslationMap::ClearFlags(addr_t address, uint32 flags)
595 {
596 	ThreadCPUPinner pinner(thread_get_current_thread());
597 
598 	Pte* pte = LookupPte(address, false, NULL);
599 	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
600 		return B_OK;
601 
602 #ifndef DISABLE_MODIFIED_FLAGS
603 	pte->flags &= ~ConvertAccessedFlags(flags);
604 #endif
605 
606 	FlushTlbPage(address);
607 	return B_OK;
608 }
609 
610 
bool
RISCV64VMTranslationMap::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	// Clear the accessed and dirty bits of the page at `address`.
	// Returns whether the page was accessed; `_modified` reports its
	// dirty state. With `unmapIfUnaccessed`, an unaccessed page is
	// unmapped entirely and reported via UnaccessedPageUnmapped().
	TRACE("RISCV64VMPhysicalPageMapper::ClearAccessedAndModified(0x%"
		B_PRIxADDR "(%s), 0x%" B_PRIxADDR ", %d)\n", (addr_t)area,
		area->name, address, unmapIfUnaccessed);

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	Pte* pte = LookupPte(address, false, NULL);
	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
		return false;

	// Remember the old flags; all decisions below are based on them.
	Pte oldPte = *pte;

#ifndef DISABLE_MODIFIED_FLAGS
	if (unmapIfUnaccessed) {
		if (((1 << pteAccessed) & pte->flags) != 0) {
			// Accessed: keep the mapping, just clear the tracking bits.
			pte->flags &= ~((1 << pteAccessed) | (1 << pteDirty));
		} else {
			// Not accessed: remove the mapping altogether.
			pte->flags = 0;
			pte->ppn = 0;
		}
	} else {
		pte->flags &= ~((1 << pteAccessed) | (1 << pteDirty));
	}
#endif

	pinner.Unlock();
	_modified = ((1 << pteDirty) & oldPte.flags) != 0;
	if (((1 << pteAccessed) & oldPte.flags) != 0) {
		FlushTlbPage(address);
		Flush();
		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// The page was unmapped above; account for it and notify the VM.
	fMapCount--;

	locker.Detach(); // UnaccessedPageUnmapped takes ownership
	UnaccessedPageUnmapped(area, oldPte.ppn);
	return false;
}
658 
659 
void
RISCV64VMTranslationMap::Flush()
{
	// NOTE(review): deferred/cross-CPU TLB invalidation is not
	// implemented yet — presumably the per-page FlushTlbPage() calls
	// suffice for now; confirm for SMP.
	//NOT_IMPLEMENTED_PANIC();
}
665 
666 
void
RISCV64VMTranslationMap::DebugPrintMappingInfo(addr_t virtualAddress)
{
	// Not yet implemented for RISC-V; panics when called.
	NOT_IMPLEMENTED_PANIC();
}
672 
673 
bool
RISCV64VMTranslationMap::DebugGetReverseMappingInfo(phys_addr_t physicalAddress,
	ReverseMappingInfoCallback& callback)
{
	// Not yet implemented for RISC-V; panics when called.
	NOT_IMPLEMENTED_PANIC();
	return false;
}
681 
682 
683 status_t
684 RISCV64VMTranslationMap::MemcpyToMap(addr_t to, const char *from, size_t size)
685 {
686 	TRACE("RISCV64VMPhysicalPageMapper::MemcpyToMap(0x%" B_PRIxADDR ", 0x%"
687 		B_PRIxADDR ", %" B_PRIuSIZE ")\n", to, (addr_t)from, size);
688 
689 	while (size > 0) {
690 		uint64 va0 = ROUNDDOWN(to, B_PAGE_SIZE);
691 		uint64 pa0 = LookupAddr(va0);
692 		TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%" B_PRIxADDR "\n",
693 			va0, pa0);
694 
695 		if (pa0 == 0) {
696 			TRACE("[!] not mapped: 0x%" B_PRIxADDR "\n", va0);
697 			return B_BAD_ADDRESS;
698 		}
699 
700 		uint64 n = B_PAGE_SIZE - (to - va0);
701 		if (n > size)
702 			n = size;
703 
704 		memcpy(VirtFromPhys(pa0 + (to - va0)), from, n);
705 
706 		size -= n;
707 		from += n;
708 		to = va0 + B_PAGE_SIZE;
709 	}
710 	return B_OK;
711 }
712 
713 
status_t
RISCV64VMTranslationMap::MemcpyFromMap(char *to, addr_t from, size_t size)
{
	// Copy `size` bytes out of this map at `from`, one source page at a
	// time. Unlike MemcpyToMap(), an unmapped source page triggers the
	// page fault handler once before giving up with B_BAD_ADDRESS.
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyFromMap(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n",
		(addr_t)to, from, size);

	while (size > 0) {
		uint64 va0 = ROUNDDOWN(from, B_PAGE_SIZE);
		uint64 pa0 = LookupAddr(va0);
		if (pa0 == 0) {
			TRACE("[!] not mapped: 0x%" B_PRIxADDR
				", calling page fault handler\n", va0);

			// Try to fault the page in (e.g. demand paging), then
			// retry the translation once.
			addr_t newIP;
			vm_page_fault(va0, Ra(), true, false, true, true,
				&newIP);

			pa0 = LookupAddr(va0);
			TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%"
				B_PRIxADDR "\n", va0, pa0);

			if (pa0 == 0)
				return B_BAD_ADDRESS;
		}
		// Copy at most up to the end of the current source page.
		uint64 n = B_PAGE_SIZE - (from - va0);
		if(n > size)
			n = size;

		memcpy(to, VirtFromPhys(pa0 + (from - va0)), n);

		size -= n;
		to += n;
		from = va0 + B_PAGE_SIZE;
	}

	return B_OK;
}
752 
753 
status_t
RISCV64VMTranslationMap::MemsetToMap(addr_t to, char c, size_t count)
{
	// Fill `count` bytes at `to` in this map with `c`, one destination
	// page at a time. Like MemcpyFromMap(), an unmapped page triggers
	// the page fault handler once before giving up with B_BAD_ADDRESS.
	TRACE("RISCV64VMPhysicalPageMapper::MemsetToMap(0x%" B_PRIxADDR
		", %d, %" B_PRIuSIZE ")\n", to, c, count);

	while (count > 0) {
		uint64 va0 = ROUNDDOWN(to, B_PAGE_SIZE);
		uint64 pa0 = LookupAddr(va0);
		TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%" B_PRIxADDR "\n",
			va0, pa0);

		if (pa0 == 0) {
			TRACE("[!] not mapped: 0x%" B_PRIxADDR
				", calling page fault handler\n", va0);
			// Try to fault the page in, then retry the translation.
			addr_t newIP;
			vm_page_fault(va0, Ra(), true, false, true, true,
				&newIP);
			pa0 = LookupAddr(va0);
			TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%"
				B_PRIxADDR "\n", va0, pa0);

			if (pa0 == 0)
				return B_BAD_ADDRESS;
		}

		// Fill at most up to the end of the current page.
		uint64 n = B_PAGE_SIZE - (to - va0);
		if (n > count)
			n = count;

		memset(VirtFromPhys(pa0 + (to - va0)), c, n);

		count -= n;
		to = va0 + B_PAGE_SIZE;
	}
	return B_OK;
}
791 
792 
ssize_t
RISCV64VMTranslationMap::StrlcpyFromMap(char *to, addr_t from, size_t size)
{
	// NOTE(review): unlike MemcpyFromMap(), this dereferences `from`
	// directly instead of translating it page by page through this map —
	// it only works while this map is the active one and the source is
	// mapped. Confirm whether a page-walking implementation is needed.
	// NOT_IMPLEMENTED_PANIC();
	return strlcpy(to, (const char*)from, size);
	// return 0;
}
800 
801 
802 ssize_t
803 RISCV64VMTranslationMap::StrlcpyToMap(addr_t to, const char *from, size_t size)
804 {
805 	ssize_t len = strlen(from) + 1;
806 	if ((size_t)len > size)
807 		len = size;
808 
809 	if (MemcpyToMap(to, from, len) < B_OK)
810 		return 0;
811 
812 	return len;
813 }
814 
815 
816 //#pragma mark RISCV64VMPhysicalPageMapper
817 
818 
RISCV64VMPhysicalPageMapper::RISCV64VMPhysicalPageMapper()
{
	// Nothing to set up; all methods work on the direct physical map.
	TRACE("+RISCV64VMPhysicalPageMapper\n");
}
823 
824 
RISCV64VMPhysicalPageMapper::~RISCV64VMPhysicalPageMapper()
{
	// Nothing to tear down.
	TRACE("-RISCV64VMPhysicalPageMapper\n");
}
829 
830 
831 status_t
832 RISCV64VMPhysicalPageMapper::GetPage(phys_addr_t physicalAddress,
833 	addr_t* _virtualAddress, void** _handle)
834 {
835 	*_virtualAddress = (addr_t)VirtFromPhys(physicalAddress);
836 	*_handle = (void*)1;
837 	return B_OK;
838 }
839 
840 
status_t
RISCV64VMPhysicalPageMapper::PutPage(addr_t virtualAddress, void* handle)
{
	// GetPage() performs a direct translation and hands out a dummy
	// handle, so there is nothing to undo here.
	return B_OK;
}
846 
847 
status_t
RISCV64VMPhysicalPageMapper::GetPageCurrentCPU( phys_addr_t physicalAddress,
	addr_t* _virtualAddress, void** _handle)
{
	// No per-CPU mapping slots are needed; delegate to GetPage().
	return GetPage(physicalAddress, _virtualAddress, _handle);
}
854 
855 
status_t
RISCV64VMPhysicalPageMapper::PutPageCurrentCPU(addr_t virtualAddress,
	void* _handle)
{
	// Counterpart of GetPageCurrentCPU(); delegates to PutPage().
	return PutPage(virtualAddress, _handle);
}
862 
863 
status_t
RISCV64VMPhysicalPageMapper::GetPageDebug(phys_addr_t physicalAddress,
	addr_t* _virtualAddress, void** _handle)
{
	// Not yet implemented for RISC-V; panics when called.
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}
871 
872 
status_t
RISCV64VMPhysicalPageMapper::PutPageDebug(addr_t virtualAddress, void* handle)
{
	// Not yet implemented for RISC-V; panics when called.
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}
879 
880 
status_t
RISCV64VMPhysicalPageMapper::MemsetPhysical(phys_addr_t address, int value,
	phys_size_t length)
{
	// Fill physical memory through the direct map, bracketed by
	// set_ac()/clear_ac() like the other physical-memory accessors.
	TRACE("RISCV64VMPhysicalPageMapper::MemsetPhysical(0x%" B_PRIxADDR
		", 0x%x, 0x%" B_PRIxADDR ")\n", address, value, length);
	set_ac();
	memset(VirtFromPhys(address), value, length);
	clear_ac();

	return B_OK;
}
893 
894 
status_t
RISCV64VMPhysicalPageMapper::MemcpyFromPhysical(void* to, phys_addr_t from,
	size_t length, bool user)
{
	// Copy from physical memory through the direct map.
	// NOTE(review): the `user` parameter is ignored — confirm whether
	// user-buffer faults need special handling here.
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyFromPhysical(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n", (addr_t)to,
		from, length);

	set_ac();
	memcpy(to, VirtFromPhys(from), length);
	clear_ac();

	return B_OK;
}
909 
910 
status_t
RISCV64VMPhysicalPageMapper::MemcpyToPhysical(phys_addr_t to, const void* from,
	size_t length, bool user)
{
	// Copy to physical memory through the direct map.
	// NOTE(review): the `user` parameter is ignored — confirm whether
	// user-buffer faults need special handling here.
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyToPhysical(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n", to, (addr_t)from,
		length);

	set_ac();
	memcpy(VirtFromPhys(to), from, length);
	clear_ac();

	return B_OK;
}
925 
926 
void
RISCV64VMPhysicalPageMapper::MemcpyPhysicalPage(phys_addr_t to,
	phys_addr_t from)
{
	// Copy one whole page between two physical pages through the
	// direct map.
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyPhysicalPage(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ")\n", to, from);

	set_ac();
	memcpy(VirtFromPhys(to), VirtFromPhys(from), B_PAGE_SIZE);
	clear_ac();
}
938