xref: /haiku/src/system/kernel/arch/riscv64/RISCV64VMTranslationMap.cpp (revision 02354704729d38c3b078c696adc1bbbd33cbcf72)
1 /*
2  * Copyright 2020-2021, Haiku, Inc. All rights reserved.
3  * Distributed under the terms of the MIT License.
4  *
5  * Authors:
6  *   X512 <danger_mail@list.ru>
7  */
8 
9 
10 #include "RISCV64VMTranslationMap.h"
11 
12 #include <kernel.h>
13 #include <vm/vm_priv.h>
14 #include <vm/vm_page.h>
15 #include <vm/VMAddressSpace.h>
16 #include <vm/VMCache.h>
17 #include <slab/Slab.h>
18 
19 #include <util/AutoLock.h>
20 #include <util/ThreadAutoLock.h>
21 
22 
// When defined, pages are mapped with the accessed/dirty bits preset and the
// software maintenance of those flags (Query/SetFlags/ClearFlags paths) is
// compiled out. Debugging aid.
//#define DISABLE_MODIFIED_FLAGS 1

// Define to enable verbose tracing of translation-map operations.
//#define DO_TRACE
#ifdef DO_TRACE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif

// Placeholder for methods that are not implemented yet; panics when reached.
#define NOT_IMPLEMENTED_PANIC() \
	panic("not implemented: %s\n", __PRETTY_FUNCTION__)
35 
36 static void
37 FreePageTable(page_num_t ppn, bool isKernel, uint32 level = 2)
38 {
39 	if (level > 0) {
40 		Pte* pte = (Pte*)VirtFromPhys(ppn * B_PAGE_SIZE);
41 		uint64 beg = 0;
42 		uint64 end = pteCount - 1;
43 		if (level == 2 && !isKernel) {
44 			beg = VirtAdrPte(USER_BASE, 2);
45 			end = VirtAdrPte(USER_TOP, 2);
46 		}
47 		for (uint64 i = beg; i <= end; i++) {
48 			if ((1 << pteValid) & pte[i].flags)
49 				FreePageTable(pte[i].ppn, isKernel, level - 1);
50 		}
51 	}
52 	vm_page* page = vm_lookup_page(ppn);
53 	vm_page_set_state(page, PAGE_STATE_FREE);
54 }
55 
56 
57 static uint64
58 GetPageTableSize(page_num_t ppn, bool isKernel, uint32 level = 2)
59 {
60 	if (ppn == 0)
61 		return 0;
62 
63 	if (level == 0)
64 		return 1;
65 
66 	uint64 size = 1;
67 	Pte* pte = (Pte*)VirtFromPhys(ppn * B_PAGE_SIZE);
68 	uint64 beg = 0;
69 	uint64 end = pteCount - 1;
70 	if (level == 2 && !isKernel) {
71 		beg = VirtAdrPte(USER_BASE, 2);
72 		end = VirtAdrPte(USER_TOP, 2);
73 	}
74 	for (uint64 i = beg; i <= end; i++) {
75 		if ((1 << pteValid) & pte[i].flags)
76 			size += GetPageTableSize(pte[i].ppn, isKernel, level - 1);
77 	}
78 	return size;
79 }
80 
81 
82 //#pragma mark RISCV64VMTranslationMap
83 
84 
// Walk the three-level page table and return the level-0 PTE for `virtAdr`.
// If `alloc` is true, missing table pages (including the root) are allocated
// from `reservation` as wired, zeroed pages; otherwise NULL is returned as
// soon as the translation is incomplete.
Pte*
RISCV64VMTranslationMap::LookupPte(addr_t virtAdr, bool alloc,
	vm_page_reservation* reservation)
{
	if (fPageTable == 0) {
		if (!alloc)
			return NULL;
		// Lazily create the root table on the first allocating lookup.
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
		fPageTable = page->physical_page_number * B_PAGE_SIZE;
		if (fPageTable == 0)
			return NULL;
		fPageTableSize++;
		if (!fIsKernel) {
			// Map kernel address space into user address space. Preallocated
			// kernel level-2 PTEs are reused.
			RISCV64VMTranslationMap* kernelMap = (RISCV64VMTranslationMap*)
				VMAddressSpace::Kernel()->TranslationMap();
			Pte *kernelPageTable = (Pte*)VirtFromPhys(kernelMap->PageTable());
			Pte *userPageTable = (Pte*)VirtFromPhys(fPageTable);
			for (uint64 i = VirtAdrPte(KERNEL_BASE, 2);
				i <= VirtAdrPte(KERNEL_TOP, 2); i++) {
				Pte *pte = &userPageTable[i];
				pte->ppn = kernelPageTable[i].ppn;
				pte->flags |= (1 << pteValid);
			}
		}
	}
	// Descend from the root (level 2) down to level 1, allocating
	// intermediate table pages on demand.
	Pte *pte = (Pte*)VirtFromPhys(fPageTable);
	for (int level = 2; level > 0; level--) {
		pte += VirtAdrPte(virtAdr, level);
		if (!((1 << pteValid) & pte->flags)) {
			if (!alloc)
				return NULL;
			vm_page* page = vm_page_allocate_page(reservation,
				PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
			pte->ppn = page->physical_page_number;
			if (pte->ppn == 0)
				return NULL;
			fPageTableSize++;
			// Valid bit is set only after `ppn` so a concurrent observer
			// never sees a valid entry with a stale target.
			pte->flags |= (1 << pteValid);
		}
		pte = (Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn);
	}
	// Finally index into the leaf (level 0) table.
	pte += VirtAdrPte(virtAdr, 0);
	return pte;
}
132 
133 
134 phys_addr_t
135 RISCV64VMTranslationMap::LookupAddr(addr_t virtAdr)
136 {
137 	Pte* pte = LookupPte(virtAdr, false, NULL);
138 	if (pte == NULL || !((1 << pteValid) & pte->flags))
139 		return 0;
140 	if (fIsKernel != (((1 << pteUser) & pte->flags) == 0))
141 		return 0;
142 	return pte->ppn * B_PAGE_SIZE;
143 }
144 
145 
// `pageTable` is the physical address of an existing root table, or 0 in
// which case the table is allocated lazily by LookupPte() on first use.
// The initial page-table page count is derived by walking the given table.
RISCV64VMTranslationMap::RISCV64VMTranslationMap(bool kernel,
	phys_addr_t pageTable):
	fIsKernel(kernel),
	fPageTable(pageTable),
	fPageTableSize(GetPageTableSize(pageTable / B_PAGE_SIZE, kernel))
{
	TRACE("+RISCV64VMTranslationMap(%p, %d, 0x%" B_PRIxADDR ")\n", this,
		kernel, pageTable);
	TRACE("  pageTableSize: %" B_PRIu64 "\n", fPageTableSize);
}
156 
157 
RISCV64VMTranslationMap::~RISCV64VMTranslationMap()
{
	TRACE("-RISCV64VMTranslationMap(%p)\n", this);
	TRACE("  pageTableSize: %" B_PRIu64 "\n", fPageTableSize);
	TRACE("  GetPageTableSize(): %" B_PRIu64 "\n",
		GetPageTableSize(fPageTable / B_PAGE_SIZE, fIsKernel));

	// Only user maps are ever destroyed; the kernel map lives forever.
	ASSERT_ALWAYS(!fIsKernel);
	// Can't delete currently used page table
	ASSERT_ALWAYS(::Satp() != Satp());

	// NOTE(review): if fPageTable is still 0 (map never used), this hands
	// ppn 0 to FreePageTable() — confirm FreePageTable guards against it.
	FreePageTable(fPageTable / B_PAGE_SIZE, fIsKernel);
}
171 
172 
173 bool
174 RISCV64VMTranslationMap::Lock()
175 {
176 	TRACE("RISCV64VMTranslationMap::Lock()\n");
177 	recursive_lock_lock(&fLock);
178 	return true;
179 }
180 
181 
182 void
183 RISCV64VMTranslationMap::Unlock()
184 {
185 	TRACE("RISCV64VMTranslationMap::Unlock()\n");
186 	if (recursive_lock_get_recursion(&fLock) == 1) {
187 		// we're about to release it for the last time
188 		Flush();
189 	}
190 	recursive_lock_unlock(&fLock);
191 }
192 
193 
// TODO: not implemented; panics if ever called.
addr_t
RISCV64VMTranslationMap::MappedSize() const
{
	NOT_IMPLEMENTED_PANIC();
	return 0;
}
200 
201 
// Upper bound on the number of page-table pages that may need to be
// allocated in order to map the range [start, end] (used to size page
// reservations before calling Map()).
size_t
RISCV64VMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	enum {
		// Address span covered by a single PTE at each table level.
		level0Range = (uint64_t)B_PAGE_SIZE * pteCount,
		level1Range = (uint64_t)level0Range * pteCount,
		level2Range = (uint64_t)level1Range * pteCount,
	};

	if (start == 0) {
		// Caller doesn't know the placement yet: assume the worst case of a
		// range of `end` bytes starting just below a level-2 boundary, so
		// it straddles the maximum number of table pages.
		start = (level2Range) - B_PAGE_SIZE;
		end += start;
	}

	// Number of distinct table pages touched at each level.
	size_t requiredLevel2 = end / level2Range + 1 - start / level2Range;
	size_t requiredLevel1 = end / level1Range + 1 - start / level1Range;
	size_t requiredLevel0 = end / level0Range + 1 - start / level0Range;

	return requiredLevel2 + requiredLevel1 + requiredLevel0;
}
222 
223 
// Enter a single page mapping virtualAddress -> physicalAddress with the
// protection derived from `attributes`. Missing page-table pages are
// allocated from `reservation`.
// NOTE(review): `memoryType` is currently ignored — no cacheability
// attributes are applied; confirm whether that is intentional.
status_t
RISCV64VMTranslationMap::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
	uint32 attributes, uint32 memoryType,
	vm_page_reservation* reservation)
{
	TRACE("RISCV64VMTranslationMap::Map(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
		")\n", virtualAddress, physicalAddress);

	ThreadCPUPinner pinner(thread_get_current_thread());

	Pte* pte = LookupPte(virtualAddress, true, reservation);
	if (pte == NULL)
		panic("can't allocate page table");

	pte->ppn = physicalAddress / B_PAGE_SIZE;
	pte->flags = 0;
	// Translate the generic area protection into PTE permission bits; user
	// mappings additionally carry the user bit.
	if ((attributes & B_USER_PROTECTION) != 0) {
		pte->flags |= (1 << pteUser);
		if ((attributes & B_READ_AREA) != 0)
			pte->flags |= (1 << pteRead);
		if ((attributes & B_WRITE_AREA) != 0)
			pte->flags |= (1 << pteWrite);
		if ((attributes & B_EXECUTE_AREA) != 0)
			pte->flags |= (1 << pteExec);
	} else {
		if ((attributes & B_KERNEL_READ_AREA) != 0)
			pte->flags |= (1 << pteRead);
		if ((attributes & B_KERNEL_WRITE_AREA) != 0)
			pte->flags |= (1 << pteWrite);
		if ((attributes & B_KERNEL_EXECUTE_AREA) != 0)
			pte->flags |= (1 << pteExec);
	}

	// With DISABLE_MODIFIED_FLAGS the accessed/dirty bits are preset so
	// that software flag maintenance is bypassed.
	pte->flags |= (1 << pteValid)
#ifdef DISABLE_MODIFIED_FLAGS
		| (1 << pteAccessed) | (1 << pteDirty)
#endif
	;

	FlushTlbPage(virtualAddress);

	fMapCount++;

	return B_OK;
}
269 
270 
271 status_t
272 RISCV64VMTranslationMap::Unmap(addr_t start, addr_t end)
273 {
274 	TRACE("RISCV64VMTranslationMap::Unmap(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
275 		")\n", start, end);
276 
277 	ThreadCPUPinner pinner(thread_get_current_thread());
278 
279 	for (addr_t page = start; page < end; page += B_PAGE_SIZE) {
280 		Pte* pte = LookupPte(page, false, NULL);
281 		if (pte != NULL) {
282 			fMapCount--;
283 			pte->flags = 0;
284 			pte->ppn = 0;
285 			FlushTlbPage(page);
286 		}
287 	}
288 	return B_OK;
289 }
290 
291 
// TODO: not implemented; panics if ever called (debugger support hook).
status_t
RISCV64VMTranslationMap::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}
299 
300 
/*
Things that need to be done when unmapping VMArea pages:
	update vm_page::accessed, modified
	MMIO pages:
		just unmap
	wired pages:
		decrement wired count
	non-wired pages:
		remove from VMArea and vm_page `mappings` list
	wired and non-wired pages:
		vm_page_set_state
*/
313 
// Remove the mapping at `address` and update the page's bookkeeping
// (mapping list, accessed/modified propagation, page queue) via
// PageUnmapped(). Returns B_ENTRY_NOT_FOUND if the page wasn't mapped.
status_t
RISCV64VMTranslationMap::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	TRACE("RISCV64VMTranslationMap::UnmapPage(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", %d)\n", (addr_t)area, area->name, address,
		updatePageQueue);

	ThreadCPUPinner pinner(thread_get_current_thread());

	Pte* pte = LookupPte(address, false, NULL);
	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
		return B_ENTRY_NOT_FOUND;

	RecursiveLocker locker(fLock);

	// Snapshot the entry before clearing so the accessed/dirty bits can be
	// handed to PageUnmapped() below.
	Pte oldPte = *pte;
	pte->flags = 0;
	pte->ppn = 0;
	fMapCount--;
	FlushTlbPage(address);
	pinner.Unlock();

	locker.Detach(); // PageUnmapped takes ownership
	PageUnmapped(area, oldPte.ppn, ((1 << pteAccessed) & oldPte.flags) != 0,
		((1 << pteDirty) & oldPte.flags) != 0, updatePageQueue);
	return B_OK;
}
342 
343 
344 void
345 RISCV64VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
346 	bool updatePageQueue)
347 {
348 	TRACE("RISCV64VMTranslationMap::UnmapPages(0x%" B_PRIxADDR "(%s), 0x%"
349 		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d)\n", (addr_t)area,
350 		area->name, base, size, updatePageQueue);
351 
352 	for (addr_t end = base + size; base < end; base += B_PAGE_SIZE)
353 		UnmapPage(area, base, updatePageQueue);
354 }
355 
356 
// Unmap a whole area. Device and wired areas take the simple per-page path;
// for ordinary areas the mapping objects are detached in bulk and the
// accessed/dirty bits are propagated to the pages as the PTEs are cleared.
void
RISCV64VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	TRACE("RISCV64VMTranslationMap::UnmapArea(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d, %d)\n", (addr_t)area,
		area->name, area->Base(), area->Size(), deletingAddressSpace,
		ignoreTopCachePageFlags);

	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		// Device/wired pages need the full per-page treatment.
		UnmapPages(area, area->Base(), area->Size(), true);
		return;
	}

	// When the whole address space is going away and the caller doesn't care
	// about page flags, the PTE walk can be skipped for top-cache pages.
	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	// Steal the area's mapping list; the objects are freed at the end,
	// outside the lock.
	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {

		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			// Reconstruct the page's virtual address from its cache offset.
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE)
				- area->cache_offset);

			Pte* pte = LookupPte(address, false, NULL);
			if (pte == NULL
				|| ((1 << pteValid) & pte->flags) == 0) {
				panic("page %p has mapping for area %p "
					"(%#" B_PRIxADDR "), but has no "
					"page table", page, area, address);
				continue;
			}

			Pte oldPte = *pte;
			pte->flags = 0;
			pte->ppn = 0;

			// transfer the accessed/dirty flags to the page and
			// invalidate the mapping, if necessary
			if (((1 << pteAccessed) & oldPte.flags) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					FlushTlbPage(address);
			}

			if (((1 << pteDirty) & oldPte.flags) != 0)
				page->modified = true;

			if (pageFullyUnmapped) {
				// No mapping left anywhere: move the page to the queue
				// matching its state.
				if (cache->temporary) {
					vm_page_set_state(page,
						PAGE_STATE_INACTIVE);
				} else if (page->modified) {
					vm_page_set_state(page,
						PAGE_STATE_MODIFIED);
				} else {
					vm_page_set_state(page,
						PAGE_STATE_CACHED);
				}
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitely, since we directly use the lock

	locker.Unlock();

	// Free the detached mapping objects without holding the map lock.
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);

	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}
452 
453 
454 status_t
455 RISCV64VMTranslationMap::Query(addr_t virtualAddress,
456 	phys_addr_t* _physicalAddress, uint32* _flags)
457 {
458 	*_flags = 0;
459 	*_physicalAddress = 0;
460 
461 	ThreadCPUPinner pinner(thread_get_current_thread());
462 
463 	if (fPageTable == 0)
464 		return B_OK;
465 
466 	Pte* pte = LookupPte(virtualAddress, false, NULL);
467 	if (pte == 0)
468 		return B_OK;
469 
470 	*_physicalAddress = pte->ppn * B_PAGE_SIZE;
471 
472 	if (((1 << pteValid)    & pte->flags) != 0)
473 		*_flags |= PAGE_PRESENT;
474 #ifndef DISABLE_MODIFIED_FLAGS
475 	if (((1 << pteDirty)    & pte->flags) != 0)
476 		*_flags |= PAGE_MODIFIED;
477 	if (((1 << pteAccessed) & pte->flags) != 0)
478 		*_flags |= PAGE_ACCESSED;
479 #endif
480 	if (((1 << pteUser) & pte->flags) != 0) {
481 		if (((1 << pteRead)  & pte->flags) != 0)
482 			*_flags |= B_READ_AREA;
483 		if (((1 << pteWrite) & pte->flags) != 0)
484 			*_flags |= B_WRITE_AREA;
485 		if (((1 << pteExec)  & pte->flags) != 0)
486 			*_flags |= B_EXECUTE_AREA;
487 	} else {
488 		if (((1 << pteRead)  & pte->flags) != 0)
489 			*_flags |= B_KERNEL_READ_AREA;
490 		if (((1 << pteWrite) & pte->flags) != 0)
491 			*_flags |= B_KERNEL_WRITE_AREA;
492 		if (((1 << pteExec)  & pte->flags) != 0)
493 			*_flags |= B_KERNEL_EXECUTE_AREA;
494 	}
495 
496 	return B_OK;
497 }
498 
499 
// Interrupt-context variant of Query().
// NOTE(review): simply delegates — relies on Query() not blocking; confirm
// ThreadCPUPinner/LookupPte are safe in interrupt context.
status_t
RISCV64VMTranslationMap::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	return Query(virtualAddress, _physicalAddress, _flags);
}
506 
507 
// Change the protection of every mapped page in [base, top); unmapped pages
// are skipped. Valid/accessed/dirty bits are preserved, permission and user
// bits are rebuilt from `attributes`.
// NOTE(review): `memoryType` is currently ignored, as in Map().
status_t RISCV64VMTranslationMap::Protect(addr_t base, addr_t top,
	uint32 attributes, uint32 memoryType)
{
	TRACE("RISCV64VMTranslationMap::Protect(0x%" B_PRIxADDR ", 0x%"
		B_PRIxADDR ")\n", base, top);

	ThreadCPUPinner pinner(thread_get_current_thread());

	for (addr_t page = base; page < top; page += B_PAGE_SIZE) {

		Pte* pte = LookupPte(page, false, NULL);
		if (pte == NULL || ((1 << pteValid) & pte->flags) == 0) {
			TRACE("attempt to protect not mapped page: 0x%"
				B_PRIxADDR "\n", page);
			continue;
		}

		// Keep only valid/accessed/dirty; permissions are re-derived below.
		Pte newPte = *pte;
		newPte.flags &= (1 << pteValid)
			| (1 << pteAccessed) | (1 << pteDirty);

		if ((attributes & B_USER_PROTECTION) != 0) {
			newPte.flags |= (1 << pteUser);
			if ((attributes & B_READ_AREA)    != 0)
				newPte.flags |= (1 << pteRead);
			if ((attributes & B_WRITE_AREA)   != 0)
				newPte.flags |= (1 << pteWrite);
			if ((attributes & B_EXECUTE_AREA) != 0)
				newPte.flags |= (1 << pteExec);
		} else {
			if ((attributes & B_KERNEL_READ_AREA)    != 0)
				newPte.flags |= (1 << pteRead);
			if ((attributes & B_KERNEL_WRITE_AREA)   != 0)
				newPte.flags |= (1 << pteWrite);
			if ((attributes & B_KERNEL_EXECUTE_AREA) != 0)
				newPte.flags |= (1 << pteExec);
		}
		*pte = newPte;

		FlushTlbPage(page);
	}

	return B_OK;
}
552 
553 
// TODO: not implemented; panics if ever called. Note it returns B_OK
// (unlike the other stubs) — presumably so callers don't treat the panic
// path as an error; confirm before relying on it.
status_t
RISCV64VMTranslationMap::ProtectPage(VMArea* area, addr_t address,
	uint32 attributes)
{
	NOT_IMPLEMENTED_PANIC();
	return B_OK;
}
561 
562 
// TODO: not implemented; panics if ever called.
status_t
RISCV64VMTranslationMap::ProtectArea(VMArea* area, uint32 attributes)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}
569 
570 
571 static inline uint32
572 ConvertAccessedFlags(uint32 flags)
573 {
574 	return ((flags & PAGE_MODIFIED) ? (1 << pteDirty   ) : 0)
575 		| ((flags & PAGE_ACCESSED) ? (1 << pteAccessed) : 0);
576 }
577 
578 
579 status_t
580 RISCV64VMTranslationMap::SetFlags(addr_t address, uint32 flags)
581 {
582 	ThreadCPUPinner pinner(thread_get_current_thread());
583 	Pte* pte = LookupPte(address, false, NULL);
584 	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
585 		return B_OK;
586 #ifndef DISABLE_MODIFIED_FLAGS
587 	pte->flags |= ConvertAccessedFlags(flags);
588 #endif
589 	FlushTlbPage(address);
590 	return B_OK;
591 }
592 
593 
594 status_t
595 RISCV64VMTranslationMap::ClearFlags(addr_t address, uint32 flags)
596 {
597 	ThreadCPUPinner pinner(thread_get_current_thread());
598 
599 	Pte* pte = LookupPte(address, false, NULL);
600 	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
601 		return B_OK;
602 
603 #ifndef DISABLE_MODIFIED_FLAGS
604 	pte->flags &= ~ConvertAccessedFlags(flags);
605 #endif
606 
607 	FlushTlbPage(address);
608 	return B_OK;
609 }
610 
611 
// Clear the accessed and dirty bits of the page at `address`, reporting the
// old dirty state in `_modified`. If `unmapIfUnaccessed` is set and the page
// had not been accessed, the mapping is removed entirely. Returns whether
// the page was accessed (i.e. whether the mapping still exists).
bool
RISCV64VMTranslationMap::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	TRACE("RISCV64VMPhysicalPageMapper::ClearAccessedAndModified(0x%"
		B_PRIxADDR "(%s), 0x%" B_PRIxADDR ", %d)\n", (addr_t)area,
		area->name, address, unmapIfUnaccessed);

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	Pte* pte = LookupPte(address, false, NULL);
	if (pte == NULL || ((1 << pteValid) & pte->flags) == 0)
		return false;

	// Snapshot before modifying: decisions below use the pre-clear state.
	Pte oldPte = *pte;

#ifndef DISABLE_MODIFIED_FLAGS
	if (unmapIfUnaccessed) {
		if (((1 << pteAccessed) & pte->flags) != 0) {
			// Accessed: just clear the flags, keep the mapping.
			pte->flags &= ~((1 << pteAccessed) | (1 << pteDirty));
		} else {
			// Not accessed: remove the mapping entirely.
			pte->flags = 0;
			pte->ppn = 0;
		}
	} else {
		pte->flags &= ~((1 << pteAccessed) | (1 << pteDirty));
	}
#endif

	pinner.Unlock();
	_modified = ((1 << pteDirty) & oldPte.flags) != 0;
	if (((1 << pteAccessed) & oldPte.flags) != 0) {
		FlushTlbPage(address);
		Flush();
		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// The mapping was removed above; finish the unmap bookkeeping.
	fMapCount--;

	locker.Detach(); // UnaccessedPageUnmapped takes ownership
	UnaccessedPageUnmapped(area, oldPte.ppn);
	return false;
}
659 
660 
// TODO: currently a no-op. Intended to flush queued TLB invalidations for
// this map (called by Unlock() on the final release and by UnmapArea()).
void
RISCV64VMTranslationMap::Flush()
{
	//NOT_IMPLEMENTED_PANIC();
}
666 
667 
// TODO: not implemented; panics if ever called (kernel debugger hook).
void
RISCV64VMTranslationMap::DebugPrintMappingInfo(addr_t virtualAddress)
{
	NOT_IMPLEMENTED_PANIC();
}
673 
674 
// TODO: not implemented; panics if ever called (kernel debugger hook).
bool
RISCV64VMTranslationMap::DebugGetReverseMappingInfo(phys_addr_t physicalAddress,
	ReverseMappingInfoCallback& callback)
{
	NOT_IMPLEMENTED_PANIC();
	return false;
}
682 
683 
684 status_t
685 RISCV64VMTranslationMap::MemcpyToMap(addr_t to, const char *from, size_t size)
686 {
687 	TRACE("RISCV64VMPhysicalPageMapper::MemcpyToMap(0x%" B_PRIxADDR ", 0x%"
688 		B_PRIxADDR ", %" B_PRIuSIZE ")\n", to, (addr_t)from, size);
689 
690 	while (size > 0) {
691 		uint64 va0 = ROUNDDOWN(to, B_PAGE_SIZE);
692 		uint64 pa0 = LookupAddr(va0);
693 		TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%" B_PRIxADDR "\n",
694 			va0, pa0);
695 
696 		if (pa0 == 0) {
697 			TRACE("[!] not mapped: 0x%" B_PRIxADDR "\n", va0);
698 			return B_BAD_ADDRESS;
699 		}
700 
701 		uint64 n = B_PAGE_SIZE - (to - va0);
702 		if (n > size)
703 			n = size;
704 
705 		memcpy(VirtFromPhys(pa0 + (to - va0)), from, n);
706 
707 		size -= n;
708 		from += n;
709 		to = va0 + B_PAGE_SIZE;
710 	}
711 	return B_OK;
712 }
713 
714 
715 status_t
716 RISCV64VMTranslationMap::MemcpyFromMap(char *to, addr_t from, size_t size)
717 {
718 	TRACE("RISCV64VMPhysicalPageMapper::MemcpyFromMap(0x%" B_PRIxADDR
719 		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n",
720 		(addr_t)to, from, size);
721 
722 	while (size > 0) {
723 		uint64 va0 = ROUNDDOWN(from, B_PAGE_SIZE);
724 		uint64 pa0 = LookupAddr(va0);
725 		if (pa0 == 0) {
726 			TRACE("[!] not mapped: 0x%" B_PRIxADDR
727 				", calling page fault handler\n", va0);
728 
729 			addr_t newIP;
730 			vm_page_fault(va0, Ra(), true, false, true, &newIP);
731 
732 			pa0 = LookupAddr(va0);
733 			TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%"
734 				B_PRIxADDR "\n", va0, pa0);
735 
736 			if (pa0 == 0)
737 				return B_BAD_ADDRESS;
738 		}
739 		uint64 n = B_PAGE_SIZE - (from - va0);
740 		if(n > size)
741 			n = size;
742 
743 		memcpy(to, VirtFromPhys(pa0 + (from - va0)), n);
744 
745 		size -= n;
746 		to += n;
747 		from = va0 + B_PAGE_SIZE;
748 	}
749 
750 	return B_OK;
751 }
752 
753 
754 status_t
755 RISCV64VMTranslationMap::MemsetToMap(addr_t to, char c, size_t count)
756 {
757 	TRACE("RISCV64VMPhysicalPageMapper::MemsetToMap(0x%" B_PRIxADDR
758 		", %d, %" B_PRIuSIZE ")\n", to, c, count);
759 
760 	while (count > 0) {
761 		uint64 va0 = ROUNDDOWN(to, B_PAGE_SIZE);
762 		uint64 pa0 = LookupAddr(va0);
763 		TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%" B_PRIxADDR "\n",
764 			va0, pa0);
765 
766 		if (pa0 == 0) {
767 			TRACE("[!] not mapped: 0x%" B_PRIxADDR
768 				", calling page fault handler\n", va0);
769 			addr_t newIP;
770 			vm_page_fault(va0, Ra(), true, false, true, &newIP);
771 			pa0 = LookupAddr(va0);
772 			TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%"
773 				B_PRIxADDR "\n", va0, pa0);
774 
775 			if (pa0 == 0)
776 				return B_BAD_ADDRESS;
777 		}
778 
779 		uint64 n = B_PAGE_SIZE - (to - va0);
780 		if (n > count)
781 			n = count;
782 
783 		memset(VirtFromPhys(pa0 + (to - va0)), c, n);
784 
785 		count -= n;
786 		to = va0 + B_PAGE_SIZE;
787 	}
788 	return B_OK;
789 }
790 
791 
792 ssize_t
793 RISCV64VMTranslationMap::StrlcpyFromMap(char *to, addr_t from, size_t size)
794 {
795 	// NOT_IMPLEMENTED_PANIC();
796 	return strlcpy(to, (const char*)from, size);
797 	// return 0;
798 }
799 
800 
801 ssize_t
802 RISCV64VMTranslationMap::StrlcpyToMap(addr_t to, const char *from, size_t size)
803 {
804 	ssize_t len = strlen(from) + 1;
805 	if ((size_t)len > size)
806 		len = size;
807 
808 	if (MemcpyToMap(to, from, len) < B_OK)
809 		return 0;
810 
811 	return len;
812 }
813 
814 
815 //#pragma mark RISCV64VMPhysicalPageMapper
816 
817 
// Trivial constructor: all physical memory is reachable through the
// permanent VirtFromPhys() mapping, so no per-CPU setup is needed.
RISCV64VMPhysicalPageMapper::RISCV64VMPhysicalPageMapper()
{
	TRACE("+RISCV64VMPhysicalPageMapper\n");
}
822 
823 
// Trivial destructor; nothing was allocated by the constructor.
RISCV64VMPhysicalPageMapper::~RISCV64VMPhysicalPageMapper()
{
	TRACE("-RISCV64VMPhysicalPageMapper\n");
}
828 
829 
830 status_t
831 RISCV64VMPhysicalPageMapper::GetPage(phys_addr_t physicalAddress,
832 	addr_t* _virtualAddress, void** _handle)
833 {
834 	*_virtualAddress = (addr_t)VirtFromPhys(physicalAddress);
835 	*_handle = (void*)1;
836 	return B_OK;
837 }
838 
839 
840 status_t
841 RISCV64VMPhysicalPageMapper::PutPage(addr_t virtualAddress, void* handle)
842 {
843 	return B_OK;
844 }
845 
846 
847 status_t
848 RISCV64VMPhysicalPageMapper::GetPageCurrentCPU( phys_addr_t physicalAddress,
849 	addr_t* _virtualAddress, void** _handle)
850 {
851 	return GetPage(physicalAddress, _virtualAddress, _handle);
852 }
853 
854 
855 status_t
856 RISCV64VMPhysicalPageMapper::PutPageCurrentCPU(addr_t virtualAddress,
857 	void* _handle)
858 {
859 	return PutPage(virtualAddress, _handle);
860 }
861 
862 
// TODO: not implemented; panics if ever called (kernel debugger path).
status_t
RISCV64VMPhysicalPageMapper::GetPageDebug(phys_addr_t physicalAddress,
	addr_t* _virtualAddress, void** _handle)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}
870 
871 
// TODO: not implemented; panics if ever called (kernel debugger path).
status_t
RISCV64VMPhysicalPageMapper::PutPageDebug(addr_t virtualAddress, void* handle)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}
878 
879 
880 status_t
881 RISCV64VMPhysicalPageMapper::MemsetPhysical(phys_addr_t address, int value,
882 	phys_size_t length)
883 {
884 	TRACE("RISCV64VMPhysicalPageMapper::MemsetPhysical(0x%" B_PRIxADDR
885 		", 0x%x, 0x%" B_PRIxADDR ")\n", address, value, length);
886 	set_ac();
887 	memset(VirtFromPhys(address), value, length);
888 	clear_ac();
889 
890 	return B_OK;
891 }
892 
893 
894 status_t
895 RISCV64VMPhysicalPageMapper::MemcpyFromPhysical(void* to, phys_addr_t from,
896 	size_t length, bool user)
897 {
898 	TRACE("RISCV64VMPhysicalPageMapper::MemcpyFromPhysical(0x%" B_PRIxADDR
899 		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n", (addr_t)to,
900 		from, length);
901 
902 	set_ac();
903 	memcpy(to, VirtFromPhys(from), length);
904 	clear_ac();
905 
906 	return B_OK;
907 }
908 
909 
910 status_t
911 RISCV64VMPhysicalPageMapper::MemcpyToPhysical(phys_addr_t to, const void* from,
912 	size_t length, bool user)
913 {
914 	TRACE("RISCV64VMPhysicalPageMapper::MemcpyToPhysical(0x%" B_PRIxADDR
915 		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n", to, (addr_t)from,
916 		length);
917 
918 	set_ac();
919 	memcpy(VirtFromPhys(to), from, length);
920 	clear_ac();
921 
922 	return B_OK;
923 }
924 
925 
926 void
927 RISCV64VMPhysicalPageMapper::MemcpyPhysicalPage(phys_addr_t to,
928 	phys_addr_t from)
929 {
930 	TRACE("RISCV64VMPhysicalPageMapper::MemcpyPhysicalPage(0x%" B_PRIxADDR
931 		", 0x%" B_PRIxADDR ")\n", to, from);
932 
933 	set_ac();
934 	memcpy(VirtFromPhys(to), VirtFromPhys(from), B_PAGE_SIZE);
935 	clear_ac();
936 }
937