/*
 * Copyright 2020-2021, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *   X512 <danger_mail@list.ru>
 */


#include "RISCV64VMTranslationMap.h"

#include <kernel.h>
#include <vm/vm_priv.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>
#include <slab/Slab.h>
#include <platform/sbi/sbi_syscalls.h>

#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>


//#define DO_TRACE
#ifdef DO_TRACE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif

#define NOT_IMPLEMENTED_PANIC() \
	panic("not implemented: %s\n", __PRETTY_FUNCTION__)

extern uint32 gPlatform;

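// The page table is walked from the root at level 2 down to the leaf level 0
// (Sv39: three levels with pteCount entries each). Kernel tables are freed in
// full; for user tables only the user portion of the root table is traversed.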
static void
FreePageTable(page_num_t ppn, bool isKernel, uint32 level = 2)
{
	if (level > 0) {
		Pte* pte = (Pte*)VirtFromPhys(ppn * B_PAGE_SIZE);
		uint64 beg = 0;
		uint64 end = pteCount - 1;
		if (level == 2 && !isKernel) {
			beg = VirtAdrPte(USER_BASE, 2);
			end = VirtAdrPte(USER_TOP, 2);
		}
		for (uint64 i = beg; i <= end; i++) {
			if (pte[i].isValid)
				FreePageTable(pte[i].ppn, isKernel, level - 1);
		}
	}
	vm_page* page = vm_lookup_page(ppn);
	DEBUG_PAGE_ACCESS_START(page);
	vm_page_set_state(page, PAGE_STATE_FREE);
}


static uint64
GetPageTableSize(page_num_t ppn, bool isKernel, uint32 level = 2)
{
	if (ppn == 0)
		return 0;

	if (level == 0)
		return 1;

	uint64 size = 1;
	Pte* pte = (Pte*)VirtFromPhys(ppn * B_PAGE_SIZE);
	uint64 beg = 0;
	uint64 end = pteCount - 1;
	if (level == 2 && !isKernel) {
		beg = VirtAdrPte(USER_BASE, 2);
		end = VirtAdrPte(USER_TOP, 2);
	}
	for (uint64 i = beg; i <= end; i++) {
		if (pte[i].isValid)
			size += GetPageTableSize(pte[i].ppn, isKernel, level - 1);
	}
	return size;
}


//#pragma mark RISCV64VMTranslationMap


std::atomic<Pte>*
RISCV64VMTranslationMap::LookupPte(addr_t virtAdr, bool alloc,
	vm_page_reservation* reservation)
{
	if (fPageTable == 0) {
		if (!alloc)
			return NULL;
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
		fPageTable = page->physical_page_number * B_PAGE_SIZE;
		if (fPageTable == 0)
			return NULL;
		DEBUG_PAGE_ACCESS_END(page);
		fPageTableSize++;
		if (!fIsKernel) {
			// Map kernel address space into user address space. Preallocated
			// kernel level-2 PTEs are reused.
			RISCV64VMTranslationMap* kernelMap = (RISCV64VMTranslationMap*)
				VMAddressSpace::Kernel()->TranslationMap();
			Pte *kernelPageTable = (Pte*)VirtFromPhys(kernelMap->PageTable());
			Pte *userPageTable = (Pte*)VirtFromPhys(fPageTable);
			for (uint64 i = VirtAdrPte(KERNEL_BASE, 2);
					i <= VirtAdrPte(KERNEL_TOP, 2); i++) {
				Pte *pte = &userPageTable[i];
				pte->ppn = kernelPageTable[i].ppn;
				pte->isValid = true;
			}
		}
	}
	auto pte = (std::atomic<Pte>*)VirtFromPhys(fPageTable);
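	// Walk the table hierarchy from the root towards the leaf level,
	// allocating missing intermediate tables on the way when `alloc` is set.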
	for (int level = 2; level > 0; level--) {
		pte += VirtAdrPte(virtAdr, level);
		if (!pte->load().isValid) {
			if (!alloc)
				return NULL;
			vm_page* page = vm_page_allocate_page(reservation,
				PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
			page_num_t ppn = page->physical_page_number;
			if (ppn == 0)
				return NULL;
			DEBUG_PAGE_ACCESS_END(page);
			fPageTableSize++;
			Pte newPte {
				.isValid = true,
				.isGlobal = fIsKernel,
				.ppn = ppn
			};
			pte->store(newPte);
		}
		pte = (std::atomic<Pte>*)VirtFromPhys(B_PAGE_SIZE * pte->load().ppn);
	}
	pte += VirtAdrPte(virtAdr, 0);
	return pte;
}


phys_addr_t
RISCV64VMTranslationMap::LookupAddr(addr_t virtAdr)
{
	std::atomic<Pte>* pte = LookupPte(virtAdr, false, NULL);
	if (pte == NULL)
		return 0;
	Pte pteVal = pte->load();
	if (!pteVal.isValid)
		return 0;
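	// Only report mappings that belong to this map: user maps share the
	// kernel's level-2 entries, so kernel pages are reachable here but must
	// be ignored.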
	if (fIsKernel != !pteVal.isUser)
		return 0;
	return pteVal.ppn * B_PAGE_SIZE;
}


RISCV64VMTranslationMap::RISCV64VMTranslationMap(bool kernel,
	phys_addr_t pageTable):
	fIsKernel(kernel),
	fPageTable(pageTable),
	fPageTableSize(GetPageTableSize(pageTable / B_PAGE_SIZE, kernel)),
	fInvalidPagesCount(0),
	fInvalidCode(false)
{
	TRACE("+RISCV64VMTranslationMap(%p, %d, 0x%" B_PRIxADDR ")\n", this,
		kernel, pageTable);
	TRACE("  pageTableSize: %" B_PRIu64 "\n", fPageTableSize);
}


RISCV64VMTranslationMap::~RISCV64VMTranslationMap()
{
	TRACE("-RISCV64VMTranslationMap(%p)\n", this);
	TRACE("  pageTableSize: %" B_PRIu64 "\n", fPageTableSize);
	TRACE("  GetPageTableSize(): %" B_PRIu64 "\n",
		GetPageTableSize(fPageTable / B_PAGE_SIZE, fIsKernel));

	ASSERT_ALWAYS(!fIsKernel);
	// Can't delete currently used page table
	ASSERT_ALWAYS(::Satp() != Satp());

	FreePageTable(fPageTable / B_PAGE_SIZE, fIsKernel);
}


bool
RISCV64VMTranslationMap::Lock()
{
	TRACE("RISCV64VMTranslationMap::Lock()\n");
	recursive_lock_lock(&fLock);
	return true;
}


void
RISCV64VMTranslationMap::Unlock()
{
	TRACE("RISCV64VMTranslationMap::Unlock()\n");
	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we're about to release it for the last time
		Flush();
	}
	recursive_lock_unlock(&fLock);
}


addr_t
RISCV64VMTranslationMap::MappedSize() const
{
	NOT_IMPLEMENTED_PANIC();
	return 0;
}


size_t
RISCV64VMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	enum {
		level0Range = (uint64_t)B_PAGE_SIZE * pteCount,
		level1Range = (uint64_t)level0Range * pteCount,
		level2Range = (uint64_t)level1Range * pteCount,
	};

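	// start == 0 means only the size of the range is known; assume the least
	// favorable alignment within the address space for a worst-case estimate.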
	if (start == 0) {
		start = (level2Range) - B_PAGE_SIZE;
		end += start;
	}

	size_t requiredLevel2 = end / level2Range + 1 - start / level2Range;
	size_t requiredLevel1 = end / level1Range + 1 - start / level1Range;
	size_t requiredLevel0 = end / level0Range + 1 - start / level0Range;

	return requiredLevel2 + requiredLevel1 + requiredLevel0;
}


status_t
RISCV64VMTranslationMap::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
	uint32 attributes, uint32 memoryType,
	vm_page_reservation* reservation)
{
	TRACE("RISCV64VMTranslationMap::Map(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
		")\n", virtualAddress, physicalAddress);

	ThreadCPUPinner pinner(thread_get_current_thread());

	std::atomic<Pte>* pte = LookupPte(virtualAddress, true, reservation);
	if (pte == NULL)
		panic("can't allocate page table");

	Pte newPte {
		.isValid = true,
		.isGlobal = fIsKernel,
		.ppn = physicalAddress / B_PAGE_SIZE
	};

	if ((attributes & B_USER_PROTECTION) != 0) {
		newPte.isUser = true;
		if ((attributes & B_READ_AREA) != 0)
			newPte.isRead = true;
		if ((attributes & B_WRITE_AREA) != 0)
			newPte.isWrite = true;
		if ((attributes & B_EXECUTE_AREA) != 0) {
			newPte.isExec = true;
			fInvalidCode = true;
		}
	} else {
		if ((attributes & B_KERNEL_READ_AREA) != 0)
			newPte.isRead = true;
		if ((attributes & B_KERNEL_WRITE_AREA) != 0)
			newPte.isWrite = true;
		if ((attributes & B_KERNEL_EXECUTE_AREA) != 0) {
			newPte.isExec = true;
			fInvalidCode = true;
		}
	}

	pte->store(newPte);

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return B_OK;
}


status_t
RISCV64VMTranslationMap::Unmap(addr_t start, addr_t end)
{
	TRACE("RISCV64VMTranslationMap::Unmap(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
		")\n", start, end);

	ThreadCPUPinner pinner(thread_get_current_thread());

	for (addr_t page = start; page < end; page += B_PAGE_SIZE) {
		std::atomic<Pte>* pte = LookupPte(page, false, NULL);
		if (pte != NULL) {
			fMapCount--;
			Pte oldPte = pte->exchange({});
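			// An entry that was never marked accessed cannot have been used
			// for a translation, so the TLB needs no invalidation for it.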
			if (oldPte.isAccessed)
				InvalidatePage(page);
		}
	}
	return B_OK;
}


status_t
RISCV64VMTranslationMap::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


/*
	Things that need to be done when unmapping VMArea pages:
		update vm_page::accessed, modified
		MMIO pages:
			just unmap
		wired pages:
			decrement wired count
		non-wired pages:
			remove from VMArea and vm_page `mappings` list
		wired and non-wired pages:
			vm_page_set_state
*/

status_t
RISCV64VMTranslationMap::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	TRACE("RISCV64VMTranslationMap::UnmapPage(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", %d)\n", (addr_t)area, area->name, address,
		updatePageQueue);

	ThreadCPUPinner pinner(thread_get_current_thread());

	std::atomic<Pte>* pte = LookupPte(address, false, NULL);
	if (pte == NULL || !pte->load().isValid)
		return B_ENTRY_NOT_FOUND;

	RecursiveLocker locker(fLock);

	Pte oldPte = pte->exchange({});
	fMapCount--;
	pinner.Unlock();

	if (oldPte.isAccessed)
		InvalidatePage(address);

	Flush();

	locker.Detach(); // PageUnmapped takes ownership
	PageUnmapped(area, oldPte.ppn, oldPte.isAccessed, oldPte.isDirty,
		updatePageQueue);
	return B_OK;
}


void
RISCV64VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	TRACE("RISCV64VMTranslationMap::UnmapPages(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d)\n", (addr_t)area,
		area->name, base, size, updatePageQueue);

	if (size == 0)
		return;

	addr_t end = base + size - 1;

	VMAreaMappings queue;
	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	for (addr_t start = base; start < end; start += B_PAGE_SIZE) {
		std::atomic<Pte>* pte = LookupPte(start, false, NULL);
		if (pte == NULL)
			continue;

		Pte oldPte = pte->exchange({});
		if (!oldPte.isValid)
			continue;

		fMapCount--;

		if (oldPte.isAccessed)
			InvalidatePage(start);

		if (area->cache_type != CACHE_TYPE_DEVICE) {
			// get the page
			vm_page* page = vm_lookup_page(oldPte.ppn);
			ASSERT(page != NULL);

			DEBUG_PAGE_ACCESS_START(page);

			// transfer the accessed/dirty flags to the page
			page->accessed = oldPte.isAccessed;
			page->modified = oldPte.isDirty;

			// remove the mapping object/decrement the wired_count of the
			// page
			if (area->wiring == B_NO_LOCK) {
				vm_page_mapping* mapping = NULL;
				vm_page_mappings::Iterator iterator
					= page->mappings.GetIterator();
				while ((mapping = iterator.Next()) != NULL) {
					if (mapping->area == area)
						break;
				}

				ASSERT(mapping != NULL);

				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				queue.Add(mapping);
			} else
				page->DecrementWiredCount();

			if (!page->IsMapped()) {
				atomic_add(&gMappedPagesCount, -1);

				if (updatePageQueue) {
					if (page->Cache()->temporary)
						vm_page_set_state(page, PAGE_STATE_INACTIVE);
					else if (page->modified)
						vm_page_set_state(page, PAGE_STATE_MODIFIED);
					else
						vm_page_set_state(page, PAGE_STATE_CACHED);
				}
			}

			DEBUG_PAGE_ACCESS_END(page);
		}

		// flush explicitly, since we directly use the lock
		Flush();
	}

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical here, as in all cases this method is used, the unmapped
	// area range is unmapped for good (resized/cut) and the pages will likely
	// be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);

	while (vm_page_mapping* mapping = queue.RemoveHead()) {
		vm_free_page_mapping(mapping->page->physical_page_number, mapping,
			freeFlags);
	}
}


void
RISCV64VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	TRACE("RISCV64VMTranslationMap::UnmapArea(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d, %d)\n", (addr_t)area,
		area->name, area->Base(), area->Size(), deletingAddressSpace,
		ignoreTopCachePageFlags);

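	// Device areas and wired areas are not tracked via per-page mapping
	// objects; unmap them page by page instead of walking the mapping list.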
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		UnmapPages(area, area->Base(), area->Size(), true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {

		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE)
				- area->cache_offset);

			std::atomic<Pte>* pte = LookupPte(address, false, NULL);
			if (pte == NULL || !pte->load().isValid) {
				panic("page %p has mapping for area %p "
					"(%#" B_PRIxADDR "), but has no "
					"page table", page, area, address);
				continue;
			}

			Pte oldPte = pte->exchange({});

			// transfer the accessed/dirty flags to the page and
			// invalidate the mapping, if necessary
			if (oldPte.isAccessed) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if (oldPte.isDirty)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary) {
					vm_page_set_state(page,
						PAGE_STATE_INACTIVE);
				} else if (page->modified) {
					vm_page_set_state(page,
						PAGE_STATE_MODIFIED);
				} else {
					vm_page_set_state(page,
						PAGE_STATE_CACHED);
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);

	while (vm_page_mapping* mapping = mappings.RemoveHead()) {
		vm_free_page_mapping(mapping->page->physical_page_number, mapping,
			freeFlags);
	}
}


status_t
RISCV64VMTranslationMap::Query(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	*_flags = 0;
	*_physicalAddress = 0;

	ThreadCPUPinner pinner(thread_get_current_thread());

	if (fPageTable == 0)
		return B_OK;

	std::atomic<Pte>* pte = LookupPte(virtualAddress, false, NULL);
	if (pte == NULL)
		return B_OK;

	Pte pteVal = pte->load();
	*_physicalAddress = pteVal.ppn * B_PAGE_SIZE;

	if (pteVal.isValid)
		*_flags |= PAGE_PRESENT;
	if (pteVal.isDirty)
		*_flags |= PAGE_MODIFIED;
	if (pteVal.isAccessed)
		*_flags |= PAGE_ACCESSED;
	if (pteVal.isUser) {
		if (pteVal.isRead)
			*_flags |= B_READ_AREA;
		if (pteVal.isWrite)
			*_flags |= B_WRITE_AREA;
		if (pteVal.isExec)
			*_flags |= B_EXECUTE_AREA;
	} else {
		if (pteVal.isRead)
			*_flags |= B_KERNEL_READ_AREA;
		if (pteVal.isWrite)
			*_flags |= B_KERNEL_WRITE_AREA;
		if (pteVal.isExec)
			*_flags |= B_KERNEL_EXECUTE_AREA;
	}

	return B_OK;
}


status_t
RISCV64VMTranslationMap::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	return Query(virtualAddress, _physicalAddress, _flags);
}


status_t
RISCV64VMTranslationMap::Protect(addr_t base, addr_t top, uint32 attributes,
	uint32 memoryType)
{
	TRACE("RISCV64VMTranslationMap::Protect(0x%" B_PRIxADDR ", 0x%"
		B_PRIxADDR ")\n", base, top);

	ThreadCPUPinner pinner(thread_get_current_thread());

	for (addr_t page = base; page < top; page += B_PAGE_SIZE) {

		std::atomic<Pte>* pte = LookupPte(page, false, NULL);
		if (pte == NULL || !pte->load().isValid) {
			TRACE("attempt to protect not mapped page: 0x%"
				B_PRIxADDR "\n", page);
			continue;
		}

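		// Modify the entry atomically so that accessed/dirty bits set
		// concurrently (e.g. by the fault handler) are not lost.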
		Pte oldPte {};
		Pte newPte {};
		while (true) {
			oldPte = pte->load();

			newPte = oldPte;
			if ((attributes & B_USER_PROTECTION) != 0) {
				newPte.isUser = true;
				newPte.isRead = (attributes & B_READ_AREA) != 0;
				newPte.isWrite = (attributes & B_WRITE_AREA) != 0;
				newPte.isExec = (attributes & B_EXECUTE_AREA) != 0;
			} else {
				newPte.isUser = false;
				newPte.isRead = (attributes & B_KERNEL_READ_AREA) != 0;
				newPte.isWrite = (attributes & B_KERNEL_WRITE_AREA) != 0;
				newPte.isExec = (attributes & B_KERNEL_EXECUTE_AREA) != 0;
			}

			if (pte->compare_exchange_strong(oldPte, newPte))
				break;
		}

		fInvalidCode = newPte.isExec;

		if (oldPte.isAccessed)
			InvalidatePage(page);
	}

	return B_OK;
}


status_t
RISCV64VMTranslationMap::ProtectPage(VMArea* area, addr_t address,
	uint32 attributes)
{
	NOT_IMPLEMENTED_PANIC();
	return B_OK;
}


status_t
RISCV64VMTranslationMap::ProtectArea(VMArea* area, uint32 attributes)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


static inline uint64
ConvertAccessedFlags(uint32 flags)
{
	Pte pteFlags {
		.isAccessed = (flags & PAGE_ACCESSED) != 0,
		.isDirty = (flags & PAGE_MODIFIED) != 0
	};
	return pteFlags.val;
}


void
RISCV64VMTranslationMap::SetFlags(addr_t address, uint32 flags)
{
	// Only called from interrupt handler with interrupts disabled for CPUs
	// that don't support setting accessed/modified flags by hardware.

	std::atomic<Pte>* pte = LookupPte(address, false, NULL);
	if (pte == NULL || !pte->load().isValid)
		return;

	*(std::atomic<uint64>*)pte |= ConvertAccessedFlags(flags);

	if (IS_KERNEL_ADDRESS(address))
		FlushTlbPage(address);
	else
		FlushTlbPageAsid(address, 0);

	return;
}


status_t
RISCV64VMTranslationMap::ClearFlags(addr_t address, uint32 flags)
{
	ThreadCPUPinner pinner(thread_get_current_thread());

	std::atomic<Pte>* pte = LookupPte(address, false, NULL);
	if (pte == NULL || !pte->load().isValid)
		return B_OK;

	*(std::atomic<uint64>*)pte &= ~ConvertAccessedFlags(flags);
	InvalidatePage(address);
	return B_OK;
}


bool
RISCV64VMTranslationMap::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	TRACE("RISCV64VMPhysicalPageMapper::ClearAccessedAndModified(0x%"
		B_PRIxADDR "(%s), 0x%" B_PRIxADDR ", %d)\n", (addr_t)area,
		area->name, address, unmapIfUnaccessed);

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	std::atomic<Pte>* pte = LookupPte(address, false, NULL);
	if (pte == NULL || !pte->load().isValid)
		return false;

	Pte oldPte {};
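	// If the page may be unmapped when unaccessed, the entry is cleared
	// entirely (atomically), retrying on concurrent modification; if it has
	// been accessed in the meantime only the accessed/dirty bits are cleared.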
	if (unmapIfUnaccessed) {
		for (;;) {
			oldPte = pte->load();
			if (!oldPte.isValid)
				return false;

			if (oldPte.isAccessed) {
				oldPte.val = ((std::atomic<uint64>*)pte)->fetch_and(
					~Pte {.isAccessed = true, .isDirty = true}.val);
				break;
			}
			if (pte->compare_exchange_strong(oldPte, {}))
				break;
		}
	} else {
		oldPte.val = ((std::atomic<uint64>*)pte)->fetch_and(
			~Pte {.isAccessed = true, .isDirty = true}.val);
	}

	pinner.Unlock();
	_modified = oldPte.isDirty;
	if (oldPte.isAccessed) {
		InvalidatePage(address);
		Flush();
		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	fMapCount--;

	locker.Detach(); // UnaccessedPageUnmapped takes ownership
	UnaccessedPageUnmapped(area, oldPte.ppn);
	return false;
}


void
RISCV64VMTranslationMap::Flush()
{
	// copy of X86VMTranslationMap::Flush
	// TODO: move to common VMTranslationMap class

	if (fInvalidPagesCount <= 0)
		return;

	ThreadCPUPinner pinner(thread_get_current_thread());

	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernel) {
			arch_cpu_global_TLB_invalidate();

			smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
		} else {
			cpu_status state = disable_interrupts();
			arch_cpu_user_TLB_invalidate();
			restore_interrupts(state);

			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = fActiveOnCpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
					0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
			}
		}
	} else {
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);

		if (fIsKernel) {
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
				SMP_MSG_FLAG_SYNC);
		} else {
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = fActiveOnCpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
					(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
					SMP_MSG_FLAG_SYNC);
			}
		}
	}
	fInvalidPagesCount = 0;

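	// Newly mapped executable pages require instruction cache synchronization:
	// FENCE.I on the local hart and, when running under SBI, a remote fence
	// on all other harts this map is currently active on.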
	if (fInvalidCode) {
		FenceI();

		int cpu = smp_get_current_cpu();
		CPUSet cpuMask = fActiveOnCpus;
		cpuMask.ClearBit(cpu);

		if (!cpuMask.IsEmpty()) {
			switch (gPlatform) {
				case kPlatformSbi: {
					uint64 hartMask = 0;
					int32 cpuCount = smp_get_num_cpus();
					for (int32 i = 0; i < cpuCount; i++) {
						if (cpuMask.GetBit(i))
							hartMask |= (uint64)1 << gCPU[i].arch.hartId;
					}
					// TODO: handle hart ID >= 64
					memory_full_barrier();
					sbi_remote_fence_i(hartMask, 0);
					break;
				}
			}
		}
		fInvalidCode = false;
	}
}


void
RISCV64VMTranslationMap::DebugPrintMappingInfo(addr_t virtualAddress)
{
	NOT_IMPLEMENTED_PANIC();
}


bool
RISCV64VMTranslationMap::DebugGetReverseMappingInfo(phys_addr_t physicalAddress,
	ReverseMappingInfoCallback& callback)
{
	NOT_IMPLEMENTED_PANIC();
	return false;
}


status_t
RISCV64VMTranslationMap::MemcpyToMap(addr_t to, const char *from, size_t size)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyToMap(0x%" B_PRIxADDR ", 0x%"
		B_PRIxADDR ", %" B_PRIuSIZE ")\n", to, (addr_t)from, size);

	while (size > 0) {
		uint64 va0 = ROUNDDOWN(to, B_PAGE_SIZE);
		uint64 pa0 = LookupAddr(va0);
		TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%" B_PRIxADDR "\n",
			va0, pa0);

		if (pa0 == 0) {
			TRACE("[!] not mapped: 0x%" B_PRIxADDR "\n", va0);
			return B_BAD_ADDRESS;
		}

		uint64 n = B_PAGE_SIZE - (to - va0);
		if (n > size)
			n = size;

		memcpy(VirtFromPhys(pa0 + (to - va0)), from, n);

		size -= n;
		from += n;
		to = va0 + B_PAGE_SIZE;
	}
	return B_OK;
}


status_t
RISCV64VMTranslationMap::MemcpyFromMap(char *to, addr_t from, size_t size)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyFromMap(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n",
		(addr_t)to, from, size);

	while (size > 0) {
		uint64 va0 = ROUNDDOWN(from, B_PAGE_SIZE);
		uint64 pa0 = LookupAddr(va0);
		if (pa0 == 0) {
			TRACE("[!] not mapped: 0x%" B_PRIxADDR
				", calling page fault handler\n", va0);

			addr_t newIP;
			vm_page_fault(va0, Ra(), true, false, true, &newIP);

			pa0 = LookupAddr(va0);
			TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%"
				B_PRIxADDR "\n", va0, pa0);

			if (pa0 == 0)
				return B_BAD_ADDRESS;
		}
		uint64 n = B_PAGE_SIZE - (from - va0);
		if (n > size)
			n = size;

		memcpy(to, VirtFromPhys(pa0 + (from - va0)), n);

		size -= n;
		to += n;
		from = va0 + B_PAGE_SIZE;
	}

	return B_OK;
}


status_t
RISCV64VMTranslationMap::MemsetToMap(addr_t to, char c, size_t count)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemsetToMap(0x%" B_PRIxADDR
		", %d, %" B_PRIuSIZE ")\n", to, c, count);

	while (count > 0) {
		uint64 va0 = ROUNDDOWN(to, B_PAGE_SIZE);
		uint64 pa0 = LookupAddr(va0);
		TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%" B_PRIxADDR "\n",
			va0, pa0);

		if (pa0 == 0) {
			TRACE("[!] not mapped: 0x%" B_PRIxADDR
				", calling page fault handler\n", va0);
			addr_t newIP;
			vm_page_fault(va0, Ra(), true, false, true, &newIP);
			pa0 = LookupAddr(va0);
			TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%"
				B_PRIxADDR "\n", va0, pa0);

			if (pa0 == 0)
				return B_BAD_ADDRESS;
		}

		uint64 n = B_PAGE_SIZE - (to - va0);
		if (n > count)
			n = count;

		memset(VirtFromPhys(pa0 + (to - va0)), c, n);

		count -= n;
		to = va0 + B_PAGE_SIZE;
	}
	return B_OK;
}


ssize_t
RISCV64VMTranslationMap::StrlcpyFromMap(char *to, addr_t from, size_t size)
{
	// NOT_IMPLEMENTED_PANIC();
	return strlcpy(to, (const char*)from, size);
	// return 0;
}


ssize_t
RISCV64VMTranslationMap::StrlcpyToMap(addr_t to, const char *from, size_t size)
{
	ssize_t len = strlen(from) + 1;
	if ((size_t)len > size)
		len = size;

	if (MemcpyToMap(to, from, len) < B_OK)
		return 0;

	return len;
}


//#pragma mark RISCV64VMPhysicalPageMapper
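// This port keeps all of physical memory permanently mapped into the kernel
// address space, so physical pages can be accessed directly through
// VirtFromPhys() and no temporary per-CPU mappings are needed.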


RISCV64VMPhysicalPageMapper::RISCV64VMPhysicalPageMapper()
{
	TRACE("+RISCV64VMPhysicalPageMapper\n");
}


RISCV64VMPhysicalPageMapper::~RISCV64VMPhysicalPageMapper()
{
	TRACE("-RISCV64VMPhysicalPageMapper\n");
}


status_t
RISCV64VMPhysicalPageMapper::GetPage(phys_addr_t physicalAddress,
	addr_t* _virtualAddress, void** _handle)
{
	*_virtualAddress = (addr_t)VirtFromPhys(physicalAddress);
	*_handle = (void*)1;
	return B_OK;
}


status_t
RISCV64VMPhysicalPageMapper::PutPage(addr_t virtualAddress, void* handle)
{
	return B_OK;
}


status_t
RISCV64VMPhysicalPageMapper::GetPageCurrentCPU(phys_addr_t physicalAddress,
	addr_t* _virtualAddress, void** _handle)
{
	return GetPage(physicalAddress, _virtualAddress, _handle);
}


status_t
RISCV64VMPhysicalPageMapper::PutPageCurrentCPU(addr_t virtualAddress,
	void* _handle)
{
	return PutPage(virtualAddress, _handle);
}


status_t
RISCV64VMPhysicalPageMapper::GetPageDebug(phys_addr_t physicalAddress,
	addr_t* _virtualAddress, void** _handle)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


status_t
RISCV64VMPhysicalPageMapper::PutPageDebug(addr_t virtualAddress, void* handle)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


status_t
RISCV64VMPhysicalPageMapper::MemsetPhysical(phys_addr_t address, int value,
	phys_size_t length)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemsetPhysical(0x%" B_PRIxADDR
		", 0x%x, 0x%" B_PRIxADDR ")\n", address, value, length);
	return user_memset(VirtFromPhys(address), value, length);
}


status_t
RISCV64VMPhysicalPageMapper::MemcpyFromPhysical(void* to, phys_addr_t from,
	size_t length, bool user)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyFromPhysical(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n", (addr_t)to,
		from, length);
	return user_memcpy(to, VirtFromPhys(from), length);
}


status_t
RISCV64VMPhysicalPageMapper::MemcpyToPhysical(phys_addr_t to, const void* from,
	size_t length, bool user)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyToPhysical(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n", to, (addr_t)from,
		length);
	return user_memcpy(VirtFromPhys(to), from, length);
}


void
RISCV64VMPhysicalPageMapper::MemcpyPhysicalPage(phys_addr_t to,
	phys_addr_t from)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyPhysicalPage(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ")\n", to, from);
	user_memcpy(VirtFromPhys(to), VirtFromPhys(from), B_PAGE_SIZE);
}