/*
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2019, Adrien Destugues, pulkomandy@pulkomandy.tk
 * Distributed under the terms of the MIT License.
 */

#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <arch/vm.h>
#include <boot/kernel_args.h>

#include "RISCV64VMTranslationMap.h"


#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


// Sv39 virtual addresses are 39 bits wide; bits 63..39 must be copies of
// bit 38, so sign-extend the address before printing it.
static uint64_t
SignExtendVirtAdr(uint64_t virtAdr)
{
	if (((uint64_t)1 << 38) & virtAdr)
		return virtAdr | 0xFFFFFF8000000000;
	return virtAdr;
}


// Walk the Sv39 page table from the root at `pageTable` down to the level 0
// entry for `virtAdr`, or return NULL if an intermediate entry is not valid.
static Pte*
LookupPte(phys_addr_t pageTable, addr_t virtAdr)
{
	Pte *pte = (Pte*)VirtFromPhys(pageTable);
	for (int level = 2; level > 0; level--) {
		pte += VirtAdrPte(virtAdr, level);
		if (!((1 << pteValid) & pte->flags))
			return NULL;
		// TODO: handle superpages; this assumes non-leaf PTEs (RWX = 0)
		// above the lowest level
		pte = (Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn);
	}
	pte += VirtAdrPte(virtAdr, 0);
	return pte;
}


static void
WritePteFlags(uint32 flags)
{
	bool first = true;
	dprintf("{");
	for (uint32 i = 0; i < 32; i++) {
		if ((1 << i) & flags) {
			if (first)
				first = false;
			else
				dprintf(", ");

			switch (i) {
				case pteValid:
					dprintf("valid");
					break;
				case pteRead:
					dprintf("read");
					break;
				case pteWrite:
					dprintf("write");
					break;
				case pteExec:
					dprintf("exec");
					break;
				case pteUser:
					dprintf("user");
					break;
				case pteGlobal:
					dprintf("global");
					break;
				case pteAccessed:
					dprintf("accessed");
					break;
				case pteDirty:
					dprintf("dirty");
					break;
				default:
					dprintf("%" B_PRIu32, i);
			}
		}
	}
	dprintf("}");
}


// Coalesces virtually and physically contiguous mappings with identical
// flags into a single output line; the last range is flushed by the
// destructor.
class PageTableDumper
{
private:
	uint64 firstVirt;
	uint64 firstPhys;
	uint64 firstFlags;
	uint64 len;

public:
	PageTableDumper()
		:
		firstVirt(0),
		firstPhys(0),
		firstFlags(0),
		len(0)
	{}

	~PageTableDumper()
	{
		Write(0, 0, 0, 0);
	}

	void Write(uint64_t virtAdr, uint64_t physAdr, size_t size, uint64 flags)
	{
		if (virtAdr == firstVirt + len && physAdr == firstPhys + len
				&& flags == firstFlags) {
			len += size;
		} else {
			if (len != 0) {
				dprintf("  0x%08" B_PRIxADDR " - 0x%08" B_PRIxADDR,
					firstVirt, firstVirt + (len - 1));
				dprintf(": 0x%08" B_PRIxADDR " - 0x%08" B_PRIxADDR
					", %#" B_PRIxADDR ", ", firstPhys,
					firstPhys + (len - 1), len);
				WritePteFlags(firstFlags);
				dprintf("\n");
			}
			firstVirt = virtAdr;
			firstPhys = physAdr;
			firstFlags = flags;
			len = size;
		}
	}
};


static void
DumpPageTableInt(Pte* pte, uint64_t virtAdr, uint32_t level,
	PageTableDumper& dumper)
{
	for (uint32 i = 0; i < pteCount; i++) {
		if (((1 << pteValid) & pte[i].flags) != 0) {
			if ((((1 << pteRead) | (1 << pteWrite)
					| (1 << pteExec)) & pte[i].flags) == 0) {
				// a valid entry with RWX = 0 points to the next table level
				if (level == 0) {
					// malformed: there is no level below to descend into
					kprintf("  internal page table on level 0\n");
				} else {
					DumpPageTableInt((Pte*)VirtFromPhys(pageSize * pte[i].ppn),
						virtAdr + ((uint64_t)i
							<< (pageBits + pteIdxBits * level)),
						level - 1, dumper);
				}
			} else {
				dumper.Write(SignExtendVirtAdr(virtAdr
						+ ((uint64_t)i << (pageBits + pteIdxBits * level))),
					pte[i].ppn * B_PAGE_SIZE,
					1 << (pageBits + pteIdxBits * level), pte[i].flags);
			}
		}
	}
}
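

// Looks up an area by ID. The hash is only read-locked for the lookup itself,
// so the returned area is not protected against deletion; that is fine here
// since this is only called from kernel debugger commands, where no other
// threads run.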
static VMArea*
LookupArea(area_id id)
{
	VMAreaHash::ReadLock();
	VMArea* area = VMAreaHash::LookupLocked(id);
	VMAreaHash::ReadUnlock();

	return area;
}


static int
DumpPageTable(int argc, char** argv)
{
	int curArg = 1;
	SatpReg satp;
	bool isArea = false;
	addr_t base = 0;
	size_t size = 0;

	satp.val = Satp();
	while (curArg < argc && argv[curArg][0] == '-') {
		if (strcmp(argv[curArg], "-team") == 0) {
			curArg++;
			team_id id = strtoul(argv[curArg++], NULL, 0);
			VMAddressSpace* addrSpace = VMAddressSpace::DebugGet(id);
			if (addrSpace == NULL) {
				kprintf("could not find team %" B_PRId32 "\n", id);
				return 0;
			}
			satp.val = ((RISCV64VMTranslationMap*)
				addrSpace->TranslationMap())->Satp();
			isArea = false;
		} else if (strcmp(argv[curArg], "-area") == 0) {
			curArg++;
			uint64 areaId;
			if (!evaluate_debug_expression(argv[curArg++], &areaId, false))
				return 0;
			VMArea* area = LookupArea((area_id)areaId);
			if (area == NULL) {
				kprintf("could not find area %" B_PRId32 "\n",
					(area_id)areaId);
				return 0;
			}
			satp.val = ((RISCV64VMTranslationMap*)
				area->address_space->TranslationMap())->Satp();
			base = area->Base();
			size = area->Size();
			kprintf("area %" B_PRId32 " (%s)\n", area->id, area->name);
			isArea = true;
		} else {
			kprintf("unknown flag \"%s\"\n", argv[curArg]);
			return 0;
		}
	}

	kprintf("satp: %#" B_PRIx64 "\n", satp.val);

	PageTableDumper dumper;

	if (!isArea) {
		// dump the whole three-level table starting at the root
		Pte* root = (Pte*)VirtFromPhys(satp.ppn * B_PAGE_SIZE);
		DumpPageTableInt(root, 0, 2, dumper);
	} else {
		// dump only the pages covered by the given area
		for (; size > 0; base += B_PAGE_SIZE, size -= B_PAGE_SIZE) {
			Pte* pte = LookupPte(satp.ppn * B_PAGE_SIZE, base);
			if (pte == NULL || (pte->flags & (1 << pteValid)) == 0)
				continue;

			dumper.Write(base, pte->ppn * B_PAGE_SIZE, B_PAGE_SIZE,
				pte->flags);
		}
	}

	return 0;
}


static int
DumpVirtPage(int argc, char** argv)
{
	int curArg = 1;
	SatpReg satp;

	satp.val = Satp();
	while (curArg < argc && argv[curArg][0] == '-') {
		if (strcmp(argv[curArg], "-team") == 0) {
			curArg++;
			team_id id = strtoul(argv[curArg++], NULL, 0);
			VMAddressSpace* addrSpace = VMAddressSpace::DebugGet(id);
			if (addrSpace == NULL) {
				kprintf("could not find team %" B_PRId32 "\n", id);
				return 0;
			}
			satp.val = ((RISCV64VMTranslationMap*)
				addrSpace->TranslationMap())->Satp();
		} else {
			kprintf("unknown flag \"%s\"\n", argv[curArg]);
			return 0;
		}
	}

	kprintf("satp: %#" B_PRIx64 "\n", satp.val);

	if (curArg >= argc) {
		// don't dereference a missing address argument
		kprintf("page address expected\n");
		return 0;
	}

	uint64 virt = 0;
	if (!evaluate_debug_expression(argv[curArg++], &virt, false))
		return 0;

	virt = ROUNDDOWN(virt, B_PAGE_SIZE);

	Pte* pte = LookupPte(satp.ppn * B_PAGE_SIZE, virt);
	if (pte == NULL) {
		dprintf("not mapped\n");
		return 0;
	}

	PageTableDumper dumper;
	dumper.Write(virt, pte->ppn * B_PAGE_SIZE, B_PAGE_SIZE, pte->flags);

	return 0;
}


status_t
arch_vm_init(kernel_args *args)
{
	return B_OK;
}


status_t
arch_vm_init_post_area(kernel_args *args)
{
	// reserve the virtual range used for the physical memory map
	void* address = (void*)args->arch_args.physMap.start;
	area_id area = vm_create_null_area(VMAddressSpace::KernelID(),
		"physical map area", &address, B_EXACT_ADDRESS,
		args->arch_args.physMap.size, 0);
	if (area < B_OK)
		return area;

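	// These commands are available from the kernel debugger (KDL):
	//   dump_page_table [-team <id> | -area <id>]
	//   dump_virt_page [-team <id>] <address>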
	add_debugger_command("dump_page_table", &DumpPageTable,
		"Dump page table");
	add_debugger_command("dump_virt_page", &DumpVirtPage,
		"Dump virtual page mapping");

	return B_OK;
}


status_t
arch_vm_init_post_modules(kernel_args *args)
{
	return B_OK;
}


status_t
arch_vm_init_end(kernel_args *args)
{
	TRACE(("arch_vm_init_end(): %" B_PRIu32 " virtual ranges to keep:\n",
		args->arch_args.num_virtual_ranges_to_keep));

	for (int i = 0; i < (int)args->arch_args.num_virtual_ranges_to_keep; i++) {
		addr_range &range = args->arch_args.virtual_ranges_to_keep[i];

		TRACE(("  start: %p, size: %#" B_PRIxSIZE "\n", (void*)range.start,
			range.size));

#if 1
		// skip ranges outside the kernel address space
		if (!IS_KERNEL_ADDRESS(range.start)) {
			TRACE(("  no kernel address, skipping...\n"));
			continue;
		}

		phys_addr_t physicalAddress;
		void *address = (void*)range.start;
		if (vm_get_page_mapping(VMAddressSpace::KernelID(), range.start,
				&physicalAddress) != B_OK)
			panic("arch_vm_init_end(): No page mapping for %p\n", address);
		area_id area = vm_map_physical_memory(VMAddressSpace::KernelID(),
			"boot loader reserved area", &address,
			B_EXACT_ADDRESS, range.size,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			physicalAddress, true);
		if (area < 0) {
			panic("arch_vm_init_end(): Failed to create area for boot loader "
				"reserved area: %p - %p\n", (void*)range.start,
				(void*)(range.start + range.size));
		}
#endif
	}

#if 0
	// Throw away any address space mappings we've inherited from the boot
	// loader and have not yet turned into an area.
	vm_free_unused_boot_loader_range(0, 0xffffffff - B_PAGE_SIZE + 1);
#endif

	return B_OK;
}


void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
	// This function is only invoked when a userland thread is in the process
	// of dying. It switches to the kernel team and does whatever cleanup is
	// necessary (in case it is the team's main thread, it will delete the
	// team).
	// It is however not necessary to change the page directory. Userland
	// teams' page directories include all kernel mappings as well.
	// Furthermore our arch specific translation map data objects are
	// ref-counted, so they won't go away as long as they are still used on
	// any CPU.

	SetSatp(((RISCV64VMTranslationMap*)to->TranslationMap())->Satp());
	FlushTlbAll();
}


bool
arch_vm_supports_protection(uint32 protection)
{
	return true;
}


void
arch_vm_unset_memory_type(VMArea *area)
{
}


status_t
arch_vm_set_memory_type(VMArea *area, phys_addr_t physicalBase, uint32 type)
{
	return B_OK;
}