/*
 * Copyright 2007-2010, François Revol, revol@free.fr.
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights
 * reserved.
 * Copyright 2019, Adrien Destugues, pulkomandy@pulkomandy.tk.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <arch_cpu_defs.h>
#include <boot/kernel_args.h>
#include <KernelExport.h>
#include <kernel.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <Clint.h>
#include <Htif.h>
#include <Plic.h>

#include "RISCV64VMTranslationMap.h"


// Tracing is enabled unconditionally here; comment out to silence the
// range dumps in arch_vm_translation_map_init().
#define TRACE_VM_TMAP
#ifdef TRACE_VM_TMAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


// Offset added to a physical address to get its address in the kernel's
// physical-map window; computed in arch_vm_translation_map_init() from
// arch_args.physMap.start and the first physical memory range.
ssize_t gVirtFromPhysOffset = 0;

// Physical address of the root (level-2) kernel page table, read back from
// the satp register that the bootloader programmed.
phys_addr_t sPageTable = 0;

// Static storage for the physical page mapper; constructed in place with
// placement new so no heap is needed this early in boot.
char sPhysicalPageMapperData[sizeof(RISCV64VMPhysicalPageMapper)];


// TODO: Consolidate function with RISCV64VMTranslationMap

/*!	Walk the 3-level page table rooted at sPageTable and return a pointer to
	the level-0 (leaf) PTE for \a virtAdr.

	\param virtAdr       virtual address to look up
	\param alloc         if \c true, allocate and zero missing intermediate
	                     page-table pages via \a get_free_page
	\param args          kernel args, passed through to \a get_free_page
	\param get_free_page allocator returning the physical page number of a
	                     free page, or 0 on failure
	\return pointer to the leaf PTE (in the physical-map window), or \c NULL
	        if an intermediate table is missing and \a alloc is \c false, or
	        allocation failed
*/
static Pte*
LookupPte(addr_t virtAdr, bool alloc, kernel_args* args,
	phys_addr_t (*get_free_page)(kernel_args *))
{
	Pte *pte = (Pte*)VirtFromPhys(sPageTable);
	// Descend from level 2 (root) to level 1; level 0 is handled after the
	// loop.
	for (int level = 2; level > 0; level --) {
		pte += VirtAdrPte(virtAdr, level);
		if (!((1 << pteValid) & pte->flags)) {
			if (!alloc)
				return NULL;
			pte->ppn = get_free_page(args);
			if (pte->ppn == 0)
				return NULL;
			// New page-table pages must start out all-invalid.
			memset((Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn), 0, B_PAGE_SIZE);
			// No R/W/X bits set: this marks the entry as a pointer to the
			// next-level table, not a leaf mapping.
			pte->flags |= (1 << pteValid) | (1 << pteGlobal);
		}
		pte = (Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn);
	}
	pte += VirtAdrPte(virtAdr, 0);
	return pte;
}


/*!	Map one kernel page: enter \a physAdr at \a virtAdr with the given
	permission \a flags (pteRead/pteWrite/pteExec bits), allocating
	intermediate page tables as needed. Panics if a page table cannot be
	allocated. Flushes the TLB entry for \a virtAdr afterwards.
*/
static void
Map(addr_t virtAdr, phys_addr_t physAdr, uint64 flags, kernel_args* args,
	phys_addr_t (*get_free_page)(kernel_args *))
{
//	dprintf("Map(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR ")\n", virtAdr, physAdr);
	Pte* pte = LookupPte(virtAdr, true, args, get_free_page);
	if (pte == NULL) panic("can't allocate page table");

	// Set Accessed/Dirty up front so the hardware (or a trap handler) never
	// needs to update the PTE for this early mapping.
	pte->ppn = physAdr / B_PAGE_SIZE;
	pte->flags = (1 << pteValid) | (1 << pteAccessed) | (1 << pteDirty)
		| (1 << pteGlobal) // we map only kernel pages here so always set global flag
		| flags;

	FlushTlbPage(virtAdr);
}


//#pragma mark -

/*!	First-stage translation-map init: adopt the page table the bootloader
	left in satp, compute the virtual/physical offset of the physical-map
	window, and hand back the (statically allocated) physical page mapper.
*/
status_t
arch_vm_translation_map_init(kernel_args *args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("vm_translation_map_init: entry\n");

#ifdef TRACE_VM_TMAP
	TRACE("physical memory ranges:\n");
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		phys_addr_t start = args->physical_memory_range[i].start;
		phys_addr_t end = start + args->physical_memory_range[i].size;
		TRACE("  %" B_PRIxPHYSADDR " - %" B_PRIxPHYSADDR "\n", start, end);
	}

	TRACE("allocated physical ranges:\n");
	for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
		phys_addr_t start = args->physical_allocated_range[i].start;
		phys_addr_t end = start + args->physical_allocated_range[i].size;
		TRACE("  %" B_PRIxPHYSADDR " - %" B_PRIxPHYSADDR "\n", start, end);
	}

	TRACE("allocated virtual ranges:\n");
	for (uint32 i = 0; i < args->num_virtual_allocated_ranges; i++) {
		addr_t start = args->virtual_allocated_range[i].start;
		addr_t end = start + args->virtual_allocated_range[i].size;
		TRACE("  %" B_PRIxADDR " - %" B_PRIxADDR "\n", start, end);
	}

	TRACE("kernel args ranges:\n");
	for (uint32 i = 0; i < args->num_kernel_args_ranges; i++) {
		phys_addr_t start = args->kernel_args_range[i].start;
		phys_addr_t end = start + args->kernel_args_range[i].size;
		TRACE("  %" B_PRIxPHYSADDR " - %" B_PRIxPHYSADDR "\n", start, end);
	}
#endif

	// satp holds the physical page number of the root table; convert to a
	// byte address.
	sPageTable = SatpReg{.val = Satp()}.ppn * B_PAGE_SIZE;

	dprintf("physMapBase: %#" B_PRIxADDR "\n", args->arch_args.physMap.start);
	dprintf("physMemBase: %#" B_PRIxADDR "\n", args->physical_memory_range[0].start);
	// Offset used by VirtFromPhys(): physical-map window base minus the
	// start of physical memory.
	gVirtFromPhysOffset = args->arch_args.physMap.start - args->physical_memory_range[0].start;

	clear_ac();

	// Placement-new into static storage; no heap available yet.
	*_physicalPageMapper = new(&sPhysicalPageMapperData)
		RISCV64VMPhysicalPageMapper();

	return B_OK;
}


/*!	Second-stage init, called once semaphores are available. Nothing to do
	on RISC-V.
*/
status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
	return B_OK;
}


/*!	Third-stage init, called once areas can be created. Nothing to do on
	RISC-V.
*/
status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
	TRACE("vm_translation_map_init_post_area: entry\n");
	return B_OK;
}


/*!	Map a page during early boot, before the VM is fully up. Translates the
	generic B_KERNEL_*_AREA protection \a attributes into RISC-V PTE
	permission bits and installs the mapping.

	\return \c B_OK (Map() panics on page-table allocation failure)
*/
status_t
arch_vm_translation_map_early_map(kernel_args *args,
	addr_t virtAdr, phys_addr_t physAdr, uint8 attributes,
	phys_addr_t (*get_free_page)(kernel_args *))
{
	//dprintf("early_map(%#" B_PRIxADDR ", %#" B_PRIxADDR ")\n", virtAdr, physAdr);
	uint64 flags = 0;
	if ((attributes & B_KERNEL_READ_AREA) != 0)
		flags |= (1 << pteRead);
	if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		flags |= (1 << pteWrite);
	if ((attributes & B_KERNEL_EXECUTE_AREA) != 0)
		flags |= (1 << pteExec);
	Map(virtAdr, physAdr, flags, args, get_free_page);
	return B_OK;
}


/*!	Create a translation map for a new address space. The kernel map reuses
	the existing root page table (sPageTable); user maps start with no table
	(0) and allocate their own lazily.
*/
status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	*_map = new(std::nothrow) RISCV64VMTranslationMap(kernel,
		(kernel) ? sPageTable : 0);

	if (*_map == NULL)
		return B_NO_MEMORY;

	return B_OK;
}


/*!	Check whether a kernel page is accessible with the given protection.
	Only the NULL page is considered inaccessible here; \a protection is
	ignored.
*/
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	uint32 protection)
{
	return virtualAddress != 0;
}