/*
 * Copyright 2019 Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */


#include <arch/vm_translation_map.h>
#include <boot/kernel_args.h>
#include <vm/VMAddressSpace.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

#include "PMAPPhysicalPageMapper.h"
#include "VMSAv8TranslationMap.h"


static char sPhysicalPageMapperData[sizeof(PMAPPhysicalPageMapper)];

// Physical address of an empty page table, used for break-before-make
// when updating TTBR0_EL1.
static phys_addr_t sEmptyTable;


static void
arch_vm_alloc_empty_table(void)
{
	// Allocate one zeroed, wired page to serve as the empty table.
	vm_page_reservation reservation;
	vm_page_reserve_pages(&reservation, 1, VM_PRIORITY_SYSTEM);
	vm_page* page = vm_page_allocate_page(&reservation, PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
	DEBUG_PAGE_ACCESS_END(page);
	sEmptyTable = page->physical_page_number << PAGE_SHIFT;
}


void
arch_vm_install_empty_table_ttbr0(void)
{
	WRITE_SPECIALREG(TTBR0_EL1, sEmptyTable);
	asm("isb");
}


status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	phys_addr_t pt = 0;
	if (kernel) {
		// The kernel page table already exists; reuse the one installed in TTBR1_EL1.
		pt = READ_SPECIALREG(TTBR1_EL1) & kTtbrBasePhysAddrMask;
		arch_vm_install_empty_table_ttbr0();
	}

	// 4 KiB granule (12 page bits), 48-bit virtual addresses.
	*_map = new (std::nothrow) VMSAv8TranslationMap(kernel, pt, 12, 48, 1);

	if (*_map == NULL)
		return B_NO_MEMORY;

	return B_OK;
}


status_t
arch_vm_translation_map_init(kernel_args* args, VMPhysicalPageMapper** _physicalPageMapper)
{
	dprintf("arch_vm_translation_map_init\n");

	// Nuke the TTBR0 mapping; the kernel uses the identity mapping in kernel
	// space at KERNEL_PMAP_BASE instead.
	memset((void*) READ_SPECIALREG(TTBR0_EL1), 0, B_PAGE_SIZE);

	uint64_t tcr = READ_SPECIALREG(TCR_EL1);
	// T0SZ and T1SZ are 6-bit fields.
	uint32_t t0sz = tcr & 0x3f;
	uint32_t t1sz = (tcr >> 16) & 0x3f;
	uint32_t tg0 = (tcr >> 14) & 0x3;
	uint32_t tg1 = (tcr >> 30) & 0x3;
	uint64_t ttbr0 = READ_SPECIALREG(TTBR0_EL1);
	uint64_t ttbr1 = READ_SPECIALREG(TTBR1_EL1);
	uint64_t mair = READ_SPECIALREG(MAIR_EL1);
	uint64_t mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1);
	uint64_t mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1);
	uint64_t sctlr = READ_SPECIALREG(SCTLR_EL1);

	// Enable hardware management of the Access flag (and the Dirty state,
	// when supported) if the CPU implements FEAT_HAFDBS.
	ASSERT(VMSAv8TranslationMap::fHwFeature == 0);
	uint64_t hafdbs = ID_AA64MMFR1_HAFDBS(mmfr1);
	if (hafdbs == ID_AA64MMFR1_HAFDBS_AF) {
		VMSAv8TranslationMap::fHwFeature = VMSAv8TranslationMap::HW_ACCESS;
		tcr |= (1UL << 39);	// TCR_EL1.HA
	}
	if (hafdbs == ID_AA64MMFR1_HAFDBS_AF_DBS) {
		VMSAv8TranslationMap::fHwFeature
			= VMSAv8TranslationMap::HW_ACCESS | VMSAv8TranslationMap::HW_DIRTY;
		tcr |= (1UL << 40) | (1UL << 39);	// TCR_EL1.HD | TCR_EL1.HA
	}

	if (ID_AA64MMFR2_CNP(mmfr2) == ID_AA64MMFR2_CNP_IMPL) {
		VMSAv8TranslationMap::fHwFeature |= VMSAv8TranslationMap::HW_COMMON_NOT_PRIVATE;
	}

	VMSAv8TranslationMap::fMair = mair;

	WRITE_SPECIALREG(TCR_EL1, tcr);

	dprintf("vm config: MMFR1: %lx, MMFR2: %lx, TCR: %lx\nTTBR0: %lx, TTBR1: %lx\nT0SZ: %u, "
		"T1SZ: %u, TG0: %u, TG1: %u, MAIR: %lx, SCTLR: %lx\n",
		mmfr1, mmfr2, tcr, ttbr0, ttbr1, t0sz, t1sz, tg0, tg1, mair, sctlr);

	*_physicalPageMapper = new (&sPhysicalPageMapperData) PMAPPhysicalPageMapper();

	return B_OK;
}


status_t
arch_vm_translation_map_init_post_sem(kernel_args* args)
{
	dprintf("arch_vm_translation_map_init_post_sem\n");

	// Create an empty page table for use when we don't want a userspace page table.
	arch_vm_alloc_empty_table();

	return B_OK;
}


status_t
arch_vm_translation_map_init_post_area(kernel_args* args)
{
	dprintf("arch_vm_translation_map_init_post_area\n");

	// Create an area covering the physical map area.
	void* address = (void*) KERNEL_PMAP_BASE;
	area_id area = vm_create_null_area(VMAddressSpace::KernelID(), "physical map area", &address,
		B_EXACT_ADDRESS, KERNEL_PMAP_SIZE, 0);

	if (args->arch_args.uart.kind[0] != 0) {
		// The debug UART is already mapped by the EFI loader; reserve its range as well.
		address = (void*) args->arch_args.uart.regs.start;
		area_id uartArea = vm_create_null_area(VMAddressSpace::KernelID(),
			"debug uart map area", &address, B_EXACT_ADDRESS,
			ROUNDUP(args->arch_args.uart.regs.size, B_PAGE_SIZE), 0);
	}

	return B_OK;
}


// TODO: reuse some bits from VMSAv8TranslationMap

static uint64_t page_bits = 12;
static uint64_t tsz = 16;


static uint64_t*
TableFromPa(phys_addr_t pa)
{
	// All physical memory is mapped at a fixed offset in the physical map area.
	return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
}


static void
map_page_early(phys_addr_t ptPa, int level, addr_t va, phys_addr_t pa,
	phys_addr_t (*get_free_page)(kernel_args*), kernel_args* args)
{
	int tableBits = page_bits - 3;
	uint64_t tableMask = (1UL << tableBits) - 1;

	int shift = tableBits * (3 - level) + page_bits;

	int index = (va >> shift) & tableMask;
	uint64_t* pte = &TableFromPa(ptPa)[index];

	if (level == 3) {
		// Leaf level: install the page descriptor.
		atomic_set64((int64*) pte, pa | 0x3);
		asm("dsb ish");
	} else {
		uint64_t pteVal = atomic_get64((int64*) pte);
		int type = pteVal & 0x3;

		phys_addr_t table;
		if (type == 0x3) {
			// Already a table descriptor; descend into it.
			table = pteVal & kPteAddrMask;
		} else {
			table = get_free_page(args) << page_bits;
			dprintf("early: pulling page %lx\n", table);
			uint64_t* newTableVa = TableFromPa(table);

			if (type == 0x1) {
				// Break an existing block mapping up into next-level entries.
				int nextShift = tableBits * (3 - (level + 1)) + page_bits;
				uint64_t entrySize = 1UL << nextShift;

				for (int i = 0; i < (1 << tableBits); i++)
					newTableVa[i] = pteVal + i * entrySize;
			} else {
				memset(newTableVa, 0, 1 << page_bits);
			}

			// Make the new table visible before installing the pointer to it.
			asm("dsb ish");

			atomic_set64((int64*) pte, table | 0x3);
		}

		map_page_early(table, level + 1, va, pa, get_free_page, args);
	}
}


status_t
arch_vm_translation_map_early_map(kernel_args* args, addr_t va, phys_addr_t pa, uint8 attributes,
	phys_addr_t (*get_free_page)(kernel_args*))
{
	int va_bits = 64 - tsz;
	uint64_t va_mask = (1UL << va_bits) - 1;
	// Early mappings must target kernel (TTBR1) addresses, i.e. the upper
	// bits must all be set.
	ASSERT((va & ~va_mask) == ~va_mask);

	phys_addr_t ptPa = READ_SPECIALREG(TTBR1_EL1) & kTtbrBasePhysAddrMask;
	int level = VMSAv8TranslationMap::CalcStartLevel(va_bits, page_bits);
	va &= va_mask;
	pa |= VMSAv8TranslationMap::GetMemoryAttr(attributes, 0, true);

	map_page_early(ptPa, level, va, pa, get_free_page, args);

	return B_OK;
}


bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t va, uint32 protection)
{
	// Probe the address with the AT (address translate) instruction, then
	// check the fault bit in PAR_EL1.
	if (protection & B_KERNEL_WRITE_AREA) {
		asm("at s1e1w, %0" : : "r"((uint64_t) va));
		return (READ_SPECIALREG(PAR_EL1) & PAR_F) == 0;
	} else {
		asm("at s1e1r, %0" : : "r"((uint64_t) va));
		return (READ_SPECIALREG(PAR_EL1) & PAR_F) == 0;
	}
}