/*
 * Copyright 2019-2022 Haiku, Inc. All rights reserved.
 * Released under the terms of the MIT License.
 */


#include <boot/platform.h>
#include <boot/stage2.h>
#include <boot/stdio.h>

#include "efi_platform.h"
#include "serial.h"

#include "aarch64.h"


// Implemented in assembly: switches to the given stack and jumps to the
// kernel entry point with kernelArgs as argument. Never returns.
extern "C" void arch_enter_kernel(
	struct kernel_args* kernelArgs, addr_t kernelEntry, addr_t kernelStackTop);

// Helpers implemented in the arch MMU translation unit.
extern void arch_mmu_dump_present_tables();
extern const char* granule_type_str(int tg);

extern uint32_t arch_mmu_generate_post_efi_page_tables(size_t memory_map_size,
	efi_memory_descriptor* memory_map, size_t descriptor_size, uint32_t descriptor_version);

extern void arch_mmu_post_efi_setup(size_t memory_map_size, efi_memory_descriptor* memory_map,
	size_t descriptor_size, uint32_t descriptor_version);

extern void arch_mmu_setup_EL1(uint64 tcr);


// Map an EFI memory descriptor type to a human-readable name, used only by
// the memory-map dump in arch_start_kernel() below.
static const char*
memory_region_type_str(int type)
{
	switch (type) {
		case EfiReservedMemoryType:
			return "ReservedMemoryType";
		case EfiLoaderCode:
			return "LoaderCode";
		case EfiLoaderData:
			return "LoaderData";
		case EfiBootServicesCode:
			return "BootServicesCode";
		case EfiBootServicesData:
			return "BootServicesData";
		case EfiRuntimeServicesCode:
			return "RuntimeServicesCode";
		case EfiRuntimeServicesData:
			return "RuntimeServicesData";
		case EfiConventionalMemory:
			return "ConventionalMemory";
		case EfiUnusableMemory:
			return "UnusableMemory";
		case EfiACPIReclaimMemory:
			return "ACPIReclaimMemory";
		case EfiACPIMemoryNVS:
			return "ACPIMemoryNVS";
		case EfiMemoryMappedIO:
			return "MMIO";
		case EfiMemoryMappedIOPortSpace:
			return "MMIOPortSpace";
		case EfiPalCode:
			return "PalCode";
		case EfiPersistentMemory:
			return "PersistentMemory";
		default:
			return "unknown";
	}
}


// No fixups are needed on AArch64: gKernelArgs is handed to the kernel as-is.
void
arch_convert_kernel_args(void)
{
	// empty
}


// Final hand-off from the EFI loader to the kernel: fetch the firmware
// memory map, build the post-EFI page tables, exit boot services, finish
// the MMU/cache setup for the current exception level, and jump to the
// kernel entry point on the boot CPU's stack. Does not return on success.
void
arch_start_kernel(addr_t kernelEntry)
{
	// Prepare to exit EFI boot services.
	// Read the memory map.
	// First call is to determine the buffer size: per the UEFI spec,
	// GetMemoryMap() with a too-small buffer returns EFI_BUFFER_TOO_SMALL
	// and stores the required size in memory_map_size.
	size_t memory_map_size = 0;
	efi_memory_descriptor dummy;
	efi_memory_descriptor* memory_map;
	size_t map_key;
	size_t descriptor_size;
	uint32_t descriptor_version;
	if (kBootServices->GetMemoryMap(
			&memory_map_size, &dummy, &map_key, &descriptor_size, &descriptor_version)
		!= EFI_BUFFER_TOO_SMALL) {
		panic("Unable to determine size of system memory map");
	}

	// Allocate a buffer twice as large as needed just in case it gets bigger
	// between calls to ExitBootServices.
	size_t actual_memory_map_size = memory_map_size * 2;
	memory_map = (efi_memory_descriptor*) kernel_args_malloc(actual_memory_map_size);

	if (memory_map == NULL)
		panic("Unable to allocate memory map.");

	// Read (and print) the memory map.
	memory_map_size = actual_memory_map_size;
	if (kBootServices->GetMemoryMap(
			&memory_map_size, memory_map, &map_key, &descriptor_size, &descriptor_version)
		!= EFI_SUCCESS) {
		panic("Unable to fetch system memory map.");
	}

	// Iterate by descriptor_size (not sizeof(efi_memory_descriptor)) — the
	// firmware may use a larger descriptor than this header declares.
	addr_t addr = (addr_t) memory_map;
	efi_physical_addr loaderCode = 0LL;
	dprintf("System provided memory map:\n");
	for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
		efi_memory_descriptor* entry = (efi_memory_descriptor*) (addr + i * descriptor_size);
		dprintf("  phys: 0x%0lx-0x%0lx, virt: 0x%0lx-0x%0lx, size = 0x%0lx, type: %s (%#x), attr: "
			"%#lx\n",
			entry->PhysicalStart, entry->PhysicalStart + entry->NumberOfPages * B_PAGE_SIZE,
			entry->VirtualStart, entry->VirtualStart + entry->NumberOfPages * B_PAGE_SIZE,
			entry->NumberOfPages * B_PAGE_SIZE, memory_region_type_str(entry->Type), entry->Type,
			entry->Attribute);
		// NOTE: if multiple EfiLoaderCode regions exist, this keeps the last
		// one seen.
		if (entry->Type == EfiLoaderCode)
			loaderCode = entry->PhysicalStart;
	}
	// This is where our EFI loader got relocated to; this offset is needed to
	// properly resolve the loader's symbols when debugging.
	dprintf("Efi loader symbols offset: 0x%0lx:\n", loaderCode);

	/*
	 * "The AArch64 exception model is made up of a number of exception levels
	 * (EL0 - EL3), with EL0 and EL1 having a secure and a non-secure
	 * counterpart. EL2 is the hypervisor level and exists only in non-secure
	 * mode. EL3 is the highest priority level and exists only in secure mode."
	 *
	 * "2.3 UEFI System Environment and Configuration
	 * The resident UEFI boot-time environment shall use the highest non-secure
	 * privilege level available. The exact meaning of this is architecture
	 * dependent, as detailed below."
	 *
	 * "2.3.1 AArch64 Exception Levels
	 * On AArch64 UEFI shall execute as 64-bit code at either EL1 or EL2,
	 * depending on whether or not virtualization is available at OS load time."
	 */
	uint64 el = arch_exception_level();
	dprintf("Current Exception Level EL%1lx\n", el);
	dprintf("TTBR0: %" B_PRIx64 " TTBRx: %" B_PRIx64 " SCTLR: %" B_PRIx64 " TCR: %" B_PRIx64 "\n",
		arch_mmu_base_register(), arch_mmu_base_register(true), _arch_mmu_get_sctlr(),
		_arch_mmu_get_tcr());

	if (arch_mmu_enabled()) {
		dprintf("MMU Enabled, Granularity %s, bits %d\n", granule_type_str(arch_mmu_user_granule()),
			arch_mmu_user_address_bits());

		dprintf("Kernel entry accessibility W: %x R: %x\n", arch_mmu_write_access(kernelEntry),
			arch_mmu_read_access(kernelEntry));

		arch_mmu_dump_present_tables();

		if (el == 1) {
			// Disable CACHE & MMU before dealing with TTBRx
			arch_cache_disable();
		}
	}

	// Generate page tables for use after ExitBootServices.
	arch_mmu_generate_post_efi_page_tables(
		memory_map_size, memory_map, descriptor_size, descriptor_version);

	// Attempt to fetch the memory map and exit boot services.
	// This needs to be done in a loop, as ExitBootServices can change the
	// memory map (the stale map_key then makes ExitBootServices fail and we
	// must refetch the map and retry).
	// Even better: Only GetMemoryMap and ExitBootServices can be called after
	// the first call to ExitBootServices, as the firmware is permitted to
	// partially exit. This is why twice as much space was allocated for the
	// memory map, as it's impossible to allocate more now.
	// A changing memory map shouldn't affect the generated page tables, as
	// they only needed to know about the maximum address, not any specific
	// entry.
	dprintf("Calling ExitBootServices. So long, EFI!\n");
	while (true) {
		if (kBootServices->ExitBootServices(kImage, map_key) == EFI_SUCCESS) {
			// The console was provided by boot services, disable it.
			stdout = NULL;
			stderr = NULL;
			// Can we adjust gKernelArgs.platform_args.serial_base_ports[0]
			// to something fixed in qemu for debugging?
			serial_switch_to_legacy();
			dprintf("Switched to legacy serial output\n");
			break;
		}

		memory_map_size = actual_memory_map_size;
		if (kBootServices->GetMemoryMap(
				&memory_map_size, memory_map, &map_key, &descriptor_size, &descriptor_version)
			!= EFI_SUCCESS) {
			panic("Unable to fetch system memory map.");
		}
	}

	// Update EFI, generate final kernel physical memory map, etc.
	arch_mmu_post_efi_setup(memory_map_size, memory_map, descriptor_size, descriptor_version);

	// Set up translation for EL1 (where the kernel runs). When the firmware
	// left us at EL2, reuse its TCR_EL2 value for EL1 and drop down to EL1
	// with caches disabled for the transition.
	switch (el) {
		case 1:
			arch_mmu_setup_EL1(READ_SPECIALREG(TCR_EL1));
			break;
		case 2:
			arch_mmu_setup_EL1(READ_SPECIALREG(TCR_EL2));
			arch_cache_disable();
			_arch_transition_EL2_EL1();
			break;
		default:
			panic("Unexpected Exception Level\n");
			break;
	}

	arch_cache_enable();

	// smp_boot_other_cpus(final_pml4, kernelEntry, (addr_t)&gKernelArgs);

	// Last sanity check before the point of no return: make sure the kernel
	// image and the boot CPU's kernel stack are readable under the new
	// translation tables.
	if (arch_mmu_read_access(kernelEntry)
		&& arch_mmu_read_access(gKernelArgs.cpu_kstack[0].start)) {
		// Enter the kernel!
		arch_enter_kernel(&gKernelArgs, kernelEntry,
			gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size);
	} else {
		// _arch_exception_panic("Kernel or Stack memory not accessible\n", __LINE__);
		panic("Kernel or Stack memory not accessible\n");
	}
}