xref: /haiku/src/system/boot/platform/efi/arch/arm64/arch_start.cpp (revision a5c0d1a80e18f50987966fda2005210092d7671b)
1 /*
2  * Copyright 2019-2022 Haiku, Inc. All rights reserved.
3  * Released under the terms of the MIT License.
4  */
5 
6 
7 #include <boot/platform.h>
8 #include <boot/stage2.h>
9 #include <boot/stdio.h>
10 
11 #include "efi_platform.h"
12 #include "serial.h"
13 
14 #include "aarch64.h"
15 
16 extern "C" void arch_enter_kernel(struct kernel_args *kernelArgs,
17 	addr_t kernelEntry, addr_t kernelStackTop);
18 
19 extern void arch_mmu_dump_present_tables();
20 extern const char* granule_type_str(int tg);
21 
22 extern uint32_t arch_mmu_generate_post_efi_page_tables(size_t memory_map_size,
23 	efi_memory_descriptor *memory_map, size_t descriptor_size,
24 	uint32_t descriptor_version);
25 
26 extern void arch_mmu_post_efi_setup(size_t memory_map_size,
27 	efi_memory_descriptor *memory_map, size_t descriptor_size,
28 	uint32_t descriptor_version);
29 
30 extern void arch_mmu_setup_EL1(uint64 tcr);
31 
32 
33 static const char*
34 memory_region_type_str(int type)
35 {
36 	switch (type)	{
37 	case EfiReservedMemoryType:
38 		return "ReservedMemoryType";
39 	case EfiLoaderCode:
40 		return "LoaderCode";
41 	case EfiLoaderData:
42 		return "LoaderData";
43 	case EfiBootServicesCode:
44 		return "BootServicesCode";
45 	case EfiBootServicesData:
46 		return "BootServicesData";
47 	case EfiRuntimeServicesCode:
48 		return "RuntimeServicesCode";
49 	case EfiRuntimeServicesData:
50 		return "RuntimeServicesData";
51 	case EfiConventionalMemory:
52 		return "ConventionalMemory";
53 	case EfiUnusableMemory:
54 		return "UnusableMemory";
55 	case EfiACPIReclaimMemory:
56 		return "ACPIReclaimMemory";
57 	case EfiACPIMemoryNVS:
58 		return "ACPIMemoryNVS";
59 	case EfiMemoryMappedIO:
60 		return "MMIO";
61 	case EfiMemoryMappedIOPortSpace:
62 		return "MMIOPortSpace";
63 	case EfiPalCode:
64 		return "PalCode";
65 	case EfiPersistentMemory:
66 		return "PersistentMemory";
67 	default:
68 		return "unknown";
69 	}
70 }
71 
72 
void
arch_convert_kernel_args(void)
{
	// Nothing to do on arm64: no kernel_args fields need fixing up
	// before handing control to the kernel.
}
78 
79 
// Tear down EFI boot services and transfer control to the kernel.
// `kernelEntry` is the physical entry point of the loaded kernel image.
// On success this function does not return (arch_enter_kernel jumps to
// the kernel); on any unrecoverable error it panics.
void
arch_start_kernel(addr_t kernelEntry)
{
	// Prepare to exit EFI boot services.
	// Read the memory map.
	// First call is to determine the buffer size.
	size_t memory_map_size = 0;
	efi_memory_descriptor dummy;
	efi_memory_descriptor *memory_map;
	size_t map_key;
	size_t descriptor_size;
	uint32_t descriptor_version;
	// With a zero-sized buffer, GetMemoryMap must fail with
	// EFI_BUFFER_TOO_SMALL and report the required size in memory_map_size.
	if (kBootServices->GetMemoryMap(&memory_map_size, &dummy, &map_key,
			&descriptor_size, &descriptor_version) != EFI_BUFFER_TOO_SMALL) {
		panic("Unable to determine size of system memory map");
	}

	// Allocate a buffer twice as large as needed just in case it gets bigger
	// between calls to ExitBootServices.
	size_t actual_memory_map_size = memory_map_size * 2;
	memory_map
		= (efi_memory_descriptor *)kernel_args_malloc(actual_memory_map_size);

	if (memory_map == NULL)
		panic("Unable to allocate memory map.");

	// Read (and print) the memory map.
	memory_map_size = actual_memory_map_size;
	if (kBootServices->GetMemoryMap(&memory_map_size, memory_map, &map_key,
			&descriptor_size, &descriptor_version) != EFI_SUCCESS) {
		panic("Unable to fetch system memory map.");
	}

	// Walk the descriptors by descriptor_size (not sizeof(entry) — the
	// firmware's descriptor may be larger than our struct definition).
	addr_t addr = (addr_t)memory_map;
	efi_physical_addr loaderCode = 0LL;
	dprintf("System provided memory map:\n");
	for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
		efi_memory_descriptor *entry
			= (efi_memory_descriptor *)(addr + i * descriptor_size);
		dprintf("  phys: 0x%0lx-0x%0lx, virt: 0x%0lx-0x%0lx, size = 0x%0lx, type: %s (%#x), attr: %#lx\n",
			entry->PhysicalStart,
			entry->PhysicalStart + entry->NumberOfPages * B_PAGE_SIZE,
			entry->VirtualStart,
			entry->VirtualStart + entry->NumberOfPages * B_PAGE_SIZE,
			entry->NumberOfPages * B_PAGE_SIZE,
			memory_region_type_str(entry->Type), entry->Type,
			entry->Attribute);
		// NOTE(review): if multiple EfiLoaderCode regions exist, only the
		// last one's base is remembered — presumably there is exactly one.
		if (entry->Type == EfiLoaderCode)
			loaderCode = entry->PhysicalStart;
	}
	// This is where our efi loader got relocated, therefore we need to use this
	// offset for properly align symbols
	dprintf("Efi loader symbols offset: 0x%0lx:\n", loaderCode);

/*
*   "The AArch64 exception model is made up of a number of exception levels
*    (EL0 - EL3), with EL0 and EL1 having a secure and a non-secure
*    counterpart.  EL2 is the hypervisor level and exists only in non-secure
*    mode. EL3 is the highest priority level and exists only in secure mode."
*
*	"2.3 UEFI System Environment and Configuration
*    The resident UEFI boot-time environment shall use the highest non-secure
*    privilege level available. The exact meaning of this is architecture
*    dependent, as detailed below."

*	"2.3.1 AArch64 Exception Levels
*    On AArch64 UEFI shall execute as 64-bit code at either EL1 or EL2,
*    depending on whether or not virtualization is available at OS load time."
*/
	// Per the quote above, firmware hands us control at EL1 or EL2;
	// anything else is rejected in the switch further down.
	uint64 el = arch_exception_level();
	dprintf("Current Exception Level EL%1lx\n", el);
	dprintf("TTBR0: %" B_PRIx64 " TTBRx: %" B_PRIx64 " SCTLR: %" B_PRIx64 " TCR: %" B_PRIx64 "\n",
		arch_mmu_base_register(),
		arch_mmu_base_register(true),
		_arch_mmu_get_sctlr(),
		_arch_mmu_get_tcr());

	if (arch_mmu_enabled()) {
		// Dump the firmware's MMU configuration for debugging purposes.
		dprintf("MMU Enabled, Granularity %s, bits %d\n",
			granule_type_str(arch_mmu_user_granule()),
			arch_mmu_user_address_bits());

		dprintf("Kernel entry accessibility W: %x R: %x\n",
			arch_mmu_write_access(kernelEntry),
			arch_mmu_read_access(kernelEntry));

		arch_mmu_dump_present_tables();

		if (el == 1) {
			// Disable CACHE & MMU before dealing with TTBRx
			arch_cache_disable();
		}
	}

	// Generate page tables for use after ExitBootServices.
	arch_mmu_generate_post_efi_page_tables(
		memory_map_size, memory_map, descriptor_size, descriptor_version);

	// Attempt to fetch the memory map and exit boot services.
	// This needs to be done in a loop, as ExitBootServices can change the
	// memory map.
	// Even better: Only GetMemoryMap and ExitBootServices can be called after
	// the first call to ExitBootServices, as the firmware is permitted to
	// partially exit. This is why twice as much space was allocated for the
	// memory map, as it's impossible to allocate more now.
	// A changing memory map shouldn't affect the generated page tables, as
	// they only needed to know about the maximum address, not any specific
	// entry.
	dprintf("Calling ExitBootServices. So long, EFI!\n");
	while (true) {
		// ExitBootServices fails with a stale map_key; on failure, refresh
		// the map (and key) below and retry.
		if (kBootServices->ExitBootServices(kImage, map_key) == EFI_SUCCESS) {
			// The console was provided by boot services, disable it.
			stdout = NULL;
			stderr = NULL;
			// Can we adjust gKernelArgs.platform_args.serial_base_ports[0]
			// to something fixed in qemu for debugging?
			serial_switch_to_legacy();
			dprintf("Switched to legacy serial output\n");
			break;
		}

		memory_map_size = actual_memory_map_size;
		if (kBootServices->GetMemoryMap(&memory_map_size, memory_map, &map_key,
				&descriptor_size, &descriptor_version) != EFI_SUCCESS) {
			panic("Unable to fetch system memory map.");
		}
	}

	// Update EFI, generate final kernel physical memory map, etc.
	// arch_mmu_post_efi_setup(memory_map_size, memory_map, descriptor_size, descriptor_version);

	// Ensure we end up executing at EL1 with our own translation tables:
	// at EL1, just install the new tables; at EL2, additionally drop down
	// to EL1 after disabling caches.
	switch (el) {
		case 1:
			arch_mmu_setup_EL1(READ_SPECIALREG(TCR_EL1));
			break;
		case 2:
			arch_mmu_setup_EL1(READ_SPECIALREG(TCR_EL2));
			arch_cache_disable();
			_arch_transition_EL2_EL1();
			break;
		default:
			panic("Unexpected Exception Level\n");
			break;
	}

	arch_cache_enable();

	//smp_boot_other_cpus(final_pml4, kernelEntry, (addr_t)&gKernelArgs);

	// Sanity-check that the new page tables actually map the kernel entry
	// point and the boot CPU's kernel stack before jumping.
	if (arch_mmu_read_access(kernelEntry) && arch_mmu_read_access(gKernelArgs.cpu_kstack[0].start)) {
		// Enter the kernel!
		arch_enter_kernel(&gKernelArgs, kernelEntry,
			gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size);
	} else {
		// _arch_exception_panic("Kernel or Stack memory not accessible\n", __LINE__);
		panic("Kernel or Stack memory not accessible\n");
	}
}
238