xref: /haiku/src/system/boot/platform/efi/arch/arm64/arch_start.cpp (revision 52f7c9389475e19fc21487b38064b4390eeb6fea)
1 /*
2  * Copyright 2019-2022 Haiku, Inc. All rights reserved.
3  * Released under the terms of the MIT License.
4  */
5 
6 
7 #include <boot/platform.h>
8 #include <boot/stage2.h>
9 #include <boot/stdio.h>
10 
11 #include "efi_platform.h"
12 
13 #include "aarch64.h"
14 
15 extern "C" void arch_enter_kernel(struct kernel_args *kernelArgs,
16 	addr_t kernelEntry, addr_t kernelStackTop);
17 
18 extern void arch_mmu_dump_present_tables();
19 extern const char* granule_type_str(int tg);
20 
21 extern uint32_t arch_mmu_generate_post_efi_page_tables(size_t memory_map_size,
22 	efi_memory_descriptor *memory_map, size_t descriptor_size,
23 	uint32_t descriptor_version);
24 
25 extern void arch_mmu_post_efi_setup(size_t memory_map_size,
26 	efi_memory_descriptor *memory_map, size_t descriptor_size,
27 	uint32_t descriptor_version);
28 
29 extern void arch_mmu_setup_EL1();
30 
31 
32 static const char*
33 memory_region_type_str(int type)
34 {
35 	switch (type)	{
36 	case EfiReservedMemoryType:
37 		return "ReservedMemoryType";
38 	case EfiLoaderCode:
39 		return "LoaderCode";
40 	case EfiLoaderData:
41 		return "LoaderData";
42 	case EfiBootServicesCode:
43 		return "BootServicesCode";
44 	case EfiBootServicesData:
45 		return "BootServicesData";
46 	case EfiRuntimeServicesCode:
47 		return "RuntimeServicesCode";
48 	case EfiRuntimeServicesData:
49 		return "RuntimeServicesData";
50 	case EfiConventionalMemory:
51 		return "ConventionalMemory";
52 	case EfiUnusableMemory:
53 		return "UnusableMemory";
54 	case EfiACPIReclaimMemory:
55 		return "ACPIReclaimMemory";
56 	case EfiACPIMemoryNVS:
57 		return "ACPIMemoryNVS";
58 	case EfiMemoryMappedIO:
59 		return "MMIO";
60 	case EfiMemoryMappedIOPortSpace:
61 		return "MMIOPortSpace";
62 	case EfiPalCode:
63 		return "PalCode";
64 	case EfiPersistentMemory:
65 		return "PersistentMemory";
66 	default:
67 		return "unknown";
68 	}
69 }
70 
71 
void
arch_convert_kernel_args(void)
{
	// Nothing to convert on arm64: the visible code performs no
	// fixups here (other platforms use this hook to patch kernel_args
	// pointers before entering the kernel).
}
77 
78 
/*	Tears down EFI boot services and hands control to the kernel.

	kernelEntry - entry point address of the loaded kernel image.

	Sequence: fetch the firmware memory map, build post-EFI page tables,
	log the current exception level / MMU state, call ExitBootServices()
	(retrying with a refreshed map as needed), optionally drop from EL2 to
	EL1, then jump to the kernel via arch_enter_kernel(). Does not return
	on success; panics on unrecoverable failures.
*/
void
arch_start_kernel(addr_t kernelEntry)
{
	// Prepare to exit EFI boot services.
	// Read the memory map.
	// First call is to determine the buffer size.
	size_t memory_map_size = 0;
	efi_memory_descriptor dummy;
	efi_memory_descriptor *memory_map;
	size_t map_key;
	size_t descriptor_size;
	uint32_t descriptor_version;
	// With a zero-sized buffer, GetMemoryMap() must report
	// EFI_BUFFER_TOO_SMALL and return the required size.
	if (kBootServices->GetMemoryMap(&memory_map_size, &dummy, &map_key,
			&descriptor_size, &descriptor_version) != EFI_BUFFER_TOO_SMALL) {
		panic("Unable to determine size of system memory map");
	}

	// Allocate a buffer twice as large as needed just in case it gets bigger
	// between calls to ExitBootServices.
	size_t actual_memory_map_size = memory_map_size * 2;
	memory_map
		= (efi_memory_descriptor *)kernel_args_malloc(actual_memory_map_size);

	if (memory_map == NULL)
		panic("Unable to allocate memory map.");

	// Read (and print) the memory map.
	memory_map_size = actual_memory_map_size;
	if (kBootServices->GetMemoryMap(&memory_map_size, memory_map, &map_key,
			&descriptor_size, &descriptor_version) != EFI_SUCCESS) {
		panic("Unable to fetch system memory map.");
	}

	// Walk the descriptor array by descriptor_size (NOT
	// sizeof(efi_memory_descriptor)) — the firmware may use larger entries.
	addr_t addr = (addr_t)memory_map;
	efi_physical_addr loaderCode = 0LL;
	dprintf("System provided memory map:\n");
	for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
		efi_memory_descriptor *entry
			= (efi_memory_descriptor *)(addr + i * descriptor_size);
		dprintf("  phys: 0x%0lx-0x%0lx, virt: 0x%0lx-0x%0lx, size = 0x%0lx, type: %s (%#x), attr: %#lx\n",
			entry->PhysicalStart,
			entry->PhysicalStart + entry->NumberOfPages * B_PAGE_SIZE,
			entry->VirtualStart,
			entry->VirtualStart + entry->NumberOfPages * B_PAGE_SIZE,
			entry->NumberOfPages * B_PAGE_SIZE,
			memory_region_type_str(entry->Type), entry->Type,
			entry->Attribute);
		// Remember the load address of the EfiLoaderCode region (if more
		// than one such region exists, the last one seen wins).
		if (entry->Type == EfiLoaderCode)
			loaderCode = entry->PhysicalStart;
	}
	// This is where our EFI loader got relocated; this offset is needed to
	// properly resolve the loader's symbols when debugging.
	dprintf("Efi loader symbols offset: 0x%0lx:\n", loaderCode);

	// Generate page tables for use after ExitBootServices.
	arch_mmu_generate_post_efi_page_tables(
		memory_map_size, memory_map, descriptor_size, descriptor_version);

	// Set when the firmware entered us at EL2 and we must drop to EL1
	// before entering the kernel.
	bool el2toel1 = false;
/*
*   "The AArch64 exception model is made up of a number of exception levels
*    (EL0 - EL3), with EL0 and EL1 having a secure and a non-secure
*    counterpart.  EL2 is the hypervisor level and exists only in non-secure
*    mode. EL3 is the highest priority level and exists only in secure mode."
*
*	"2.3 UEFI System Environment and Configuration
*    The resident UEFI boot-time environment shall use the highest non-secure
*    privilege level available. The exact meaning of this is architecture
*    dependent, as detailed below."

*	"2.3.1 AArch64 Exception Levels
*    On AArch64 UEFI shall execute as 64-bit code at either EL1 or EL2,
*    depending on whether or not virtualization is available at OS load time."
*/
	// Dump current EL and MMU control registers for debugging.
	dprintf("Current Exception Level EL%1lx\n", arch_exception_level());
	dprintf("TTBR0: %" B_PRIx64 " TTBRx: %" B_PRIx64 " SCTLR: %" B_PRIx64 " TCR: %" B_PRIx64 "\n",
		arch_mmu_base_register(),
		arch_mmu_base_register(true),
		_arch_mmu_get_sctlr(),
		_arch_mmu_get_tcr());

	if (arch_mmu_enabled()) {
		dprintf("MMU Enabled, Granularity %s, bits %d\n",
			granule_type_str(arch_mmu_user_granule()),
			arch_mmu_user_address_bits());

		dprintf("Kernel entry accessibility W: %x R: %x\n",
			arch_mmu_write_access(kernelEntry),
			arch_mmu_read_access(kernelEntry));

		arch_mmu_dump_present_tables();
	}

	// Per UEFI 2.3.1 (quoted above) the firmware runs us at EL1 or EL2;
	// anything else is unexpected.
	switch (arch_exception_level()) {
		case 1:
			/* arch_cache_disable(); */
			/* arch_mmu_generate_post_efi_page_tables */

			break;

		case 2:
			// Defer the actual EL2 -> EL1 transition until after
			// ExitBootServices, so the dprintf console stays usable here.
			el2toel1 = true; // we want to print before exit services
			break;

		default:
			panic("Unexpected Exception Level\n");
			break;
	}


	// Attempt to fetch the memory map and exit boot services.
	// This needs to be done in a loop, as ExitBootServices can change the
	// memory map.
	// Even better: Only GetMemoryMap and ExitBootServices can be called after
	// the first call to ExitBootServices, as the firmware is permitted to
	// partially exit. This is why twice as much space was allocated for the
	// memory map, as it's impossible to allocate more now.
	// A changing memory map shouldn't affect the generated page tables, as
	// they only needed to know about the maximum address, not any specific
	// entry.
	dprintf("Calling ExitBootServices. So long, EFI!\n");
	while (true) {
		if (kBootServices->ExitBootServices(kImage, map_key) == EFI_SUCCESS) {
			// The console was provided by boot services, disable it.
			stdout = NULL;
			stderr = NULL;
			// Can we adjust gKernelArgs.platform_args.serial_base_ports[0]
			// to something fixed in qemu for debugging?
			break;
		}

		// ExitBootServices failed: the map (and map_key) went stale.
		// Re-fetch into the same oversized buffer and retry.
		memory_map_size = actual_memory_map_size;
		if (kBootServices->GetMemoryMap(&memory_map_size, memory_map, &map_key,
				&descriptor_size, &descriptor_version) != EFI_SUCCESS) {
			panic("Unable to fetch system memory map.");
		}
	}

	// Update EFI, generate final kernel physical memory map, etc.
	//arch_mmu_post_efi_setup(memory_map_size, memory_map,
	//		descriptor_size, descriptor_version);

	if (el2toel1) {
		// Configure EL1 translation state, then drop exception levels with
		// caches disabled around the transition.
		arch_mmu_setup_EL1();
		arch_cache_disable();

		_arch_transition_EL2_EL1();

		arch_cache_enable();
	} else {

		arch_cache_enable();
	}

	//smp_boot_other_cpus(final_pml4, kernelEntry, (addr_t)&gKernelArgs);

	// Sanity-check that both the kernel image and the boot CPU's stack are
	// readable under the now-active page tables before jumping.
	if (arch_mmu_read_access(kernelEntry) && arch_mmu_read_access(gKernelArgs.cpu_kstack[0].start)) {
		// Enter the kernel!
		// Stack top is the end of the kstack range minus 8 bytes
		// (presumably for alignment/ABI slack — confirm against
		// arch_enter_kernel).
		arch_enter_kernel(&gKernelArgs, kernelEntry,
			gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size - 8);
	} else {
		_arch_exception_panic("Kernel or Stack memory not accessible\n", __LINE__);
	}
}
244