xref: /haiku/src/system/boot/platform/efi/arch/arm/arch_mmu.cpp (revision 4c8e85b316c35a9161f5a1c50ad70bc91c83a76f)
/*
 * Copyright 2019-2021 Haiku, Inc. All rights reserved.
 * Released under the terms of the MIT License.
 */


#include <algorithm>

#include <arm_mmu.h>
#include <kernel.h>
#include <arch_kernel.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <efi/types.h>
#include <efi/boot-services.h>

#include "mmu.h"
#include "efi_platform.h"

#define ALIGN_PAGEDIR (1024 * 16)
#define MAX_PAGE_TABLES 192
#define PAGE_TABLE_AREA_SIZE (MAX_PAGE_TABLES * ARM_MMU_L2_COARSE_TABLE_SIZE)

static uint32_t *sPageDirectory = NULL;
static uint32_t *sFirstPageTable = NULL;
static uint32_t *sNextPageTable = NULL;
static uint32_t *sLastPageTable = NULL;


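// Debug helper: walk the L1 page directory and print every non-empty entry
// together with the L2 coarse-table entries it points to.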
static void
dump_page_dir(void)
{
	dprintf("=== Page Directory ===\n");
	for (uint32_t i = 0; i < ARM_MMU_L1_TABLE_ENTRY_COUNT; i++) {
		uint32 directoryEntry = sPageDirectory[i];
		if (directoryEntry != 0) {
			dprintf("virt 0x%08x --> page table 0x%08x type 0x%08x\n",
				i << 20, directoryEntry & ARM_PDE_ADDRESS_MASK,
				directoryEntry & ARM_PDE_TYPE_MASK);
			uint32_t *pageTable = (uint32_t *)(directoryEntry & ARM_PDE_ADDRESS_MASK);
			for (uint32_t j = 0; j < ARM_MMU_L2_COARSE_ENTRY_COUNT; j++) {
				uint32 tableEntry = pageTable[j];
				if (tableEntry != 0) {
					dprintf("virt 0x%08x     --> page 0x%08x type+flags 0x%08x\n",
						(i << 20) | (j << 12),
						tableEntry & ARM_PTE_ADDRESS_MASK,
						tableEntry & (~ARM_PTE_ADDRESS_MASK));
				}
			}
		}
	}
}


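// Hand out the next free L2 coarse table from the preallocated page table
// area, panicking once the area is exhausted.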
static uint32 *
get_next_page_table(void)
{
	uint32 *page_table = sNextPageTable;
	sNextPageTable += ARM_MMU_L2_COARSE_ENTRY_COUNT;
	if (sNextPageTable >= sLastPageTable)
		panic("ran out of page tables\n");
	return page_table;
}


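// Map a single page: look up (or allocate) the L2 coarse table covering the
// virtual address and enter the physical page with the given flags.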
static void
map_page(addr_t virt_addr, phys_addr_t phys_addr, uint32_t flags)
{
	phys_addr &= ~(B_PAGE_SIZE - 1);

	uint32 *pageTable = NULL;
	uint32 pageDirectoryIndex = VADDR_TO_PDENT(virt_addr);
	uint32 pageDirectoryEntry = sPageDirectory[pageDirectoryIndex];

	if (pageDirectoryEntry == 0) {
		pageTable = get_next_page_table();
		sPageDirectory[pageDirectoryIndex] = (uint32_t)pageTable | ARM_MMU_L1_TYPE_COARSE;
	} else {
		pageTable = (uint32 *)(pageDirectoryEntry & ARM_PDE_ADDRESS_MASK);
	}

	uint32 pageTableIndex = VADDR_TO_PTENT(virt_addr);
	pageTable[pageTableIndex] = phys_addr | flags | ARM_MMU_L2_TYPE_SMALLNEW;
}


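// Map a contiguous physical range at the given virtual address, page by page,
// and record it as allocated virtual address space.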
static void
map_range(addr_t virt_addr, phys_addr_t phys_addr, size_t size, uint32_t flags)
{
	//dprintf("map 0x%08x --> 0x%08x, len=0x%08x, flags=0x%08x\n",
	//	(uint32_t)virt_addr, (uint32_t)phys_addr, (uint32_t)size, flags);

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(virt_addr + offset, phys_addr + offset, flags);
	}

	ASSERT_ALWAYS(insert_virtual_allocated_range(virt_addr, size) >= B_OK);
}


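// Map a physical range at a freshly allocated virtual address, rewrite
// range.start to that virtual address, and remember the range in
// virtual_ranges_to_keep so the kernel keeps the mapping alive.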
static void
map_range_to_new_area(addr_range& range, uint32_t flags)
{
	if (range.size == 0) {
		range.start = 0;
		return;
	}

	phys_addr_t phys_addr = range.start;
	addr_t virt_addr = get_next_virtual_address(range.size);

	map_range(virt_addr, phys_addr, range.size, flags);

	if (gKernelArgs.arch_args.num_virtual_ranges_to_keep
		>= MAX_VIRTUAL_RANGES_TO_KEEP)
		panic("too many virtual ranges to keep");

	range.start = virt_addr;

	gKernelArgs.arch_args.virtual_ranges_to_keep[
		gKernelArgs.arch_args.num_virtual_ranges_to_keep++] = range;
}


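// Build gKernelArgs.physical_memory_range from the EFI memory map: first add
// all usable ranges, then carve out everything reserved that may overlap.
// Loader, runtime services and MMIO entries get an identity VirtualStart for
// later use.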
static void
build_physical_memory_list(size_t memory_map_size,
	efi_memory_descriptor *memory_map, size_t descriptor_size,
	uint32_t descriptor_version)
{
	addr_t addr = (addr_t)memory_map;

	gKernelArgs.num_physical_memory_ranges = 0;

	// First scan: Add all usable ranges
	for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
		efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptor_size);
		switch (entry->Type) {
		case EfiLoaderCode:
		case EfiLoaderData:
			entry->VirtualStart = entry->PhysicalStart;
			break;
		case EfiBootServicesCode:
		case EfiBootServicesData:
		case EfiConventionalMemory: {
			// Usable memory.
			uint64_t base = entry->PhysicalStart;
			uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
			insert_physical_memory_range(base, size);
			break;
		}
		case EfiACPIReclaimMemory:
			// ACPI reclaim -- physical memory we could actually use later
			break;
		case EfiRuntimeServicesCode:
		case EfiRuntimeServicesData:
			entry->VirtualStart = entry->PhysicalStart;
			break;
		case EfiMemoryMappedIO:
			entry->VirtualStart = entry->PhysicalStart;
			break;
		}
	}

	uint64_t initialPhysicalMemory = total_physical_memory();

	// Second scan: Remove everything reserved that may overlap
	for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
		efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptor_size);
		switch (entry->Type) {
		case EfiLoaderCode:
		case EfiLoaderData:
		case EfiBootServicesCode:
		case EfiBootServicesData:
		case EfiConventionalMemory:
			break;
		default:
			uint64_t base = entry->PhysicalStart;
			uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
			remove_physical_memory_range(base, size);
		}
	}

	gKernelArgs.ignored_physical_memory
		+= initialPhysicalMemory - total_physical_memory();

	sort_address_ranges(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
}


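// Record the physical ranges the EFI loader already occupies (EfiLoaderData)
// as allocated physical memory.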
static void
build_physical_allocated_list(size_t memory_map_size,
	efi_memory_descriptor *memory_map, size_t descriptor_size,
	uint32_t descriptor_version)
{
	addr_t addr = (addr_t)memory_map;
	for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
		efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptor_size);
		switch (entry->Type) {
		case EfiLoaderData: {
			uint64_t base = entry->PhysicalStart;
			uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
			insert_physical_allocated_range(base, size);
			break;
		}
		default:
			;
		}
	}

	sort_address_ranges(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges);
}


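// Nothing to set up at this point; the boot page tables are built later, in
// arch_mmu_generate_post_efi_page_tables().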
void
arch_mmu_init()
{
}


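// Runs once boot services have been exited: record which physical ranges
// stayed allocated and switch the EFI runtime services over to the kernel's
// virtual address map.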
void
arch_mmu_post_efi_setup(size_t memory_map_size,
	efi_memory_descriptor *memory_map, size_t descriptor_size,
	uint32_t descriptor_version)
{
	build_physical_allocated_list(memory_map_size, memory_map,
		descriptor_size, descriptor_version);

	// Switch EFI to virtual mode, using the kernel pmap.
	// Something involving ConvertPointer might need to be done after this?
	// http://wiki.phoenix.com/wiki/index.php/EFI_RUNTIME_SERVICES
	kRuntimeServices->SetVirtualAddressMap(memory_map_size, descriptor_size,
		descriptor_version, memory_map);

	dprintf("phys memory ranges:\n");
	for (uint32_t i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
		uint32_t start = (uint32_t)gKernelArgs.physical_memory_range[i].start;
		uint32_t size = (uint32_t)gKernelArgs.physical_memory_range[i].size;
		dprintf("    0x%08x-0x%08x, length 0x%08x\n",
			start, start + size, size);
	}

	dprintf("allocated phys memory ranges:\n");
	for (uint32_t i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
		uint32_t start = (uint32_t)gKernelArgs.physical_allocated_range[i].start;
		uint32_t size = (uint32_t)gKernelArgs.physical_allocated_range[i].size;
		dprintf("    0x%08x-0x%08x, length 0x%08x\n",
			start, start + size, size);
	}

	dprintf("allocated virt memory ranges:\n");
	for (uint32_t i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
		uint32_t start = (uint32_t)gKernelArgs.virtual_allocated_range[i].start;
		uint32_t size = (uint32_t)gKernelArgs.virtual_allocated_range[i].size;
		dprintf("    0x%08x-0x%08x, length 0x%08x\n",
			start, start + size, size);
	}

	dprintf("virt memory ranges to keep:\n");
	for (uint32_t i = 0; i < gKernelArgs.arch_args.num_virtual_ranges_to_keep; i++) {
		uint32_t start = (uint32_t)gKernelArgs.arch_args.virtual_ranges_to_keep[i].start;
		uint32_t size = (uint32_t)gKernelArgs.arch_args.virtual_ranges_to_keep[i].size;
		dprintf("    0x%08x-0x%08x, length 0x%08x\n",
			start, start + size, size);
	}
}


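// Allocate one region holding the 16 KB-aligned L1 page directory followed by
// the pool of L2 coarse tables handed out by get_next_page_table().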
static void
arch_mmu_allocate_page_tables(void)
{
	if (platform_allocate_region((void **)&sPageDirectory,
		ARM_MMU_L1_TABLE_SIZE + ALIGN_PAGEDIR + PAGE_TABLE_AREA_SIZE, 0, false) != B_OK)
		panic("Failed to allocate page directory.");
	sPageDirectory = (uint32 *)ROUNDUP((uint32)sPageDirectory, ALIGN_PAGEDIR);
	memset(sPageDirectory, 0, ARM_MMU_L1_TABLE_SIZE);

	sFirstPageTable = (uint32*)((uint32)sPageDirectory + ARM_MMU_L1_TABLE_SIZE);
	sNextPageTable = sFirstPageTable;
	sLastPageTable = (uint32*)((uint32)sFirstPageTable + PAGE_TABLE_AREA_SIZE);

	memset(sFirstPageTable, 0, PAGE_TABLE_AREA_SIZE);

	dprintf("sPageDirectory  = 0x%08x\n", (uint32)sPageDirectory);
	dprintf("sFirstPageTable = 0x%08x\n", (uint32)sFirstPageTable);
	dprintf("sLastPageTable  = 0x%08x\n", (uint32)sLastPageTable);
}


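// Build the page tables the kernel will start with and return the physical
// address of the L1 page directory.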
uint32_t
arch_mmu_generate_post_efi_page_tables(size_t memory_map_size,
	efi_memory_descriptor *memory_map, size_t descriptor_size,
	uint32_t descriptor_version)
{
	addr_t memory_map_addr = (addr_t)memory_map;

	arch_mmu_allocate_page_tables();

	build_physical_memory_list(memory_map_size, memory_map,
		descriptor_size, descriptor_version);

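	// Map the loader image (code and data) at the identity VirtualStart set up
	// in build_physical_memory_list().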
	for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
		efi_memory_descriptor* entry =
			(efi_memory_descriptor *)(memory_map_addr + i * descriptor_size);
		switch (entry->Type) {
		case EfiLoaderCode:
		case EfiLoaderData:
			map_range(entry->VirtualStart, entry->PhysicalStart,
				entry->NumberOfPages * B_PAGE_SIZE,
				ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C | ARM_MMU_L2_FLAG_AP_RW);
			break;
		default:
			;
		}
	}

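	// Map everything the firmware needs at runtime (EFI_MEMORY_RUNTIME), so
	// SetVirtualAddressMap() can be called later.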
	for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
		efi_memory_descriptor* entry =
			(efi_memory_descriptor *)(memory_map_addr + i * descriptor_size);
		if ((entry->Attribute & EFI_MEMORY_RUNTIME) != 0)
			map_range(entry->VirtualStart, entry->PhysicalStart,
				entry->NumberOfPages * B_PAGE_SIZE,
				ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C | ARM_MMU_L2_FLAG_AP_RW);
	}

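	// Map the regions tracked by the loader's MMU code (mmu_next_region()
	// walks those allocations).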
	void* cookie = NULL;
	addr_t vaddr;
	phys_addr_t paddr;
	size_t size;
	while (mmu_next_region(&cookie, &vaddr, &paddr, &size)) {
		map_range(vaddr, paddr, size,
			ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C | ARM_MMU_L2_FLAG_AP_RW);
	}

	map_range_to_new_area(gKernelArgs.arch_args.uart.regs, ARM_MMU_L2_FLAG_B);

	// identity mapping for page table area
	uint32_t page_table_area = (uint32_t)sFirstPageTable;
	map_range(page_table_area, page_table_area, PAGE_TABLE_AREA_SIZE,
		ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C | ARM_MMU_L2_FLAG_AP_RW);

	sort_address_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

	addr_t vir_pgdir;
	platform_bootloader_address_to_kernel_address((void*)sPageDirectory, &vir_pgdir);

	gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;
	gKernelArgs.arch_args.vir_pgdir = (uint32)vir_pgdir;
	gKernelArgs.arch_args.next_pagetable = (uint32)(sNextPageTable) - (uint32)sPageDirectory;

	dprintf("gKernelArgs.arch_args.phys_pgdir     = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.phys_pgdir);
	dprintf("gKernelArgs.arch_args.vir_pgdir      = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.vir_pgdir);
	dprintf("gKernelArgs.arch_args.next_pagetable = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.next_pagetable);

	//dump_page_dir();

	return (uint32_t)sPageDirectory;
}