// xref: /haiku/src/system/boot/platform/efi/arch/arm/arch_mmu.cpp (revision a5c0d1a80e18f50987966fda2005210092d7671b)
/*
 * Copyright 2019-2022 Haiku, Inc. All rights reserved.
 * Released under the terms of the MIT License.
 */


#include <algorithm>

#include <arm_mmu.h>
#include <kernel.h>
#include <arch_kernel.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <efi/types.h>
#include <efi/boot-services.h>

#include "mmu.h"
#include "efi_platform.h"


//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


//#define TRACE_MEMORY_MAP
//#define TRACE_PAGE_DIRECTORY

#define ALIGN_PAGEDIR			(1024 * 16)
#define MAX_PAGE_TABLES			192
#define PAGE_TABLE_AREA_SIZE	(MAX_PAGE_TABLES * ARM_MMU_L2_COARSE_TABLE_SIZE)

static uint32_t *sPageDirectory = NULL;
static uint32_t *sNextPageTable = NULL;
static uint32_t *sLastPageTable = NULL;


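// Debug helper: dump every populated first-level entry together with the
// coarse second-level table it references (only built with TRACE_PAGE_DIRECTORY).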
#ifdef TRACE_PAGE_DIRECTORY
static void
dump_page_dir(void)
{
	dprintf("=== Page Directory ===\n");
	for (uint32_t i = 0; i < ARM_MMU_L1_TABLE_ENTRY_COUNT; i++) {
		uint32 directoryEntry = sPageDirectory[i];
		if (directoryEntry != 0) {
			dprintf("virt 0x%08x --> page table 0x%08x type 0x%08x\n",
				i << 20, directoryEntry & ARM_PDE_ADDRESS_MASK,
				directoryEntry & ARM_PDE_TYPE_MASK);
			uint32_t *pageTable = (uint32_t *)(directoryEntry & ARM_PDE_ADDRESS_MASK);
			for (uint32_t j = 0; j < ARM_MMU_L2_COARSE_ENTRY_COUNT; j++) {
				uint32 tableEntry = pageTable[j];
				if (tableEntry != 0) {
					dprintf("virt 0x%08x     --> page 0x%08x type+flags 0x%08x\n",
						(i << 20) | (j << 12),
						tableEntry & ARM_PTE_ADDRESS_MASK,
						tableEntry & (~ARM_PTE_ADDRESS_MASK));
				}
			}
		}
	}
}
#endif


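// Hand out the next coarse (L2) page table from the pool reserved in
// arch_mmu_allocate_page_tables(); panics once all MAX_PAGE_TABLES are used.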
static uint32 *
get_next_page_table(void)
{
	if (sNextPageTable >= sLastPageTable)
		panic("ran out of page tables\n");

	uint32 *pageTable = sNextPageTable;
	sNextPageTable += ARM_MMU_L2_COARSE_ENTRY_COUNT;
	return pageTable;
}


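// Enter a single 4 KB small-page mapping for virtAddr -> physAddr, allocating
// a coarse second-level table for the containing 1 MB section on demand.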
static void
map_page(addr_t virtAddr, phys_addr_t physAddr, uint32_t flags)
{
	physAddr &= ~(B_PAGE_SIZE - 1);

	uint32 *pageTable = NULL;
	uint32 pageDirectoryIndex = VADDR_TO_PDENT(virtAddr);
	uint32 pageDirectoryEntry = sPageDirectory[pageDirectoryIndex];

	if (pageDirectoryEntry == 0) {
		pageTable = get_next_page_table();
		sPageDirectory[pageDirectoryIndex] = (uint32_t)pageTable | ARM_MMU_L1_TYPE_COARSE;
	} else {
		pageTable = (uint32 *)(pageDirectoryEntry & ARM_PDE_ADDRESS_MASK);
	}

	uint32 pageTableIndex = VADDR_TO_PTENT(virtAddr);
	pageTable[pageTableIndex] = physAddr | flags | ARM_MMU_L2_TYPE_SMALLNEW;
}


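// Map a physically contiguous range page by page; mappings at or above
// KERNEL_LOAD_BASE are also recorded as allocated kernel virtual address space.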
static void
map_range(addr_t virtAddr, phys_addr_t physAddr, size_t size, uint32_t flags)
{
	//TRACE("map 0x%08" B_PRIxADDR " --> 0x%08" B_PRIxPHYSADDR
	//	", len=0x%08" B_PRIxSIZE ", flags=0x%08" B_PRIx32 "\n",
	//	virtAddr, physAddr, size, flags);

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(virtAddr + offset, physAddr + offset, flags);
	}

	if (virtAddr >= KERNEL_LOAD_BASE)
		ASSERT_ALWAYS(insert_virtual_allocated_range(virtAddr, size) >= B_OK);
}


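// Remember a loader-created virtual range that the kernel must keep mapped
// when it takes over this address space.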
static void
insert_virtual_range_to_keep(uint64 start, uint64 size)
{
	status_t status = insert_address_range(
		gKernelArgs.arch_args.virtual_ranges_to_keep,
		&gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, start, size);

	if (status == B_ENTRY_NOT_FOUND)
		panic("too many virtual ranges to keep");
	else if (status != B_OK)
		panic("failed to add virtual range to keep");
}


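// Map a physical range at the next free virtual address and register it as a
// range to keep. Returns the new virtual address of `start`; the overloads
// below update an addr_range or an EFI memory descriptor in place.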
static addr_t
map_range_to_new_area(addr_t start, size_t size, uint32_t flags)
{
	if (size == 0)
		return 0;

	phys_addr_t physAddr = ROUNDDOWN(start, B_PAGE_SIZE);
	size_t alignedSize = ROUNDUP(size + (start - physAddr), B_PAGE_SIZE);
	addr_t virtAddr = get_next_virtual_address(alignedSize);

	map_range(virtAddr, physAddr, alignedSize, flags);
	insert_virtual_range_to_keep(virtAddr, alignedSize);

	return virtAddr + (start - physAddr);
}


static void
map_range_to_new_area(addr_range& range, uint32_t flags)
{
	range.start = map_range_to_new_area(range.start, range.size, flags);
}


static void
map_range_to_new_area(efi_memory_descriptor *entry, uint32_t flags)
{
	uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
	entry->VirtualStart = map_range_to_new_area(entry->PhysicalStart, size, flags);
}


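// Build gKernelArgs.physical_memory_range from the EFI memory map: add every
// usable range first, then carve out any reserved regions that overlap.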
static void
build_physical_memory_list(size_t memoryMapSize,
	efi_memory_descriptor *memoryMap, size_t descriptorSize,
	uint32_t descriptorVersion)
{
	addr_t addr = (addr_t)memoryMap;

	gKernelArgs.num_physical_memory_ranges = 0;

	// First scan: Add all usable ranges
	for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
		efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptorSize);
		switch (entry->Type) {
			case EfiLoaderCode:
			case EfiLoaderData:
			case EfiBootServicesCode:
			case EfiBootServicesData:
			case EfiConventionalMemory: {
				// Usable memory.
				uint64_t base = entry->PhysicalStart;
				uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
				insert_physical_memory_range(base, size);
				break;
			}
			default:
				break;
		}
	}

	uint64_t initialPhysicalMemory = total_physical_memory();

	// Second scan: Remove everything reserved that may overlap
	for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
		efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptorSize);
		switch (entry->Type) {
			case EfiLoaderCode:
			case EfiLoaderData:
			case EfiBootServicesCode:
			case EfiBootServicesData:
			case EfiConventionalMemory:
				break;
			default: {
				uint64_t base = entry->PhysicalStart;
				uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
				remove_physical_memory_range(base, size);
			}
		}
	}

	gKernelArgs.ignored_physical_memory
		+= initialPhysicalMemory - total_physical_memory();

	sort_address_ranges(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
}


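// Record all EfiLoaderData ranges as already-allocated physical memory so the
// kernel does not hand them out again.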
static void
build_physical_allocated_list(size_t memoryMapSize,
	efi_memory_descriptor *memoryMap, size_t descriptorSize,
	uint32_t descriptorVersion)
{
	addr_t addr = (addr_t)memoryMap;
	for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
		efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptorSize);
		switch (entry->Type) {
			case EfiLoaderData: {
				uint64_t base = entry->PhysicalStart;
				uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
				insert_physical_allocated_range(base, size);
				break;
			}
			default:
				break;
		}
	}

	sort_address_ranges(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges);
}


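// Record the allocated physical ranges and switch the EFI runtime services
// over to the virtual addresses assigned in
// arch_mmu_generate_post_efi_page_tables().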
void
arch_mmu_post_efi_setup(size_t memoryMapSize,
	efi_memory_descriptor *memoryMap, size_t descriptorSize,
	uint32_t descriptorVersion)
{
	build_physical_allocated_list(memoryMapSize, memoryMap,
		descriptorSize, descriptorVersion);

	// Switch EFI to virtual mode, using the kernel pmap.
	kRuntimeServices->SetVirtualAddressMap(memoryMapSize, descriptorSize,
		descriptorVersion, memoryMap);

#ifdef TRACE_MEMORY_MAP
	dprintf("phys memory ranges:\n");
	for (uint32_t i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
		uint64 start = gKernelArgs.physical_memory_range[i].start;
		uint64 size = gKernelArgs.physical_memory_range[i].size;
		dprintf("    0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
			start, start + size, size);
	}

	dprintf("allocated phys memory ranges:\n");
	for (uint32_t i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
		uint64 start = gKernelArgs.physical_allocated_range[i].start;
		uint64 size = gKernelArgs.physical_allocated_range[i].size;
		dprintf("    0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
			start, start + size, size);
	}

	dprintf("allocated virt memory ranges:\n");
	for (uint32_t i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
		uint64 start = gKernelArgs.virtual_allocated_range[i].start;
		uint64 size = gKernelArgs.virtual_allocated_range[i].size;
		dprintf("    0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
			start, start + size, size);
	}

	dprintf("virt memory ranges to keep:\n");
	for (uint32_t i = 0; i < gKernelArgs.arch_args.num_virtual_ranges_to_keep; i++) {
		uint32 start = gKernelArgs.arch_args.virtual_ranges_to_keep[i].start;
		uint32 size = gKernelArgs.arch_args.virtual_ranges_to_keep[i].size;
		dprintf("    0x%08" B_PRIx32 "-0x%08" B_PRIx32 ", length 0x%08" B_PRIx32 "\n",
			start, start + size, size);
	}
#endif
}


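// Allocate one contiguous region holding the (16 KB aligned) first-level
// translation table followed by the pool of coarse second-level tables.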
static void
arch_mmu_allocate_page_tables(void)
{
	if (platform_allocate_region((void **)&sPageDirectory,
		ARM_MMU_L1_TABLE_SIZE + ALIGN_PAGEDIR + PAGE_TABLE_AREA_SIZE, 0, false) != B_OK)
		panic("Failed to allocate page directory.");
	sPageDirectory = (uint32 *)ROUNDUP((uint32)sPageDirectory, ALIGN_PAGEDIR);
	memset(sPageDirectory, 0, ARM_MMU_L1_TABLE_SIZE);

	sNextPageTable = (uint32*)((uint32)sPageDirectory + ARM_MMU_L1_TABLE_SIZE);
	sLastPageTable = (uint32*)((uint32)sNextPageTable + PAGE_TABLE_AREA_SIZE);

	memset(sNextPageTable, 0, PAGE_TABLE_AREA_SIZE);

	TRACE("sPageDirectory = 0x%08x\n", (uint32)sPageDirectory);
	TRACE("sNextPageTable = 0x%08x\n", (uint32)sNextPageTable);
	TRACE("sLastPageTable = 0x%08x\n", (uint32)sLastPageTable);
}


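// Build the page tables the kernel starts with: map all EFI runtime-service
// regions, every region allocated by the loader, and the UART, then publish
// the page directory location through gKernelArgs. Returns the physical
// address of the page directory.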
uint32_t
arch_mmu_generate_post_efi_page_tables(size_t memoryMapSize,
	efi_memory_descriptor *memoryMap, size_t descriptorSize,
	uint32_t descriptorVersion)
{
	arch_mmu_allocate_page_tables();

	build_physical_memory_list(memoryMapSize, memoryMap,
		descriptorSize, descriptorVersion);

	addr_t memoryMapAddr = (addr_t)memoryMap;
	for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
		efi_memory_descriptor* entry =
			(efi_memory_descriptor *)(memoryMapAddr + i * descriptorSize);
		if ((entry->Attribute & EFI_MEMORY_RUNTIME) != 0) {
			map_range_to_new_area(entry,
				ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C | ARM_MMU_L2_FLAG_AP_KRW);
		}
	}

	void* cookie = NULL;
	addr_t vaddr;
	phys_addr_t paddr;
	size_t size;
	while (mmu_next_region(&cookie, &vaddr, &paddr, &size)) {
		map_range(vaddr, paddr, size,
			ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C | ARM_MMU_L2_FLAG_AP_KRW);
	}

	map_range_to_new_area(gKernelArgs.arch_args.uart.regs,
		ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_AP_KRW | ARM_MMU_L2_FLAG_XN);

	sort_address_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

	addr_t virtPageDirectory;
	platform_bootloader_address_to_kernel_address((void*)sPageDirectory, &virtPageDirectory);

	gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;
	gKernelArgs.arch_args.vir_pgdir = (uint32)virtPageDirectory;
	gKernelArgs.arch_args.next_pagetable = (uint32)(sNextPageTable) - (uint32)sPageDirectory;
	gKernelArgs.arch_args.last_pagetable = (uint32)(sLastPageTable) - (uint32)sPageDirectory;

	TRACE("gKernelArgs.arch_args.phys_pgdir     = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.phys_pgdir);
	TRACE("gKernelArgs.arch_args.vir_pgdir      = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.vir_pgdir);
	TRACE("gKernelArgs.arch_args.next_pagetable = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.next_pagetable);
	TRACE("gKernelArgs.arch_args.last_pagetable = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.last_pagetable);

#ifdef TRACE_PAGE_DIRECTORY
	dump_page_dir();
#endif

	return (uint32_t)sPageDirectory;
}


void
arch_mmu_init()
{
	// empty
}
377