/*
 * Copyright 2019-2022 Haiku, Inc. All rights reserved.
 * Released under the terms of the MIT License.
 */


#include <algorithm>

#include <arm_mmu.h>
#include <kernel.h>
#include <arch_kernel.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <efi/types.h>
#include <efi/boot-services.h>

#include "mmu.h"
#include "efi_platform.h"


//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


//#define TRACE_MEMORY_MAP
//#define TRACE_PAGE_DIRECTORY

#define ALIGN_PAGEDIR			(1024 * 16)
#define MAX_PAGE_TABLES			192
#define PAGE_TABLE_AREA_SIZE	(MAX_PAGE_TABLES * ARM_MMU_L2_COARSE_TABLE_SIZE)

static uint32_t *sPageDirectory = NULL;
static uint32_t *sNextPageTable = NULL;
static uint32_t *sLastPageTable = NULL;


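// Dumps the L1 page directory and every populated coarse L2 page table to
// the serial log. Only compiled in when TRACE_PAGE_DIRECTORY is defined
// above.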
#ifdef TRACE_PAGE_DIRECTORY
static void
dump_page_dir(void)
{
	dprintf("=== Page Directory ===\n");
	for (uint32_t i = 0; i < ARM_MMU_L1_TABLE_ENTRY_COUNT; i++) {
		uint32 directoryEntry = sPageDirectory[i];
		if (directoryEntry != 0) {
			dprintf("virt 0x%08x --> page table 0x%08x type 0x%08x\n",
				i << 20, directoryEntry & ARM_PDE_ADDRESS_MASK,
				directoryEntry & ARM_PDE_TYPE_MASK);
			uint32_t *pageTable = (uint32_t *)(directoryEntry & ARM_PDE_ADDRESS_MASK);
			for (uint32_t j = 0; j < ARM_MMU_L2_COARSE_ENTRY_COUNT; j++) {
				uint32 tableEntry = pageTable[j];
				if (tableEntry != 0) {
					dprintf("virt 0x%08x     --> page 0x%08x type+flags 0x%08x\n",
						(i << 20) | (j << 12),
						tableEntry & ARM_PTE_ADDRESS_MASK,
						tableEntry & (~ARM_PTE_ADDRESS_MASK));
				}
			}
		}
	}
}
#endif


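// Returns the next free coarse L2 page table from the preallocated page
// table area and advances the bump pointer; panics once the area is
// exhausted.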
static uint32 *
get_next_page_table(void)
{
	uint32 *pageTable = sNextPageTable;
	sNextPageTable += ARM_MMU_L2_COARSE_ENTRY_COUNT;
	if (sNextPageTable >= sLastPageTable)
		panic("ran out of page tables\n");
	return pageTable;
}


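// Inserts a single 4 KiB mapping into the two-level translation tables:
// bits 31-20 of the virtual address select the L1 page directory entry,
// bits 19-12 the entry within the 256-entry coarse L2 table. For example,
// virtAddr 0x80001000 resolves to page directory index 0x800 and page
// table index 0x01. A coarse table is allocated on demand the first time
// a 1 MiB section is touched.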
static void
map_page(addr_t virtAddr, phys_addr_t physAddr, uint32_t flags)
{
	physAddr &= ~(B_PAGE_SIZE - 1);

	uint32 *pageTable = NULL;
	uint32 pageDirectoryIndex = VADDR_TO_PDENT(virtAddr);
	uint32 pageDirectoryEntry = sPageDirectory[pageDirectoryIndex];

	if (pageDirectoryEntry == 0) {
		pageTable = get_next_page_table();
		sPageDirectory[pageDirectoryIndex] = (uint32_t)pageTable | ARM_MMU_L1_TYPE_COARSE;
	} else {
		pageTable = (uint32 *)(pageDirectoryEntry & ARM_PDE_ADDRESS_MASK);
	}

	uint32 pageTableIndex = VADDR_TO_PTENT(virtAddr);
	pageTable[pageTableIndex] = physAddr | flags | ARM_MMU_L2_TYPE_SMALLNEW;
}


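// Maps a contiguous physical range at the given virtual address, one page
// at a time. Mappings at or above KERNEL_LOAD_BASE are also recorded in
// the kernel's virtual allocated ranges.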
static void
map_range(addr_t virtAddr, phys_addr_t physAddr, size_t size, uint32_t flags)
{
	//TRACE("map 0x%08" B_PRIxADDR " --> 0x%08" B_PRIxPHYSADDR
	//	", len=0x%08" B_PRIxSIZE ", flags=0x%08" B_PRIx32 "\n",
	//	virtAddr, physAddr, size, flags);

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(virtAddr + offset, physAddr + offset, flags);
	}

	if (virtAddr >= KERNEL_LOAD_BASE)
		ASSERT_ALWAYS(insert_virtual_allocated_range(virtAddr, size) >= B_OK);
}


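// Records a virtual range that the kernel must preserve when it takes
// over the address space; panics if the fixed-size list in the kernel
// args overflows.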
static void
insert_virtual_range_to_keep(uint64 start, uint64 size)
{
	status_t status = insert_address_range(
		gKernelArgs.arch_args.virtual_ranges_to_keep,
		&gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, start, size);

	if (status == B_ENTRY_NOT_FOUND)
		panic("too many virtual ranges to keep");
	else if (status != B_OK)
		panic("failed to add virtual range to keep");
}


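// Maps a physical range to a freshly allocated virtual range, rewrites
// range.start to the new virtual address, and marks the range as one the
// kernel has to keep.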
static void
map_range_to_new_area(addr_range& range, uint32_t flags)
{
	if (range.size == 0) {
		range.start = 0;
		return;
	}

	phys_addr_t physAddr = range.start;
	addr_t virtAddr = get_next_virtual_address(range.size);

	map_range(virtAddr, physAddr, range.size, flags);

	range.start = virtAddr;

	insert_virtual_range_to_keep(range.start, range.size);
}


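// Same for an EFI memory descriptor: fills in VirtualStart so the
// descriptor can later be handed to SetVirtualAddressMap() in
// arch_mmu_post_efi_setup().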
static void
map_range_to_new_area(efi_memory_descriptor *entry, uint32_t flags)
{
	uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
	entry->VirtualStart = get_next_virtual_address(size);
	map_range(entry->VirtualStart, entry->PhysicalStart, size, flags);
	insert_virtual_range_to_keep(entry->VirtualStart, size);
}


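// Builds gKernelArgs.physical_memory_range from the EFI memory map in two
// passes: first insert every usable range, then remove any reserved range
// that overlaps, and account for the memory lost that way.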
static void
build_physical_memory_list(size_t memoryMapSize,
	efi_memory_descriptor *memoryMap, size_t descriptorSize,
	uint32_t descriptorVersion)
{
	addr_t addr = (addr_t)memoryMap;

	gKernelArgs.num_physical_memory_ranges = 0;

	// First scan: Add all usable ranges
	for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
		efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptorSize);
		switch (entry->Type) {
			case EfiLoaderCode:
			case EfiLoaderData:
			case EfiBootServicesCode:
			case EfiBootServicesData:
			case EfiConventionalMemory: {
				// Usable memory.
				uint64_t base = entry->PhysicalStart;
				uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
				insert_physical_memory_range(base, size);
				break;
			}
			default:
				break;
		}
	}

	uint64_t initialPhysicalMemory = total_physical_memory();

	// Second scan: Remove everything reserved that may overlap
	for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
		efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptorSize);
		switch (entry->Type) {
			case EfiLoaderCode:
			case EfiLoaderData:
			case EfiBootServicesCode:
			case EfiBootServicesData:
			case EfiConventionalMemory:
				break;
			default:
				uint64_t base = entry->PhysicalStart;
				uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
				remove_physical_memory_range(base, size);
		}
	}

	gKernelArgs.ignored_physical_memory
		+= initialPhysicalMemory - total_physical_memory();

	sort_address_ranges(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
}


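// Records every EfiLoaderData range as already allocated physical memory,
// so the kernel does not hand the loader's allocations out again.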
static void
build_physical_allocated_list(size_t memoryMapSize,
	efi_memory_descriptor *memoryMap, size_t descriptorSize,
	uint32_t descriptorVersion)
{
	addr_t addr = (addr_t)memoryMap;
	for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
		efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptorSize);
		switch (entry->Type) {
			case EfiLoaderData: {
				uint64_t base = entry->PhysicalStart;
				uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
				insert_physical_allocated_range(base, size);
				break;
			}
			default:
				;
		}
	}

	sort_address_ranges(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges);
}


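// Called with the final memory map after the loader has exited boot
// services: completes the physical allocation list and switches the EFI
// runtime services over to the virtual mappings prepared in
// arch_mmu_generate_post_efi_page_tables().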
void
arch_mmu_post_efi_setup(size_t memoryMapSize,
	efi_memory_descriptor *memoryMap, size_t descriptorSize,
	uint32_t descriptorVersion)
{
	build_physical_allocated_list(memoryMapSize, memoryMap,
		descriptorSize, descriptorVersion);

	// Switch EFI to virtual mode, using the kernel pmap.
	kRuntimeServices->SetVirtualAddressMap(memoryMapSize, descriptorSize,
		descriptorVersion, memoryMap);

#ifdef TRACE_MEMORY_MAP
	dprintf("phys memory ranges:\n");
	for (uint32_t i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
		uint64 start = gKernelArgs.physical_memory_range[i].start;
		uint64 size = gKernelArgs.physical_memory_range[i].size;
		dprintf("    0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
			start, start + size, size);
	}

	dprintf("allocated phys memory ranges:\n");
	for (uint32_t i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
		uint64 start = gKernelArgs.physical_allocated_range[i].start;
		uint64 size = gKernelArgs.physical_allocated_range[i].size;
		dprintf("    0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
			start, start + size, size);
	}

	dprintf("allocated virt memory ranges:\n");
	for (uint32_t i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
		uint64 start = gKernelArgs.virtual_allocated_range[i].start;
		uint64 size = gKernelArgs.virtual_allocated_range[i].size;
		dprintf("    0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
			start, start + size, size);
	}

	dprintf("virt memory ranges to keep:\n");
	for (uint32_t i = 0; i < gKernelArgs.arch_args.num_virtual_ranges_to_keep; i++) {
		uint32 start = gKernelArgs.arch_args.virtual_ranges_to_keep[i].start;
		uint32 size = gKernelArgs.arch_args.virtual_ranges_to_keep[i].size;
		dprintf("    0x%08" B_PRIx32 "-0x%08" B_PRIx32 ", length 0x%08" B_PRIx32 "\n",
			start, start + size, size);
	}
#endif
}


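// Allocates one contiguous region that holds the L1 page directory
// (16 KiB, which the ARM MMU requires to be 16 KiB aligned, hence the
// ALIGN_PAGEDIR slack in the allocation) followed by room for up to
// MAX_PAGE_TABLES coarse L2 tables, and zero-initializes both.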
static void
arch_mmu_allocate_page_tables(void)
{
	if (platform_allocate_region((void **)&sPageDirectory,
		ARM_MMU_L1_TABLE_SIZE + ALIGN_PAGEDIR + PAGE_TABLE_AREA_SIZE, 0, false) != B_OK)
		panic("Failed to allocate page directory.");
	sPageDirectory = (uint32 *)ROUNDUP((uint32)sPageDirectory, ALIGN_PAGEDIR);
	memset(sPageDirectory, 0, ARM_MMU_L1_TABLE_SIZE);

	sNextPageTable = (uint32*)((uint32)sPageDirectory + ARM_MMU_L1_TABLE_SIZE);
	sLastPageTable = (uint32*)((uint32)sNextPageTable + PAGE_TABLE_AREA_SIZE);

	memset(sNextPageTable, 0, PAGE_TABLE_AREA_SIZE);

	TRACE("sPageDirectory = 0x%08x\n", (uint32)sPageDirectory);
	TRACE("sNextPageTable = 0x%08x\n", (uint32)sNextPageTable);
	TRACE("sLastPageTable = 0x%08x\n", (uint32)sLastPageTable);
}


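// Builds the translation tables the kernel will take over: maps every EFI
// range marked EFI_MEMORY_RUNTIME and every region the loader allocated
// to new kernel virtual addresses, maps the UART registers, and publishes
// the page directory location and the page table bump-allocator state in
// gKernelArgs.arch_args. Returns the physical address of the page
// directory.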
uint32_t
arch_mmu_generate_post_efi_page_tables(size_t memoryMapSize,
	efi_memory_descriptor *memoryMap, size_t descriptorSize,
	uint32_t descriptorVersion)
{
	arch_mmu_allocate_page_tables();

	build_physical_memory_list(memoryMapSize, memoryMap,
		descriptorSize, descriptorVersion);

	addr_t memoryMapAddr = (addr_t)memoryMap;
	for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
		efi_memory_descriptor* entry =
			(efi_memory_descriptor *)(memoryMapAddr + i * descriptorSize);
		if ((entry->Attribute & EFI_MEMORY_RUNTIME) != 0) {
			map_range_to_new_area(entry,
				ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C | ARM_MMU_L2_FLAG_AP_RW);
		}
	}

	void* cookie = NULL;
	addr_t vaddr;
	phys_addr_t paddr;
	size_t size;
	while (mmu_next_region(&cookie, &vaddr, &paddr, &size)) {
		map_range(vaddr, paddr, size,
			ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C | ARM_MMU_L2_FLAG_AP_RW);
	}

	map_range_to_new_area(gKernelArgs.arch_args.uart.regs, ARM_MMU_L2_FLAG_B);

	sort_address_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

	addr_t virtPageDirectory;
	platform_bootloader_address_to_kernel_address((void*)sPageDirectory, &virtPageDirectory);

	gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;
	gKernelArgs.arch_args.vir_pgdir = (uint32)virtPageDirectory;
	gKernelArgs.arch_args.next_pagetable = (uint32)(sNextPageTable) - (uint32)sPageDirectory;
	gKernelArgs.arch_args.last_pagetable = (uint32)(sLastPageTable) - (uint32)sPageDirectory;

	TRACE("gKernelArgs.arch_args.phys_pgdir     = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.phys_pgdir);
	TRACE("gKernelArgs.arch_args.vir_pgdir      = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.vir_pgdir);
	TRACE("gKernelArgs.arch_args.next_pagetable = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.next_pagetable);
	TRACE("gKernelArgs.arch_args.last_pagetable = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.last_pagetable);

#ifdef TRACE_PAGE_DIRECTORY
	dump_page_dir();
#endif

	return (uint32_t)sPageDirectory;
}


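// Nothing to do at this stage; all setup happens in
// arch_mmu_generate_post_efi_page_tables().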
void
arch_mmu_init()
{
	// empty
}