xref: /haiku/src/system/boot/platform/efi/arch/arm/arch_mmu.cpp (revision 4958c5d7b68abe13db5fbfa61305db81693f0eef)
/*
 * Copyright 2019-2023 Haiku, Inc. All rights reserved.
 * Released under the terms of the MIT License.
 */


#include <algorithm>

#include <arm_mmu.h>
#include <kernel.h>
#include <arch_kernel.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <efi/types.h>
#include <efi/boot-services.h>

#include "efi_platform.h"
#include "generic_mmu.h"
#include "mmu.h"


//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


static constexpr bool kTraceMemoryMap = false;
static constexpr bool kTracePageDirectory = false;


// Ignore memory above 512GB
#define PHYSICAL_MEMORY_LOW		0x00000000
#define PHYSICAL_MEMORY_HIGH	0x8000000000ull

#define USER_VECTOR_ADDR_HIGH	0xffff0000

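// The ARM short-descriptor L1 translation table must be 16 KB aligned, so the
// page directory allocation below is over-allocated and rounded up. A fixed
// pool of L2 coarse tables is carved out of the area placed behind it.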
#define ALIGN_PAGEDIR			(1024 * 16)
#define MAX_PAGE_TABLES			192
#define PAGE_TABLE_AREA_SIZE	(MAX_PAGE_TABLES * ARM_MMU_L2_COARSE_TABLE_SIZE)

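// Bootloader-built ARM translation tables: the L1 page directory, the bump
// allocation cursors over the preallocated pool of L2 coarse tables, and the
// page backing the exception vector table.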
static uint32_t *sPageDirectory = NULL;
static uint32_t *sNextPageTable = NULL;
static uint32_t *sLastPageTable = NULL;
static uint32_t *sVectorTable = (uint32_t*)USER_VECTOR_ADDR_HIGH;


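// Dump every populated L1 entry and the L2 coarse table it points to;
// only used for debugging (see kTracePageDirectory).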
static void
dump_page_dir(void)
{
	dprintf("=== Page Directory ===\n");
	for (uint32_t i = 0; i < ARM_MMU_L1_TABLE_ENTRY_COUNT; i++) {
		uint32 directoryEntry = sPageDirectory[i];
		if (directoryEntry != 0) {
			dprintf("virt 0x%08x --> page table 0x%08x type 0x%08x\n",
				i << 20, directoryEntry & ARM_PDE_ADDRESS_MASK,
				directoryEntry & ARM_PDE_TYPE_MASK);
			uint32_t *pageTable = (uint32_t *)(directoryEntry & ARM_PDE_ADDRESS_MASK);
			for (uint32_t j = 0; j < ARM_MMU_L2_COARSE_ENTRY_COUNT; j++) {
				uint32 tableEntry = pageTable[j];
				if (tableEntry != 0) {
					dprintf("virt 0x%08x     --> page 0x%08x type+flags 0x%08x\n",
						(i << 20) | (j << 12),
						tableEntry & ARM_PTE_ADDRESS_MASK,
						tableEntry & (~ARM_PTE_ADDRESS_MASK));
				}
			}
		}
	}
}


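// Take the next unused L2 coarse table from the preallocated pool;
// panics once the pool is exhausted.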
static uint32 *
get_next_page_table(void)
{
	uint32 *pageTable = sNextPageTable;
	sNextPageTable += ARM_MMU_L2_COARSE_ENTRY_COUNT;
	if (sNextPageTable >= sLastPageTable)
		panic("ran out of page tables\n");
	return pageTable;
}


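// Map a single page: look up (or allocate) the L2 coarse table covering the
// virtual address, then fill in the small-page entry.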
static void
map_page(addr_t virtAddr, phys_addr_t physAddr, uint32_t flags)
{
	physAddr &= ~(B_PAGE_SIZE - 1);

	uint32 *pageTable = NULL;
	uint32 pageDirectoryIndex = VADDR_TO_PDENT(virtAddr);
	uint32 pageDirectoryEntry = sPageDirectory[pageDirectoryIndex];

	if (pageDirectoryEntry == 0) {
		pageTable = get_next_page_table();
		sPageDirectory[pageDirectoryIndex] = (uint32_t)pageTable | ARM_MMU_L1_TYPE_COARSE;
	} else {
		pageTable = (uint32 *)(pageDirectoryEntry & ARM_PDE_ADDRESS_MASK);
	}

	uint32 pageTableIndex = VADDR_TO_PTENT(virtAddr);
	pageTable[pageTableIndex] = physAddr | flags | ARM_MMU_L2_TYPE_SMALLNEW;
}


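// Map a physically contiguous range page by page; ranges in kernel space are
// also recorded in the virtual-allocated-range list handed to the kernel.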
static void
map_range(addr_t virtAddr, phys_addr_t physAddr, size_t size, uint32_t flags)
{
	//TRACE("map 0x%08" B_PRIxADDR " --> 0x%08" B_PRIxPHYSADDR
	//	", len=0x%08" B_PRIxSIZE ", flags=0x%08" PRIx32 "\n",
	//	virtAddr, physAddr, size, flags);

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(virtAddr + offset, physAddr + offset, flags);
	}

	if (virtAddr >= KERNEL_LOAD_BASE)
		ASSERT_ALWAYS(insert_virtual_allocated_range(virtAddr, size) >= B_OK);
}


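// Record a virtual range that the kernel must preserve when it takes over
// the address space (used for the EFI runtime regions and the UART below).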
static void
insert_virtual_range_to_keep(uint64 start, uint64 size)
{
	status_t status = insert_address_range(
		gKernelArgs.arch_args.virtual_ranges_to_keep,
		&gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, start, size);

	if (status == B_ENTRY_NOT_FOUND)
		panic("too many virtual ranges to keep");
	else if (status != B_OK)
		panic("failed to add virtual range to keep");
}


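// Map a physical range at a newly assigned virtual address and mark it as a
// range to keep; returns the new virtual address corresponding to `start`.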
static addr_t
map_range_to_new_area(addr_t start, size_t size, uint32_t flags)
{
	if (size == 0)
		return 0;

	phys_addr_t physAddr = ROUNDDOWN(start, B_PAGE_SIZE);
	size_t alignedSize = ROUNDUP(size + (start - physAddr), B_PAGE_SIZE);
	addr_t virtAddr = get_next_virtual_address(alignedSize);

	map_range(virtAddr, physAddr, alignedSize, flags);
	insert_virtual_range_to_keep(virtAddr, alignedSize);

	return virtAddr + (start - physAddr);
}


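// Same as above, but updates the given range in place.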
static void
map_range_to_new_area(addr_range& range, uint32_t flags)
{
	range.start = map_range_to_new_area(range.start, range.size, flags);
}


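// Same as above for an EFI memory descriptor; the new address is stored in
// the descriptor's VirtualStart field.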
static void
map_range_to_new_area(efi_memory_descriptor *entry, uint32_t flags)
{
	uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
	entry->VirtualStart = map_range_to_new_area(entry->PhysicalStart, size, flags);
}


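// Finish MMU setup once the loader has left EFI boot services: build the list
// of allocated physical ranges and switch the EFI runtime services over to
// their new virtual mappings.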
void
arch_mmu_post_efi_setup(size_t memoryMapSize,
	efi_memory_descriptor *memoryMap, size_t descriptorSize,
	uint32_t descriptorVersion)
{
	build_physical_allocated_list(memoryMapSize, memoryMap,
		descriptorSize, descriptorVersion);

	// Switch EFI to virtual mode, using the kernel pmap.
	kRuntimeServices->SetVirtualAddressMap(memoryMapSize, descriptorSize,
		descriptorVersion, memoryMap);

	if (kTraceMemoryMap) {
		dprintf("phys memory ranges:\n");
		for (uint32_t i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
			uint64 start = gKernelArgs.physical_memory_range[i].start;
			uint64 size = gKernelArgs.physical_memory_range[i].size;
			dprintf("    0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
				start, start + size, size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (uint32_t i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
			uint64 start = gKernelArgs.physical_allocated_range[i].start;
			uint64 size = gKernelArgs.physical_allocated_range[i].size;
			dprintf("    0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
				start, start + size, size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (uint32_t i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
			uint64 start = gKernelArgs.virtual_allocated_range[i].start;
			uint64 size = gKernelArgs.virtual_allocated_range[i].size;
			dprintf("    0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
				start, start + size, size);
		}

		dprintf("virt memory ranges to keep:\n");
		for (uint32_t i = 0; i < gKernelArgs.arch_args.num_virtual_ranges_to_keep; i++) {
			uint32 start = gKernelArgs.arch_args.virtual_ranges_to_keep[i].start;
			uint32 size = gKernelArgs.arch_args.virtual_ranges_to_keep[i].size;
			dprintf("    0x%08" B_PRIx32 "-0x%08" B_PRIx32 ", length 0x%08" B_PRIx32 "\n",
				start, start + size, size);
		}
	}
}


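// Allocate and zero the 16 KB-aligned L1 page directory together with the
// pool of L2 coarse tables placed directly behind it.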
static void
arch_mmu_allocate_page_tables(void)
{
	if (platform_allocate_region((void **)&sPageDirectory,
		ARM_MMU_L1_TABLE_SIZE + ALIGN_PAGEDIR + PAGE_TABLE_AREA_SIZE, 0, false) != B_OK)
		panic("Failed to allocate page directory.");
	sPageDirectory = (uint32 *)ROUNDUP((uint32)sPageDirectory, ALIGN_PAGEDIR);
	memset(sPageDirectory, 0, ARM_MMU_L1_TABLE_SIZE);

	sNextPageTable = (uint32*)((uint32)sPageDirectory + ARM_MMU_L1_TABLE_SIZE);
	sLastPageTable = (uint32*)((uint32)sNextPageTable + PAGE_TABLE_AREA_SIZE);

	memset(sNextPageTable, 0, PAGE_TABLE_AREA_SIZE);

	TRACE("sPageDirectory = 0x%08x\n", (uint32)sPageDirectory);
	TRACE("sNextPageTable = 0x%08x\n", (uint32)sNextPageTable);
	TRACE("sLastPageTable = 0x%08x\n", (uint32)sLastPageTable);
}


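// Allocate and zero one page for the ARM exception vector table.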
static void
arch_mmu_allocate_vector_table(void)
{
	if (platform_allocate_region((void **)&sVectorTable, B_PAGE_SIZE, 0, false) != B_OK)
		panic("Failed to allocate vector table.");

	memset(sVectorTable, 0, B_PAGE_SIZE);
}


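// Build the kernel's initial page tables from the final EFI memory map: map
// the EFI runtime regions, the loader's own allocations and the UART into
// kernel space, then pass the table locations to the kernel via gKernelArgs.
// Returns the physical address of the page directory.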
uint32_t
arch_mmu_generate_post_efi_page_tables(size_t memoryMapSize,
	efi_memory_descriptor *memoryMap, size_t descriptorSize,
	uint32_t descriptorVersion)
{
	arch_mmu_allocate_page_tables();
	arch_mmu_allocate_vector_table();

	build_physical_memory_list(memoryMapSize, memoryMap,
		descriptorSize, descriptorVersion,
		PHYSICAL_MEMORY_LOW, PHYSICAL_MEMORY_HIGH);

	addr_t memoryMapAddr = (addr_t)memoryMap;
	for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
		efi_memory_descriptor* entry =
			(efi_memory_descriptor *)(memoryMapAddr + i * descriptorSize);
		if ((entry->Attribute & EFI_MEMORY_RUNTIME) != 0) {
			map_range_to_new_area(entry,
				ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C | ARM_MMU_L2_FLAG_HAIKU_KERNEL_RW);
		}
	}

	void* cookie = NULL;
	addr_t vaddr;
	phys_addr_t paddr;
	size_t size;
	while (mmu_next_region(&cookie, &vaddr, &paddr, &size)) {
		map_range(vaddr, paddr, size,
			ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C | ARM_MMU_L2_FLAG_HAIKU_KERNEL_RW);
	}

	map_range_to_new_area(gKernelArgs.arch_args.uart.regs,
		ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_HAIKU_KERNEL_RW | ARM_MMU_L2_FLAG_XN);

	sort_address_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

	addr_t virtPageDirectory;
	platform_bootloader_address_to_kernel_address((void*)sPageDirectory, &virtPageDirectory);

	gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;
	gKernelArgs.arch_args.vir_pgdir = (uint32)virtPageDirectory;
	gKernelArgs.arch_args.next_pagetable = (uint32)(sNextPageTable) - (uint32)sPageDirectory;
	gKernelArgs.arch_args.last_pagetable = (uint32)(sLastPageTable) - (uint32)sPageDirectory;

	TRACE("gKernelArgs.arch_args.phys_pgdir     = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.phys_pgdir);
	TRACE("gKernelArgs.arch_args.vir_pgdir      = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.vir_pgdir);
	TRACE("gKernelArgs.arch_args.next_pagetable = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.next_pagetable);
	TRACE("gKernelArgs.arch_args.last_pagetable = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.last_pagetable);

	if (kTracePageDirectory)
		dump_page_dir();

	return (uint32_t)sPageDirectory;
}


void
arch_mmu_init()
{
	// empty
}