/*
 * Copyright 2019-2023 Haiku, Inc. All rights reserved.
 * Released under the terms of the MIT License.
 */


#include <algorithm>

#include <arm_mmu.h>
#include <kernel.h>
#include <arch_kernel.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <efi/types.h>
#include <efi/boot-services.h>

#include "efi_platform.h"
#include "generic_mmu.h"
#include "mmu.h"


//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


static constexpr bool kTraceMemoryMap = false;
static constexpr bool kTracePageDirectory = false;


// Ignore memory above 512GB
#define PHYSICAL_MEMORY_LOW		0x00000000
#define PHYSICAL_MEMORY_HIGH	0x8000000000ull

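// The ARMv7-A short-descriptor L1 translation table must be 16KB-aligned,
// hence ALIGN_PAGEDIR. Each L2 coarse table covers 1MB of virtual address
// space (256 entries of 4KB pages), so MAX_PAGE_TABLES bounds the loader's
// fine-grained mappings to 192MB.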
#define ALIGN_PAGEDIR			(1024 * 16)
#define MAX_PAGE_TABLES			192
#define PAGE_TABLE_AREA_SIZE	(MAX_PAGE_TABLES * ARM_MMU_L2_COARSE_TABLE_SIZE)

static uint32_t *sPageDirectory = NULL;
static uint32_t *sNextPageTable = NULL;
static uint32_t *sLastPageTable = NULL;


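// Debug aid: walk the L1 page directory and print every populated entry,
// followed by the small-page mappings in its L2 coarse table.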
static void
dump_page_dir(void)
{
	dprintf("=== Page Directory ===\n");
	for (uint32_t i = 0; i < ARM_MMU_L1_TABLE_ENTRY_COUNT; i++) {
		uint32 directoryEntry = sPageDirectory[i];
		if (directoryEntry != 0) {
			dprintf("virt 0x%08x --> page table 0x%08x type 0x%08x\n",
				i << 20, directoryEntry & ARM_PDE_ADDRESS_MASK,
				directoryEntry & ARM_PDE_TYPE_MASK);
			uint32_t *pageTable = (uint32_t *)(directoryEntry & ARM_PDE_ADDRESS_MASK);
			for (uint32_t j = 0; j < ARM_MMU_L2_COARSE_ENTRY_COUNT; j++) {
				uint32 tableEntry = pageTable[j];
				if (tableEntry != 0) {
					dprintf("virt 0x%08x     --> page 0x%08x type+flags 0x%08x\n",
						(i << 20) | (j << 12),
						tableEntry & ARM_PTE_ADDRESS_MASK,
						tableEntry & (~ARM_PTE_ADDRESS_MASK));
				}
			}
		}
	}
}


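// Hand out the next unused L2 coarse table from the preallocated pool;
// panics once the pool of MAX_PAGE_TABLES tables is exhausted.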
static uint32 *
get_next_page_table(void)
{
	uint32 *pageTable = sNextPageTable;
	sNextPageTable += ARM_MMU_L2_COARSE_ENTRY_COUNT;
	if (sNextPageTable >= sLastPageTable)
		panic("ran out of page tables\n");
	return pageTable;
}


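// Map a single 4KB page. With the usual ARM short-descriptor split (L1 index
// = bits 31-20, L2 index = bits 19-12), e.g. 0x80123456 uses L1 entry 0x801
// and L2 entry 0x23. The L2 coarse table is allocated on first use.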
static void
map_page(addr_t virtAddr, phys_addr_t physAddr, uint32_t flags)
{
	physAddr &= ~(B_PAGE_SIZE - 1);

	uint32 *pageTable = NULL;
	uint32 pageDirectoryIndex = VADDR_TO_PDENT(virtAddr);
	uint32 pageDirectoryEntry = sPageDirectory[pageDirectoryIndex];

	if (pageDirectoryEntry == 0) {
		pageTable = get_next_page_table();
		sPageDirectory[pageDirectoryIndex] = (uint32_t)pageTable | ARM_MMU_L1_TYPE_COARSE;
	} else {
		pageTable = (uint32 *)(pageDirectoryEntry & ARM_PDE_ADDRESS_MASK);
	}

	uint32 pageTableIndex = VADDR_TO_PTENT(virtAddr);
	pageTable[pageTableIndex] = physAddr | flags | ARM_MMU_L2_TYPE_SMALLNEW;
}


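// Map a physically contiguous range page by page. Ranges at or above
// KERNEL_LOAD_BASE are also recorded as allocated kernel virtual address space.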
static void
map_range(addr_t virtAddr, phys_addr_t physAddr, size_t size, uint32_t flags)
{
	//TRACE("map 0x%08" B_PRIxADDR " --> 0x%08" B_PRIxPHYSADDR
	//	", len=0x%08" B_PRIxSIZE ", flags=0x%08" PRIx32 "\n",
	//	virtAddr, physAddr, size, flags);

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(virtAddr + offset, physAddr + offset, flags);
	}

	if (virtAddr >= KERNEL_LOAD_BASE)
		ASSERT_ALWAYS(insert_virtual_allocated_range(virtAddr, size) >= B_OK);
}


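// Record a virtual range that the kernel must preserve when it takes over the
// address space (used below for the relocated EFI runtime ranges and the UART).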
static void
insert_virtual_range_to_keep(uint64 start, uint64 size)
{
	status_t status = insert_address_range(
		gKernelArgs.arch_args.virtual_ranges_to_keep,
		&gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, start, size);

	if (status == B_ENTRY_NOT_FOUND)
		panic("too many virtual ranges to keep");
	else if (status != B_OK)
		panic("failed to add virtual range to keep");
}


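// Map a physical range at a freshly allocated kernel virtual address, record
// it as a range to keep, and return the new virtual address of 'start'. The
// overloads below update an addr_range or an EFI memory descriptor in place.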
static addr_t
map_range_to_new_area(addr_t start, size_t size, uint32_t flags)
{
	if (size == 0)
		return 0;

	phys_addr_t physAddr = ROUNDDOWN(start, B_PAGE_SIZE);
	size_t alignedSize = ROUNDUP(size + (start - physAddr), B_PAGE_SIZE);
	addr_t virtAddr = get_next_virtual_address(alignedSize);

	map_range(virtAddr, physAddr, alignedSize, flags);
	insert_virtual_range_to_keep(virtAddr, alignedSize);

	return virtAddr + (start - physAddr);
}


static void
map_range_to_new_area(addr_range& range, uint32_t flags)
{
	range.start = map_range_to_new_area(range.start, range.size, flags);
}


static void
map_range_to_new_area(efi_memory_descriptor *entry, uint32_t flags)
{
	uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
	entry->VirtualStart = map_range_to_new_area(entry->PhysicalStart, size, flags);
}


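// Runs once the final EFI memory map is available: record which physical
// ranges are still allocated, then switch the EFI runtime services over to
// the virtual addresses assigned above (SetVirtualAddressMap).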
void
arch_mmu_post_efi_setup(size_t memoryMapSize,
	efi_memory_descriptor *memoryMap, size_t descriptorSize,
	uint32_t descriptorVersion)
{
	build_physical_allocated_list(memoryMapSize, memoryMap,
		descriptorSize, descriptorVersion);

	// Switch EFI to virtual mode, using the kernel pmap.
	kRuntimeServices->SetVirtualAddressMap(memoryMapSize, descriptorSize,
		descriptorVersion, memoryMap);

	if (kTraceMemoryMap) {
		dprintf("phys memory ranges:\n");
		for (uint32_t i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
			uint64 start = gKernelArgs.physical_memory_range[i].start;
			uint64 size = gKernelArgs.physical_memory_range[i].size;
			dprintf("    0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
				start, start + size, size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (uint32_t i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
			uint64 start = gKernelArgs.physical_allocated_range[i].start;
			uint64 size = gKernelArgs.physical_allocated_range[i].size;
			dprintf("    0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
				start, start + size, size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (uint32_t i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
			uint64 start = gKernelArgs.virtual_allocated_range[i].start;
			uint64 size = gKernelArgs.virtual_allocated_range[i].size;
			dprintf("    0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
				start, start + size, size);
		}

		dprintf("virt memory ranges to keep:\n");
		for (uint32_t i = 0; i < gKernelArgs.arch_args.num_virtual_ranges_to_keep; i++) {
			uint32 start = gKernelArgs.arch_args.virtual_ranges_to_keep[i].start;
			uint32 size = gKernelArgs.arch_args.virtual_ranges_to_keep[i].size;
			dprintf("    0x%08" B_PRIx32 "-0x%08" B_PRIx32 ", length 0x%08" B_PRIx32 "\n",
				start, start + size, size);
		}
	}
}


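// Allocate one contiguous region holding the (16KB-aligned) L1 page directory
// followed by the pool of L2 coarse tables, and zero both.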
static void
arch_mmu_allocate_page_tables(void)
{
	if (platform_allocate_region((void **)&sPageDirectory,
		ARM_MMU_L1_TABLE_SIZE + ALIGN_PAGEDIR + PAGE_TABLE_AREA_SIZE, 0, false) != B_OK)
		panic("Failed to allocate page directory.");
	sPageDirectory = (uint32 *)ROUNDUP((uint32)sPageDirectory, ALIGN_PAGEDIR);
	memset(sPageDirectory, 0, ARM_MMU_L1_TABLE_SIZE);

	sNextPageTable = (uint32*)((uint32)sPageDirectory + ARM_MMU_L1_TABLE_SIZE);
	sLastPageTable = (uint32*)((uint32)sNextPageTable + PAGE_TABLE_AREA_SIZE);

	memset(sNextPageTable, 0, PAGE_TABLE_AREA_SIZE);

	TRACE("sPageDirectory = 0x%08x\n", (uint32)sPageDirectory);
	TRACE("sNextPageTable = 0x%08x\n", (uint32)sNextPageTable);
	TRACE("sLastPageTable = 0x%08x\n", (uint32)sLastPageTable);
}


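// Build the loader's post-EFI page tables: remap the EFI runtime regions and
// the UART to kernel virtual addresses, map every region the loader has
// allocated, and publish the page directory layout in gKernelArgs. Returns
// the physical address of the L1 page directory.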
uint32_t
arch_mmu_generate_post_efi_page_tables(size_t memoryMapSize,
	efi_memory_descriptor *memoryMap, size_t descriptorSize,
	uint32_t descriptorVersion)
{
	arch_mmu_allocate_page_tables();

	build_physical_memory_list(memoryMapSize, memoryMap,
		descriptorSize, descriptorVersion,
		PHYSICAL_MEMORY_LOW, PHYSICAL_MEMORY_HIGH);

	// Relocate anything EFI needs at runtime to kernel virtual addresses.
	addr_t memoryMapAddr = (addr_t)memoryMap;
	for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
		efi_memory_descriptor* entry =
			(efi_memory_descriptor *)(memoryMapAddr + i * descriptorSize);
		if ((entry->Attribute & EFI_MEMORY_RUNTIME) != 0) {
			map_range_to_new_area(entry,
				ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C | ARM_MMU_L2_FLAG_AP_KRW);
		}
	}

	// Map every region the loader has allocated so far.
	void* cookie = NULL;
	addr_t vaddr;
	phys_addr_t paddr;
	size_t size;
	while (mmu_next_region(&cookie, &vaddr, &paddr, &size)) {
		map_range(vaddr, paddr, size,
			ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C | ARM_MMU_L2_FLAG_AP_KRW);
	}

	// Map the UART registers as device memory (non-cacheable, non-executable).
	map_range_to_new_area(gKernelArgs.arch_args.uart.regs,
		ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_AP_KRW | ARM_MMU_L2_FLAG_XN);

	sort_address_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

	// Tell the kernel where the page directory and the page table pool live.
	addr_t virtPageDirectory;
	platform_bootloader_address_to_kernel_address((void*)sPageDirectory, &virtPageDirectory);

	gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;
	gKernelArgs.arch_args.vir_pgdir = (uint32)virtPageDirectory;
	gKernelArgs.arch_args.next_pagetable = (uint32)(sNextPageTable) - (uint32)sPageDirectory;
	gKernelArgs.arch_args.last_pagetable = (uint32)(sLastPageTable) - (uint32)sPageDirectory;

	TRACE("gKernelArgs.arch_args.phys_pgdir     = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.phys_pgdir);
	TRACE("gKernelArgs.arch_args.vir_pgdir      = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.vir_pgdir);
	TRACE("gKernelArgs.arch_args.next_pagetable = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.next_pagetable);
	TRACE("gKernelArgs.arch_args.last_pagetable = 0x%08x\n",
		(uint32_t)gKernelArgs.arch_args.last_pagetable);

	if (kTracePageDirectory)
		dump_page_dir();

	return (uint32_t)sPageDirectory;
}


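// Early MMU hook for the EFI loader; nothing to do on ARM, the page tables
// are built later in arch_mmu_generate_post_efi_page_tables().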
void
arch_mmu_init()
{
	// empty
}