xref: /haiku/src/system/boot/platform/efi/arch/arm/arch_mmu.cpp (revision 4a55cc230cf7566cadcbb23b1928eefff8aea9a2)
1 /*
2  * Copyright 2019-2022 Haiku, Inc. All rights reserved.
3  * Released under the terms of the MIT License.
4  */
5 
6 
7 #include <algorithm>
8 
9 #include <arm_mmu.h>
10 #include <kernel.h>
11 #include <arch_kernel.h>
12 #include <boot/platform.h>
13 #include <boot/stage2.h>
14 #include <efi/types.h>
15 #include <efi/boot-services.h>
16 
17 #include "efi_platform.h"
18 #include "generic_mmu.h"
19 #include "mmu.h"
20 
21 
22 //#define TRACE_MMU
23 #ifdef TRACE_MMU
24 #	define TRACE(x...) dprintf(x)
25 #else
26 #	define TRACE(x...) ;
27 #endif
28 
29 
30 //#define TRACE_MEMORY_MAP
31 //#define TRACE_PAGE_DIRECTORY
32 
33 // Ignore memory above 512GB
34 #define PHYSICAL_MEMORY_LOW		0x00000000
35 #define PHYSICAL_MEMORY_HIGH	0x8000000000ull
36 
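// The ARMv7 short-descriptor L1 translation table is 16 KB and must be
// 16 KB aligned, hence ALIGN_PAGEDIR. Each coarse L2 table maps 1 MB of
// virtual address space, so MAX_PAGE_TABLES bounds how much this loader
// can map on demand.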
37 #define ALIGN_PAGEDIR			(1024 * 16)
38 #define MAX_PAGE_TABLES			192
39 #define PAGE_TABLE_AREA_SIZE	(MAX_PAGE_TABLES * ARM_MMU_L2_COARSE_TABLE_SIZE)
40 
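// sPageDirectory is the L1 translation table; sNextPageTable and
// sLastPageTable bracket the pool of coarse L2 tables handed out by
// get_next_page_table().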
41 static uint32_t *sPageDirectory = NULL;
42 static uint32_t *sNextPageTable = NULL;
43 static uint32_t *sLastPageTable = NULL;
44 
45 
46 #ifdef TRACE_PAGE_DIRECTORY
47 static void
48 dump_page_dir(void)
49 {
50 	dprintf("=== Page Directory ===\n");
51 	for (uint32_t i = 0; i < ARM_MMU_L1_TABLE_ENTRY_COUNT; i++) {
52 		uint32 directoryEntry = sPageDirectory[i];
53 		if (directoryEntry != 0) {
54 			dprintf("virt 0x%08x --> page table 0x%08x type 0x%08x\n",
55 				i << 20, directoryEntry & ARM_PDE_ADDRESS_MASK,
56 				directoryEntry & ARM_PDE_TYPE_MASK);
57 			uint32_t *pageTable = (uint32_t *)(directoryEntry & ARM_PDE_ADDRESS_MASK);
58 			for (uint32_t j = 0; j < ARM_MMU_L2_COARSE_ENTRY_COUNT; j++) {
59 				uint32 tableEntry = pageTable[j];
60 				if (tableEntry != 0) {
61 					dprintf("virt 0x%08x     --> page 0x%08x type+flags 0x%08x\n",
62 						(i << 20) | (j << 12),
63 						tableEntry & ARM_PTE_ADDRESS_MASK,
64 						tableEntry & (~ARM_PTE_ADDRESS_MASK));
65 				}
66 			}
67 		}
68 	}
69 }
70 #endif
71 
72 
73 static uint32 *
74 get_next_page_table(void)
75 {
76 	uint32 *pageTable = sNextPageTable;
77 	sNextPageTable += ARM_MMU_L2_COARSE_ENTRY_COUNT;
78 	if (sNextPageTable >= sLastPageTable)
79 		panic("ran out of page tables\n");
80 	return pageTable;
81 }
82 
83 
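// Map a single page: bits 31-20 of the virtual address select the L1 entry
// (one per 1 MB section), bits 19-12 select the entry in the coarse L2
// table. An L2 table is allocated on demand the first time a section is
// touched.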
84 static void
85 map_page(addr_t virtAddr, phys_addr_t physAddr, uint32_t flags)
86 {
87 	physAddr &= ~(B_PAGE_SIZE - 1);
88 
89 	uint32 *pageTable = NULL;
90 	uint32 pageDirectoryIndex = VADDR_TO_PDENT(virtAddr);
91 	uint32 pageDirectoryEntry = sPageDirectory[pageDirectoryIndex];
92 
93 	if (pageDirectoryEntry == 0) {
94 		pageTable = get_next_page_table();
95 		sPageDirectory[pageDirectoryIndex] = (uint32_t)pageTable | ARM_MMU_L1_TYPE_COARSE;
96 	} else {
97 		pageTable = (uint32 *)(pageDirectoryEntry & ARM_PDE_ADDRESS_MASK);
98 	}
99 
100 	uint32 pageTableIndex = VADDR_TO_PTENT(virtAddr);
101 	pageTable[pageTableIndex] = physAddr | flags | ARM_MMU_L2_TYPE_SMALLNEW;
102 }
103 
104 
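// Map a physically contiguous range page by page. Mappings in the kernel
// address space are additionally recorded as allocated virtual ranges in
// the kernel args.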
105 static void
106 map_range(addr_t virtAddr, phys_addr_t physAddr, size_t size, uint32_t flags)
107 {
108 	//TRACE("map 0x%08" B_PRIxADDR " --> 0x%08" B_PRIxPHYSADDR
109 	//	", len=0x%08" B_PRIxSIZE ", flags=0x%08" B_PRIx32 "\n",
110 	//	virtAddr, physAddr, size, flags);
111 
112 	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
113 		map_page(virtAddr + offset, physAddr + offset, flags);
114 	}
115 
116 	if (virtAddr >= KERNEL_LOAD_BASE)
117 		ASSERT_ALWAYS(insert_virtual_allocated_range(virtAddr, size) >= B_OK);
118 }
119 
120 
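// Record a virtual range that the kernel must preserve when it takes over
// the address space (used below for EFI runtime regions and the UART).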
121 static void
122 insert_virtual_range_to_keep(uint64 start, uint64 size)
123 {
124 	status_t status = insert_address_range(
125 		gKernelArgs.arch_args.virtual_ranges_to_keep,
126 		&gKernelArgs.arch_args.num_virtual_ranges_to_keep,
127 		MAX_VIRTUAL_RANGES_TO_KEEP, start, size);
128 
129 	if (status == B_ENTRY_NOT_FOUND)
130 		panic("too many virtual ranges to keep");
131 	else if (status != B_OK)
132 		panic("failed to add virtual range to keep");
133 }
134 
135 
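// Assign a fresh virtual address to a physical range, map it, and register
// it as a range to keep. Returns the new virtual address of 'start',
// preserving its offset within the first page.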
136 static addr_t
137 map_range_to_new_area(addr_t start, size_t size, uint32_t flags)
138 {
139 	if (size == 0)
140 		return 0;
141 
142 	phys_addr_t physAddr = ROUNDDOWN(start, B_PAGE_SIZE);
143 	size_t alignedSize = ROUNDUP(size + (start - physAddr), B_PAGE_SIZE);
144 	addr_t virtAddr = get_next_virtual_address(alignedSize);
145 
146 	map_range(virtAddr, physAddr, alignedSize, flags);
147 	insert_virtual_range_to_keep(virtAddr, alignedSize);
148 
149 	return virtAddr + (start - physAddr);
150 }
151 
152 
153 static void
154 map_range_to_new_area(addr_range& range, uint32_t flags)
155 {
156 	range.start = map_range_to_new_area(range.start, range.size, flags);
157 }
158 
159 
160 static void
161 map_range_to_new_area(efi_memory_descriptor *entry, uint32_t flags)
162 {
163 	uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
164 	entry->VirtualStart = map_range_to_new_area(entry->PhysicalStart, size, flags);
165 }
166 
167 
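// Called once boot services have been exited: build the final list of
// allocated physical ranges and hand the firmware the virtual addresses
// chosen above so that EFI runtime services keep working from the kernel's
// address space.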
168 void
169 arch_mmu_post_efi_setup(size_t memoryMapSize,
170 	efi_memory_descriptor *memoryMap, size_t descriptorSize,
171 	uint32_t descriptorVersion)
172 {
173 	build_physical_allocated_list(memoryMapSize, memoryMap,
174 		descriptorSize, descriptorVersion);
175 
176 	// Switch EFI to virtual mode, using the kernel pmap.
177 	kRuntimeServices->SetVirtualAddressMap(memoryMapSize, descriptorSize,
178 		descriptorVersion, memoryMap);
179 
180 #ifdef TRACE_MEMORY_MAP
181 	dprintf("phys memory ranges:\n");
182 	for (uint32_t i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
183 		uint64 start = gKernelArgs.physical_memory_range[i].start;
184 		uint64 size = gKernelArgs.physical_memory_range[i].size;
185 		dprintf("    0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
186 			start, start + size, size);
187 	}
188 
189 	dprintf("allocated phys memory ranges:\n");
190 	for (uint32_t i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
191 		uint64 start = gKernelArgs.physical_allocated_range[i].start;
192 		uint64 size = gKernelArgs.physical_allocated_range[i].size;
193 		dprintf("    0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
194 			start, start + size, size);
195 	}
196 
197 	dprintf("allocated virt memory ranges:\n");
198 	for (uint32_t i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
199 		uint64 start = gKernelArgs.virtual_allocated_range[i].start;
200 		uint64 size = gKernelArgs.virtual_allocated_range[i].size;
201 		dprintf("    0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
202 			start, start + size, size);
203 	}
204 
205 	dprintf("virt memory ranges to keep:\n");
206 	for (uint32_t i = 0; i < gKernelArgs.arch_args.num_virtual_ranges_to_keep; i++) {
207 		uint32 start = gKernelArgs.arch_args.virtual_ranges_to_keep[i].start;
208 		uint32 size = gKernelArgs.arch_args.virtual_ranges_to_keep[i].size;
209 		dprintf("    0x%08" B_PRIx32 "-0x%08" B_PRIx32 ", length 0x%08" B_PRIx32 "\n",
210 			start, start + size, size);
211 	}
212 #endif
213 }
214 
215 
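// Allocate one contiguous block for the L1 table plus the L2 table pool;
// the extra ALIGN_PAGEDIR bytes allow rounding the directory up to the
// required 16 KB alignment.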
216 static void
217 arch_mmu_allocate_page_tables(void)
218 {
219 	if (platform_allocate_region((void **)&sPageDirectory,
220 		ARM_MMU_L1_TABLE_SIZE + ALIGN_PAGEDIR + PAGE_TABLE_AREA_SIZE, 0, false) != B_OK)
221 		panic("Failed to allocate page directory.");
222 	sPageDirectory = (uint32 *)ROUNDUP((uint32)sPageDirectory, ALIGN_PAGEDIR);
223 	memset(sPageDirectory, 0, ARM_MMU_L1_TABLE_SIZE);
224 
225 	sNextPageTable = (uint32*)((uint32)sPageDirectory + ARM_MMU_L1_TABLE_SIZE);
226 	sLastPageTable = (uint32*)((uint32)sNextPageTable + PAGE_TABLE_AREA_SIZE);
227 
228 	memset(sNextPageTable, 0, PAGE_TABLE_AREA_SIZE);
229 
230 	TRACE("sPageDirectory = 0x%08x\n", (uint32)sPageDirectory);
231 	TRACE("sNextPageTable = 0x%08x\n", (uint32)sNextPageTable);
232 	TRACE("sLastPageTable = 0x%08x\n", (uint32)sLastPageTable);
233 }
234 
235 
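// Build the page tables the kernel will start with and return the physical
// address of the new page directory.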
236 uint32_t
237 arch_mmu_generate_post_efi_page_tables(size_t memoryMapSize,
238 	efi_memory_descriptor *memoryMap, size_t descriptorSize,
239 	uint32_t descriptorVersion)
240 {
241 	arch_mmu_allocate_page_tables();
242 
243 	build_physical_memory_list(memoryMapSize, memoryMap,
244 		descriptorSize, descriptorVersion,
245 		PHYSICAL_MEMORY_LOW, PHYSICAL_MEMORY_HIGH);
246 
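	// Give every EFI_MEMORY_RUNTIME region a virtual mapping; these are the
	// regions the firmware still needs after SetVirtualAddressMap().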
247 	addr_t memoryMapAddr = (addr_t)memoryMap;
248 	for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
249 		efi_memory_descriptor* entry =
250 			(efi_memory_descriptor *)(memoryMapAddr + i * descriptorSize);
251 		if ((entry->Attribute & EFI_MEMORY_RUNTIME) != 0) {
252 			map_range_to_new_area(entry,
253 				ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C | ARM_MMU_L2_FLAG_AP_KRW);
254 		}
255 	}
256 
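	// Carry over all regions the loader has already mapped so they remain
	// accessible under the new page tables.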
257 	void* cookie = NULL;
258 	addr_t vaddr;
259 	phys_addr_t paddr;
260 	size_t size;
261 	while (mmu_next_region(&cookie, &vaddr, &paddr, &size)) {
262 		map_range(vaddr, paddr, size,
263 			ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C | ARM_MMU_L2_FLAG_AP_KRW);
264 	}
265 
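	// Map the UART registers bufferable, non-cacheable and execute-never for
	// the kernel's early debug output.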
266 	map_range_to_new_area(gKernelArgs.arch_args.uart.regs,
267 		ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_AP_KRW | ARM_MMU_L2_FLAG_XN);
268 
269 	sort_address_ranges(gKernelArgs.virtual_allocated_range,
270 		gKernelArgs.num_virtual_allocated_ranges);
271 
272 	addr_t virtPageDirectory;
273 	platform_bootloader_address_to_kernel_address((void*)sPageDirectory, &virtPageDirectory);
274 
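	// Hand the directory and the remaining page table pool to the kernel;
	// the pool bounds are stored as byte offsets from the directory.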
275 	gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;
276 	gKernelArgs.arch_args.vir_pgdir = (uint32)virtPageDirectory;
277 	gKernelArgs.arch_args.next_pagetable = (uint32)(sNextPageTable) - (uint32)sPageDirectory;
278 	gKernelArgs.arch_args.last_pagetable = (uint32)(sLastPageTable) - (uint32)sPageDirectory;
279 
280 	TRACE("gKernelArgs.arch_args.phys_pgdir     = 0x%08x\n",
281 		(uint32_t)gKernelArgs.arch_args.phys_pgdir);
282 	TRACE("gKernelArgs.arch_args.vir_pgdir      = 0x%08x\n",
283 		(uint32_t)gKernelArgs.arch_args.vir_pgdir);
284 	TRACE("gKernelArgs.arch_args.next_pagetable = 0x%08x\n",
285 		(uint32_t)gKernelArgs.arch_args.next_pagetable);
286 	TRACE("gKernelArgs.arch_args.last_pagetable = 0x%08x\n",
287 		(uint32_t)gKernelArgs.arch_args.last_pagetable);
288 
289 #ifdef TRACE_PAGE_DIRECTORY
290 	dump_page_dir();
291 #endif
292 
293 	return (uint32_t)sPageDirectory;
294 }
295 
296 
297 void
298 arch_mmu_init()
299 {
300 	// empty
301 }
302