xref: /haiku/src/system/boot/platform/efi/mmu.cpp (revision 7bdeef54a24d3417300f251af891df962b638b9b)
/*
 * Copyright 2016 Haiku, Inc. All rights reserved.
 * Copyright 2014, Jessica Hamilton, jessica.l.hamilton@gmail.com.
 * Copyright 2014, Henry Harrington, henry.harrington@gmail.com.
 * Distributed under the terms of the MIT License.
 */


#include <algorithm>

#include <boot/platform.h>
#include <boot/stage2.h>
#include <kernel/arch/x86/arch_kernel.h>
#include <kernel/kernel.h>

#include "efi_platform.h"
#include "mmu.h"


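// Each allocation made through the platform allocator is tracked in a simple
// singly linked list, so that bootloader (physical) addresses can later be
// translated to kernel virtual addresses and vice versa.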
struct allocated_memory_region {
	allocated_memory_region *next;
	uint64_t vaddr;
	uint64_t paddr;
	size_t size;
	bool released;
};


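// Virtual addresses are handed out lazily, starting 32MB above the 64-bit
// kernel load base so they cannot collide with the kernel image itself.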
static uint64_t next_virtual_address = KERNEL_LOAD_BASE_64_BIT + 32 * 1024 * 1024;
static allocated_memory_region *allocated_memory_regions = NULL;


static uint64_t
mmu_allocate_page()
{
	EFI_PHYSICAL_ADDRESS addr;
	EFI_STATUS s = kBootServices->AllocatePages(AllocateAnyPages,
		EfiLoaderData, 1, &addr);
	if (s != EFI_SUCCESS)
		panic("Unable to allocate memory: %li", s);

	return addr;
}


uint64_t
mmu_generate_post_efi_page_tables(UINTN memory_map_size,
	EFI_MEMORY_DESCRIPTOR *memory_map, UINTN descriptor_size,
	UINTN descriptor_version)
{
	// Generate page tables, matching bios_ia32/long.cpp.
	uint64_t *pml4;
	uint64_t *pdpt;
	uint64_t *pageDir;
	uint64_t *pageTable;

	// Allocate the top level PML4.
	pml4 = NULL;
	if (platform_allocate_region((void**)&pml4, B_PAGE_SIZE, 0, false) != B_OK)
		panic("Failed to allocate PML4.");
	gKernelArgs.arch_args.phys_pgdir = (uint32_t)(addr_t)pml4;
	memset(pml4, 0, B_PAGE_SIZE);
	platform_bootloader_address_to_kernel_address(pml4,
		&gKernelArgs.arch_args.vir_pgdir);

	// Store the virtual memory usage information.
	gKernelArgs.virtual_allocated_range[0].start = KERNEL_LOAD_BASE_64_BIT;
	gKernelArgs.virtual_allocated_range[0].size
		= next_virtual_address - KERNEL_LOAD_BASE_64_BIT;
	gKernelArgs.num_virtual_allocated_ranges = 1;
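	// virtual_end is rounded up to a 2MB boundary (0x200000) so that it ends
	// on a large-page / page directory entry border.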
	gKernelArgs.arch_args.virtual_end = ROUNDUP(KERNEL_LOAD_BASE_64_BIT
		+ gKernelArgs.virtual_allocated_range[0].size, 0x200000);

	// Find the highest physical memory address. We map all physical memory
	// into the kernel address space, so we want to make sure we map everything
	// we have available.
	uint64 maxAddress = 0;
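	// Note: descriptor_size is provided by the firmware and may be larger
	// than sizeof(EFI_MEMORY_DESCRIPTOR), so the map must be stepped through
	// using it.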
	for (UINTN i = 0; i < memory_map_size / descriptor_size; ++i) {
		EFI_MEMORY_DESCRIPTOR *entry
			= (EFI_MEMORY_DESCRIPTOR *)((addr_t)memory_map
				+ i * descriptor_size);
		maxAddress = std::max(maxAddress,
			entry->PhysicalStart + entry->NumberOfPages * 4096);
	}

	// We want to map at least 4GB; there may be things other than usable RAM
	// in the first 4GB of the physical address space.
	maxAddress = std::max(maxAddress, (uint64)0x100000000ll);
	maxAddress = ROUNDUP(maxAddress, 0x40000000);

	// Currently only one PDPT (512GB) is used. This will need to change if
	// someone wants to run Haiku on a box with more than 512GB of RAM, but
	// that's probably not going to happen any time soon.
	if (maxAddress / 0x40000000 > 512)
		panic("Can't currently support more than 512GB of RAM!");

	// Create page tables for the physical map area. Also map this PDPT
	// temporarily at the bottom of the address space so that we are identity
	// mapped.

	pdpt = (uint64*)mmu_allocate_page();
	memset(pdpt, 0, B_PAGE_SIZE);
	pml4[510] = (addr_t)pdpt | kTableMappingFlags;
	pml4[0] = (addr_t)pdpt | kTableMappingFlags;
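	// PML4 slot 510 places the physical map area at virtual address
	// 0xFFFFFF0000000000 (the sign extension of 510 << 39); slot 0 provides
	// the temporary identity mapping mentioned above.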
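	// Each PDPT entry spans 1GB (0x40000000) and each page directory entry
	// spans 2MB (0x200000), so one page directory of 512 large-page entries
	// fills exactly one PDPT slot.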
	for (uint64 i = 0; i < maxAddress; i += 0x40000000) {
		pageDir = (uint64*)mmu_allocate_page();
		memset(pageDir, 0, B_PAGE_SIZE);
		pdpt[i / 0x40000000] = (addr_t)pageDir | kTableMappingFlags;

		for (uint64 j = 0; j < 0x40000000; j += 0x200000) {
			pageDir[j / 0x200000] = (i + j) | kLargePageMappingFlags;
		}
	}

	// Allocate tables for the kernel mappings.

	pdpt = (uint64*)mmu_allocate_page();
	memset(pdpt, 0, B_PAGE_SIZE);
	pml4[511] = (addr_t)pdpt | kTableMappingFlags;

	pageDir = (uint64*)mmu_allocate_page();
	memset(pageDir, 0, B_PAGE_SIZE);
	pdpt[510] = (addr_t)pageDir | kTableMappingFlags;
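	// PML4 slot 511 plus PDPT slot 510 place this page directory at virtual
	// address 0xFFFFFFFF80000000, the 2GB at the top of the address space
	// where the kernel gets loaded.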

	// We can now allocate page tables and duplicate the mappings across from
	// the 32-bit address space to them.
	pageTable = NULL; // shush, compiler.
	for (uint32 i = 0; i < gKernelArgs.virtual_allocated_range[0].size
			/ B_PAGE_SIZE; i++) {
		if ((i % 512) == 0) {
			pageTable = (uint64*)mmu_allocate_page();
			memset(pageTable, 0, B_PAGE_SIZE);
			pageDir[i / 512] = (addr_t)pageTable | kTableMappingFlags;
		}

		// Get the physical address to map.
		void *phys;
		if (platform_kernel_address_to_bootloader_address(
				KERNEL_LOAD_BASE_64_BIT + (i * B_PAGE_SIZE), &phys) != B_OK)
			continue;

		pageTable[i % 512] = (addr_t)phys | kPageMappingFlags;
	}

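	// The physical address of the PML4 is returned so that the caller can
	// install it as the new CR3 value once EFI boot services are exited.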
	return (uint64)pml4;
}


// Called after EFI boot services exit.
// Currently assumes that the memory map is sane: sorted, with no overlapping
// regions.
void
mmu_post_efi_setup(UINTN memory_map_size, EFI_MEMORY_DESCRIPTOR *memory_map,
	UINTN descriptor_size, UINTN descriptor_version)
{
	// Add physical memory to the kernel args and update virtual addresses
	// for EFI regions.
	addr_t addr = (addr_t)memory_map;
	gKernelArgs.num_physical_memory_ranges = 0;
	for (UINTN i = 0; i < memory_map_size / descriptor_size; ++i) {
		EFI_MEMORY_DESCRIPTOR *entry
			= (EFI_MEMORY_DESCRIPTOR *)(addr + i * descriptor_size);
		switch (entry->Type) {
		case EfiLoaderCode:
		case EfiLoaderData:
		case EfiBootServicesCode:
		case EfiBootServicesData:
		case EfiConventionalMemory: {
			// Usable memory.
			// Ignore memory below 1MB and above 512GB.
			uint64_t base = entry->PhysicalStart;
			uint64_t end = entry->PhysicalStart + entry->NumberOfPages * 4096;
			if (base < 0x100000)
				base = 0x100000;
			if (end > (512ull * 1024 * 1024 * 1024))
				end = 512ull * 1024 * 1024 * 1024;
			if (base >= end)
				break;
			uint64_t size = end - base;

			insert_physical_memory_range(base, size);
			// LoaderData memory is bootloader allocated memory, possibly
			// containing the kernel or loaded drivers.
			if (entry->Type == EfiLoaderData)
				insert_physical_allocated_range(base, size);
			break;
		}
		case EfiACPIReclaimMemory:
			// ACPI reclaim -- physical memory we could actually use later.
			gKernelArgs.ignored_physical_memory += entry->NumberOfPages * 4096;
			break;
		case EfiRuntimeServicesCode:
		case EfiRuntimeServicesData:
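			// Relocate runtime services into the physical map area that
			// mmu_generate_post_efi_page_tables() established via PML4
			// slot 510.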
			entry->VirtualStart = entry->PhysicalStart + 0xFFFFFF0000000000ull;
			break;
		}
	}

	// Sort the address ranges.
	sort_address_ranges(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
	sort_address_ranges(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges);
	sort_address_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

	// Switch EFI to virtual mode, using the kernel pmap.
	// Something involving ConvertPointer might need to be done after this?
	// http://wiki.phoenix.com/wiki/index.php/EFI_RUNTIME_SERVICES#SetVirtualAddressMap.28.29
	kRuntimeServices->SetVirtualAddressMap(memory_map_size, descriptor_size,
		descriptor_version, memory_map);
}


// Platform allocator.
// The bootloader assumes that bootloader address space == kernel address
// space. This is not true until just before the kernel is booted, so an ugly
// hack is used to cover the difference: platform_allocate_region allocates
// addresses in bootloader space, but can convert them to kernel space. The
// ELF loader accesses kernel memory through these bootloader addresses, and
// much later in the boot process, addresses in the kernel argument struct are
// converted from bootloader addresses to kernel addresses.
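//
// A minimal usage sketch (hypothetical caller, for illustration only):
//
//	void *buffer = NULL;
//	if (platform_allocate_region(&buffer, B_PAGE_SIZE, 0, false) == B_OK) {
//		uint64_t kernelAddress;
//		platform_bootloader_address_to_kernel_address(buffer,
//			&kernelAddress);
//		// buffer is usable immediately; kernelAddress only becomes
//		// valid once the kernel's page tables are switched in.
//	}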

extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 /* protection */,
	bool exactAddress)
{
	// We don't have any control over the page tables, so give up right away
	// if an exact address is wanted.
	if (exactAddress)
		return B_NO_MEMORY;

	EFI_PHYSICAL_ADDRESS addr;
	size_t aligned_size = ROUNDUP(size, B_PAGE_SIZE);
	allocated_memory_region *region = new(std::nothrow) allocated_memory_region;

	if (region == NULL)
		return B_NO_MEMORY;

	EFI_STATUS status = kBootServices->AllocatePages(AllocateAnyPages,
		EfiLoaderData, aligned_size / B_PAGE_SIZE, &addr);
	if (status != EFI_SUCCESS) {
		delete region;
		return B_NO_MEMORY;
	}

	// Addresses above 512GB are not supported.
	// Memory map regions above 512GB can be ignored, but if EFI returns pages
	// above that there's nothing that can be done to fix it.
	if (addr + size > (512ull * 1024 * 1024 * 1024))
		panic("Can't currently support more than 512GB of RAM!");

	region->next = allocated_memory_regions;
	allocated_memory_regions = region;
	region->vaddr = 0;
	region->paddr = addr;
	region->size = size;
	region->released = false;

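	// If the caller passed in an address, remember it as the region's kernel
	// virtual address; otherwise one is assigned lazily on the first
	// bootloader-to-kernel address conversion.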
	if (*_address != NULL) {
		region->vaddr = (uint64_t)*_address;
	}

	//dprintf("Allocated region %#lx (requested %p) %#lx %lu\n",
	//	region->vaddr, *_address, region->paddr, region->size);

	*_address = (void *)region->paddr;

	return B_OK;
}


/*!
	Neither \a physicalAddress nor \a size need to be aligned, but the
	function will map all pages the range intersects with.
	If \a physicalAddress is not page-aligned, the returned virtual address
	will have the same "misalignment".
*/
extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;
	size += pageOffset;
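	// Example: physicalAddress 0x1234 with size 0x100 becomes a one-page
	// mapping based at 0x1000, and 0x1234 is handed back to the caller.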

	size_t aligned_size = ROUNDUP(size, B_PAGE_SIZE);
	allocated_memory_region *region = new(std::nothrow) allocated_memory_region;

	if (region == NULL)
		return B_NO_MEMORY;

	// Addresses above 512GB are not supported.
	// Memory map regions above 512GB can be ignored, but if EFI returns pages
	// above that there's nothing that can be done to fix it.
	if (physicalAddress + size > (512ull * 1024 * 1024 * 1024))
		panic("Can't currently support more than 512GB of RAM!");

	region->next = allocated_memory_regions;
	allocated_memory_regions = region;
	region->vaddr = 0;
	region->paddr = physicalAddress;
	region->size = aligned_size;
	region->released = false;

	return physicalAddress + pageOffset;
}


extern "C" void
mmu_free(void *virtualAddress, size_t size)
{
	addr_t physicalAddress = (addr_t)virtualAddress;
	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;
	size += pageOffset;

	size_t aligned_size = ROUNDUP(size, B_PAGE_SIZE);

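	// Regions are only marked as released, never unlinked, so later address
	// conversions can still translate pointers that fall inside them.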
	for (allocated_memory_region *region = allocated_memory_regions; region;
			region = region->next) {
		if (region->paddr == physicalAddress && region->size == aligned_size) {
			region->released = true;
			return;
		}
	}
}


static allocated_memory_region *
get_region(void *address, size_t size)
{
	for (allocated_memory_region *region = allocated_memory_regions; region;
			region = region->next) {
		if (region->paddr == (uint64_t)address && region->size == size) {
			return region;
		}
	}
	return NULL;
}


extern "C" status_t
platform_bootloader_address_to_kernel_address(void *address, uint64_t *_result)
{
	uint64_t addr = (uint64_t)address;

	for (allocated_memory_region *region = allocated_memory_regions; region;
			region = region->next) {
		if (region->paddr <= addr && addr < region->paddr + region->size) {
			// Lazily allocate virtual memory.
			if (region->vaddr == 0) {
				region->vaddr = next_virtual_address;
				next_virtual_address += ROUNDUP(region->size, B_PAGE_SIZE);
			}
			*_result = region->vaddr + (addr - region->paddr);
			//dprintf("Converted bootloader address %p in region %#lx-%#lx to %#lx\n",
			//	address, region->paddr, region->paddr + region->size, *_result);
			return B_OK;
		}
	}

	return B_ERROR;
}


extern "C" status_t
platform_kernel_address_to_bootloader_address(uint64_t address, void **_result)
{
	for (allocated_memory_region *region = allocated_memory_regions; region;
			region = region->next) {
		if (region->vaddr != 0 && region->vaddr <= address
				&& address < region->vaddr + region->size) {
			*_result = (void *)(region->paddr + (address - region->vaddr));
			//dprintf("Converted kernel address %#lx in region %#lx-%#lx to %p\n",
			//	address, region->vaddr, region->vaddr + region->size, *_result);
			return B_OK;
		}
	}

	return B_ERROR;
}


extern "C" status_t
platform_free_region(void *address, size_t size)
{
	//dprintf("Release region %p %lu\n", address, size);
	allocated_memory_region *region = get_region(address, size);
	if (region == NULL)
		panic("Unknown region??");

	kBootServices->FreePages((EFI_PHYSICAL_ADDRESS)address,
		ROUNDUP(size, B_PAGE_SIZE) / B_PAGE_SIZE);

	return B_OK;
}