xref: /haiku/src/system/boot/platform/efi/mmu.cpp (revision 21258e2674226d6aa732321b6f8494841895af5f)
1 /*
2  * Copyright 2016 Haiku, Inc. All rights reserved.
3  * Copyright 2014, Jessica Hamilton, jessica.l.hamilton@gmail.com.
4  * Copyright 2014, Henry Harrington, henry.harrington@gmail.com.
5  * Distributed under the terms of the MIT License.
6  */
7 
8 
9 #include <algorithm>
10 
11 #include <boot/addr_range.h>
12 #include <boot/platform.h>
13 #include <boot/stage2.h>
14 #include <kernel/kernel.h>
15 
16 #include "efi_platform.h"
17 #include "mmu.h"
18 
19 
20 //#define TRACE_MMU
21 #ifdef TRACE_MMU
22 #   define TRACE(x...) dprintf("efi/mmu: " x)
23 #else
24 #   define TRACE(x...) ;
25 #endif
26 
27 
// Bookkeeping node for one chunk of physical memory obtained from EFI (or
// adopted from the kernel args' physical_allocated_range array). Nodes are
// kept in a singly linked list headed by allocated_memory_regions.
struct allocated_memory_region {
	allocated_memory_region *next;	// next node in the linked list
	uint64_t vaddr;	// kernel virtual address; 0 until lazily assigned
	uint64_t paddr;	// physical (== bootloader) base address
	size_t size;	// region size in bytes
	bool released;	// set by mmu_free(); pages not yet returned to EFI
};
35 
36 
// Bump allocator for kernel virtual addresses, starting 32MB past the
// kernel load base to leave room for the kernel image itself.
#if defined(KERNEL_LOAD_BASE_64_BIT)
static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE_64_BIT + 32 * 1024 * 1024;
#elif defined(KERNEL_LOAD_BASE)
static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE + 32 * 1024 * 1024;
#else
#error Unable to find kernel load base on this architecture!
#endif


// Head of the singly linked list of all tracked allocations.
static allocated_memory_region *allocated_memory_regions = NULL;
47 
48 
49 extern "C" uint64_t
50 mmu_allocate_page()
51 {
52 	TRACE("%s: called\n", __func__);
53 
54 	efi_physical_addr addr;
55 	efi_status s = kBootServices->AllocatePages(AllocateAnyPages,
56 		EfiLoaderData, 1, &addr);
57 
58 	if (s != EFI_SUCCESS)
59 		panic("Unabled to allocate memory: %li", s);
60 
61 	return addr;
62 }
63 
64 
65 extern "C" addr_t
66 get_next_virtual_address(size_t size)
67 {
68 	TRACE("%s: called. size: %" B_PRIuSIZE "\n", __func__, size);
69 
70 	addr_t address = sNextVirtualAddress;
71 	sNextVirtualAddress += ROUNDUP(size, B_PAGE_SIZE);
72 	return address;
73 }
74 
75 
76 extern "C" addr_t
77 get_current_virtual_address()
78 {
79 	TRACE("%s: called\n", __func__);
80 
81 	return sNextVirtualAddress;
82 }
83 
84 
85 // Platform allocator.
86 // The bootloader assumes that bootloader address space == kernel address space.
87 // This is not true until just before the kernel is booted, so an ugly hack is
88 // used to cover the difference. platform_allocate_region allocates addresses
89 // in bootloader space, but can convert them to kernel space. The ELF loader
90 // accesses kernel memory via Map(), and much later in the boot process,
91 // addresses in the kernel argument struct are converted from bootloader
92 // addresses to kernel addresses.
93 
/*!	Allocates \a size bytes of EFI pages and tracks the allocation so
	bootloader-to-kernel address conversion can find it later.
	\param _address in: optional kernel virtual address hint; out: the
		physical (bootloader) address of the allocation.
	\param size requested size in bytes; rounded up to whole pages for the
		actual EFI allocation.
	\param exactAddress must be false; the page tables are EFI's, so exact
		placement cannot be honored.
	\return B_OK on success, B_NO_MEMORY otherwise.
*/
extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 /* protection */,
	bool exactAddress)
{
	TRACE("%s: called\n", __func__);

	// We don't have any control over the page tables, give up right away if an
	// exactAddress is wanted.
	if (exactAddress)
		return B_NO_MEMORY;

	efi_physical_addr addr;
	size_t aligned_size = ROUNDUP(size, B_PAGE_SIZE);
	allocated_memory_region *region = new(std::nothrow) allocated_memory_region;

	if (region == NULL)
		return B_NO_MEMORY;

	efi_status status = kBootServices->AllocatePages(AllocateAnyPages,
		EfiLoaderData, aligned_size / B_PAGE_SIZE, &addr);
	if (status != EFI_SUCCESS) {
		delete region;
		return B_NO_MEMORY;
	}

	// Addresses above 512GB not supported.
	// Memory map regions above 512GB can be ignored, but if EFI returns pages
	// above that there's nothing that can be done to fix it.
	if (addr + size > (512ull * 1024 * 1024 * 1024))
		panic("Can't currently support more than 512GB of RAM!");

	// Link the new region at the head of the tracking list.
	region->next = allocated_memory_regions;
	allocated_memory_regions = region;
	region->vaddr = 0;
	region->paddr = addr;
	// NOTE(review): the unrounded size is stored here, but mmu_free()
	// matches regions against a page-rounded size -- an allocation with an
	// unaligned size can therefore never be matched there; verify intended.
	region->size = size;
	region->released = false;

	// A caller-supplied address is taken as the region's kernel virtual
	// address, skipping the lazy assignment in
	// platform_bootloader_address_to_kernel_address().
	if (*_address != NULL) {
		region->vaddr = (uint64_t)*_address;
	}

	//dprintf("Allocated region %#lx (requested %p) %#lx %lu\n", region->vaddr, *_address, region->paddr, region->size);

	*_address = (void *)region->paddr;

	return B_OK;
}
142 
143 
144 /*!
145 	Neither \a virtualAddress nor \a size need to be aligned, but the function
146 	will map all pages the range intersects with.
147 	If physicalAddress is not page-aligned, the returned virtual address will
148 	have the same "misalignment".
149 */
150 extern "C" addr_t
151 mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
152 {
153 	TRACE("%s: called\n", __func__);
154 
155 	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);
156 
157 	physicalAddress -= pageOffset;
158 	size += pageOffset;
159 
160 	if (insert_physical_allocated_range(physicalAddress,
161 		ROUNDUP(size, B_PAGE_SIZE)) != B_OK) {
162 		return B_NO_MEMORY;
163 	}
164 
165 	return physicalAddress + pageOffset;
166 }
167 
168 
169 extern "C" void
170 mmu_free(void *virtualAddress, size_t size)
171 {
172 	TRACE("%s: called\n", __func__);
173 
174 	addr_t physicalAddress = (addr_t)virtualAddress;
175 	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);
176 
177 	physicalAddress -= pageOffset;
178 	size += pageOffset;
179 
180 	size_t aligned_size = ROUNDUP(size, B_PAGE_SIZE);
181 
182 	for (allocated_memory_region *region = allocated_memory_regions; region;
183 		region = region->next) {
184 		if (region->paddr == physicalAddress && region->size == aligned_size) {
185 			region->released = true;
186 			return;
187 		}
188 	}
189 }
190 
191 
192 static allocated_memory_region *
193 get_region(void *address, size_t size)
194 {
195 	TRACE("%s: called\n", __func__);
196 
197 	for (allocated_memory_region *region = allocated_memory_regions; region;
198 		region = region->next) {
199 		if (region->paddr == (uint64_t)address && region->size == size) {
200 			return region;
201 		}
202 	}
203 	return 0;
204 }
205 
206 
207 static void
208 convert_physical_ranges()
209 {
210 	TRACE("%s: called\n", __func__);
211 
212 	addr_range *range = gKernelArgs.physical_allocated_range;
213 	uint32 num_ranges = gKernelArgs.num_physical_allocated_ranges;
214 
215 	for (uint32 i = 0; i < num_ranges; ++i) {
216 		allocated_memory_region *region
217 			= new(std::nothrow) allocated_memory_region;
218 
219 		if (!region)
220 			panic("Couldn't add allocated region");
221 
222 		// Addresses above 512GB not supported.
223 		// Memory map regions above 512GB can be ignored, but if EFI returns
224 		// pages above that there's nothing that can be done to fix it.
225 		if (range[i].start + range[i].size > (512ull * 1024 * 1024 * 1024))
226 			panic("Can't currently support more than 512GB of RAM!");
227 
228 		region->next = allocated_memory_regions;
229 		allocated_memory_regions = region;
230 		region->vaddr = 0;
231 		region->paddr = range[i].start;
232 		region->size = range[i].size;
233 		region->released = false;
234 
235 		// Clear out the allocated range
236 		range[i].start = 0;
237 		range[i].size = 0;
238 		gKernelArgs.num_physical_allocated_ranges--;
239 	}
240 }
241 
242 
243 extern "C" status_t
244 platform_bootloader_address_to_kernel_address(void *address,
245 	uint64_t *_result)
246 {
247 	TRACE("%s: called\n", __func__);
248 
249 	// Convert any physical ranges prior to looking up address
250 	convert_physical_ranges();
251 
252 	uint64_t addr = (uint64_t)address;
253 
254 	for (allocated_memory_region *region = allocated_memory_regions; region;
255 		region = region->next) {
256 		if (region->paddr <= addr && addr < region->paddr + region->size) {
257 			// Lazily allocate virtual memory.
258 			if (region->vaddr == 0) {
259 				region->vaddr = get_next_virtual_address(region->size);
260 			}
261 			*_result = region->vaddr + (addr - region->paddr);
262 			//dprintf("Converted bootloader address %p in region %#lx-%#lx to %#lx\n",
263 			//	address, region->paddr, region->paddr + region->size, *_result);
264 			return B_OK;
265 		}
266 	}
267 
268 	return B_ERROR;
269 }
270 
271 
272 extern "C" status_t
273 platform_kernel_address_to_bootloader_address(uint64_t address, void **_result)
274 {
275 	TRACE("%s: called\n", __func__);
276 
277 	for (allocated_memory_region *region = allocated_memory_regions; region; region = region->next) {
278 		if (region->vaddr != 0 && region->vaddr <= address && address < region->vaddr + region->size) {
279 			*_result = (void *)(region->paddr + (address - region->vaddr));
280 			//dprintf("Converted kernel address %#lx in region %#lx-%#lx to %p\n",
281 			//	address, region->vaddr, region->vaddr + region->size, *_result);
282 			return B_OK;
283 		}
284 	}
285 
286 	return B_ERROR;
287 }
288 
289 
/*!	Returns a region previously handed out by platform_allocate_region()
	to EFI. \a address and \a size must exactly match the values recorded
	at allocation time, since get_region() matches on both; an unknown
	region is a fatal error.
*/
extern "C" status_t
platform_free_region(void *address, size_t size)
{
	TRACE("%s: called to release region %p (%" B_PRIuSIZE ")\n", __func__,
		address, size);

	allocated_memory_region *region = get_region(address, size);
	if (!region)
		panic("Unknown region??");

	// NOTE(review): the pages go back to EFI here, but the region stays in
	// the tracking list without being marked released -- verify that later
	// address conversions cannot hit this stale entry. FreePages' return
	// value is also ignored (best effort).
	kBootServices->FreePages((efi_physical_addr)address, ROUNDUP(size, B_PAGE_SIZE) / B_PAGE_SIZE);

	return B_OK;
}
304