/*
 * Copyright 2016-2020 Haiku, Inc. All rights reserved.
 * Copyright 2014, Jessica Hamilton, jessica.l.hamilton@gmail.com.
 * Copyright 2014, Henry Harrington, henry.harrington@gmail.com.
 * Distributed under the terms of the MIT License.
 */


#include <algorithm>

#include <boot/addr_range.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <kernel/kernel.h>

#include "efi_platform.h"
#include "mmu.h"


//#define TRACE_MMU
#ifdef TRACE_MMU
#   define TRACE(x...) dprintf("efi/mmu: " x)
#else
#   define TRACE(x...) ;
#endif


struct memory_region {
	memory_region *next;
	addr_t vaddr;
	phys_addr_t paddr;
	size_t size;

	void dprint(const char *msg) {
		dprintf("%s memory_region v: %#" B_PRIxADDR " p: %#" B_PRIxPHYSADDR
			" size: %" B_PRIuSIZE "\n", msg, vaddr, paddr, size);
	}

	bool matches(phys_addr_t expected_paddr, size_t expected_size) {
		return paddr == expected_paddr && size == expected_size;
	}
};


#if defined(KERNEL_LOAD_BASE_64_BIT)
static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE_64_BIT + 32 * 1024 * 1024;
#elif defined(KERNEL_LOAD_BASE)
static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE + 32 * 1024 * 1024;
#else
#error Unable to find kernel load base on this architecture!
#endif


static memory_region *allocated_regions = NULL;


extern "C" phys_addr_t
mmu_allocate_page()
{
	TRACE("%s: called\n", __func__);

	efi_physical_addr addr;
	efi_status s = kBootServices->AllocatePages(AllocateAnyPages,
		EfiLoaderData, 1, &addr);

	if (s != EFI_SUCCESS)
		panic("Unable to allocate memory: %" B_PRIuSIZE, s);

	return addr;
}


extern "C" addr_t
get_next_virtual_address(size_t size)
{
	TRACE("%s: called. size: %" B_PRIuSIZE "\n", __func__, size);

	addr_t address = sNextVirtualAddress;
	sNextVirtualAddress += ROUNDUP(size, B_PAGE_SIZE);
	return address;
}
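
// Example (illustrative, not part of the loader): the bump allocator above
// hands out page-rounded, strictly increasing virtual addresses, e.g. with
// 4K pages:
//
//	addr_t a = get_next_virtual_address(100);	// returns base
//	addr_t b = get_next_virtual_address(8192);	// returns base + 0x1000
//	// b - a == B_PAGE_SIZE: the 100-byte request was rounded up to a page.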


extern "C" addr_t
get_current_virtual_address()
{
	TRACE("%s: called\n", __func__);
	return sNextVirtualAddress;
}


// Platform allocator.
// The bootloader assumes that bootloader address space == kernel address space.
// This is not true until just before the kernel is booted, so an ugly hack is
// used to cover the difference. platform_allocate_region allocates addresses
// in bootloader space, but can convert them to kernel space. The ELF loader
// accesses kernel memory via Map(), and much later in the boot process,
// addresses in the kernel argument struct are converted from bootloader
// addresses to kernel addresses.
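
// A minimal usage sketch (illustrative only): allocate a region in bootloader
// space, then convert the pointer once a kernel-space address is needed. Both
// calls are the functions defined in this file.
//
//	void *buffer = NULL;
//	if (platform_allocate_region(&buffer, B_PAGE_SIZE, 0, false) == B_OK) {
//		addr_t kernelAddress;
//		if (platform_bootloader_address_to_kernel_address(buffer,
//				&kernelAddress) == B_OK) {
//			// kernelAddress only becomes usable once the kernel's
//			// address space is live.
//		}
//	}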

extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 /* protection */,
	bool exactAddress)
{
	TRACE("%s: called\n", __func__);

	// We don't have any control over the page tables, give up right away if an
	// exactAddress is wanted.
	if (exactAddress)
		return B_NO_MEMORY;

	efi_physical_addr addr;
	size_t pages = ROUNDUP(size, B_PAGE_SIZE) / B_PAGE_SIZE;
	efi_status status = kBootServices->AllocatePages(AllocateAnyPages,
		EfiLoaderData, pages, &addr);
	if (status != EFI_SUCCESS)
		return B_NO_MEMORY;

	// Addresses above 512GB not supported.
	// Memory map regions above 512GB can be ignored, but if EFI returns pages
	// above that there's nothing that can be done to fix it.
	if (addr + size > (512ull * 1024 * 1024 * 1024))
		panic("Can't currently support more than 512GB of RAM!");

	memory_region *region = new(std::nothrow) memory_region {
		next: allocated_regions,
#ifdef __riscv
		// Disables allocation at a fixed virtual address
		vaddr: 0,
#else
		vaddr: *_address == NULL ? 0 : (addr_t)*_address,
#endif
		paddr: (phys_addr_t)addr,
		size: size
	};

	if (region == NULL) {
		kBootServices->FreePages(addr, pages);
		return B_NO_MEMORY;
	}

#ifdef TRACE_MMU
	region->dprint("Allocated");
#endif
	allocated_regions = region;
	*_address = (void *)region->paddr;
	return B_OK;
}


/*!
	Neither \a physicalAddress nor \a size need to be aligned, but the function
	will map all pages the range intersects with.
	If \a physicalAddress is not page-aligned, the returned virtual address
	will have the same "misalignment".
*/
extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	TRACE("%s: called\n", __func__);

	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;
	size += pageOffset;

	if (insert_physical_allocated_range(physicalAddress,
			ROUNDUP(size, B_PAGE_SIZE)) != B_OK)
		return B_NO_MEMORY;

	return physicalAddress + pageOffset;
}
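
// Example (hypothetical values, for illustration only): mapping a device
// register block whose physical base is not page aligned. The returned
// address keeps the same in-page offset, so it can be dereferenced directly:
//
//	addr_t regs = mmu_map_physical_memory(0xfed00040, 0x20, 0);
//	// regs carries the 0x40 in-page offset of the physical base address.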


static void
convert_physical_ranges()
{
	TRACE("%s: called\n", __func__);

	addr_range *range = gKernelArgs.physical_allocated_range;
	uint32 num_ranges = gKernelArgs.num_physical_allocated_ranges;

	for (uint32 i = 0; i < num_ranges; ++i) {
		// Addresses above 512GB not supported.
		// Memory map regions above 512GB can be ignored, but if EFI returns
		// pages above that there's nothing that can be done to fix it.
		if (range[i].start + range[i].size > (512ull * 1024 * 1024 * 1024))
			panic("Can't currently support more than 512GB of RAM!");

		memory_region *region = new(std::nothrow) memory_region {
			next: allocated_regions,
			vaddr: 0,
			paddr: (phys_addr_t)range[i].start,
			size: (size_t)range[i].size
		};

		if (region == NULL)
			panic("Couldn't add allocated region");

		allocated_regions = region;

		// Clear out the allocated range
		range[i].start = 0;
		range[i].size = 0;
		gKernelArgs.num_physical_allocated_ranges--;
	}
}


extern "C" status_t
platform_bootloader_address_to_kernel_address(void *address, addr_t *_result)
{
	TRACE("%s: called\n", __func__);

	// Convert any physical ranges prior to looking up the address.
	convert_physical_ranges();

	phys_addr_t addr = (phys_addr_t)address;

	for (memory_region *region = allocated_regions; region != NULL;
			region = region->next) {
		if (region->paddr <= addr && addr < region->paddr + region->size) {
			// Lazily allocate virtual memory.
			if (region->vaddr == 0)
				region->vaddr = get_next_virtual_address(region->size);
			*_result = region->vaddr + (addr - region->paddr);
			//dprintf("Converted bootloader address %p in region %#lx-%#lx to %#lx\n",
			//	address, region->paddr, region->paddr + region->size, *_result);
			return B_OK;
		}
	}

	return B_ERROR;
}


extern "C" status_t
platform_kernel_address_to_bootloader_address(addr_t address, void **_result)
{
	TRACE("%s: called\n", __func__);

	for (memory_region *region = allocated_regions; region != NULL;
			region = region->next) {
		if (region->vaddr != 0 && region->vaddr <= address
				&& address < region->vaddr + region->size) {
			*_result = (void *)(region->paddr + (address - region->vaddr));
			//dprintf("Converted kernel address %#lx in region %#lx-%#lx to %p\n",
			//	address, region->vaddr, region->vaddr + region->size, *_result);
			return B_OK;
		}
	}

	return B_ERROR;
}


extern "C" status_t
platform_free_region(void *address, size_t size)
{
	TRACE("%s: called to release region %p (%" B_PRIuSIZE ")\n", __func__,
		address, size);

	for (memory_region **ref = &allocated_regions; *ref != NULL;
			ref = &(*ref)->next) {
		if ((*ref)->matches((phys_addr_t)address, size)) {
			kBootServices->FreePages((efi_physical_addr)address,
				ROUNDUP(size, B_PAGE_SIZE) / B_PAGE_SIZE);
			memory_region *old = *ref;
			// Unlink: the pointer that referenced this region now points to
			// its successor.
			*ref = (*ref)->next;
#ifdef TRACE_MMU
			old->dprint("Freeing");
#endif
			delete old;
			return B_OK;
		}
	}
	panic("platform_free_region: unknown region to free");
	return B_ERROR; // never reached
}


bool
mmu_next_region(void** cookie, addr_t* vaddr, phys_addr_t* paddr, size_t* size)
{
	if (*cookie == NULL)
		*cookie = allocated_regions;
	else
		*cookie = ((memory_region*)*cookie)->next;

	memory_region* region = (memory_region*)*cookie;
	if (region == NULL)
		return false;

	if (region->vaddr == 0)
		region->vaddr = get_next_virtual_address(region->size);

	*vaddr = region->vaddr;
	*paddr = region->paddr;
	*size = region->size;
	return true;
}
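
// Typical iteration pattern (sketch): walk every allocated region via the
// opaque cookie, e.g. when entering the regions into the kernel's page
// tables. Passing a NULL cookie starts at the head of the list.
//
//	void* cookie = NULL;
//	addr_t vaddr;
//	phys_addr_t paddr;
//	size_t size;
//	while (mmu_next_region(&cookie, &vaddr, &paddr, &size)) {
//		// map vaddr -> paddr for size bytes
//	}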