/*
 * Copyright 2016 Haiku, Inc. All rights reserved.
 * Copyright 2014, Jessica Hamilton, jessica.l.hamilton@gmail.com.
 * Copyright 2014, Henry Harrington, henry.harrington@gmail.com.
 * Distributed under the terms of the MIT License.
 */


#include <algorithm>

#include <boot/addr_range.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <kernel/kernel.h>

#include "efi_platform.h"
#include "mmu.h"


struct allocated_memory_region {
	allocated_memory_region *next;
	uint64_t vaddr;
	uint64_t paddr;
	size_t size;
	bool released;
};


#if defined(KERNEL_LOAD_BASE_64_BIT)
static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE_64_BIT + 32 * 1024 * 1024;
#elif defined(KERNEL_LOAD_BASE)
static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE + 32 * 1024 * 1024;
#else
#error Unable to find kernel load base on this architecture!
#endif


static allocated_memory_region *allocated_memory_regions = NULL;


// Allocate a single page of physical memory from EFI boot services.
extern "C" uint64_t
mmu_allocate_page()
{
	efi_physical_addr addr;
	efi_status s = kBootServices->AllocatePages(AllocateAnyPages, EfiLoaderData, 1, &addr);
	if (s != EFI_SUCCESS)
		panic("Unable to allocate memory: %li", s);

	return addr;
}


// Reserve the next chunk of kernel virtual address space, rounded up to whole
// pages, and return its start address.
extern "C" addr_t
get_next_virtual_address(size_t size)
{
	addr_t address = sNextVirtualAddress;
	sNextVirtualAddress += ROUNDUP(size, B_PAGE_SIZE);
	return address;
}


extern "C" addr_t
get_current_virtual_address()
{
	return sNextVirtualAddress;
}


// Platform allocator.
// The bootloader assumes that bootloader address space == kernel address space.
// This is not true until just before the kernel is booted, so an ugly hack is
// used to cover the difference. platform_allocate_region allocates addresses
// in bootloader space, but can convert them to kernel space. The ELF loader
// accesses kernel memory via Mao(), and much later in the boot process,
// addresses in the kernel argument struct are converted from bootloader
// addresses to kernel addresses.
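//
// As a concrete illustration (hypothetical numbers, not taken from a real
// boot): if AllocatePages() returns physical address 0x3f200000 for a region
// and that region is later lazily assigned the virtual address
// 0xffffffff82000000, then the bootloader pointer 0x3f201234 inside it
// converts to the kernel address 0xffffffff82001234; only the base changes,
// the offset within the region is preserved.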

extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 /* protection */, bool exactAddress)
{
	// We don't have any control over the page tables, give up right away if an
	// exactAddress is wanted.
	if (exactAddress)
		return B_NO_MEMORY;

	efi_physical_addr addr;
	size_t aligned_size = ROUNDUP(size, B_PAGE_SIZE);
	allocated_memory_region *region = new(std::nothrow) allocated_memory_region;

	if (region == NULL)
		return B_NO_MEMORY;

	efi_status status = kBootServices->AllocatePages(AllocateAnyPages,
		EfiLoaderData, aligned_size / B_PAGE_SIZE, &addr);
	if (status != EFI_SUCCESS) {
		delete region;
		return B_NO_MEMORY;
	}

	// Addresses above 512GB not supported.
	// Memory map regions above 512GB can be ignored, but if EFI returns pages
	// above that there's nothing that can be done to fix it.
	if (addr + size > (512ull * 1024 * 1024 * 1024))
		panic("Can't currently support more than 512GB of RAM!");

	region->next = allocated_memory_regions;
	allocated_memory_regions = region;
	region->vaddr = 0;
	region->paddr = addr;
	region->size = size;
	region->released = false;

	if (*_address != NULL) {
		region->vaddr = (uint64_t)*_address;
	}

	//dprintf("Allocated region %#lx (requested %p) %#lx %lu\n", region->vaddr, *_address, region->paddr, region->size);

	*_address = (void *)region->paddr;

	return B_OK;
}


/*!
	Neither \a physicalAddress nor \a size need to be aligned, but the function
	will map all pages the range intersects with.
	If physicalAddress is not page-aligned, the returned virtual address will
	have the same "misalignment".
*/
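// For example (hypothetical numbers, assuming 4KB pages): mapping
// physicalAddress 0x80001234 with size 0x100 causes the whole page
// 0x80001000-0x80001fff to be inserted into the physical allocated ranges,
// and the function returns 0x80001234, keeping the 0x234 offset into the page.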
extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;
	size += pageOffset;

	if (insert_physical_allocated_range(physicalAddress, ROUNDUP(size, B_PAGE_SIZE)) != B_OK)
		return B_NO_MEMORY;

	return physicalAddress + pageOffset;
}


extern "C" void
mmu_free(void *virtualAddress, size_t size)
{
	addr_t physicalAddress = (addr_t)virtualAddress;
	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;
	size += pageOffset;

	size_t aligned_size = ROUNDUP(size, B_PAGE_SIZE);

	for (allocated_memory_region *region = allocated_memory_regions; region; region = region->next) {
		if (region->paddr == physicalAddress && region->size == aligned_size) {
			region->released = true;
			return;
		}
	}
}


// Find the tracked allocation that starts at the given physical address and
// has the given size.
static allocated_memory_region *
get_region(void *address, size_t size)
{
	for (allocated_memory_region *region = allocated_memory_regions; region; region = region->next) {
		if (region->paddr == (uint64_t)address && region->size == size) {
			return region;
		}
	}
	return NULL;
}


// Move the physical ranges recorded in gKernelArgs.physical_allocated_range
// into the allocated_memory_region list, so they can be assigned kernel
// virtual addresses like every other allocation.
static void
convert_physical_ranges()
{
	addr_range *range = gKernelArgs.physical_allocated_range;
	uint32 num_ranges = gKernelArgs.num_physical_allocated_ranges;

	for (uint32 i = 0; i < num_ranges; ++i) {
		allocated_memory_region *region = new(std::nothrow) allocated_memory_region;

		if (!region)
			panic("Couldn't add allocated region");

		// Addresses above 512GB not supported.
		// Memory map regions above 512GB can be ignored, but if EFI returns pages
		// above that there's nothing that can be done to fix it.
		if (range[i].start + range[i].size > (512ull * 1024 * 1024 * 1024))
			panic("Can't currently support more than 512GB of RAM!");

		region->next = allocated_memory_regions;
		allocated_memory_regions = region;
		region->vaddr = 0;
		region->paddr = range[i].start;
		region->size = range[i].size;
		region->released = false;

		// Clear out the allocated range
		range[i].start = 0;
		range[i].size = 0;
		gKernelArgs.num_physical_allocated_ranges--;
	}
}


extern "C" status_t
platform_bootloader_address_to_kernel_address(void *address, uint64_t *_result)
{
	// Convert any physical ranges prior to looking up address
	convert_physical_ranges();

	uint64_t addr = (uint64_t)address;

	for (allocated_memory_region *region = allocated_memory_regions; region; region = region->next) {
		if (region->paddr <= addr && addr < region->paddr + region->size) {
			// Lazily allocate virtual memory.
			if (region->vaddr == 0) {
				region->vaddr = get_next_virtual_address(region->size);
			}
			*_result = region->vaddr + (addr - region->paddr);
			//dprintf("Converted bootloader address %p in region %#lx-%#lx to %#lx\n",
			//	address, region->paddr, region->paddr + region->size, *_result);
			return B_OK;
		}
	}

	return B_ERROR;
}


extern "C" status_t
platform_kernel_address_to_bootloader_address(uint64_t address, void **_result)
{
	for (allocated_memory_region *region = allocated_memory_regions; region; region = region->next) {
		if (region->vaddr != 0 && region->vaddr <= address && address < region->vaddr + region->size) {
			*_result = (void *)(region->paddr + (address - region->vaddr));
			//dprintf("Converted kernel address %#lx in region %#lx-%#lx to %p\n",
			//	address, region->vaddr, region->vaddr + region->size, *_result);
			return B_OK;
		}
	}

	return B_ERROR;
}


extern "C" status_t
platform_free_region(void *address, size_t size)
{
	//dprintf("Release region %p %lu\n", address, size);
	allocated_memory_region *region = get_region(address, size);
	if (!region)
		panic("Unknown region??");

	kBootServices->FreePages((efi_physical_addr)address, ROUNDUP(size, B_PAGE_SIZE) / B_PAGE_SIZE);

	return B_OK;
}