/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include <OS.h>

#include <string.h>

#include <algorithm>

#include <kernel.h>
#include <boot/stage2.h>
#include <boot/platform.h>


static const size_t kChunkSize = 8 * B_PAGE_SIZE;

kernel_args gKernelArgs;

static void* sFirstFree;
static void* sLast;
static size_t sFree = kChunkSize;


template<typename RangeType>
static void
remove_range_index(RangeType* ranges, uint32& numRanges, uint32 index)
{
	if (index + 1 == numRanges) {
		// remove last range
		numRanges--;
		return;
	}

	memmove(&ranges[index], &ranges[index + 1],
		sizeof(RangeType) * (numRanges - 1 - index));
	numRanges--;
}


template<typename RangeType, typename AddressType, typename SizeType>
static status_t
insert_range(RangeType* ranges, uint32* _numRanges, uint32 maxRanges,
	AddressType start, SizeType size)
{
	uint32 numRanges = *_numRanges;

	start = ROUNDDOWN(start, B_PAGE_SIZE);
	size = ROUNDUP(size, B_PAGE_SIZE);
	AddressType end = start + size;

	for (uint32 i = 0; i < numRanges; i++) {
		AddressType rangeStart = ranges[i].start;
		AddressType rangeEnd = rangeStart + ranges[i].size;

		if (end < rangeStart || start > rangeEnd) {
			// ranges don't intersect or touch each other
			continue;
		}
		if (start >= rangeStart && end <= rangeEnd) {
			// range is already completely covered
			return B_OK;
		}

		if (start < rangeStart) {
			// prepend to the existing range
			ranges[i].start = start;
			ranges[i].size += rangeStart - start;
		}
		if (end > ranges[i].start + ranges[i].size) {
			// append to the existing range
			ranges[i].size = end - ranges[i].start;
		}

		// join ranges if possible

		for (uint32 j = 0; j < numRanges; j++) {
			if (i == j)
				continue;

			rangeStart = ranges[i].start;
			rangeEnd = rangeStart + ranges[i].size;
			AddressType joinStart = ranges[j].start;
			AddressType joinEnd = joinStart + ranges[j].size;

			if (rangeStart <= joinEnd && joinEnd <= rangeEnd) {
				// join range that used to be before the current one, or
				// the one that's now entirely included by the current one
				if (joinStart < rangeStart) {
					ranges[i].size += rangeStart - joinStart;
					ranges[i].start = joinStart;
				}

				remove_range_index(ranges, numRanges, j--);
			} else if (joinStart <= rangeEnd && joinEnd > rangeEnd) {
				// join range that used to be after the current one
				ranges[i].size += joinEnd - rangeEnd;

				remove_range_index(ranges, numRanges, j--);
			}
		}

		*_numRanges = numRanges;
		return B_OK;
	}

	// no range matched, we need to create a new one

	if (numRanges >= maxRanges)
		return B_ENTRY_NOT_FOUND;

	ranges[numRanges].start = (AddressType)start;
	ranges[numRanges].size = size;
	(*_numRanges)++;

	return B_OK;
}
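

// A short sketch of how insert_range() coalesces entries (the addresses are
// made up for illustration and assume 4 KB pages, i.e. B_PAGE_SIZE == 0x1000):
// inserting two touching page-aligned ranges ends up as a single array slot.
//
//	addr_range ranges[4];
//	uint32 numRanges = 0;
//	insert_range<addr_range, addr_t, size_t>(ranges, &numRanges, 4,
//		0x1000, 0x1000);
//		// ranges[0] = { 0x1000, 0x1000 }, numRanges == 1
//	insert_range<addr_range, addr_t, size_t>(ranges, &numRanges, 4,
//		0x2000, 0x2000);
//		// appended to the existing entry: ranges[0] = { 0x1000, 0x3000 },
//		// numRanges is still 1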


template<typename RangeType, typename AddressType, typename SizeType>
static status_t
remove_range(RangeType* ranges, uint32* _numRanges, uint32 maxRanges,
	AddressType start, SizeType size)
{
	uint32 numRanges = *_numRanges;

	AddressType end = ROUNDUP(start + size, B_PAGE_SIZE);
	start = ROUNDDOWN(start, B_PAGE_SIZE);

	for (uint32 i = 0; i < numRanges; i++) {
		AddressType rangeStart = ranges[i].start;
		AddressType rangeEnd = rangeStart + ranges[i].size;

		if (start <= rangeStart) {
			if (end <= rangeStart) {
				// no intersection
			} else if (end >= rangeEnd) {
				// remove the complete range
				remove_range_index(ranges, numRanges, i);
				i--;
			} else {
				// remove the head of the range
				ranges[i].start = end;
				ranges[i].size = rangeEnd - end;
			}
		} else if (end >= rangeEnd) {
			if (start < rangeEnd) {
				// remove the tail
				ranges[i].size = start - rangeStart;
			} // else: no intersection
		} else {
			// rangeStart < start < end < rangeEnd
			// The ugly case: We have to remove something from the middle of
			// the range. We keep the head of the range and insert its tail
			// as a new range.
			ranges[i].size = start - rangeStart;

			// write back the count first, so insert_range() sees the
			// already compacted array
			*_numRanges = numRanges;
			return insert_range<RangeType, AddressType, SizeType>(ranges,
				_numRanges, maxRanges, end, rangeEnd - end);
		}
	}

	*_numRanges = numRanges;
	return B_OK;
}


template<typename RangeType, typename AddressType, typename SizeType>
static bool
get_free_range(RangeType* ranges, uint32 numRanges, AddressType base,
	SizeType size, AddressType* _rangeBase)
{
	AddressType end = base + size - 1;
	if (end < base)
		return false;

	// Note: We don't assume that the ranges are sorted, so we can't do this
	// in a simple loop. Instead we restart the loop whenever our range
	// intersects with an existing one.

	for (uint32 i = 0; i < numRanges;) {
		AddressType rangeStart = ranges[i].start;
		AddressType rangeEnd = ranges[i].start + ranges[i].size - 1;

		if (base <= rangeEnd && rangeStart <= end) {
			base = rangeEnd + 1;
			end = rangeEnd + size;
			if (base == 0 || end < base)
				return false;

			i = 0;
			continue;
		}

		i++;
	}

	*_rangeBase = base;
	return true;
}


template<typename RangeType, typename AddressType, typename SizeType>
static bool
is_range_covered(RangeType* ranges, uint32 numRanges, AddressType base,
	SizeType size)
{
	// Note: We don't assume that the ranges are sorted, so we can't do this
	// in a simple loop. Instead we restart the loop whenever the start of the
	// given range intersects with an existing one.

	for (uint32 i = 0; i < numRanges;) {
		AddressType rangeStart = ranges[i].start;
		AddressType rangeSize = ranges[i].size;

		if (rangeStart <= base && rangeSize > base - rangeStart) {
			SizeType intersect = std::min(rangeStart + rangeSize - base, size);
			base += intersect;
			size -= intersect;
			if (size == 0)
				return true;

			i = 0;
			continue;
		}

		i++;
	}

	return false;
}
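

// A usage sketch for the two queries above (the addresses are made up): with
// a single existing range { 0x1000, 0x2000 }, a request for 0x1000 free bytes
// starting at 0x1000 is bumped past the conflict.
//
//	addr_t base;
//	if (get_free_range<addr_range, addr_t, size_t>(ranges, numRanges,
//			0x1000, 0x1000, &base)) {
//		// base == 0x3000 -- the first candidate at or after the requested
//		// base that doesn't intersect any of the given ranges. The caller
//		// still has to check that the result lies in usable address space.
//	}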


template<typename RangeType>
static void
sort_ranges(RangeType* ranges, uint32 count)
{
	// TODO: This is a pretty sucky bubble sort implementation!
	bool done;

	do {
		done = true;
		for (uint32 i = 1; i < count; i++) {
			if (ranges[i].start < ranges[i - 1].start) {
				done = false;
				RangeType tempRange;
				memcpy(&tempRange, &ranges[i], sizeof(RangeType));
				memcpy(&ranges[i], &ranges[i - 1], sizeof(RangeType));
				memcpy(&ranges[i - 1], &tempRange, sizeof(RangeType));
			}
		}
	} while (!done);
}


// #pragma mark -


static status_t
add_kernel_args_range(void* start, size_t size)
{
	return insert_address_range(gKernelArgs.kernel_args_range,
		&gKernelArgs.num_kernel_args_ranges, MAX_KERNEL_ARGS_RANGE,
		(addr_t)start, size);
}


// #pragma mark - addr_range utility functions


/*!	Inserts the specified (start, size) pair (aka range) in the
	addr_range array.
	It will extend existing ranges in order to keep the number of
	ranges in the array as small as possible.
	Returns B_OK on success, or B_ENTRY_NOT_FOUND if there was
	no free array entry left.
*/
extern "C" status_t
insert_address_range(addr_range* ranges, uint32* _numRanges, uint32 maxRanges,
	addr_t start, size_t size)
{
	return insert_range<addr_range, addr_t, size_t>(ranges, _numRanges,
		maxRanges, start, size);
}


extern "C" status_t
remove_address_range(addr_range* ranges, uint32* _numRanges, uint32 maxRanges,
	addr_t start, size_t size)
{
	return remove_range<addr_range, addr_t, size_t>(ranges, _numRanges,
		maxRanges, start, size);
}


bool
get_free_address_range(addr_range* ranges, uint32 numRanges, addr_t base,
	size_t size, addr_t* _rangeBase)
{
	return get_free_range<addr_range, addr_t, size_t>(ranges, numRanges, base,
		size, _rangeBase);
}


bool
is_address_range_covered(addr_range* ranges, uint32 numRanges, addr_t base,
	size_t size)
{
	return is_range_covered<addr_range, addr_t, size_t>(ranges, numRanges,
		base, size);
}


void
sort_address_ranges(addr_range* ranges, uint32 numRanges)
{
	sort_ranges(ranges, numRanges);
}


// #pragma mark - phys_addr_range utility functions


status_t
insert_physical_address_range(phys_addr_range* ranges, uint32* _numRanges,
	uint32 maxRanges, phys_addr_t start, phys_size_t size)
{
	return insert_range<phys_addr_range, phys_addr_t, phys_size_t>(ranges,
		_numRanges, maxRanges, start, size);
}


status_t
remove_physical_address_range(phys_addr_range* ranges, uint32* _numRanges,
	uint32 maxRanges, phys_addr_t start, phys_size_t size)
{
	return remove_range<phys_addr_range, phys_addr_t, phys_size_t>(ranges,
		_numRanges, maxRanges, start, size);
}


bool
get_free_physical_address_range(phys_addr_range* ranges, uint32 numRanges,
	phys_addr_t base, phys_size_t size, phys_addr_t* _rangeBase)
{
	return get_free_range<phys_addr_range, phys_addr_t, phys_size_t>(ranges,
		numRanges, base, size, _rangeBase);
}


bool
is_physical_address_range_covered(phys_addr_range* ranges, uint32 numRanges,
	phys_addr_t base, phys_size_t size)
{
	return is_range_covered<phys_addr_range, phys_addr_t, phys_size_t>(ranges,
		numRanges, base, size);
}


void
sort_physical_address_ranges(phys_addr_range* ranges, uint32 numRanges)
{
	sort_ranges(ranges, numRanges);
}
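

// How the wrappers above are typically combined (a sketch; someBaseAddress
// is a placeholder for whatever base the platform code starts searching at):
//
//	addr_t base;
//	sort_address_ranges(gKernelArgs.virtual_allocated_range,
//		gKernelArgs.num_virtual_allocated_ranges);
//	if (get_free_address_range(gKernelArgs.virtual_allocated_range,
//			gKernelArgs.num_virtual_allocated_ranges, someBaseAddress,
//			B_PAGE_SIZE, &base)) {
//		// base is the start of a page that doesn't overlap any range
//		// marked allocated so far
//	}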


// #pragma mark - kernel args range functions


status_t
insert_physical_memory_range(phys_addr_t start, phys_size_t size)
{
	return insert_physical_address_range(gKernelArgs.physical_memory_range,
		&gKernelArgs.num_physical_memory_ranges, MAX_PHYSICAL_MEMORY_RANGE,
		start, size);
}


status_t
insert_physical_allocated_range(phys_addr_t start, phys_size_t size)
{
	return insert_physical_address_range(gKernelArgs.physical_allocated_range,
		&gKernelArgs.num_physical_allocated_ranges,
		MAX_PHYSICAL_ALLOCATED_RANGE, start, size);
}


status_t
insert_virtual_allocated_range(addr_t start, size_t size)
{
	return insert_address_range(gKernelArgs.virtual_allocated_range,
		&gKernelArgs.num_virtual_allocated_ranges, MAX_VIRTUAL_ALLOCATED_RANGE,
		start, size);
}


#if B_HAIKU_PHYSICAL_BITS > 32

void
ignore_physical_memory_ranges_beyond_4gb()
{
	// sort
	sort_physical_address_ranges(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);

	static const phys_addr_t kLimit = (phys_addr_t)1 << 32;

	// remove everything past 4 GB
	for (uint32 i = gKernelArgs.num_physical_memory_ranges; i > 0; i--) {
		phys_addr_range& range = gKernelArgs.physical_memory_range[i - 1];
		if (range.start >= kLimit) {
			// the complete range is beyond the limit
			dprintf("ignore_physical_memory_ranges_beyond_4gb(): ignoring "
				"range: %#" B_PRIxPHYSADDR " - %#" B_PRIxPHYSADDR "\n",
				range.start, range.start + range.size);
			gKernelArgs.ignored_physical_memory += range.size;
			gKernelArgs.num_physical_memory_ranges = i - 1;
			continue;
		}

		if (kLimit - range.start < range.size) {
			// the range is partially beyond the limit -- truncate it
			dprintf("ignore_physical_memory_ranges_beyond_4gb(): ignoring "
				"range: %#" B_PRIxPHYSADDR " - %#" B_PRIxPHYSADDR "\n", kLimit,
				range.start + range.size);
			gKernelArgs.ignored_physical_memory
				+= range.size - (kLimit - range.start);
			range.size = kLimit - range.start;
		}

		break;
	}
}

#endif	// B_HAIKU_PHYSICAL_BITS > 32
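

// Worked example for the trimming above (the numbers are illustrative):
// after sorting, suppose the last remaining range is
// { start: 0xf0000000, size: 0x20000000 }. It starts below kLimit (4 GB) but
// extends 0x10000000 bytes past it, so those bytes are added to
// ignored_physical_memory and the range is cut back to
// { start: 0xf0000000, size: 0x10000000 }. All ranges starting at or above
// kLimit were already dropped by the preceding loop iterations.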


// #pragma mark - kernel_args allocations


/*!	This function can be used to allocate memory that is going
	to be passed over to the kernel. For example, the preloaded_image
	structures are allocated this way.
	The boot loader heap doesn't make it into the kernel!
*/
extern "C" void*
kernel_args_malloc(size_t size)
{
	//dprintf("kernel_args_malloc(): %ld bytes (%ld bytes left)\n", size, sFree);

	if (sFirstFree != NULL && size <= sFree) {
		// there is enough space in the current buffer
		void* address = sFirstFree;
		sFirstFree = (void*)((addr_t)sFirstFree + size);
		sLast = address;
		sFree -= size;

		return address;
	}

	if (size > kChunkSize / 2 && sFree < size) {
		// the block is so large, we'll allocate a new block for it
		void* block = NULL;
		if (platform_allocate_region(&block, size, B_READ_AREA | B_WRITE_AREA,
				false) != B_OK) {
			return NULL;
		}

		if (add_kernel_args_range(block, size) != B_OK)
			panic("kernel_args max range too low!\n");
		return block;
	}

	// just allocate a new block and "close" the old one
	void* block = NULL;
	if (platform_allocate_region(&block, kChunkSize,
			B_READ_AREA | B_WRITE_AREA, false) != B_OK) {
		return NULL;
	}

	sFirstFree = (void*)((addr_t)block + size);
	sLast = block;
	sFree = kChunkSize - size;
	if (add_kernel_args_range(block, kChunkSize) != B_OK)
		panic("kernel_args max range too low!\n");

	return block;
}


/*!	Convenience function that works like strdup(), but allocates the
	string copy on the kernel args heap.
*/
extern "C" char*
kernel_args_strdup(const char* string)
{
	if (string == NULL || string[0] == '\0')
		return NULL;

	size_t length = strlen(string) + 1;

	char* target = (char*)kernel_args_malloc(length);
	if (target == NULL)
		return NULL;

	memcpy(target, string, length);
	return target;
}


/*!	This function frees a block allocated via kernel_args_malloc().
	It's very simple; it can only free the last allocation. It's
	enough for its current usage in the boot loader, though.
*/
extern "C" void
kernel_args_free(void* block)
{
	if (sLast != block) {
		// sorry, we're dumb
		return;
	}

	// return the last allocation to the current chunk's free space
	sFree += (addr_t)sFirstFree - (addr_t)sLast;
	sFirstFree = block;
	sLast = NULL;
}
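

// Usage sketch for the kernel args heap (my_info is a hypothetical
// structure; in the loader, preloaded_image and friends are allocated in
// this fashion):
//
//	struct my_info { uint32 value; char* name; };
//
//	my_info* info = (my_info*)kernel_args_malloc(sizeof(my_info));
//	if (info != NULL) {
//		info->name = kernel_args_strdup("example");
//		if (info->name == NULL) {
//			// only the most recent allocation can be returned; freeing
//			// anything else is silently ignored
//			kernel_args_free(info);
//		}
//	}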