/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2011, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Manuel J. Petit. All rights reserved.
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "images.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include <syscalls.h>
#include <image_defs.h>
#include <vm_defs.h>

#include "add_ons.h"
#include "elf_tls.h"
#include "runtime_loader_private.h"

#include <util/kernel_cpp.h>


// keep in sync with app ldscript
#ifdef __x86_64__
	// runtime_loader potentially occupies 0x200000 - 0x600000 due to large
	// page segment alignment.
#	define RLD_PROGRAM_BASE	0x600000
#	define MAX_PAGE_SIZE	0x200000
#elif __arm__
#	define RLD_PROGRAM_BASE	0x200000
#	define MAX_PAGE_SIZE	0x10000
#else
#	define RLD_PROGRAM_BASE	0x200000
#	define MAX_PAGE_SIZE	B_PAGE_SIZE
#endif


bool gInvalidImageIDs;

static image_queue_t sLoadedImages = {0, 0};
static image_queue_t sDisposableImages = {0, 0};
static uint32 sLoadedImageCount = 0;


//!	Remaps the image ID of \a image after fork.
static status_t
update_image_id(image_t* image)
{
	int32 cookie = 0;
	image_info info;
	while (_kern_get_next_image_info(B_CURRENT_TEAM, &cookie, &info,
			sizeof(image_info)) == B_OK) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if (image->regions[i].vmstart == (addr_t)info.text) {
				image->id = info.id;
				return B_OK;
			}
		}
	}

	FATAL("Could not update image ID %" B_PRId32 " after fork()!\n",
		image->id);
	return B_ENTRY_NOT_FOUND;
}


static void
enqueue_image(image_queue_t* queue, image_t* image)
{
	image->next = NULL;

	image->prev = queue->tail;
	if (queue->tail)
		queue->tail->next = image;

	queue->tail = image;
	if (!queue->head)
		queue->head = image;
}


static void
dequeue_image(image_queue_t* queue, image_t* image)
{
	if (image->next)
		image->next->prev = image->prev;
	else
		queue->tail = image->prev;

	if (image->prev)
		image->prev->next = image->next;
	else
		queue->head = image->next;

	image->prev = NULL;
	image->next = NULL;
}


static image_t*
find_image_in_queue(image_queue_t* queue, const char* name, bool isPath,
	uint32 typeMask)
{
	for (image_t* image = queue->head; image; image = image->next) {
		const char* imageName = isPath ? image->path : image->name;
		int length = isPath
			? sizeof(image->path) : sizeof(image->name);

		if (!strncmp(imageName, name, length)
			&& (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
			return image;
		}
	}

	return NULL;
}


static void
update_image_flags_recursively(image_t* image, uint32 flagsToSet,
	uint32 flagsToClear)
{
	image_t* queue[sLoadedImageCount];
	uint32 count = 0;
	uint32 index = 0;
	queue[count++] = image;
	image->flags |= RFLAG_VISITED;

	while (index < count) {
		// pop next image
		image = queue[index++];

		// push dependencies
		for (uint32 i = 0; i < image->num_needed; i++) {
			image_t* needed = image->needed[i];
			if ((needed->flags & RFLAG_VISITED) == 0) {
				queue[count++] = needed;
				needed->flags |= RFLAG_VISITED;
			}
		}
	}

	// update flags
	for (uint32 i = 0; i < count; i++) {
		queue[i]->flags = (queue[i]->flags | flagsToSet)
			& ~(flagsToClear | RFLAG_VISITED);
	}
}


static uint32
topological_sort(image_t* image, uint32 slot, image_t** initList,
	uint32 sortFlag)
{
	if (image->flags & sortFlag)
		return slot;

	image->flags |= sortFlag; /* make sure we don't visit this one */
	for (uint32 i = 0; i < image->num_needed; i++)
		slot = topological_sort(image->needed[i], slot, initList, sortFlag);

	initList[slot] = image;
	return slot + 1;
}


/*!	Finds the load address and address specifier of the given image region.
*/
static void
get_image_region_load_address(image_t* image, uint32 index, long lastDelta,
	bool fixed, addr_t& loadAddress, uint32& addressSpecifier)
{
	if (!fixed) {
		// relocatable image... we can afford to place wherever
		if (index == 0) {
			// but only the first segment gets a free ride
			loadAddress = RLD_PROGRAM_BASE;
			addressSpecifier = B_RANDOMIZED_BASE_ADDRESS;
		} else {
			loadAddress = image->regions[index].vmstart + lastDelta;
			addressSpecifier = B_EXACT_ADDRESS;
		}
	} else {
		// not relocatable, put it where it asks or die trying
		loadAddress = image->regions[index].vmstart;
		addressSpecifier = B_EXACT_ADDRESS;
	}
}


// #pragma mark -


image_t*
create_image(const char* name, const char* path, int regionCount)
{
	size_t allocSize = sizeof(image_t)
		+ (regionCount - 1) * sizeof(elf_region_t);

	image_t* image = (image_t*)malloc(allocSize);
	if (image == NULL) {
		FATAL("no memory for image %s\n", path);
		return NULL;
	}

	memset(image, 0, allocSize);

	strlcpy(image->path, path, sizeof(image->path));

	// Make the last component of the supplied name the image name.
	// If present, DT_SONAME will replace this name.
	const char* lastSlash = strrchr(name, '/');
	if (lastSlash != NULL)
		strlcpy(image->name, lastSlash + 1, sizeof(image->name));
	else
		strlcpy(image->name, name, sizeof(image->name));

	image->ref_count = 1;
	image->num_regions = regionCount;

	return image;
}


void
delete_image_struct(image_t* image)
{
#ifdef DEBUG
	size_t size = sizeof(image_t)
		+ (image->num_regions - 1) * sizeof(elf_region_t);
	memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
#endif
	free(image->needed);
	free(image->versions);

	while (RuntimeLoaderSymbolPatcher* patcher
			= image->defined_symbol_patchers) {
		image->defined_symbol_patchers = patcher->next;
		delete patcher;
	}
	while (RuntimeLoaderSymbolPatcher* patcher
			= image->undefined_symbol_patchers) {
		image->undefined_symbol_patchers = patcher->next;
		delete patcher;
	}

#ifdef DEBUG
	// overwrite images to make sure they aren't accidentally reused anywhere
	memset(image, 0xa5, size);
#endif
	free(image);
}


void
delete_image(image_t* image)
{
	if (image == NULL)
		return;

	_kern_unregister_image(image->id);
		// registered in load_container()

	delete_image_struct(image);
}


void
put_image(image_t* image)
{
	// If all references to the image are gone, add it to the disposable list
	// and remove all dependencies

	if (atomic_add(&image->ref_count, -1) == 1) {
		dequeue_image(&sLoadedImages, image);
		enqueue_image(&sDisposableImages, image);
		sLoadedImageCount--;

		// If the image wasn't fully loaded, its NEEDED list may be incomplete.
		if (image->needed == NULL)
			image->num_needed = 0;
		for (size_t i = 0; i < image->num_needed; i++) {
			if (image->needed[i] == NULL)
				continue;
			put_image(image->needed[i]);
		}
	}
}


status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_RANDOMIZED_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0,
			fixed, loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + MAX_PAGE_SIZE * 2)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%" B_PRIu32 "%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, fixed, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the
		// start of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments r/w first -- write access might be needed for
			// relocations. Once those are done, we change the protection of
			// read-only segments back to read-only. We map those segments
			// over-committing, since quite likely only a relatively small
			// number of pages needs to be touched and we want to avoid a lot
			// of memory being committed for them temporarily, just because
			// we have to write map them.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP,
				false, fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void*)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void*)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
		if (i == 0) {
			TLSBlockTemplates::Get().SetBaseAddress(image->dso_tls_id,
				loadAddress);
		}
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}


void
unmap_image(image_t* image)
{
	for (uint32 i = 0; i < image->num_regions; i++) {
		_kern_delete_area(image->regions[i].id);

		image->regions[i].id = -1;
	}
}


/*!	This function will change the protection of all read-only segments to
	really be read-only (and executable).
	The areas have to be read/write first, so that they can be relocated.
	If at least one image is in compatibility mode then we allow execution of
	all areas.
*/
void
remap_images()
{
	for (image_t* image = sLoadedImages.head; image != NULL;
			image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			// we only need to do this once, so we remember those we've
			// already mapped
			if ((image->regions[i].flags & RFLAG_REMAPPED) != 0)
				continue;

			status_t result = B_OK;
			if ((image->regions[i].flags & RFLAG_RW) == 0) {
				result = _kern_set_area_protection(image->regions[i].id,
					B_READ_AREA | B_EXECUTE_AREA);
			} else if (image->abi < B_HAIKU_ABI_GCC_2_HAIKU) {
				result = _kern_set_area_protection(image->regions[i].id,
					B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA);
			}

			if (result == B_OK)
				image->regions[i].flags |= RFLAG_REMAPPED;
		}
	}
}


void
register_image(image_t* image, int fd, const char* path)
{
	struct stat stat;
	extended_image_info info;

	// TODO: set these correctly
	info.basic_info.id = 0;
	info.basic_info.type = image->type;
	info.basic_info.sequence = 0;
	info.basic_info.init_order = 0;
	info.basic_info.init_routine = (void (*)())image->init_routine;
	info.basic_info.term_routine = (void (*)())image->term_routine;

	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
		info.basic_info.device = stat.st_dev;
		info.basic_info.node = stat.st_ino;
	} else {
		info.basic_info.device = -1;
		info.basic_info.node = -1;
	}

	// We may have split segments into separate regions. Compute the correct
	// segments for the image info.
	addr_t textBase = 0;
	addr_t textEnd = 0;
	addr_t dataBase = 0;
	addr_t dataEnd = 0;
	for (uint32 i = 0; i < image->num_regions; i++) {
		addr_t base = image->regions[i].vmstart;
		addr_t end = base + image->regions[i].vmsize;
		if (image->regions[i].flags & RFLAG_RW) {
			// data
			if (dataBase == 0) {
				dataBase = base;
				dataEnd = end;
			} else {
				dataBase = std::min(dataBase, base);
				dataEnd = std::max(dataEnd, end);
			}
		} else {
			// text
			if (textBase == 0) {
				textBase = base;
				textEnd = end;
			} else {
				textBase = std::min(textBase, base);
				textEnd = std::max(textEnd, end);
			}
		}
	}

	strlcpy(info.basic_info.name, path, sizeof(info.basic_info.name));
	info.basic_info.text = (void*)textBase;
	info.basic_info.text_size = textEnd - textBase;
	info.basic_info.data = (void*)dataBase;
	info.basic_info.data_size = dataEnd - dataBase;
	info.basic_info.api_version = image->api_version;
	info.basic_info.abi = image->abi;
	info.text_delta = image->regions[0].delta;
	info.symbol_table = image->syms;
	info.symbol_hash = image->symhash;
	info.string_table = image->strtab;
	image->id = _kern_register_image(&info, sizeof(info));
}


//!	After fork, we lazily rebuild the image IDs of all loaded images.
status_t
update_image_ids()
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}
	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}

	gInvalidImageIDs = false;
	return B_OK;
}


image_queue_t&
get_loaded_images()
{
	return sLoadedImages;
}


image_queue_t&
get_disposable_images()
{
	return sDisposableImages;
}


uint32
count_loaded_images()
{
	return sLoadedImageCount;
}


void
enqueue_loaded_image(image_t* image)
{
	enqueue_image(&sLoadedImages, image);
	sLoadedImageCount++;
}


void
dequeue_loaded_image(image_t* image)
{
	dequeue_image(&sLoadedImages, image);
	sLoadedImageCount--;
}


void
dequeue_disposable_image(image_t* image)
{
	dequeue_image(&sDisposableImages, image);
}


image_t*
find_loaded_image_by_name(char const* name, uint32 typeMask)
{
	bool isPath = strchr(name, '/') != NULL;
	return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
}


image_t*
find_loaded_image_by_id(image_id id, bool ignoreDisposable)
{
	if (gInvalidImageIDs) {
		// After fork, we lazily rebuild the image IDs of all loaded images
		update_image_ids();
	}

	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	if (ignoreDisposable)
		return NULL;

	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	return NULL;
}


image_t*
find_loaded_image_by_address(addr_t address)
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			elf_region_t& region = image->regions[i];
			if (region.vmstart <= address
				&& region.vmstart - 1 + region.vmsize >= address)
				return image;
		}
	}

	return NULL;
}


void
set_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, flags, 0);
}


void
clear_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, 0, flags);
}


/*!	Returns a topologically sorted image list.

	If \a image is non-NULL, an array containing the image and all its
	transitive dependencies is returned. If \a image is NULL, all loaded
	images are returned. In either case dependencies are listed before
	images depending on them.

	\param image The image specifying the tree of images that shall be
		sorted. If NULL, all loaded images are sorted.
	\param _list On success it will be set to an array of the sorted images.
		The caller is responsible for free()ing it.
	\param sortFlag The image flag that shall be used for sorting. Images
		that already have this flag set are ignored (and their dependencies,
		unless they are also reachable via another path). The flag will be
		set on all returned images.
	\return The number of images in the returned array or an error code on
		failure.
*/
ssize_t
get_sorted_image_list(image_t* image, image_t*** _list, uint32 sortFlag)
{
	image_t** list;

	list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t*));
	if (list == NULL) {
		FATAL("memory shortage in get_sorted_image_list()");
		*_list = NULL;
		return B_NO_MEMORY;
	}

	memset(list, 0, sLoadedImageCount * sizeof(image_t*));

	*_list = list;

	if (image != NULL)
		return topological_sort(image, 0, list, sortFlag);

	// no image given -- sort all loaded images
	uint32 count = 0;
	image = sLoadedImages.head;
	while (image != NULL) {
		count = topological_sort(image, count, list, sortFlag);
		image = image->next;
	}

	return count;
}
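

// A minimal usage sketch (kept compiled out, not part of the original file):
// it shows how a caller might consume get_sorted_image_list(), e.g. to walk
// all loaded images in dependency order. Using RFLAG_INITIALIZED as the sort
// flag is an assumption borrowed from the loader's init path; any flag the
// caller reserves for this purpose works, and note that the flag is set on
// every image that ends up in the returned array.
#if 0
static void
trace_images_in_dependency_order()
{
	image_t** list;
	ssize_t count = get_sorted_image_list(NULL, &list, RFLAG_INITIALIZED);
	if (count < 0)
		return;

	// dependencies are listed before the images that need them
	for (ssize_t i = 0; i < count; i++)
		TRACE(("%ld: %s\n", (long)i, list[i]->name));

	free(list);
		// the caller owns the array allocated by get_sorted_image_list()
}
#endif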