/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Manuel J. Petit. All rights reserved.
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "images.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include <syscalls.h>
#include <vm_defs.h>

#include "add_ons.h"
#include "runtime_loader_private.h"


#define RLD_PROGRAM_BASE 0x00200000
	/* keep in sync with app ldscript */


bool gInvalidImageIDs;

static image_queue_t sLoadedImages = {0, 0};
static image_queue_t sDisposableImages = {0, 0};
static uint32 sLoadedImageCount = 0;


//! Remaps the image ID of \a image after fork.
static status_t
update_image_id(image_t* image)
{
	int32 cookie = 0;
	image_info info;
	while (_kern_get_next_image_info(B_CURRENT_TEAM, &cookie, &info,
			sizeof(image_info)) == B_OK) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if (image->regions[i].vmstart == (addr_t)info.text) {
				image->id = info.id;
				return B_OK;
			}
		}
	}

	FATAL("Could not update image ID %ld after fork()!\n", image->id);
	return B_ENTRY_NOT_FOUND;
}


static void
enqueue_image(image_queue_t* queue, image_t* image)
{
	image->next = NULL;

	image->prev = queue->tail;
	if (queue->tail)
		queue->tail->next = image;

	queue->tail = image;
	if (!queue->head)
		queue->head = image;
}


static void
dequeue_image(image_queue_t* queue, image_t* image)
{
	if (image->next)
		image->next->prev = image->prev;
	else
		queue->tail = image->prev;

	if (image->prev)
		image->prev->next = image->next;
	else
		queue->head = image->next;

	image->prev = NULL;
	image->next = NULL;
}


static image_t*
find_image_in_queue(image_queue_t* queue, const char* name, bool isPath,
	uint32 typeMask)
{
	for (image_t* image = queue->head; image; image = image->next) {
		const char* imageName = isPath ? image->path : image->name;
		int length = isPath ? sizeof(image->path) : sizeof(image->name);

		if (!strncmp(imageName, name, length)
			&& (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
			return image;
		}
	}

	return NULL;
}


static void
update_image_flags_recursively(image_t* image, uint32 flagsToSet,
	uint32 flagsToClear)
{
	image_t* queue[sLoadedImageCount];
	uint32 count = 0;
	uint32 index = 0;
	queue[count++] = image;
	image->flags |= RFLAG_VISITED;

	while (index < count) {
		// pop next image
		image = queue[index++];

		// push dependencies
		for (uint32 i = 0; i < image->num_needed; i++) {
			image_t* needed = image->needed[i];
			if ((needed->flags & RFLAG_VISITED) == 0) {
				queue[count++] = needed;
				needed->flags |= RFLAG_VISITED;
			}
		}
	}

	// update flags
	for (uint32 i = 0; i < count; i++) {
		queue[i]->flags = (queue[i]->flags | flagsToSet)
			& ~(flagsToClear | RFLAG_VISITED);
	}
}


static uint32
topological_sort(image_t* image, uint32 slot, image_t** initList,
	uint32 sortFlag)
{
	uint32 i;

	if (image->flags & sortFlag)
		return slot;

	image->flags |= sortFlag; /* make sure we don't visit this one */
	for (i = 0; i < image->num_needed; i++)
		slot = topological_sort(image->needed[i], slot, initList, sortFlag);

	initList[slot] = image;
	return slot + 1;
}


/*!	Finds the load address and address specifier of the given image region.
*/
static void
get_image_region_load_address(image_t* image, uint32 index, int32 lastDelta,
	bool fixed, addr_t& loadAddress, uint32& addressSpecifier)
{
	if (image->dynamic_ptr != 0 && !fixed) {
		// relocatable image... we can afford to place wherever
		if (index == 0) {
			// but only the first segment gets a free ride
			loadAddress = RLD_PROGRAM_BASE;
			addressSpecifier = B_BASE_ADDRESS;
		} else {
			loadAddress = image->regions[index].vmstart + lastDelta;
			addressSpecifier = B_EXACT_ADDRESS;
		}
	} else {
		// not relocatable, put it where it asks or die trying
		loadAddress = image->regions[index].vmstart;
		addressSpecifier = B_EXACT_ADDRESS;
	}
}


// #pragma mark -


image_t*
create_image(const char* name, const char* path, int regionCount)
{
	size_t allocSize = sizeof(image_t)
		+ (regionCount - 1) * sizeof(elf_region_t);

	image_t* image = (image_t*)malloc(allocSize);
	if (image == NULL) {
		FATAL("no memory for image %s\n", path);
		return NULL;
	}

	memset(image, 0, allocSize);

	strlcpy(image->path, path, sizeof(image->path));

	// Make the last component of the supplied name the image name.
	// If present, DT_SONAME will replace this name.
	const char* lastSlash = strrchr(name, '/');
	if (lastSlash != NULL)
		strlcpy(image->name, lastSlash + 1, sizeof(image->name));
	else
		strlcpy(image->name, name, sizeof(image->name));

	image->ref_count = 1;
	image->num_regions = regionCount;

	return image;
}


void
delete_image_struct(image_t* image)
{
#ifdef DEBUG
	size_t size = sizeof(image_t)
		+ (image->num_regions - 1) * sizeof(elf_region_t);
	memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
#endif
	free(image->needed);
	free(image->versions);

	while (RuntimeLoaderSymbolPatcher* patcher
			= image->defined_symbol_patchers) {
		image->defined_symbol_patchers = patcher->next;
		delete patcher;
	}
	while (RuntimeLoaderSymbolPatcher* patcher
			= image->undefined_symbol_patchers) {
		image->undefined_symbol_patchers = patcher->next;
		delete patcher;
	}

#ifdef DEBUG
	// overwrite images to make sure they aren't accidentally reused anywhere
	memset(image, 0xa5, size);
#endif
	free(image);
}


void
delete_image(image_t* image)
{
	if (image == NULL)
		return;

	_kern_unregister_image(image->id);
		// registered in load_container()

	delete_image_struct(image);
}


void
put_image(image_t* image)
{
	// If all references to the image are gone, add it to the disposable list
	// and remove all dependencies

	if (atomic_add(&image->ref_count, -1) == 1) {
		size_t i;

		dequeue_image(&sLoadedImages, image);
		enqueue_image(&sDisposableImages, image);
		sLoadedImageCount--;

		for (i = 0; i < image->num_needed; i++)
			put_image(image->needed[i]);
	}
}


status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			loadAddress - image->regions[i - 1].vmstart, fixed,
			loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + 8 * 1024)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%lu%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i, image->regions[i - 1].delta,
			fixed, loadAddress, addressSpecifier);

		// If the image position is arbitrary, we must let it point to the
		// start of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_READ_AREA | B_WRITE_AREA,
				REGION_PRIVATE_MAP, false, fd,
				PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}


void
unmap_image(image_t* image)
{
	for (uint32 i = 0; i < image->num_regions; i++) {
		_kern_delete_area(image->regions[i].id);

		image->regions[i].id = -1;
	}
}


/*!	Changes the protection of all read-only segments to really be read-only.
	The areas have to be read/write first, so that they can be relocated.
*/
void
remap_images()
{
	for (image_t* image = sLoadedImages.head; image != NULL;
			image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if ((image->regions[i].flags & RFLAG_RW) == 0
				&& (image->regions[i].flags & RFLAG_REMAPPED) == 0) {
				// we only need to do this once, so we remember those we've
				// already mapped
				if (_kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_EXECUTE_AREA) == B_OK) {
					image->regions[i].flags |= RFLAG_REMAPPED;
				}
			}
		}
	}
}


void
register_image(image_t* image, int fd, const char* path)
{
	struct stat stat;
	image_info info;

	// TODO: set these correctly
	info.id = 0;
	info.type = image->type;
	info.sequence = 0;
	info.init_order = 0;
	info.init_routine = (void (*)())image->init_routine;
	info.term_routine = (void (*)())image->term_routine;

	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
		info.device = stat.st_dev;
		info.node = stat.st_ino;
	} else {
		info.device = -1;
		info.node = -1;
	}

	// We may have split segments into separate regions. Compute the correct
	// segments for the image info.
	addr_t textBase = 0;
	addr_t textEnd = 0;
	addr_t dataBase = 0;
	addr_t dataEnd = 0;
	for (uint32 i = 0; i < image->num_regions; i++) {
		addr_t base = image->regions[i].vmstart;
		addr_t end = base + image->regions[i].vmsize;
		if (image->regions[i].flags & RFLAG_RW) {
			// data
			if (dataBase == 0) {
				dataBase = base;
				dataEnd = end;
			} else {
				dataBase = std::min(dataBase, base);
				dataEnd = std::max(dataEnd, end);
			}
		} else {
			// text
			if (textBase == 0) {
				textBase = base;
				textEnd = end;
			} else {
				textBase = std::min(textBase, base);
				textEnd = std::max(textEnd, end);
			}
		}
	}

	strlcpy(info.name, path, sizeof(info.name));
	info.text = (void*)textBase;
	info.text_size = textEnd - textBase;
	info.data = (void*)dataBase;
	info.data_size = dataEnd - dataBase;
	info.api_version = image->api_version;
	info.abi = image->abi;
	image->id = _kern_register_image(&info, sizeof(image_info));
}


//! After fork, we lazily rebuild the image IDs of all loaded images.
status_t
update_image_ids()
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}
	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}

	gInvalidImageIDs = false;
	return B_OK;
}


image_queue_t&
get_loaded_images()
{
	return sLoadedImages;
}


image_queue_t&
get_disposable_images()
{
	return sDisposableImages;
}


uint32
count_loaded_images()
{
	return sLoadedImageCount;
}


void
enqueue_loaded_image(image_t* image)
{
	enqueue_image(&sLoadedImages, image);
	sLoadedImageCount++;
}


void
dequeue_loaded_image(image_t* image)
{
	dequeue_image(&sLoadedImages, image);
	sLoadedImageCount--;
}


void
dequeue_disposable_image(image_t* image)
{
	dequeue_image(&sDisposableImages, image);
}


image_t*
find_loaded_image_by_name(char const* name, uint32 typeMask)
{
	bool isPath = strchr(name, '/') != NULL;
	return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
}


image_t*
find_loaded_image_by_id(image_id id, bool ignoreDisposable)
{
	if (gInvalidImageIDs) {
		// After fork, we lazily rebuild the image IDs of all loaded images
		update_image_ids();
	}

	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	if (ignoreDisposable)
		return NULL;

	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	return NULL;
}


void
set_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, flags, 0);
}


void
clear_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, 0, flags);
}


ssize_t
get_sorted_image_list(image_t* image, image_t*** _list, uint32 sortFlag)
{
	image_t** list;

	list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t*));
	if (list == NULL) {
		FATAL("memory shortage in get_sorted_image_list()");
		*_list = NULL;
		return B_NO_MEMORY;
	}

	memset(list, 0, sLoadedImageCount * sizeof(image_t*));

	*_list = list;
	return topological_sort(image, 0, list, sortFlag);
}
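

/*
 * Usage sketch (illustrative only, not part of this translation unit):
 * a caller that wants to run image initializers in dependency order can
 * combine get_sorted_image_list() with the per-image init routine, since
 * topological_sort() places every image after the images it needs. The
 * flag RFLAG_SORTED and the pointer "rootImage" below are placeholders,
 * not names defined in this file.
 *
 *	image_t** initList;
 *	ssize_t count = get_sorted_image_list(rootImage, &initList, RFLAG_SORTED);
 *	if (count > 0) {
 *		// dependencies come first, so each init routine runs only after
 *		// those of the images it depends on
 *		for (ssize_t i = 0; i < count; i++) {
 *			image_t* dependency = initList[i];
 *			if (dependency->init_routine != 0)
 *				((void (*)())dependency->init_routine)();
 *		}
 *	}
 *	free(initList);
 */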