1 /* 2 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de. 3 * Copyright 2003-2008, Axel Dörfler, axeld@pinc-software.de. 4 * Distributed under the terms of the MIT License. 5 * 6 * Copyright 2002, Manuel J. Petit. All rights reserved. 7 * Copyright 2001, Travis Geiselbrecht. All rights reserved. 8 * Distributed under the terms of the NewOS License. 9 */ 10 11 12 #include "runtime_loader_private.h" 13 #include "vm.h" 14 15 #include <OS.h> 16 17 #include <string.h> 18 #include <stdio.h> 19 #include <stdlib.h> 20 #include <string.h> 21 22 #include <arch/cpu.h> 23 #include <elf32.h> 24 #include <runtime_loader.h> 25 #include <sem.h> 26 #include <syscalls.h> 27 #include <user_runtime.h> 28 #include <vm_types.h> 29 30 #include "tracing_config.h" 31 32 33 //#define TRACE_RLD 34 #ifdef TRACE_RLD 35 # define TRACE(x) dprintf x 36 #else 37 # define TRACE(x) ; 38 #endif 39 40 41 // ToDo: implement better locking strategy 42 // ToDo: implement lazy binding 43 44 #define PAGE_MASK (B_PAGE_SIZE - 1) 45 46 #define PAGE_OFFSET(x) ((x) & (PAGE_MASK)) 47 #define PAGE_BASE(x) ((x) & ~(PAGE_MASK)) 48 #define TO_PAGE_SIZE(x) ((x + (PAGE_MASK)) & ~(PAGE_MASK)) 49 50 #define RLD_PROGRAM_BASE 0x00200000 51 /* keep in sync with app ldscript */ 52 53 enum { 54 RFLAG_RW = 0x0001, 55 RFLAG_ANON = 0x0002, 56 57 RFLAG_TERMINATED = 0x0200, 58 RFLAG_INITIALIZED = 0x0400, 59 RFLAG_SYMBOLIC = 0x0800, 60 RFLAG_RELOCATED = 0x1000, 61 RFLAG_PROTECTED = 0x2000, 62 RFLAG_DEPENDENCIES_LOADED = 0x4000, 63 RFLAG_REMAPPED = 0x8000, 64 65 RFLAG_VISITED = 0x10000 66 // temporarily set in the symbol resolution code 67 }; 68 69 70 #define IMAGE_TYPE_TO_MASK(type) (1 << ((type) - 1)) 71 #define ALL_IMAGE_TYPES (IMAGE_TYPE_TO_MASK(B_APP_IMAGE) \ 72 | IMAGE_TYPE_TO_MASK(B_LIBRARY_IMAGE) \ 73 | IMAGE_TYPE_TO_MASK(B_ADD_ON_IMAGE) \ 74 | IMAGE_TYPE_TO_MASK(B_SYSTEM_IMAGE)) 75 #define APP_OR_LIBRARY_TYPE (IMAGE_TYPE_TO_MASK(B_APP_IMAGE) \ 76 | IMAGE_TYPE_TO_MASK(B_LIBRARY_IMAGE)) 77 78 typedef void 
(*init_term_function)(image_id); 79 80 static image_queue_t sLoadedImages = {0, 0}; 81 static image_queue_t sDisposableImages = {0, 0}; 82 static uint32 sLoadedImageCount = 0; 83 static image_t *sProgramImage; 84 static KMessage sErrorMessage; 85 86 // a recursive lock 87 static sem_id rld_sem; 88 static thread_id rld_sem_owner; 89 static int32 rld_sem_count; 90 91 92 #ifdef TRACE_RLD 93 # define FATAL(x...) dprintf("runtime_loader: " x); 94 95 void 96 dprintf(const char *format, ...) 97 { 98 char buffer[1024]; 99 100 va_list list; 101 va_start(list, format); 102 103 vsnprintf(buffer, sizeof(buffer), format, list); 104 _kern_debug_output(buffer); 105 106 va_end(list); 107 } 108 #else 109 # define FATAL(x...) printf("runtime_loader: " x); 110 #endif 111 112 113 /*! Mini atoi(), so we don't have to include the libroot dependencies. 114 */ 115 int 116 atoi(const char* num) 117 { 118 int result = 0; 119 while (*num >= '0' && *num <= '9') { 120 result = (result * 10) + (*num - '0'); 121 num++; 122 } 123 124 return result; 125 } 126 127 128 #ifdef RUNTIME_LOADER_TRACING 129 130 void 131 ktrace_printf(const char *format, ...) 132 { 133 va_list list; 134 va_start(list, format); 135 136 char buffer[1024]; 137 vsnprintf(buffer, sizeof(buffer), format, list); 138 _kern_ktrace_output(buffer); 139 140 va_end(list); 141 } 142 143 #define KTRACE(x...) ktrace_printf(x) 144 145 #else 146 # define KTRACE(x...) 
147 #endif // RUNTIME_LOADER_TRACING 148 149 150 static void 151 rld_unlock() 152 { 153 if (rld_sem_count-- == 1) { 154 rld_sem_owner = -1; 155 release_sem(rld_sem); 156 } 157 } 158 159 160 static void 161 rld_lock() 162 { 163 thread_id self = find_thread(NULL); 164 if (self != rld_sem_owner) { 165 acquire_sem(rld_sem); 166 rld_sem_owner = self; 167 } 168 rld_sem_count++; 169 } 170 171 172 static void 173 enqueue_image(image_queue_t *queue, image_t *image) 174 { 175 image->next = 0; 176 177 image->prev = queue->tail; 178 if (queue->tail) 179 queue->tail->next = image; 180 181 queue->tail = image; 182 if (!queue->head) 183 queue->head = image; 184 } 185 186 187 static void 188 dequeue_image(image_queue_t *queue, image_t *image) 189 { 190 if (image->next) 191 image->next->prev = image->prev; 192 else 193 queue->tail = image->prev; 194 195 if (image->prev) 196 image->prev->next = image->next; 197 else 198 queue->head = image->next; 199 200 image->prev = 0; 201 image->next = 0; 202 } 203 204 205 static uint32 206 elf_hash(const uint8 *name) 207 { 208 uint32 hash = 0; 209 uint32 temp; 210 211 while (*name) { 212 hash = (hash << 4) + *name++; 213 if ((temp = hash & 0xf0000000)) { 214 hash ^= temp >> 24; 215 } 216 hash &= ~temp; 217 } 218 return hash; 219 } 220 221 222 static inline bool 223 report_errors() 224 { 225 return gProgramArgs->error_port >= 0; 226 } 227 228 229 static image_t * 230 find_image_in_queue(image_queue_t *queue, const char *name, bool isPath, 231 uint32 typeMask) 232 { 233 image_t *image; 234 235 for (image = queue->head; image; image = image->next) { 236 const char *imageName = isPath ? image->path : image->name; 237 int length = isPath ? 
sizeof(image->path) : sizeof(image->name); 238 239 if (!strncmp(imageName, name, length) 240 && (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) { 241 return image; 242 } 243 } 244 245 return NULL; 246 } 247 248 249 static image_t * 250 find_image(char const *name, uint32 typeMask) 251 { 252 bool isPath = (strchr(name, '/') != NULL); 253 return find_image_in_queue(&sLoadedImages, name, isPath, typeMask); 254 } 255 256 257 static image_t * 258 find_loaded_image_by_id(image_id id) 259 { 260 image_t *image; 261 262 for (image = sLoadedImages.head; image; image = image->next) { 263 if (image->id == id) 264 return image; 265 } 266 267 // For the termination routine, we need to look into the list of 268 // disposable images as well 269 for (image = sDisposableImages.head; image; image = image->next) { 270 if (image->id == id) 271 return image; 272 } 273 274 return NULL; 275 } 276 277 278 static image_t* 279 get_program_image() 280 { 281 for (image_t *image = sLoadedImages.head; image; image = image->next) { 282 if (image->type == B_APP_IMAGE) 283 return image; 284 } 285 286 return NULL; 287 } 288 289 290 static const char * 291 get_program_path() 292 { 293 if (image_t* image = get_program_image()) 294 return image->path; 295 296 return NULL; 297 } 298 299 300 static status_t 301 parse_elf_header(struct Elf32_Ehdr *eheader, int32 *_pheaderSize, 302 int32 *_sheaderSize) 303 { 304 if (memcmp(eheader->e_ident, ELF_MAGIC, 4) != 0) 305 return B_NOT_AN_EXECUTABLE; 306 307 if (eheader->e_ident[4] != ELFCLASS32) 308 return B_NOT_AN_EXECUTABLE; 309 310 if (eheader->e_phoff == 0) 311 return B_NOT_AN_EXECUTABLE; 312 313 if (eheader->e_phentsize < sizeof(struct Elf32_Phdr)) 314 return B_NOT_AN_EXECUTABLE; 315 316 *_pheaderSize = eheader->e_phentsize * eheader->e_phnum; 317 *_sheaderSize = eheader->e_shentsize * eheader->e_shnum; 318 319 if (*_pheaderSize <= 0 || *_sheaderSize <= 0) 320 return B_NOT_AN_EXECUTABLE; 321 322 return B_OK; 323 } 324 325 326 static int32 327 
count_regions(char const *buff, int phnum, int phentsize) 328 { 329 struct Elf32_Phdr *pheaders; 330 int32 count = 0; 331 int i; 332 333 for (i = 0; i < phnum; i++) { 334 pheaders = (struct Elf32_Phdr *)(buff + i * phentsize); 335 336 switch (pheaders->p_type) { 337 case PT_NULL: 338 /* NOP header */ 339 break; 340 case PT_LOAD: 341 count += 1; 342 if (pheaders->p_memsz != pheaders->p_filesz) { 343 addr_t A = TO_PAGE_SIZE(pheaders->p_vaddr + pheaders->p_memsz); 344 addr_t B = TO_PAGE_SIZE(pheaders->p_vaddr + pheaders->p_filesz); 345 346 if (A != B) 347 count += 1; 348 } 349 break; 350 case PT_DYNAMIC: 351 /* will be handled at some other place */ 352 break; 353 case PT_INTERP: 354 /* should check here for appropiate interpreter */ 355 break; 356 case PT_NOTE: 357 /* unsupported */ 358 break; 359 case PT_SHLIB: 360 /* undefined semantics */ 361 break; 362 case PT_PHDR: 363 /* we don't use it */ 364 break; 365 default: 366 FATAL("unhandled pheader type 0x%lx\n", pheaders[i].p_type); 367 return B_BAD_DATA; 368 } 369 } 370 371 return count; 372 } 373 374 375 /* 376 * create_image() & destroy_image() 377 * 378 * Create and destroy image_t structures. The destroyer makes sure that the 379 * memory buffers are full of garbage before freeing. 380 */ 381 382 static image_t * 383 create_image(const char *name, const char *path, int num_regions) 384 { 385 size_t allocSize = sizeof(image_t) + (num_regions - 1) * sizeof(elf_region_t); 386 const char *lastSlash; 387 388 image_t *image = (image_t*)malloc(allocSize); 389 if (image == NULL) { 390 FATAL("no memory for image %s\n", path); 391 return NULL; 392 } 393 394 memset(image, 0, allocSize); 395 396 strlcpy(image->path, path, sizeof(image->path)); 397 398 // Make the last component of the supplied name the image name. 399 // If present, DT_SONAME will replace this name. 
400 if ((lastSlash = strrchr(name, '/'))) 401 strlcpy(image->name, lastSlash + 1, sizeof(image->name)); 402 else 403 strlcpy(image->name, name, sizeof(image->name)); 404 405 image->ref_count = 1; 406 image->num_regions = num_regions; 407 408 return image; 409 } 410 411 412 static void 413 delete_image_struct(image_t *image) 414 { 415 #ifdef DEBUG 416 size_t size = sizeof(image_t) + (image->num_regions - 1) * sizeof(elf_region_t); 417 memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed); 418 #endif 419 free(image->needed); 420 421 #ifdef DEBUG 422 memset(image, 0xa5, size); 423 #endif 424 free(image); 425 } 426 427 428 static void 429 delete_image(image_t *image) 430 { 431 if (image == NULL) 432 return; 433 434 _kern_unregister_image(image->id); 435 // registered in load_container() 436 437 delete_image_struct(image); 438 } 439 440 441 static status_t 442 parse_program_headers(image_t *image, char *buff, int phnum, int phentsize) 443 { 444 struct Elf32_Phdr *pheader; 445 int regcount; 446 int i; 447 448 regcount = 0; 449 for (i = 0; i < phnum; i++) { 450 pheader = (struct Elf32_Phdr *)(buff + i * phentsize); 451 452 switch (pheader->p_type) { 453 case PT_NULL: 454 /* NOP header */ 455 break; 456 case PT_LOAD: 457 if (pheader->p_memsz == pheader->p_filesz) { 458 /* 459 * everything in one area 460 */ 461 image->regions[regcount].start = pheader->p_vaddr; 462 image->regions[regcount].size = pheader->p_memsz; 463 image->regions[regcount].vmstart = PAGE_BASE(pheader->p_vaddr); 464 image->regions[regcount].vmsize = TO_PAGE_SIZE(pheader->p_memsz 465 + PAGE_OFFSET(pheader->p_vaddr)); 466 image->regions[regcount].fdstart = pheader->p_offset; 467 image->regions[regcount].fdsize = pheader->p_filesz; 468 image->regions[regcount].delta = 0; 469 image->regions[regcount].flags = 0; 470 if (pheader->p_flags & PF_WRITE) { 471 // this is a writable segment 472 image->regions[regcount].flags |= RFLAG_RW; 473 } 474 } else { 475 /* 476 * may require splitting 477 
*/ 478 addr_t A = TO_PAGE_SIZE(pheader->p_vaddr + pheader->p_memsz); 479 addr_t B = TO_PAGE_SIZE(pheader->p_vaddr + pheader->p_filesz); 480 481 image->regions[regcount].start = pheader->p_vaddr; 482 image->regions[regcount].size = pheader->p_filesz; 483 image->regions[regcount].vmstart = PAGE_BASE(pheader->p_vaddr); 484 image->regions[regcount].vmsize = TO_PAGE_SIZE(pheader->p_filesz 485 + PAGE_OFFSET(pheader->p_vaddr)); 486 image->regions[regcount].fdstart = pheader->p_offset; 487 image->regions[regcount].fdsize = pheader->p_filesz; 488 image->regions[regcount].delta = 0; 489 image->regions[regcount].flags = 0; 490 if (pheader->p_flags & PF_WRITE) { 491 // this is a writable segment 492 image->regions[regcount].flags |= RFLAG_RW; 493 } 494 495 if (A != B) { 496 /* 497 * yeah, it requires splitting 498 */ 499 regcount += 1; 500 image->regions[regcount].start = pheader->p_vaddr; 501 image->regions[regcount].size = pheader->p_memsz - pheader->p_filesz; 502 image->regions[regcount].vmstart = image->regions[regcount-1].vmstart + image->regions[regcount-1].vmsize; 503 image->regions[regcount].vmsize = TO_PAGE_SIZE(pheader->p_memsz + PAGE_OFFSET(pheader->p_vaddr)) 504 - image->regions[regcount-1].vmsize; 505 image->regions[regcount].fdstart = 0; 506 image->regions[regcount].fdsize = 0; 507 image->regions[regcount].delta = 0; 508 image->regions[regcount].flags = RFLAG_ANON; 509 if (pheader->p_flags & PF_WRITE) { 510 // this is a writable segment 511 image->regions[regcount].flags |= RFLAG_RW; 512 } 513 } 514 } 515 regcount += 1; 516 break; 517 case PT_DYNAMIC: 518 image->dynamic_ptr = pheader->p_vaddr; 519 break; 520 case PT_INTERP: 521 /* should check here for appropiate interpreter */ 522 break; 523 case PT_NOTE: 524 /* unsupported */ 525 break; 526 case PT_SHLIB: 527 /* undefined semantics */ 528 break; 529 case PT_PHDR: 530 /* we don't use it */ 531 break; 532 default: 533 FATAL("unhandled pheader type 0x%lx\n", pheader[i].p_type); 534 return B_BAD_DATA; 535 } 536 } 
	return B_OK;
}


/*!	Determines the gcc version an object was built with by scanning its
	.comment section for "GCC: (GNU) x.y.z" strings, storing the result
	in image->gcc_version. \a buffer (of \a bufferSize bytes) is reused
	for the section header table, the section string table and the
	.comment data, so the two former must fit in it simultaneously.
	Returns false if no version could be determined.
*/
static bool
analyze_object_gcc_version(int fd, image_t* image, Elf32_Ehdr& eheader,
	int32 sheaderSize, char* buffer, size_t bufferSize)
{
	image->gcc_version.major = 0;
	image->gcc_version.middle = 0;
	image->gcc_version.minor = 0;

	if (sheaderSize > (int)bufferSize) {
		FATAL("Cannot handle section headers bigger than %lu\n", bufferSize);
		return false;
	}

	// read section headers
	ssize_t length = _kern_read(fd, eheader.e_shoff, buffer, sheaderSize);
	if (length != sheaderSize) {
		FATAL("Could not read section headers: %s\n", strerror(length));
		return false;
	}

	// load the string section
	Elf32_Shdr* sectionHeader
		= (Elf32_Shdr*)(buffer + eheader.e_shstrndx * eheader.e_shentsize);

	if (sheaderSize + sectionHeader->sh_size > bufferSize) {
		FATAL("Buffer not big enough for section string section\n");
		return false;
	}

	// the string table goes at the far end of the buffer, after the
	// section headers
	char* sectionStrings = buffer + bufferSize - sectionHeader->sh_size;
	length = _kern_read(fd, sectionHeader->sh_offset, sectionStrings,
		sectionHeader->sh_size);
	if (length != (int)sectionHeader->sh_size) {
		FATAL("Could not read section string section: %s\n", strerror(length));
		return false;
	}

	// find the .comment section
	off_t commentOffset = 0;
	size_t commentSize = 0;
	for (uint32 i = 0; i < eheader.e_shnum; i++) {
		sectionHeader = (Elf32_Shdr*)(buffer + i * eheader.e_shentsize);
		const char* sectionName = sectionStrings + sectionHeader->sh_name;
		if (sectionHeader->sh_name != 0
			&& strcmp(sectionName, ".comment") == 0) {
			commentOffset = sectionHeader->sh_offset;
			commentSize = sectionHeader->sh_size;
			break;
		}
	}

	if (commentSize == 0) {
		FATAL("Could not find .comment section\n");
		return false;
	}

	// read a part of the comment section
	if (commentSize > 512)
		commentSize = 512;

	length = _kern_read(fd, commentOffset, buffer, commentSize);
	if (length != (int)commentSize) {
		FATAL("Could not read .comment section: %s\n", strerror(length));
		return false;
	}

	// the common prefix of the strings in the .comment section
	static const char* kGCCVersionPrefix = "GCC: (GNU) ";
	size_t gccVersionPrefixLen = strlen(kGCCVersionPrefix);

	size_t index = 0;
	int gccMajor = 0;
	int gccMiddle = 0;
	int gccMinor = 0;

	// Read up to 10 comments. The first three or four are usually from the
	// glue code.
	for (int i = 0; i < 10; i++) {
		// skip '\0'
		while (index < commentSize && buffer[index] == '\0')
			index++;
		char* stringStart = buffer + index;

		// find string end
		while (index < commentSize && buffer[index] != '\0')
			index++;

		// ignore the entry at the end of the buffer
		if (index == commentSize)
			break;

		// We have to analyze string like these:
		// GCC: (GNU) 2.9-beos-991026
		// GCC: (GNU) 2.95.3-haiku-080322
		// GCC: (GNU) 4.1.2

		// skip the common prefix
		if (strncmp(stringStart, kGCCVersionPrefix, gccVersionPrefixLen) != 0)
			continue;

		// the rest is the GCC version
		char* gccVersion = stringStart + gccVersionPrefixLen;
		char* gccPlatform = strchr(gccVersion, '-');
		char* patchLevel = NULL;
		if (gccPlatform != NULL) {
			*gccPlatform = '\0';
			gccPlatform++;
			patchLevel = strchr(gccPlatform, '-');
			if (patchLevel != NULL) {
				*patchLevel = '\0';
				patchLevel++;
			}
		}

		// split the gcc version into major, middle, and minor
		int version[3] = { 0, 0, 0 };

		for (int k = 0; gccVersion != NULL && k < 3; k++) {
			char* dot = strchr(gccVersion, '.');
			if (dot) {
				*dot = '\0';
				dot++;
			}
			version[k] = atoi(gccVersion);
			gccVersion = dot;
		}

		// got any version?
		if (version[0] == 0)
			continue;

		// Select the gcc version with the smallest major, but the greatest
		// middle/minor. This should usually ignore the glue code version as
		// well as cases where e.g. in a gcc 2 program a single C file has
		// been compiled with gcc 4.
		if (gccMajor == 0 || gccMajor > version[0]
			|| gccMajor == version[0]
				&& (gccMiddle < version[1]
					|| gccMiddle == version[1] && gccMinor < version[2])) {
			gccMajor = version[0];
			gccMiddle = version[1];
			gccMinor = version[2];
		}
	}

	image->gcc_version.major = gccMajor;
	image->gcc_version.middle = gccMiddle;
	image->gcc_version.minor = gccMinor;

	return gccMajor != 0;
}


/*!	Checks that the PT_DYNAMIC address (if any) falls inside one of the
	image's regions; the loader can only deal with a loadable dynamic
	segment.
*/
static bool
assert_dynamic_loadable(image_t *image)
{
	uint32 i;

	if (!image->dynamic_ptr)
		return true;

	for (i = 0; i < image->num_regions; i++) {
		if (image->dynamic_ptr >= image->regions[i].start
			&& image->dynamic_ptr < image->regions[i].start
				+ image->regions[i].size)
			return true;
	}

	return false;
}


/**	This function will change the protection of all read-only segments
 *	to really be read-only.
 *	The areas have to be read/write first, so that they can be relocated.
 */

static void
remap_images(void)
{
	image_t *image;
	uint32 i;

	for (image = sLoadedImages.head; image != NULL; image = image->next) {
		for (i = 0; i < image->num_regions; i++) {
			if ((image->regions[i].flags & RFLAG_RW) == 0
				&& (image->regions[i].flags & RFLAG_REMAPPED) == 0) {
				// we only need to do this once, so we remember those we've already mapped
				if (_kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_EXECUTE_AREA) == B_OK)
					image->regions[i].flags |= RFLAG_REMAPPED;
			}
		}
	}
}


/*!	Creates an area for each region of \a image and loads the file
	contents, placing the image either at its requested address
	(\a fixed) or relocated to a free spot. Regions after the first
	follow it at their relative offsets (same delta). Anonymous (BSS)
	regions get fresh zeroed areas; file-backed regions are privately
	mapped, with the tail of writable segments cleared.
*/
static status_t
map_image(int fd, char const *path, image_t *image, bool fixed)
{
	status_t status = B_OK;
	const char *baseName;
	uint32 i;

	(void)(fd);

	// cut the file name from the path as base name for the created areas
	baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	for (i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];
		addr_t loadAddress;
		uint32 addressSpecifier;

		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		snprintf(regionName, sizeof(regionName), "%s_seg%lu%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		if (image->dynamic_ptr && !fixed) {
			// relocatable image... we can afford to place wherever
			if (i == 0) {
				// but only the first segment gets a free ride
				loadAddress = RLD_PROGRAM_BASE;
				addressSpecifier = B_BASE_ADDRESS;
			} else {
				// subsequent segments keep their offset relative to the
				// previous one
				loadAddress = image->regions[i].vmstart
					+ image->regions[i-1].delta;
				addressSpecifier = B_EXACT_ADDRESS;
			}
		} else {
			// not relocatable, put it where it asks or die trying
			loadAddress = image->regions[i].vmstart;
			addressSpecifier = B_EXACT_ADDRESS;
		}

		if (image->regions[i].flags & RFLAG_ANON) {
			image->regions[i].id = _kern_create_area(regionName,
				(void **)&loadAddress, addressSpecifier,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				status = image->regions[i].id;
				goto error;
			}

			image->regions[i].delta = loadAddress - image->regions[i].vmstart;
			image->regions[i].vmstart = loadAddress;
		} else {
			image->regions[i].id = sys_vm_map_file(regionName,
				(void **)&loadAddress, addressSpecifier,
				image->regions[i].vmsize, B_READ_AREA | B_WRITE_AREA,
				REGION_PRIVATE_MAP, path,
				PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				status = image->regions[i].id;
				goto error;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			image->regions[i].delta = loadAddress - image->regions[i].vmstart;
			image->regions[i].vmstart = loadAddress;

			// handle trailer bits in data segment: zero everything past
			// the file-backed part up to the end of the mapped pages
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing;
				addr_t toClear;

				startClearing = image->regions[i].vmstart
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n", startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}
	}

	// the dynamic section moves along with the first region
	if (image->dynamic_ptr)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;

error:
	return status;
}


/*!	Deletes all areas belonging to \a image.
*/
static void
unmap_image(image_t *image)
{
	uint32 i;

	for (i = 0; i < image->num_regions; i++) {
		_kern_delete_area(image->regions[i].id);

		image->regions[i].id = -1;
	}
}


/*!	Walks the DT_* entries of the already-mapped dynamic segment and
	caches the pointers/sizes the loader needs (hash table, symbol and
	string tables, relocation tables, init/term routines, soname).
	All d_ptr values are adjusted by the first region's load delta.
	Returns false if one of the mandatory sections (hash, symtab,
	strtab) is missing.
*/
static bool
parse_dynamic_segment(image_t *image)
{
	struct Elf32_Dyn *d;
	int i;
	int sonameOffset = -1;

	image->symhash = 0;
	image->syms = 0;
	image->strtab = 0;

	d = (struct Elf32_Dyn *)image->dynamic_ptr;
	if (!d)
		return true;

	for (i = 0; d[i].d_tag != DT_NULL; i++) {
		switch (d[i].d_tag) {
			case DT_NEEDED:
				image->num_needed += 1;
				break;
			case DT_HASH:
				image->symhash = (uint32 *)(d[i].d_un.d_ptr
					+ image->regions[0].delta);
				break;
			case DT_STRTAB:
				image->strtab = (char *)(d[i].d_un.d_ptr
					+ image->regions[0].delta);
				break;
			case DT_SYMTAB:
				image->syms = (struct Elf32_Sym *)(d[i].d_un.d_ptr
					+ image->regions[0].delta);
				break;
			case DT_REL:
				image->rel = (struct Elf32_Rel *)(d[i].d_un.d_ptr
					+ image->regions[0].delta);
				break;
			case DT_RELSZ:
				image->rel_len = d[i].d_un.d_val;
				break;
			case DT_RELA:
				image->rela = (struct Elf32_Rela *)(d[i].d_un.d_ptr
					+ image->regions[0].delta);
				break;
			case DT_RELASZ:
				image->rela_len = d[i].d_un.d_val;
				break;
			// TK: procedure linkage table
			case DT_JMPREL:
				image->pltrel = (struct Elf32_Rel *)(d[i].d_un.d_ptr
					+ image->regions[0].delta);
				break;
			case DT_PLTRELSZ:
				image->pltrel_len = d[i].d_un.d_val;
				break;
			case DT_INIT:
				image->init_routine = (d[i].d_un.d_ptr
					+ image->regions[0].delta);
				break;
			case DT_FINI:
				image->term_routine = (d[i].d_un.d_ptr
					+ image->regions[0].delta);
				break;
			case DT_SONAME:
				sonameOffset = d[i].d_un.d_val;
				break;
			default:
				continue;
		}
	}

	// lets make sure we found all the required sections
	if (!image->symhash || !image->syms || !image->strtab)
		return false;

	if (sonameOffset >= 0)
		strlcpy(image->name, STRING(image, sonameOffset),
			sizeof(image->name));

	return true;
}


/*!	Looks up a defined global/weak symbol named \a name in \a image's
	ELF hash table. Symbols of the wrong \a type (text vs. data) are
	skipped.
*/
static struct Elf32_Sym *
find_symbol(image_t *image, const char *name, int32 type)
{
	uint32 hash, i;

	// ToDo: "type" is currently ignored!
	(void)type;

	if (image->dynamic_ptr == 0)
		return NULL;

	hash = elf_hash((uint8 *)name) % HASHTABSIZE(image);

	for (i = HASHBUCKETS(image)[hash]; i != STN_UNDEF;
			i = HASHCHAINS(image)[i]) {
		struct Elf32_Sym *symbol = &image->syms[i];

		if (symbol->st_shndx != SHN_UNDEF
			&& ((ELF32_ST_BIND(symbol->st_info)== STB_GLOBAL)
				|| (ELF32_ST_BIND(symbol->st_info) == STB_WEAK))
			&& !strcmp(SYMNAME(image, symbol), name)) {
			// check if the type matches
			if ((type == B_SYMBOL_TYPE_TEXT
					&& ELF32_ST_TYPE(symbol->st_info) != STT_FUNC)
				|| (type == B_SYMBOL_TYPE_DATA
					&& ELF32_ST_TYPE(symbol->st_info) != STT_OBJECT))
				continue;

			return symbol;
		}
	}

	return NULL;
}


/*!	Depth-first symbol search over \a image and its dependencies,
	using RFLAG_VISITED to avoid revisiting shared dependencies.
	The caller must clear the flag afterwards.
*/
static struct Elf32_Sym*
find_symbol_recursively_impl(image_t* image, const char* name,
	image_t** foundInImage)
{
	image->flags |= RFLAG_VISITED;

	struct Elf32_Sym *symbol;

	// look up the symbol in this image
	if (image->dynamic_ptr) {
		symbol = find_symbol(image, name, B_SYMBOL_TYPE_ANY);
		if (symbol) {
			*foundInImage = image;
			return symbol;
		}
	}

	// recursively search dependencies
	for (uint32 i = 0; i < image->num_needed; i++) {
		if (!(image->needed[i]->flags & RFLAG_VISITED)) {
			symbol = find_symbol_recursively_impl(image->needed[i], name,
				foundInImage);
			if (symbol)
				return symbol;
		}
	}

	return NULL;
}


/*!	Recursively clears \a flag on \a image and all its dependencies.
*/
static void
clear_image_flag_recursively(image_t* image, uint32 flag)
{
	image->flags &= ~flag;

	for (uint32 i = 0; i < image->num_needed; i++) {
		if (image->needed[i]->flags & flag)
			clear_image_flag_recursively(image->needed[i], flag);
	}
}


/*!	Wrapper around find_symbol_recursively_impl() that resets the
	RFLAG_VISITED markers when done.
*/
static struct Elf32_Sym*
find_symbol_recursively(image_t* image, const char* name,
	image_t** foundInImage)
{
	struct Elf32_Sym* symbol = find_symbol_recursively_impl(image, name,
		foundInImage);
	clear_image_flag_recursively(image, RFLAG_VISITED);
	return symbol;
}


/*!	Searches all loaded images (starting at the head of the loaded
	list) for \a name.
*/
static struct Elf32_Sym*
find_symbol_in_loaded_images(const char* name, image_t** foundInImage)
{
	return find_symbol_recursively(sLoadedImages.head, name, foundInImage);
}


/*!	Resolves an undefined symbol for \a image. With modern resolution
	the whole dependency tree of \a rootImage (and, for add-ons, the
	program image's tree as fallback) is searched; with BeOS (R5) style
	resolution only the direct dependencies of \a image are checked.
*/
static struct Elf32_Sym*
find_undefined_symbol(image_t* rootImage, image_t* image, const char* name,
	image_t** foundInImage)
{
	// If not simulating BeOS style symbol resolution, undefined symbols are
	// searched recursively starting from the root image.
	// TODO: Breadth first might be better than the depth first strategy used
	// here. We're also visiting images multiple times. Consider building a
	// breadth-first sorted array of images for each root image.
	if ((rootImage->flags & IMAGE_FLAG_R5_SYMBOL_RESOLUTION) == 0) {
		Elf32_Sym* symbol = find_symbol_recursively(rootImage, name,
			foundInImage);
		if (symbol != NULL)
			return symbol;

		// If the root image is not the program image (i.e. it is a dynamically
		// loaded add-on or library), we try the program image hierarchy too.
		image_t* programImage = get_program_image();
		if (rootImage != programImage)
			return find_symbol_recursively(programImage, name, foundInImage);

		return NULL;
	}

	// BeOS style symbol resolution: It is sufficient to check the direct
	// dependencies. The linker would have complained, if the symbol wasn't
	// there.
	for (uint32 i = 0; i < image->num_needed; i++) {
		if (image->needed[i]->dynamic_ptr) {
			struct Elf32_Sym *symbol = find_symbol(image->needed[i], name,
				B_SYMBOL_TYPE_ANY);
			if (symbol) {
				*foundInImage = image->needed[i];
				return symbol;
			}
		}
	}

	return NULL;
}


/*!	Computes the load-time address of \a sym for the relocation code.
	SHN_UNDEF symbols are resolved against the other images (with type
	and binding checks); SHN_ABS and regular symbols are adjusted by
	the owning image's load delta. SHN_COMMON is not implemented.
	Returns B_NO_ERROR and fills *sym_addr on success.
*/
int
resolve_symbol(image_t *rootImage, image_t *image, struct Elf32_Sym *sym,
	addr_t *sym_addr)
{
	struct Elf32_Sym *sym2;
	char *symname;
	image_t *shimg;

	switch (sym->st_shndx) {
		case SHN_UNDEF:
			// patch the symbol name
			symname = SYMNAME(image, sym);

			// it's undefined, must be outside this image, try the other images
			sym2 = find_undefined_symbol(rootImage, image, symname, &shimg);
			if (!sym2) {
				printf("elf_resolve_symbol: could not resolve symbol '%s'\n", symname);
				return B_MISSING_SYMBOL;
			}

			// make sure they're the same type
			if (ELF32_ST_TYPE(sym->st_info) != STT_NOTYPE
				&& ELF32_ST_TYPE(sym->st_info) != ELF32_ST_TYPE(sym2->st_info)) {
				printf("elf_resolve_symbol: found symbol '%s' in shared image but wrong type\n", symname);
				return B_MISSING_SYMBOL;
			}

			if (ELF32_ST_BIND(sym2->st_info) != STB_GLOBAL
				&& ELF32_ST_BIND(sym2->st_info) != STB_WEAK) {
				printf("elf_resolve_symbol: found symbol '%s' but not exported\n", symname);
				return B_MISSING_SYMBOL;
			}

			*sym_addr = sym2->st_value + shimg->regions[0].delta;
			return B_NO_ERROR;

		case SHN_ABS:
			*sym_addr = sym->st_value + image->regions[0].delta;
			return B_NO_ERROR;

		case SHN_COMMON:
			// ToDo: finish this
			printf("elf_resolve_symbol: COMMON symbol, finish me!\n");
			return B_ERROR; //ERR_NOT_IMPLEMENTED_YET;

		default:
			// standard symbol
			*sym_addr = sym->st_value + image->regions[0].delta;
			return B_NO_ERROR;
	}
}


/*!	Registers \a image with the kernel so it shows up in the team's
	image list (image_info).
*/
static void
register_image(image_t
*image, int fd, const char *path)
{
	struct stat stat;
	image_info info;

	// ToDo: set these correctly
	info.id = 0;
	info.type = image->type;
	info.sequence = 0;
	info.init_order = 0;
	info.init_routine = (void (*)())image->init_routine;
	info.term_routine = (void (*)())image->term_routine;

	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
		info.device = stat.st_dev;
		info.node = stat.st_ino;
	} else {
		info.device = -1;
		info.node = -1;
	}

	strlcpy(info.name, path, sizeof(info.name));
	info.text = (void *)image->regions[0].vmstart;
	info.text_size = image->regions[0].vmsize;
	// NOTE(review): assumes at least two regions (text + data);
	// verify behavior for single-segment images
	info.data = (void *)image->regions[1].vmstart;
	info.data_size = image->regions[1].vmsize;
	image->id = _kern_register_image(&info, sizeof(image_info));
}


/*!	Performs the architecture-specific relocation of \a image and
	notifies the kernel on success.
*/
static status_t
relocate_image(image_t *rootImage, image_t *image)
{
	status_t status = arch_relocate_image(rootImage, image);
	if (status < B_OK) {
		FATAL("troubles relocating: 0x%lx (image: %s)\n", status, image->name);
		return status;
	}

	_kern_image_relocated(image->id);
	return B_OK;
}


/*!	Loads the ELF file \a name of the given \a type: opens it (using
	\a rpath for the library search), validates and parses its headers,
	maps it into memory, parses its dynamic segment and registers it.
	Already-loaded apps/libraries are reference-counted and reused;
	add-ons are always reloaded. On success *_image is set and the
	image is appended to the loaded-images queue.
*/
static status_t
load_container(char const *name, image_type type, const char *rpath,
	image_t **_image)
{
	int32 pheaderSize, sheaderSize;
	char path[PATH_MAX];
	ssize_t length;
	char ph_buff[4096];
	int32 numRegions;
	image_t *found;
	image_t *image;
	status_t status;
	int fd;

	struct Elf32_Ehdr eheader;

	// Have we already loaded that image? Don't check for add-ons -- we always
	// reload them.
	if (type != B_ADD_ON_IMAGE) {
		found = find_image(name, APP_OR_LIBRARY_TYPE);
		if (found) {
			atomic_add(&found->ref_count, 1);
			*_image = found;
			KTRACE("rld: load_container(\"%s\", type: %d, rpath: \"%s\") "
				"already loaded", name, type, rpath);
			return B_OK;
		}
	}

	KTRACE("rld: load_container(\"%s\", type: %d, rpath: \"%s\")", name, type,
		rpath);

	strlcpy(path, name, sizeof(path));

	// Try to load explicit image path first
	fd = open_executable(path, type, rpath, get_program_path());
	if (fd < 0) {
		FATAL("cannot open file %s\n", path);
		KTRACE("rld: load_container(\"%s\"): failed to open file", name);
		return fd;
	}

	// normalize the image path
	status = _kern_normalize_path(path, true, path);
	if (status != B_OK)
		goto err1;

	// Test again if this image has been registered already - this time,
	// we can check the full path, not just its name as noted.
	// You could end up loading an image twice with symbolic links, else.
	if (type != B_ADD_ON_IMAGE) {
		found = find_image(path, APP_OR_LIBRARY_TYPE);
		if (found) {
			atomic_add(&found->ref_count, 1);
			*_image = found;
			KTRACE("rld: load_container(\"%s\"): already loaded after all",
				name);
			return B_OK;
		}
	}

	length = _kern_read(fd, 0, &eheader, sizeof(eheader));
	if (length != sizeof(eheader)) {
		status = B_NOT_AN_EXECUTABLE;
		FATAL("troubles reading ELF header\n");
		goto err1;
	}

	status = parse_elf_header(&eheader, &pheaderSize, &sheaderSize);
	if (status < B_OK) {
		FATAL("incorrect ELF header\n");
		goto err1;
	}

	// ToDo: what to do about this restriction??
	if (pheaderSize > (int)sizeof(ph_buff)) {
		FATAL("Cannot handle program headers bigger than %lu\n",
			sizeof(ph_buff));
		status = B_UNSUPPORTED;
		goto err1;
	}

	length = _kern_read(fd, eheader.e_phoff, ph_buff, pheaderSize);
	if (length != pheaderSize) {
		FATAL("Could not read program headers: %s\n", strerror(length));
		status = B_BAD_DATA;
		goto err1;
	}

	numRegions = count_regions(ph_buff, eheader.e_phnum, eheader.e_phentsize);
	if (numRegions <= 0) {
		FATAL("Troubles parsing Program headers, numRegions = %ld\n",
			numRegions);
		status = B_BAD_DATA;
		goto err1;
	}

	image = create_image(name, path, numRegions);
	if (image == NULL) {
		FATAL("Failed to allocate image_t object\n");
		status = B_NO_MEMORY;
		goto err1;
	}

	status = parse_program_headers(image, ph_buff, eheader.e_phnum,
		eheader.e_phentsize);
	if (status < B_OK)
		goto err2;

	if (!assert_dynamic_loadable(image)) {
		FATAL("Dynamic segment must be loadable (implementation restriction)\n");
		status = B_UNSUPPORTED;
		goto err2;
	}

	// ph_buff is reused as scratch buffer for the section headers here
	if (!analyze_object_gcc_version(fd, image, eheader, sheaderSize, ph_buff,
			sizeof(ph_buff))) {
		FATAL("Failed to get gcc version for %s\n", path);
		// not really fatal, actually
	}

	// init gcc version dependent image flags
	// symbol resolution strategy (fallback is R5-style, if version is
	// unavailable)
	if (image->gcc_version.major == 0
		|| image->gcc_version.major == 2 && image->gcc_version.middle < 95) {
		image->flags |= IMAGE_FLAG_R5_SYMBOL_RESOLUTION;
	}

	status = map_image(fd, path, image, type == B_APP_IMAGE);
	if (status < B_OK) {
		FATAL("Could not map image: %s\n", strerror(status));
		status = B_ERROR;
		goto err2;
	}

	if (!parse_dynamic_segment(image)) {
		FATAL("Troubles handling dynamic section\n");
		status = B_BAD_DATA;
		goto err3;
	}

	if (eheader.e_entry != 0)
		image->entry_point = eheader.e_entry + image->regions[0].delta;

	image->type = type;
	register_image(image, fd, path);

	_kern_close(fd);

	enqueue_image(&sLoadedImages, image);
	sLoadedImageCount++;

	*_image = image;

	KTRACE("rld: load_container(\"%s\"): done: id: %ld (gcc: %d.%d.%d)", name,
		image->id, image->gcc_version.major, image->gcc_version.middle,
		image->gcc_version.minor);

	return B_OK;

err3:
	unmap_image(image);
err2:
	delete_image_struct(image);
err1:
	_kern_close(fd);

	KTRACE("rld: load_container(\"%s\"): failed: %s", name,
		strerror(status));

	return status;
}


/*!	Returns the image's DT_RPATH string, or NULL if it has none.
*/
static const char *
find_dt_rpath(image_t *image)
{
	int i;
	struct Elf32_Dyn *d = (struct Elf32_Dyn *)image->dynamic_ptr;

	for (i = 0; d[i].d_tag != DT_NULL; i++) {
		if (d[i].d_tag == DT_RPATH)
			return STRING(image, d[i].d_un.d_val);
	}

	return NULL;
}


static status_t
load_dependencies(image_t *image)
{
	struct Elf32_Dyn *d = (struct Elf32_Dyn *)image->dynamic_ptr;
	bool reportErrors = report_errors();
	status_t status = B_OK;
	uint32 i, j;
	const char *rpath;

	if (!d || (image->flags & RFLAG_DEPENDENCIES_LOADED))
		return B_OK;

	image->flags |= RFLAG_DEPENDENCIES_LOADED;

	if (image->num_needed == 0)
		return B_OK;

	KTRACE("rld: load_dependencies(\"%s\", id: %ld)", image->name,
		image->id);

	image->needed = (image_t**)malloc(image->num_needed * sizeof(image_t *));
	if (image->needed == NULL) {
		FATAL("failed to allocate needed struct\n");
		KTRACE("rld: load_dependencies(\"%s\", id: %ld) failed: no memory",
			image->name, image->id);
		return B_NO_MEMORY;
	}

	memset(image->needed, 0, image->num_needed *
sizeof(image_t *)); 1376 rpath = find_dt_rpath(image); 1377 1378 for (i = 0, j = 0; d[i].d_tag != DT_NULL; i++) { 1379 switch (d[i].d_tag) { 1380 case DT_NEEDED: 1381 { 1382 int32 neededOffset = d[i].d_un.d_val; 1383 const char *name = STRING(image, neededOffset); 1384 1385 status_t loadStatus = load_container(name, B_LIBRARY_IMAGE, 1386 rpath, &image->needed[j]); 1387 if (loadStatus < B_OK) { 1388 status = loadStatus; 1389 // correct error code in case the file could not been found 1390 if (status == B_ENTRY_NOT_FOUND) { 1391 status = B_MISSING_LIBRARY; 1392 1393 if (reportErrors) 1394 sErrorMessage.AddString("missing library", name); 1395 } 1396 1397 // Collect all missing libraries in case we report back 1398 if (!reportErrors) { 1399 KTRACE("rld: load_dependencies(\"%s\", id: %ld) " 1400 "failed: %s", image->name, image->id, 1401 strerror(status)); 1402 return status; 1403 } 1404 } 1405 1406 j += 1; 1407 break; 1408 } 1409 1410 default: 1411 // ignore any other tag 1412 continue; 1413 } 1414 } 1415 1416 if (status < B_OK) { 1417 KTRACE("rld: load_dependencies(\"%s\", id: %ld) " 1418 "failed: %s", image->name, image->id, 1419 strerror(status)); 1420 return status; 1421 } 1422 1423 if (j != image->num_needed) { 1424 FATAL("Internal error at load_dependencies()"); 1425 KTRACE("rld: load_dependencies(\"%s\", id: %ld) " 1426 "failed: internal error", image->name, image->id); 1427 return B_ERROR; 1428 } 1429 1430 KTRACE("rld: load_dependencies(\"%s\", id: %ld) done", image->name, 1431 image->id); 1432 1433 return B_OK; 1434 } 1435 1436 1437 static uint32 1438 topological_sort(image_t *image, uint32 slot, image_t **initList, 1439 uint32 sortFlag) 1440 { 1441 uint32 i; 1442 1443 if (image->flags & sortFlag) 1444 return slot; 1445 1446 image->flags |= sortFlag; /* make sure we don't visit this one */ 1447 for (i = 0; i < image->num_needed; i++) 1448 slot = topological_sort(image->needed[i], slot, initList, sortFlag); 1449 1450 initList[slot] = image; 1451 return slot + 
1; 1452 } 1453 1454 1455 static ssize_t 1456 get_sorted_image_list(image_t *image, image_t ***_list, uint32 sortFlag) 1457 { 1458 image_t **list; 1459 1460 list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t *)); 1461 if (list == NULL) { 1462 FATAL("memory shortage in get_sorted_image_list()"); 1463 *_list = NULL; 1464 return B_NO_MEMORY; 1465 } 1466 1467 memset(list, 0, sLoadedImageCount * sizeof(image_t *)); 1468 1469 *_list = list; 1470 return topological_sort(image, 0, list, sortFlag); 1471 } 1472 1473 1474 static status_t 1475 relocate_dependencies(image_t *image) 1476 { 1477 ssize_t count, i; 1478 image_t **list; 1479 1480 count = get_sorted_image_list(image, &list, RFLAG_RELOCATED); 1481 if (count < B_OK) 1482 return count; 1483 1484 for (i = 0; i < count; i++) { 1485 status_t status = relocate_image(image, list[i]); 1486 if (status < B_OK) 1487 return status; 1488 } 1489 1490 free(list); 1491 return B_OK; 1492 } 1493 1494 1495 static void 1496 init_dependencies(image_t *image, bool initHead) 1497 { 1498 image_t **initList; 1499 ssize_t count, i; 1500 1501 count = get_sorted_image_list(image, &initList, RFLAG_INITIALIZED); 1502 if (count <= 0) 1503 return; 1504 1505 if (!initHead) { 1506 // this removes the "calling" image 1507 image->flags &= ~RFLAG_INITIALIZED; 1508 initList[--count] = NULL; 1509 } 1510 1511 TRACE(("%ld: init dependencies\n", find_thread(NULL))); 1512 for (i = 0; i < count; i++) { 1513 image = initList[i]; 1514 1515 TRACE(("%ld: init: %s\n", find_thread(NULL), image->name)); 1516 1517 if (image->init_routine != 0) 1518 ((init_term_function)image->init_routine)(image->id); 1519 } 1520 TRACE(("%ld: init done.\n", find_thread(NULL))); 1521 1522 free(initList); 1523 } 1524 1525 1526 static void 1527 put_image(image_t *image) 1528 { 1529 // If all references to the image are gone, add it to the disposable list 1530 // and remove all dependencies 1531 1532 if (atomic_add(&image->ref_count, -1) == 1) { 1533 size_t i; 1534 1535 
		// last reference just dropped: move the image from the loaded queue
		// to the disposable queue and release everything it depended on
		dequeue_image(&sLoadedImages, image);
		enqueue_image(&sDisposableImages, image);
		sLoadedImageCount--;

		for (i = 0; i < image->num_needed; i++) {
			put_image(image->needed[i]);
		}
	}
}


// #pragma mark - libroot.so exported functions


/*!	Loads the program at \a path with all its dependencies, relocates and
	initializes them, and returns the program's image ID; *_entry is set to
	the program's entry point. On failure the error is (optionally) sent
	back via the error port and _kern_loading_app_failed() is invoked.
*/
image_id
load_program(char const *path, void **_entry)
{
	status_t status;
	image_t *image;

	KTRACE("rld: load_program(\"%s\")", path);

	rld_lock();
		// for now, just do stupid simple global locking

	TRACE(("rld: load %s\n", path));

	status = load_container(path, B_APP_IMAGE, NULL, &sProgramImage);
	if (status < B_OK)
		goto err;

	// load dependencies of everything loaded so far -- the list grows while
	// we iterate, so this resolves the transitive closure
	for (image = sLoadedImages.head; image != NULL; image = image->next) {
		status = load_dependencies(image);
		if (status < B_OK)
			goto err;
	}

	status = relocate_dependencies(sProgramImage);
	if (status < B_OK)
		goto err;

	// We patch any exported __gRuntimeLoader symbols to point to our private API
	{
		struct Elf32_Sym *symbol = find_symbol_in_loaded_images(
			"__gRuntimeLoader", &image);
		if (symbol != NULL) {
			void **_export = (void **)(symbol->st_value + image->regions[0].delta);
			*_export = &gRuntimeLoader;
		}
	}

	init_dependencies(sLoadedImages.head, true);
	remap_images();
		// ToDo: once setup_system_time() is fixed, move this one line higher!

	// Since the images are initialized now, we no longer should use our
	// getenv(), but use the one from libroot.so
	{
		struct Elf32_Sym *symbol = find_symbol_in_loaded_images("getenv",
			&image);
		if (symbol != NULL)
			gGetEnv = (char* (*)(const char*))
				(symbol->st_value + image->regions[0].delta);
	}

	if (sProgramImage->entry_point == 0) {
		status = B_NOT_AN_EXECUTABLE;
		goto err;
	}

	*_entry = (void *)(sProgramImage->entry_point);

	rld_unlock();

	KTRACE("rld: load_program(\"%s\") done: entry: %p, id: %ld", path,
		*_entry, sProgramImage->id);

	return sProgramImage->id;

err:
	KTRACE("rld: load_program(\"%s\") failed: %s", path, strerror(status));

	// NOTE(review): if load_container() failed, sProgramImage may still be
	// NULL here -- presumably delete_image() tolerates NULL; verify.
	delete_image(sProgramImage);

	if (report_errors()) {
		// send error message
		sErrorMessage.AddInt32("error", status);
		sErrorMessage.SetDeliveryInfo(gProgramArgs->error_token,
			-1, 0, find_thread(NULL));

		_kern_write_port_etc(gProgramArgs->error_port, 'KMSG',
			sErrorMessage.Buffer(), sErrorMessage.ContentSize(), 0, 0);
	}
	_kern_loading_app_failed(status);
	rld_unlock();

	return status;
}


/*!	Loads the library (or add-on, if \a addOn is true) at \a path together
	with its dependencies, relocates and initializes them, and returns its
	image ID. A library that is already loaded only gets its reference
	count bumped. \a flags is currently unused.
*/
image_id
load_library(char const *path, uint32 flags, bool addOn)
{
	image_t *image = NULL;
	image_t *iter;
	image_type type = (addOn ? B_ADD_ON_IMAGE : B_LIBRARY_IMAGE);
	status_t status;

	if (path == NULL)
		return B_BAD_VALUE;

	// ToDo: implement flags
	(void)flags;

	KTRACE("rld: load_library(\"%s\", 0x%lx, %d)", path, flags, addOn);

	rld_lock();
		// for now, just do stupid simple global locking

	// have we already loaded this library?
	// Checking it at this stage saves loading its dependencies again
	if (!addOn) {
		image = find_image(path, APP_OR_LIBRARY_TYPE);
		if (image) {
			atomic_add(&image->ref_count, 1);
			rld_unlock();
			KTRACE("rld: load_library(\"%s\"): already loaded: %ld", path,
				image->id);
			return image->id;
		}
	}

	status = load_container(path, type, NULL, &image);
	if (status < B_OK) {
		rld_unlock();
		KTRACE("rld: load_library(\"%s\") failed to load container: %s", path,
			strerror(status));
		return status;
	}

	// resolve the transitive dependency closure (list grows as we iterate)
	for (iter = sLoadedImages.head; iter; iter = iter->next) {
		status = load_dependencies(iter);
		if (status < B_OK)
			goto err;
	}

	status = relocate_dependencies(image);
	if (status < B_OK)
		goto err;

	remap_images();
	init_dependencies(image, true);

	rld_unlock();

	KTRACE("rld: load_library(\"%s\") done: id: %ld", path, image->id);

	return image->id;

err:
	KTRACE("rld: load_library(\"%s\") failed: %s", path, strerror(status));

	dequeue_image(&sLoadedImages, image);
	sLoadedImageCount--;
	delete_image(image);
	rld_unlock();
	return status;
}


/*!	Drops one reference on the image with ID \a imageID (of the matching
	kind). Once an image becomes disposable, its atexit hooks and term
	routine are run, and it is unmapped and deleted.
*/
status_t
unload_library(image_id imageID, bool addOn)
{
	status_t status = B_BAD_IMAGE_ID;
	image_t *image;
	image_type type = addOn ?
B_ADD_ON_IMAGE : B_LIBRARY_IMAGE; 1711 1712 if (imageID < B_OK) 1713 return B_BAD_IMAGE_ID; 1714 1715 rld_lock(); 1716 // for now, just do stupid simple global locking 1717 1718 // we only check images that have been already initialized 1719 1720 for (image = sLoadedImages.head; image; image = image->next) { 1721 if (image->id == imageID) { 1722 // unload image 1723 if (type == image->type) { 1724 put_image(image); 1725 status = B_OK; 1726 } else 1727 status = B_BAD_VALUE; 1728 break; 1729 } 1730 } 1731 1732 if (status == B_OK) { 1733 while ((image = sDisposableImages.head) != NULL) { 1734 // call image fini here... 1735 if (gRuntimeLoader.call_atexit_hooks_for_range) { 1736 gRuntimeLoader.call_atexit_hooks_for_range( 1737 image->regions[0].vmstart, image->regions[0].vmsize); 1738 } 1739 1740 if (image->term_routine) 1741 ((init_term_function)image->term_routine)(image->id); 1742 1743 dequeue_image(&sDisposableImages, image); 1744 unmap_image(image); 1745 1746 delete_image(image); 1747 } 1748 } 1749 1750 rld_unlock(); 1751 return status; 1752 } 1753 1754 1755 status_t 1756 get_nth_symbol(image_id imageID, int32 num, char *nameBuffer, int32 *_nameLength, 1757 int32 *_type, void **_location) 1758 { 1759 int32 count = 0, j; 1760 uint32 i; 1761 image_t *image; 1762 1763 rld_lock(); 1764 1765 // get the image from those who have been already initialized 1766 image = find_loaded_image_by_id(imageID); 1767 if (image == NULL) { 1768 rld_unlock(); 1769 return B_BAD_IMAGE_ID; 1770 } 1771 1772 // iterate through all the hash buckets until we've found the one 1773 for (i = 0; i < HASHTABSIZE(image); i++) { 1774 for (j = HASHBUCKETS(image)[i]; j != STN_UNDEF; j = HASHCHAINS(image)[j]) { 1775 struct Elf32_Sym *symbol = &image->syms[i]; 1776 1777 if (count == num) { 1778 strlcpy(nameBuffer, SYMNAME(image, symbol), *_nameLength); 1779 *_nameLength = strlen(SYMNAME(image, symbol)); 1780 1781 if (_type != NULL) { 1782 // ToDo: check with the return types of that BeOS function 1783 
if (ELF32_ST_TYPE(symbol->st_info) == STT_FUNC) 1784 *_type = B_SYMBOL_TYPE_TEXT; 1785 else if (ELF32_ST_TYPE(symbol->st_info) == STT_OBJECT) 1786 *_type = B_SYMBOL_TYPE_DATA; 1787 else 1788 *_type = B_SYMBOL_TYPE_ANY; 1789 } 1790 1791 if (_location != NULL) 1792 *_location = (void *)(symbol->st_value + image->regions[0].delta); 1793 goto out; 1794 } 1795 count++; 1796 } 1797 } 1798 out: 1799 rld_unlock(); 1800 1801 if (num != count) 1802 return B_BAD_INDEX; 1803 1804 return B_OK; 1805 } 1806 1807 1808 status_t 1809 get_symbol(image_id imageID, char const *symbolName, int32 symbolType, void **_location) 1810 { 1811 status_t status = B_OK; 1812 image_t *image; 1813 1814 if (imageID < B_OK) 1815 return B_BAD_IMAGE_ID; 1816 if (symbolName == NULL) 1817 return B_BAD_VALUE; 1818 1819 rld_lock(); 1820 // for now, just do stupid simple global locking 1821 1822 // get the image from those who have been already initialized 1823 image = find_loaded_image_by_id(imageID); 1824 if (image != NULL) { 1825 struct Elf32_Sym *symbol; 1826 1827 // get the symbol in the image 1828 symbol = find_symbol(image, symbolName, symbolType); 1829 if (symbol) { 1830 if (_location != NULL) 1831 *_location = (void *)(symbol->st_value + image->regions[0].delta); 1832 } else 1833 status = B_ENTRY_NOT_FOUND; 1834 } else 1835 status = B_BAD_IMAGE_ID; 1836 1837 rld_unlock(); 1838 return status; 1839 } 1840 1841 1842 status_t 1843 get_next_image_dependency(image_id id, uint32 *cookie, const char **_name) 1844 { 1845 uint32 i, j, searchIndex = *cookie; 1846 struct Elf32_Dyn *dynamicSection; 1847 image_t *image; 1848 1849 if (_name == NULL) 1850 return B_BAD_VALUE; 1851 1852 rld_lock(); 1853 1854 image = find_loaded_image_by_id(id); 1855 if (image == NULL) { 1856 rld_unlock(); 1857 return B_BAD_IMAGE_ID; 1858 } 1859 1860 dynamicSection = (struct Elf32_Dyn *)image->dynamic_ptr; 1861 if (dynamicSection == NULL || image->num_needed <= searchIndex) { 1862 rld_unlock(); 1863 return B_ENTRY_NOT_FOUND; 1864 } 
1865 1866 for (i = 0, j = 0; dynamicSection[i].d_tag != DT_NULL; i++) { 1867 if (dynamicSection[i].d_tag != DT_NEEDED) 1868 continue; 1869 1870 if (j++ == searchIndex) { 1871 int32 neededOffset = dynamicSection[i].d_un.d_val; 1872 1873 *_name = STRING(image, neededOffset); 1874 *cookie = searchIndex + 1; 1875 rld_unlock(); 1876 return B_OK; 1877 } 1878 } 1879 1880 rld_unlock(); 1881 return B_ENTRY_NOT_FOUND; 1882 } 1883 1884 1885 // #pragma mark - runtime_loader private exports 1886 1887 1888 /** Read and verify the ELF header */ 1889 1890 status_t 1891 elf_verify_header(void *header, int32 length) 1892 { 1893 int32 programSize, sectionSize; 1894 1895 if (length < (int32)sizeof(struct Elf32_Ehdr)) 1896 return B_NOT_AN_EXECUTABLE; 1897 1898 return parse_elf_header((struct Elf32_Ehdr *)header, &programSize, §ionSize); 1899 } 1900 1901 1902 void 1903 terminate_program(void) 1904 { 1905 image_t **termList; 1906 ssize_t count, i; 1907 1908 count = get_sorted_image_list(sProgramImage, &termList, RFLAG_TERMINATED); 1909 if (count < B_OK) 1910 return; 1911 1912 TRACE(("%ld: terminate dependencies\n", find_thread(NULL))); 1913 for (i = count; i-- > 0;) { 1914 image_t *image = termList[i]; 1915 1916 TRACE(("%ld: term: %s\n", find_thread(NULL), image->name)); 1917 1918 if (image->term_routine) 1919 ((init_term_function)image->term_routine)(image->id); 1920 } 1921 TRACE(("%ld: term done.\n", find_thread(NULL))); 1922 1923 free(termList); 1924 } 1925 1926 1927 void 1928 rldelf_init(void) 1929 { 1930 rld_sem = create_sem(1, "rld_lock"); 1931 rld_sem_owner = -1; 1932 rld_sem_count = 0; 1933 1934 // create the debug area 1935 { 1936 int32 size = TO_PAGE_SIZE(sizeof(runtime_loader_debug_area)); 1937 1938 runtime_loader_debug_area *area; 1939 area_id areaID = _kern_create_area(RUNTIME_LOADER_DEBUG_AREA_NAME, 1940 (void **)&area, B_ANY_ADDRESS, size, B_NO_LOCK, 1941 B_READ_AREA | B_WRITE_AREA); 1942 if (areaID < B_OK) { 1943 FATAL("Failed to create debug area.\n"); 1944 
_kern_loading_app_failed(areaID); 1945 } 1946 1947 area->loaded_images = &sLoadedImages; 1948 } 1949 1950 // initialize error message if needed 1951 if (report_errors()) { 1952 void *buffer = malloc(1024); 1953 if (buffer == NULL) 1954 return; 1955 1956 sErrorMessage.SetTo(buffer, 1024, 'Rler'); 1957 } 1958 } 1959 1960 1961 status_t 1962 elf_reinit_after_fork() 1963 { 1964 rld_sem = create_sem(1, "rld_lock"); 1965 if (rld_sem < 0) 1966 return rld_sem; 1967 1968 return B_OK; 1969 } 1970