/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Manuel J. Petit. All rights reserved.
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "images.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <syscalls.h>
#include <vm_defs.h>

#include "add_ons.h"
#include "runtime_loader_private.h"


#define RLD_PROGRAM_BASE 0x00200000
    /* keep in sync with app ldscript */


bool gInvalidImageIDs;

static image_queue_t sLoadedImages = {0, 0};
static image_queue_t sDisposableImages = {0, 0};
static uint32 sLoadedImageCount = 0;


//! Remaps the image ID of \a image after fork.
static status_t
update_image_id(image_t* image)
{
    int32 cookie = 0;
    image_info info;
    while (_kern_get_next_image_info(B_CURRENT_TEAM, &cookie, &info,
            sizeof(image_info)) == B_OK) {
        for (uint32 i = 0; i < image->num_regions; i++) {
            if (image->regions[i].vmstart == (addr_t)info.text) {
                image->id = info.id;
                return B_OK;
            }
        }
    }

    FATAL("Could not update image ID %ld after fork()!\n", image->id);
    return B_ENTRY_NOT_FOUND;
}


static void
enqueue_image(image_queue_t* queue, image_t* image)
{
    image->next = NULL;

    image->prev = queue->tail;
    if (queue->tail)
        queue->tail->next = image;

    queue->tail = image;
    if (!queue->head)
        queue->head = image;
}


static void
dequeue_image(image_queue_t* queue, image_t* image)
{
    if (image->next)
        image->next->prev = image->prev;
    else
        queue->tail = image->prev;

    if (image->prev)
        image->prev->next = image->next;
    else
        queue->head = image->next;

    image->prev = NULL;
    image->next = NULL;
}


static image_t*
find_image_in_queue(image_queue_t* queue, const char* name, bool isPath,
    uint32 typeMask)
{
    for (image_t* image = queue->head; image; image = image->next) {
        const char* imageName = isPath ? image->path : image->name;
        int length = isPath ? sizeof(image->path) : sizeof(image->name);

        if (!strncmp(imageName, name, length)
            && (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
            return image;
        }
    }

    return NULL;
}

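/*! Propagates flag changes through the dependency graph: starting at
    \a image, a breadth-first walk over the needed[] arrays collects every
    reachable image exactly once (RFLAG_VISITED marks images that are
    already queued), then sets \a flagsToSet and clears \a flagsToClear on
    all of them, dropping the visit marker again in the same pass. The
    queue is a VLA sized by sLoadedImageCount, which bounds the number of
    images reachable from any root.
*/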
static void
update_image_flags_recursively(image_t* image, uint32 flagsToSet,
    uint32 flagsToClear)
{
    image_t* queue[sLoadedImageCount];
    uint32 count = 0;
    uint32 index = 0;
    queue[count++] = image;
    image->flags |= RFLAG_VISITED;

    while (index < count) {
        // pop next image
        image = queue[index++];

        // push dependencies
        for (uint32 i = 0; i < image->num_needed; i++) {
            image_t* needed = image->needed[i];
            if ((needed->flags & RFLAG_VISITED) == 0) {
                queue[count++] = needed;
                needed->flags |= RFLAG_VISITED;
            }
        }
    }

    // update flags
    for (uint32 i = 0; i < count; i++) {
        queue[i]->flags = (queue[i]->flags | flagsToSet)
            & ~(flagsToClear | RFLAG_VISITED);
    }
}


static uint32
topological_sort(image_t* image, uint32 slot, image_t** initList,
    uint32 sortFlag)
{
    uint32 i;

    if (image->flags & sortFlag)
        return slot;

    image->flags |= sortFlag; /* make sure we don't visit this one */
    for (i = 0; i < image->num_needed; i++)
        slot = topological_sort(image->needed[i], slot, initList, sortFlag);

    initList[slot] = image;
    return slot + 1;
}


// #pragma mark -


image_t*
create_image(const char* name, const char* path, int regionCount)
{
    size_t allocSize = sizeof(image_t)
        + (regionCount - 1) * sizeof(elf_region_t);

    image_t* image = (image_t*)malloc(allocSize);
    if (image == NULL) {
        FATAL("no memory for image %s\n", path);
        return NULL;
    }

    memset(image, 0, allocSize);

    strlcpy(image->path, path, sizeof(image->path));

    // Make the last component of the supplied name the image name.
    // If present, DT_SONAME will replace this name.
    const char* lastSlash = strrchr(name, '/');
    if (lastSlash != NULL)
        strlcpy(image->name, lastSlash + 1, sizeof(image->name));
    else
        strlcpy(image->name, name, sizeof(image->name));

    image->ref_count = 1;
    image->num_regions = regionCount;

    return image;
}

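/*  Note on the allocation size in create_image(): image_t presumably ends
    in a one-element elf_region_t array (declared in images.h, not shown
    here), so the allocation above grows the struct by (regionCount - 1)
    extra elements to turn regions[] into an inline variable-length array.
    delete_image_struct() below mirrors the same size computation when
    poisoning the memory in DEBUG builds.
*/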
void
delete_image_struct(image_t* image)
{
#ifdef DEBUG
    size_t size = sizeof(image_t)
        + (image->num_regions - 1) * sizeof(elf_region_t);
    memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
#endif
    free(image->needed);
    free(image->versions);

    while (RuntimeLoaderSymbolPatcher* patcher
            = image->defined_symbol_patchers) {
        image->defined_symbol_patchers = patcher->next;
        delete patcher;
    }
    while (RuntimeLoaderSymbolPatcher* patcher
            = image->undefined_symbol_patchers) {
        image->undefined_symbol_patchers = patcher->next;
        delete patcher;
    }

#ifdef DEBUG
    // overwrite images to make sure they aren't accidentally reused anywhere
    memset(image, 0xa5, size);
#endif
    free(image);
}


void
delete_image(image_t* image)
{
    if (image == NULL)
        return;

    _kern_unregister_image(image->id);
        // registered in load_container()

    delete_image_struct(image);
}


void
put_image(image_t* image)
{
    // If all references to the image are gone, add it to the disposable list
    // and remove all dependencies

    if (atomic_add(&image->ref_count, -1) == 1) {
        size_t i;

        dequeue_image(&sLoadedImages, image);
        enqueue_image(&sDisposableImages, image);
        sLoadedImageCount--;

        for (i = 0; i < image->num_needed; i++)
            put_image(image->needed[i]);
    }
}

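/*! Maps all regions of \a image into the current team's address space,
    deriving the area names from the base name of \a path. A relocatable
    image (dynamic_ptr set and \a fixed false) gets its first region
    placed at or above RLD_PROGRAM_BASE, and every further region at the
    exact offset the ELF headers dictate relative to the first one;
    otherwise the regions are mapped at their requested addresses.
    Anonymous (RFLAG_ANON) regions become plain areas, all others are
    mapped privately from \a fd; for writable segments the tail of the
    last page (the BSS part) is zeroed by hand.
*/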
status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
    // cut the file name from the path as base name for the created areas
    const char* baseName = strrchr(path, '/');
    if (baseName != NULL)
        baseName++;
    else
        baseName = path;

    for (uint32 i = 0; i < image->num_regions; i++) {
        char regionName[B_OS_NAME_LENGTH];
        addr_t loadAddress;
        uint32 addressSpecifier;

        // for BeOS compatibility: if we load an old BeOS executable, we
        // have to relocate it, if possible - we recognize it because the
        // vmstart is set to 0 (hopefully always)
        if (fixed && image->regions[i].vmstart == 0)
            fixed = false;

        snprintf(regionName, sizeof(regionName), "%s_seg%lu%s",
            baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

        if (image->dynamic_ptr && !fixed) {
            // relocatable image... we can afford to place it wherever
            if (i == 0) {
                // but only the first segment gets a free ride
                loadAddress = RLD_PROGRAM_BASE;
                addressSpecifier = B_BASE_ADDRESS;
            } else {
                loadAddress = image->regions[i].vmstart
                    + image->regions[i - 1].delta;
                addressSpecifier = B_EXACT_ADDRESS;
            }
        } else {
            // not relocatable, put it where it asks or die trying
            loadAddress = image->regions[i].vmstart;
            addressSpecifier = B_EXACT_ADDRESS;
        }

        if (image->regions[i].flags & RFLAG_ANON) {
            image->regions[i].id = _kern_create_area(regionName,
                (void**)&loadAddress, addressSpecifier,
                image->regions[i].vmsize, B_NO_LOCK,
                B_READ_AREA | B_WRITE_AREA);

            if (image->regions[i].id < 0)
                return image->regions[i].id;

            image->regions[i].delta = loadAddress - image->regions[i].vmstart;
            image->regions[i].vmstart = loadAddress;
        } else {
            image->regions[i].id = _kern_map_file(regionName,
                (void**)&loadAddress, addressSpecifier,
                image->regions[i].vmsize, B_READ_AREA | B_WRITE_AREA,
                REGION_PRIVATE_MAP, false, fd,
                PAGE_BASE(image->regions[i].fdstart));

            if (image->regions[i].id < 0)
                return image->regions[i].id;

            TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
                (void*)loadAddress, image->regions[i].vmsize,
                image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

            image->regions[i].delta = loadAddress - image->regions[i].vmstart;
            image->regions[i].vmstart = loadAddress;

            // handle trailer bits in data segment
            if (image->regions[i].flags & RFLAG_RW) {
                addr_t startClearing;
                addr_t toClear;

                startClearing = image->regions[i].vmstart
                    + PAGE_OFFSET(image->regions[i].start)
                    + image->regions[i].size;
                toClear = image->regions[i].vmsize
                    - PAGE_OFFSET(image->regions[i].start)
                    - image->regions[i].size;

                TRACE(("clearing 0x%lx and the following 0x%lx bytes\n",
                    startClearing, toClear));
                memset((void*)startClearing, 0, toClear);
            }
        }
    }

    if (image->dynamic_ptr)
        image->dynamic_ptr += image->regions[0].delta;

    return B_OK;
}

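//! Deletes the areas created by map_image() and invalidates the stored IDs.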
void
unmap_image(image_t* image)
{
    for (uint32 i = 0; i < image->num_regions; i++) {
        _kern_delete_area(image->regions[i].id);

        image->regions[i].id = -1;
    }
}


/*! This function changes the protection of all read-only segments to really
    be read-only (and executable).
    The areas have to be read/write first, so that they can be relocated.
*/
void
remap_images()
{
    for (image_t* image = sLoadedImages.head; image != NULL;
            image = image->next) {
        for (uint32 i = 0; i < image->num_regions; i++) {
            // we only need to do this once, so we remember the regions
            // we've already remapped
            if ((image->regions[i].flags & RFLAG_RW) == 0
                && (image->regions[i].flags & RFLAG_REMAPPED) == 0) {
                if (_kern_set_area_protection(image->regions[i].id,
                        B_READ_AREA | B_EXECUTE_AREA) == B_OK) {
                    image->regions[i].flags |= RFLAG_REMAPPED;
                }
            }
        }
    }
}


void
register_image(image_t* image, int fd, const char* path)
{
    struct stat stat;
    image_info info;

    // TODO: set these correctly
    info.id = 0;
    info.type = image->type;
    info.sequence = 0;
    info.init_order = 0;
    info.init_routine = (void (*)())image->init_routine;
    info.term_routine = (void (*)())image->term_routine;

    if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
        info.device = stat.st_dev;
        info.node = stat.st_ino;
    } else {
        info.device = -1;
        info.node = -1;
    }

    strlcpy(info.name, path, sizeof(info.name));
    info.text = (void*)image->regions[0].vmstart;
    info.text_size = image->regions[0].vmsize;
    info.data = (void*)image->regions[1].vmstart;
    info.data_size = image->regions[1].vmsize;
    info.api_version = image->api_version;
    info.abi = image->abi;
    image->id = _kern_register_image(&info, sizeof(image_info));
}


//! After fork, we lazily rebuild the image IDs of all loaded images.
status_t
update_image_ids()
{
    for (image_t* image = sLoadedImages.head; image; image = image->next) {
        status_t status = update_image_id(image);
        if (status != B_OK)
            return status;
    }
    for (image_t* image = sDisposableImages.head; image; image = image->next) {
        status_t status = update_image_id(image);
        if (status != B_OK)
            return status;
    }

    gInvalidImageIDs = false;
    return B_OK;
}


image_queue_t&
get_loaded_images()
{
    return sLoadedImages;
}


image_queue_t&
get_disposable_images()
{
    return sDisposableImages;
}


uint32
count_loaded_images()
{
    return sLoadedImageCount;
}


void
enqueue_loaded_image(image_t* image)
{
    enqueue_image(&sLoadedImages, image);
    sLoadedImageCount++;
}


void
dequeue_loaded_image(image_t* image)
{
    dequeue_image(&sLoadedImages, image);
    sLoadedImageCount--;
}


void
dequeue_disposable_image(image_t* image)
{
    dequeue_image(&sDisposableImages, image);
}


image_t*
find_loaded_image_by_name(char const* name, uint32 typeMask)
{
    bool isPath = strchr(name, '/') != NULL;
    return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
}


image_t*
find_loaded_image_by_id(image_id id, bool ignoreDisposable)
{
    if (gInvalidImageIDs) {
        // After fork, we lazily rebuild the image IDs of all loaded images
        update_image_ids();
    }

    for (image_t* image = sLoadedImages.head; image; image = image->next) {
        if (image->id == id)
            return image;
    }

    if (ignoreDisposable)
        return NULL;

    for (image_t* image = sDisposableImages.head; image; image = image->next) {
        if (image->id == id)
            return image;
    }

    return NULL;
}


void
set_image_flags_recursively(image_t* image, uint32 flags)
{
    update_image_flags_recursively(image, flags, 0);
}


void
clear_image_flags_recursively(image_t* image, uint32 flags)
{
    update_image_flags_recursively(image, 0, flags);
}

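/*! Returns the images reachable from \a image in initialization order,
    i.e. dependencies before the images that depend on them, as produced
    by topological_sort(). On success the number of listed images is
    returned and the caller owns (and must free()) *_list. \a sortFlag
    serves as the visited marker, so it must be a flag bit that is clear
    on all involved images beforehand; the caller is expected to reset it
    afterwards, e.g. via clear_image_flags_recursively().
*/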
ssize_t
get_sorted_image_list(image_t* image, image_t*** _list, uint32 sortFlag)
{
    image_t** list;

    list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t*));
    if (list == NULL) {
        FATAL("memory shortage in get_sorted_image_list()");
        *_list = NULL;
        return B_NO_MEMORY;
    }

    memset(list, 0, sLoadedImageCount * sizeof(image_t*));

    *_list = list;
    return topological_sort(image, 0, list, sortFlag);
}
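
/*  A sketch of the typical image lifecycle, for orientation only -- the
    real call sites live in the ELF loading code, and the exact order
    there may differ:

        image_t* image = create_image(name, path, regionCount);
        if (map_image(fd, path, image, fixed) == B_OK) {
            register_image(image, fd, path);
            enqueue_loaded_image(image);
        }

        // ...and on unload, once the reference count drops to zero:
        put_image(image);               // moves it to the disposable queue
        unmap_image(image);
        dequeue_disposable_image(image);
        delete_image(image);
*/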