xref: /haiku/src/system/runtime_loader/images.cpp (revision 0044a8c39ab5721051b6279506d1a8c511e20453)
/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2011, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Manuel J. Petit. All rights reserved.
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "images.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include <syscalls.h>
#include <vm_defs.h>

#include "add_ons.h"
#include "runtime_loader_private.h"


#define RLD_PROGRAM_BASE 0x00200000
	/* keep in sync with app ldscript */


bool gInvalidImageIDs;

static image_queue_t sLoadedImages = {0, 0};
static image_queue_t sDisposableImages = {0, 0};
static uint32 sLoadedImageCount = 0;


//! Remaps the image ID of \a image after fork.
static status_t
update_image_id(image_t* image)
{
	int32 cookie = 0;
	image_info info;
	while (_kern_get_next_image_info(B_CURRENT_TEAM, &cookie, &info,
			sizeof(image_info)) == B_OK) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if (image->regions[i].vmstart == (addr_t)info.text) {
				image->id = info.id;
				return B_OK;
			}
		}
	}

	FATAL("Could not update image ID %ld after fork()!\n", image->id);
	return B_ENTRY_NOT_FOUND;
}


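//! Appends \a image to the tail of \a queue.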
static void
enqueue_image(image_queue_t* queue, image_t* image)
{
	image->next = NULL;

	image->prev = queue->tail;
	if (queue->tail)
		queue->tail->next = image;

	queue->tail = image;
	if (!queue->head)
		queue->head = image;
}


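//! Removes \a image from \a queue.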
static void
dequeue_image(image_queue_t* queue, image_t* image)
{
	if (image->next)
		image->next->prev = image->prev;
	else
		queue->tail = image->prev;

	if (image->prev)
		image->prev->next = image->next;
	else
		queue->head = image->next;

	image->prev = NULL;
	image->next = NULL;
}


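/*!	Returns the first image in \a queue whose name (path, if \a isPath is
	true) matches \a name and whose type is contained in \a typeMask, or
	NULL if there is no such image.
*/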
static image_t*
find_image_in_queue(image_queue_t* queue, const char* name, bool isPath,
	uint32 typeMask)
{
	for (image_t* image = queue->head; image; image = image->next) {
		const char* imageName = isPath ? image->path : image->name;
		int length = isPath ? sizeof(image->path) : sizeof(image->name);

		if (!strncmp(imageName, name, length)
			&& (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
			return image;
		}
	}

	return NULL;
}


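/*!	Sets \a flagsToSet and clears \a flagsToClear on \a image and all of its
	transitive dependencies.
*/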
static void
update_image_flags_recursively(image_t* image, uint32 flagsToSet,
	uint32 flagsToClear)
{
	image_t* queue[sLoadedImageCount];
	uint32 count = 0;
	uint32 index = 0;
	queue[count++] = image;
	image->flags |= RFLAG_VISITED;

	while (index < count) {
		// pop next image
		image = queue[index++];

		// push dependencies
		for (uint32 i = 0; i < image->num_needed; i++) {
			image_t* needed = image->needed[i];
			if ((needed->flags & RFLAG_VISITED) == 0) {
				queue[count++] = needed;
				needed->flags |= RFLAG_VISITED;
			}
		}
	}

	// update flags
	for (uint32 i = 0; i < count; i++) {
		queue[i]->flags = (queue[i]->flags | flagsToSet)
			& ~(flagsToClear | RFLAG_VISITED);
	}
}


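/*!	Recursively adds \a image and its dependencies to \a initList in
	topological order (dependencies first), starting at index \a slot.
	Images that already have \a sortFlag set are skipped; the flag is set on
	every image that is added. Returns the next free slot in \a initList.
*/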
static uint32
topological_sort(image_t* image, uint32 slot, image_t** initList,
	uint32 sortFlag)
{
	if (image->flags & sortFlag)
		return slot;

	image->flags |= sortFlag; /* make sure we don't visit this one */
	for (uint32 i = 0; i < image->num_needed; i++)
		slot = topological_sort(image->needed[i], slot, initList, sortFlag);

	initList[slot] = image;
	return slot + 1;
}


/*!	Finds the load address and address specifier of the given image region.
*/
static void
get_image_region_load_address(image_t* image, uint32 index, int32 lastDelta,
	bool fixed, addr_t& loadAddress, uint32& addressSpecifier)
{
	if (image->dynamic_ptr != 0 && !fixed) {
		// relocatable image... we can afford to place wherever
		if (index == 0) {
			// but only the first segment gets a free ride
			loadAddress = RLD_PROGRAM_BASE;
			addressSpecifier = B_BASE_ADDRESS;
		} else {
			loadAddress = image->regions[index].vmstart + lastDelta;
			addressSpecifier = B_EXACT_ADDRESS;
		}
	} else {
		// not relocatable, put it where it asks or die trying
		loadAddress = image->regions[index].vmstart;
		addressSpecifier = B_EXACT_ADDRESS;
	}
}


// #pragma mark -


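/*!	Allocates and initializes an image_t with room for \a regionCount regions.
	Returns NULL when out of memory.
*/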
image_t*
create_image(const char* name, const char* path, int regionCount)
{
	size_t allocSize = sizeof(image_t)
		+ (regionCount - 1) * sizeof(elf_region_t);

	image_t* image = (image_t*)malloc(allocSize);
	if (image == NULL) {
		FATAL("no memory for image %s\n", path);
		return NULL;
	}

	memset(image, 0, allocSize);

	strlcpy(image->path, path, sizeof(image->path));

	// Make the last component of the supplied name the image name.
	// If present, DT_SONAME will replace this name.
	const char* lastSlash = strrchr(name, '/');
	if (lastSlash != NULL)
		strlcpy(image->name, lastSlash + 1, sizeof(image->name));
	else
		strlcpy(image->name, name, sizeof(image->name));

	image->ref_count = 1;
	image->num_regions = regionCount;

	return image;
}


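/*!	Frees \a image and the resources it owns (needed list, version info,
	symbol patchers).
*/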
void
delete_image_struct(image_t* image)
{
#ifdef DEBUG
	size_t size = sizeof(image_t)
		+ (image->num_regions - 1) * sizeof(elf_region_t);
	memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
#endif
	free(image->needed);
	free(image->versions);

	while (RuntimeLoaderSymbolPatcher* patcher
			= image->defined_symbol_patchers) {
		image->defined_symbol_patchers = patcher->next;
		delete patcher;
	}
	while (RuntimeLoaderSymbolPatcher* patcher
			= image->undefined_symbol_patchers) {
		image->undefined_symbol_patchers = patcher->next;
		delete patcher;
	}

#ifdef DEBUG
	// overwrite images to make sure they aren't accidentally reused anywhere
	memset(image, 0xa5, size);
#endif
	free(image);
}


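//! Unregisters \a image from the kernel and frees its structures.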
void
delete_image(image_t* image)
{
	if (image == NULL)
		return;

	_kern_unregister_image(image->id);
		// registered in load_container()

	delete_image_struct(image);
}


void
put_image(image_t* image)
{
	// If all references to the image are gone, add it to the disposable list
	// and remove all dependencies

	if (atomic_add(&image->ref_count, -1) == 1) {
		size_t i;

		dequeue_image(&sLoadedImages, image);
		enqueue_image(&sDisposableImages, image);
		sLoadedImageCount--;

		for (i = 0; i < image->num_needed; i++)
			put_image(image->needed[i]);
	}
}


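/*!	Reserves an address range for \a image and maps all of its regions into
	it, either at the addresses the image asks for (\a fixed) or relocated to
	a dynamically chosen base. On success the regions' \c delta and
	\c vmstart are updated to the final load addresses.
*/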
status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0,
			fixed, loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + 8 * 1024)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%lu%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, fixed, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the start
		// of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments read/write first -- write access might be
			// needed for relocations. Once we're done with those, we change
			// the protection of read-only segments back to read-only. We map
			// those segments over-committing, since quite likely only a
			// relatively small number of pages needs to be touched and we
			// want to avoid a lot of memory being committed for them
			// temporarily, just because we have to map them writable.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP, false,
				fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}


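//! Deletes the areas of all regions of \a image.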
void
unmap_image(image_t* image)
{
	for (uint32 i = 0; i < image->num_regions; i++) {
		_kern_delete_area(image->regions[i].id);

		image->regions[i].id = -1;
	}
}


/*!	Changes the protection of all read-only segments to actually be read-only
	(and executable).
	The areas have to be mapped read/write first, so that relocations can be
	applied to them.
*/
void
remap_images()
{
	for (image_t* image = sLoadedImages.head; image != NULL;
			image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if ((image->regions[i].flags & RFLAG_RW) == 0
				&& (image->regions[i].flags & RFLAG_REMAPPED) == 0) {
				// We only need to do this once, so we remember the regions
				// we've already remapped.
				if (_kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_EXECUTE_AREA) == B_OK) {
					image->regions[i].flags |= RFLAG_REMAPPED;
				}
			}
		}
	}
}


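/*!	Fills in an image_info for \a image and registers it with the kernel.
	The resulting image ID is stored in \a image->id.
*/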
void
register_image(image_t* image, int fd, const char* path)
{
	struct stat stat;
	image_info info;

	// TODO: set these correctly
	info.id = 0;
	info.type = image->type;
	info.sequence = 0;
	info.init_order = 0;
	info.init_routine = (void (*)())image->init_routine;
	info.term_routine = (void (*)())image->term_routine;

	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
		info.device = stat.st_dev;
		info.node = stat.st_ino;
	} else {
		info.device = -1;
		info.node = -1;
	}

	// We may have split segments into separate regions. Compute the correct
	// segments for the image info.
	addr_t textBase = 0;
	addr_t textEnd = 0;
	addr_t dataBase = 0;
	addr_t dataEnd = 0;
	for (uint32 i = 0; i < image->num_regions; i++) {
		addr_t base = image->regions[i].vmstart;
		addr_t end = base + image->regions[i].vmsize;
		if (image->regions[i].flags & RFLAG_RW) {
			// data
			if (dataBase == 0) {
				dataBase = base;
				dataEnd = end;
			} else {
				dataBase = std::min(dataBase, base);
				dataEnd = std::max(dataEnd, end);
			}
		} else {
			// text
			if (textBase == 0) {
				textBase = base;
				textEnd = end;
			} else {
				textBase = std::min(textBase, base);
				textEnd = std::max(textEnd, end);
			}
		}
	}

	strlcpy(info.name, path, sizeof(info.name));
	info.text = (void*)textBase;
	info.text_size = textEnd - textBase;
	info.data = (void*)dataBase;
	info.data_size = dataEnd - dataBase;
	info.api_version = image->api_version;
	info.abi = image->abi;
	image->id = _kern_register_image(&info, sizeof(image_info));
}


//! After fork, we lazily rebuild the image IDs of all loaded images.
status_t
update_image_ids()
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}
	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}

	gInvalidImageIDs = false;
	return B_OK;
}


image_queue_t&
get_loaded_images()
{
	return sLoadedImages;
}


image_queue_t&
get_disposable_images()
{
	return sDisposableImages;
}


uint32
count_loaded_images()
{
	return sLoadedImageCount;
}


void
enqueue_loaded_image(image_t* image)
{
	enqueue_image(&sLoadedImages, image);
	sLoadedImageCount++;
}


void
dequeue_loaded_image(image_t* image)
{
	dequeue_image(&sLoadedImages, image);
	sLoadedImageCount--;
}


void
dequeue_disposable_image(image_t* image)
{
	dequeue_image(&sDisposableImages, image);
}


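/*!	Returns the loaded image matching \a name and \a typeMask, or NULL if
	there is none. If \a name contains a slash, it is matched against the
	images' paths, otherwise against their (base) names.
*/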
image_t*
find_loaded_image_by_name(char const* name, uint32 typeMask)
{
	bool isPath = strchr(name, '/') != NULL;
	return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
}


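/*!	Returns the image with ID \a id, or NULL if it could not be found. Unless
	\a ignoreDisposable is true, disposable images are searched as well.
*/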
image_t*
find_loaded_image_by_id(image_id id, bool ignoreDisposable)
{
	if (gInvalidImageIDs) {
		// After fork, we lazily rebuild the image IDs of all loaded images
		update_image_ids();
	}

	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	if (ignoreDisposable)
		return NULL;

	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	return NULL;
}


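//! Returns the loaded image containing \a address, or NULL if there is none.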
image_t*
find_loaded_image_by_address(addr_t address)
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			elf_region_t& region = image->regions[i];
			if (region.vmstart <= address
				&& region.vmstart - 1 + region.vmsize >= address)
				return image;
		}
	}

	return NULL;
}


void
set_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, flags, 0);
}


void
clear_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, 0, flags);
}


/*!	Returns a topologically sorted image list.

	If \a image is non-NULL, an array containing the image and all its
	transitive dependencies is returned. If \a image is NULL, all loaded images
	are returned. In either case dependencies are listed before images
	depending on them.

	\param image The image specifying the tree of images that shall be sorted.
		If NULL, all loaded images are sorted.
	\param _list On success it will be set to an array of the sorted images.
		The caller is responsible for free()ing it.
	\param sortFlag The image flag that shall be used for sorting. Images that
		already have this flag set are ignored (and their dependencies, unless
		they are also reachable via another path). The flag will be set on all
		returned images.
	\return The number of images in the returned array or an error code on
		failure.
*/
ssize_t
get_sorted_image_list(image_t* image, image_t*** _list, uint32 sortFlag)
{
	image_t** list;

	list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t*));
	if (list == NULL) {
		FATAL("memory shortage in get_sorted_image_list()");
		*_list = NULL;
		return B_NO_MEMORY;
	}

	memset(list, 0, sLoadedImageCount * sizeof(image_t*));

	*_list = list;

	if (image != NULL)
		return topological_sort(image, 0, list, sortFlag);

	// no image given -- sort all loaded images
	uint32 count = 0;
	image = sLoadedImages.head;
	while (image != NULL) {
		count = topological_sort(image, count, list, sortFlag);
		image = image->next;
	}

	return count;
}
677