xref: /haiku/src/system/runtime_loader/images.cpp (revision d0ac609964842f8cdb6d54b3c539c6c15293e172)
/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2011, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Manuel J. Petit. All rights reserved.
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "images.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include <syscalls.h>
#include <vm_defs.h>

#include "add_ons.h"
#include "elf_tls.h"
#include "runtime_loader_private.h"

#include <util/kernel_cpp.h>


// keep in sync with app ldscript
#ifdef __x86_64__
	// runtime_loader potentially occupies 0x200000 - 0x600000 due to large
	// page segment alignment.
#	define RLD_PROGRAM_BASE	0x600000
#	define MAX_PAGE_SIZE	0x200000
#else
#	define RLD_PROGRAM_BASE	0x200000
#	define MAX_PAGE_SIZE	B_PAGE_SIZE
#endif


bool gInvalidImageIDs;

static image_queue_t sLoadedImages = {0, 0};
static image_queue_t sDisposableImages = {0, 0};
static uint32 sLoadedImageCount = 0;


//! Remaps the image ID of \a image after fork.
static status_t
update_image_id(image_t* image)
{
	int32 cookie = 0;
	image_info info;
	while (_kern_get_next_image_info(B_CURRENT_TEAM, &cookie, &info,
			sizeof(image_info)) == B_OK) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if (image->regions[i].vmstart == (addr_t)info.text) {
				image->id = info.id;
				return B_OK;
			}
		}
	}

	FATAL("Could not update image ID %" B_PRId32 " after fork()!\n", image->id);
	return B_ENTRY_NOT_FOUND;
}

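//! Appends \a image to the tail of the doubly linked \a queue.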
static void
enqueue_image(image_queue_t* queue, image_t* image)
{
	image->next = NULL;

	image->prev = queue->tail;
	if (queue->tail)
		queue->tail->next = image;

	queue->tail = image;
	if (!queue->head)
		queue->head = image;
}

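//! Removes \a image from \a queue and clears its link pointers.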
static void
dequeue_image(image_queue_t* queue, image_t* image)
{
	if (image->next)
		image->next->prev = image->prev;
	else
		queue->tail = image->prev;

	if (image->prev)
		image->prev->next = image->next;
	else
		queue->head = image->next;

	image->prev = NULL;
	image->next = NULL;
}

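/*!	Returns the first image in \a queue whose name (or path, if \a isPath is
	\c true) matches \a name and whose type is covered by \a typeMask, or
	\c NULL if there is no match.
*/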
static image_t*
find_image_in_queue(image_queue_t* queue, const char* name, bool isPath,
	uint32 typeMask)
{
	for (image_t* image = queue->head; image; image = image->next) {
		const char* imageName = isPath ? image->path : image->name;
		int length = isPath ? sizeof(image->path) : sizeof(image->name);

		if (!strncmp(imageName, name, length)
			&& (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
			return image;
		}
	}

	return NULL;
}

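/*!	Sets \a flagsToSet and clears \a flagsToClear on \a image and all of its
	transitive dependencies. Traverses the dependency graph breadth-first,
	using RFLAG_VISITED to guard against cycles; that flag is cleared again
	before returning.
*/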
static void
update_image_flags_recursively(image_t* image, uint32 flagsToSet,
	uint32 flagsToClear)
{
	image_t* queue[sLoadedImageCount];
	uint32 count = 0;
	uint32 index = 0;
	queue[count++] = image;
	image->flags |= RFLAG_VISITED;

	while (index < count) {
		// pop next image
		image = queue[index++];

		// push dependencies
		for (uint32 i = 0; i < image->num_needed; i++) {
			image_t* needed = image->needed[i];
			if ((needed->flags & RFLAG_VISITED) == 0) {
				queue[count++] = needed;
				needed->flags |= RFLAG_VISITED;
			}
		}
	}

	// update flags
	for (uint32 i = 0; i < count; i++) {
		queue[i]->flags = (queue[i]->flags | flagsToSet)
			& ~(flagsToClear | RFLAG_VISITED);
	}
}

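/*!	Recursively stores \a image and its transitive dependencies in
	\a initList, dependencies first. \a sortFlag marks images that have
	already been visited. Returns the next free slot in \a initList.
*/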
static uint32
topological_sort(image_t* image, uint32 slot, image_t** initList,
	uint32 sortFlag)
{
	if (image->flags & sortFlag)
		return slot;

	image->flags |= sortFlag; /* make sure we don't visit this one */
	for (uint32 i = 0; i < image->num_needed; i++)
		slot = topological_sort(image->needed[i], slot, initList, sortFlag);

	initList[slot] = image;
	return slot + 1;
}


/*!	Finds the load address and address specifier of the given image region.
*/
static void
get_image_region_load_address(image_t* image, uint32 index, long lastDelta,
	bool fixed, addr_t& loadAddress, uint32& addressSpecifier)
{
	if (!fixed) {
		// relocatable image... we can afford to place wherever
		if (index == 0) {
			// but only the first segment gets a free ride
			loadAddress = RLD_PROGRAM_BASE;
			addressSpecifier = B_RANDOMIZED_BASE_ADDRESS;
		} else {
			loadAddress = image->regions[index].vmstart + lastDelta;
			addressSpecifier = B_EXACT_ADDRESS;
		}
	} else {
		// not relocatable, put it where it asks or die trying
		loadAddress = image->regions[index].vmstart;
		addressSpecifier = B_EXACT_ADDRESS;
	}
}


// #pragma mark -

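/*!	Allocates and initializes an image_t with room for \a regionCount
	regions, deriving the image name from the last path component of
	\a name. The returned image starts with a reference count of 1.
	Returns \c NULL on allocation failure.
*/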
image_t*
create_image(const char* name, const char* path, int regionCount)
{
	size_t allocSize = sizeof(image_t)
		+ (regionCount - 1) * sizeof(elf_region_t);

	image_t* image = (image_t*)malloc(allocSize);
	if (image == NULL) {
		FATAL("no memory for image %s\n", path);
		return NULL;
	}

	memset(image, 0, allocSize);

	strlcpy(image->path, path, sizeof(image->path));

	// Make the last component of the supplied name the image name.
	// If present, DT_SONAME will replace this name.
	const char* lastSlash = strrchr(name, '/');
	if (lastSlash != NULL)
		strlcpy(image->name, lastSlash + 1, sizeof(image->name));
	else
		strlcpy(image->name, name, sizeof(image->name));

	image->ref_count = 1;
	image->num_regions = regionCount;

	return image;
}

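/*!	Frees \a image together with the resources it owns (needed list,
	versions, and symbol patchers). In DEBUG builds the memory is
	overwritten with a marker pattern first.
*/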
void
delete_image_struct(image_t* image)
{
#ifdef DEBUG
	size_t size = sizeof(image_t)
		+ (image->num_regions - 1) * sizeof(elf_region_t);
	memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
#endif
	free(image->needed);
	free(image->versions);

	while (RuntimeLoaderSymbolPatcher* patcher
			= image->defined_symbol_patchers) {
		image->defined_symbol_patchers = patcher->next;
		delete patcher;
	}
	while (RuntimeLoaderSymbolPatcher* patcher
			= image->undefined_symbol_patchers) {
		image->undefined_symbol_patchers = patcher->next;
		delete patcher;
	}

#ifdef DEBUG
	// overwrite the image to make sure it isn't accidentally reused anywhere
	memset(image, 0xa5, size);
#endif
	free(image);
}

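//! Unregisters \a image from the kernel and frees its structures.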
void
delete_image(image_t* image)
{
	if (image == NULL)
		return;

	_kern_unregister_image(image->id);
		// registered in load_container()

	delete_image_struct(image);
}

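/*!	Atomically releases one reference to \a image; once the last reference
	is gone, references to its dependencies are released recursively as
	well.
*/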
void
put_image(image_t* image)
{
	// If all references to the image are gone, add it to the disposable list
	// and remove all dependencies

	if (atomic_add(&image->ref_count, -1) == 1) {
		size_t i;

		dequeue_image(&sLoadedImages, image);
		enqueue_image(&sDisposableImages, image);
		sLoadedImageCount--;

		for (i = 0; i < image->num_needed; i++)
			put_image(image->needed[i]);
	}
}

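/*!	Maps all regions of \a image into the address space, loading their
	contents from \a fd. All regions are placed in a single reserved address
	range so that their relative positions match the ELF file; if \a fixed
	is \c true, the image is mapped at the addresses it requests. On success
	the regions' \c delta, \c vmstart, and \c id fields as well as the
	image's \c dynamic_ptr are updated to the actual load addresses.
*/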
status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_RANDOMIZED_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// For BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible. We recognize such an executable
		// by its vmstart being set to 0 (hopefully always).
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0,
			fixed, loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + MAX_PAGE_SIZE * 2)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%" B_PRIu32 "%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, fixed, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the start
		// of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments read/write first -- write access might be
			// needed for relocations. When we're done with those, we change
			// the protection of read-only segments back to read-only. We map
			// those segments over-committing, since quite likely only a
			// relatively small number of pages needs to be touched and we
			// want to avoid temporarily committing a lot of memory for them
			// just because we have to map them writable.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP, false,
				fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
		if (i == 0) {
			TLSBlockTemplates::Get().SetBaseAddress(image->dso_tls_id,
				loadAddress);
		}
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}

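//! Deletes all areas of \a image and invalidates their region IDs.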
void
unmap_image(image_t* image)
{
	for (uint32 i = 0; i < image->num_regions; i++) {
		_kern_delete_area(image->regions[i].id);

		image->regions[i].id = -1;
	}
}


/*!	Changes the protection of all read-only segments to actually be
	read-only (and executable). The areas have to be mapped read/write
	first, so that relocations can be applied. For images in compatibility
	mode (an ABI older than B_HAIKU_ABI_GCC_2_HAIKU), writable segments
	additionally remain executable.
*/
void
remap_images()
{
	for (image_t* image = sLoadedImages.head; image != NULL;
			image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			// we only need to do this once, so we remember those we've already
			// remapped
			if ((image->regions[i].flags & RFLAG_REMAPPED) != 0)
				continue;

			status_t result = B_OK;
			if ((image->regions[i].flags & RFLAG_RW) == 0) {
				result = _kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_EXECUTE_AREA);
			} else if (image->abi < B_HAIKU_ABI_GCC_2_HAIKU) {
				result = _kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA);
			}

			if (result == B_OK)
				image->regions[i].flags |= RFLAG_REMAPPED;
		}
	}
}

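/*!	Builds an image_info for \a image -- computing the text and data ranges
	from its regions and reading device and node from \a fd -- and registers
	it with the kernel. The resulting ID is stored in \a image.
*/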
void
register_image(image_t* image, int fd, const char* path)
{
	struct stat stat;
	image_info info;

	// TODO: set these correctly
	info.id = 0;
	info.type = image->type;
	info.sequence = 0;
	info.init_order = 0;
	info.init_routine = (void (*)())image->init_routine;
	info.term_routine = (void (*)())image->term_routine;

	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
		info.device = stat.st_dev;
		info.node = stat.st_ino;
	} else {
		info.device = -1;
		info.node = -1;
	}

	// We may have split segments into separate regions. Compute the correct
	// segments for the image info.
	addr_t textBase = 0;
	addr_t textEnd = 0;
	addr_t dataBase = 0;
	addr_t dataEnd = 0;
	for (uint32 i = 0; i < image->num_regions; i++) {
		addr_t base = image->regions[i].vmstart;
		addr_t end = base + image->regions[i].vmsize;
		if (image->regions[i].flags & RFLAG_RW) {
			// data
			if (dataBase == 0) {
				dataBase = base;
				dataEnd = end;
			} else {
				dataBase = std::min(dataBase, base);
				dataEnd = std::max(dataEnd, end);
			}
		} else {
			// text
			if (textBase == 0) {
				textBase = base;
				textEnd = end;
			} else {
				textBase = std::min(textBase, base);
				textEnd = std::max(textEnd, end);
			}
		}
	}

	strlcpy(info.name, path, sizeof(info.name));
	info.text = (void*)textBase;
	info.text_size = textEnd - textBase;
	info.data = (void*)dataBase;
	info.data_size = dataEnd - dataBase;
	info.api_version = image->api_version;
	info.abi = image->abi;
	image->id = _kern_register_image(&info, sizeof(image_info));
}


//! After fork, we lazily rebuild the image IDs of all loaded images.
status_t
update_image_ids()
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}
	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}

	gInvalidImageIDs = false;
	return B_OK;
}


image_queue_t&
get_loaded_images()
{
	return sLoadedImages;
}


image_queue_t&
get_disposable_images()
{
	return sDisposableImages;
}


uint32
count_loaded_images()
{
	return sLoadedImageCount;
}


void
enqueue_loaded_image(image_t* image)
{
	enqueue_image(&sLoadedImages, image);
	sLoadedImageCount++;
}


void
dequeue_loaded_image(image_t* image)
{
	dequeue_image(&sLoadedImages, image);
	sLoadedImageCount--;
}


void
dequeue_disposable_image(image_t* image)
{
	dequeue_image(&sDisposableImages, image);
}

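/*!	Returns the loaded image matching \a name and \a typeMask, or \c NULL.
	If \a name contains a slash it is matched against the images' paths,
	otherwise against their names.
*/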
image_t*
find_loaded_image_by_name(char const* name, uint32 typeMask)
{
	bool isPath = strchr(name, '/') != NULL;
	return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
}


image_t*
find_loaded_image_by_id(image_id id, bool ignoreDisposable)
{
	if (gInvalidImageIDs) {
		// After fork, we lazily rebuild the image IDs of all loaded images
		update_image_ids();
	}

	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	if (ignoreDisposable)
		return NULL;

	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	return NULL;
}

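/*!	Returns the loaded image that contains \a address in one of its regions,
	or \c NULL if no loaded image covers that address.
*/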
image_t*
find_loaded_image_by_address(addr_t address)
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			elf_region_t& region = image->regions[i];
			if (region.vmstart <= address
				&& region.vmstart - 1 + region.vmsize >= address)
				return image;
		}
	}

	return NULL;
}


void
set_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, flags, 0);
}


void
clear_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, 0, flags);
}

/*!	Returns a topologically sorted image list.

	If \a image is non-NULL, an array containing the image and all its
	transitive dependencies is returned. If \a image is NULL, all loaded images
	are returned. In either case dependencies are listed before images
	depending on them.

	\param image The image specifying the tree of images that shall be sorted.
		If NULL, all loaded images are sorted.
	\param _list On success it will be set to an array of the sorted images.
		The caller is responsible for free()ing it.
	\param sortFlag The image flag that shall be used for sorting. Images that
		already have this flag set are ignored (and their dependencies, unless
		they are also reachable via another path). The flag will be set on all
		returned images.
	\return The number of images in the returned array or an error code on
		failure.
*/
ssize_t
get_sorted_image_list(image_t* image, image_t*** _list, uint32 sortFlag)
{
	image_t** list;

	list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t*));
	if (list == NULL) {
		FATAL("memory shortage in get_sorted_image_list()");
		*_list = NULL;
		return B_NO_MEMORY;
	}

	memset(list, 0, sLoadedImageCount * sizeof(image_t*));

	*_list = list;

	if (image != NULL)
		return topological_sort(image, 0, list, sortFlag);

	// no image given -- sort all loaded images
	uint32 count = 0;
	image = sLoadedImages.head;
	while (image != NULL) {
		count = topological_sort(image, count, list, sortFlag);
		image = image->next;
	}

	return count;
}