xref: /haiku/src/system/runtime_loader/images.cpp (revision 220d04022750f40f8bac8f01fa551211e28d04f2)
/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2011, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Manuel J. Petit. All rights reserved.
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "images.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include <syscalls.h>
#include <vm_defs.h>

#include "add_ons.h"
#include "elf_tls.h"
#include "runtime_loader_private.h"


// keep in sync with app ldscript
#ifdef __x86_64__
	// runtime_loader potentially occupies 0x200000 - 0x600000 due to large
	// page segment alignment.
#	define RLD_PROGRAM_BASE	0x600000
#	define MAX_PAGE_SIZE	0x200000
#else
#	define RLD_PROGRAM_BASE	0x200000
#	define MAX_PAGE_SIZE	B_PAGE_SIZE
#endif


bool gInvalidImageIDs;

static image_queue_t sLoadedImages = {0, 0};
static image_queue_t sDisposableImages = {0, 0};
static uint32 sLoadedImageCount = 0;


//! Remaps the image ID of \a image after fork.
static status_t
update_image_id(image_t* image)
{
	int32 cookie = 0;
	image_info info;
	while (_kern_get_next_image_info(B_CURRENT_TEAM, &cookie, &info,
			sizeof(image_info)) == B_OK) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if (image->regions[i].vmstart == (addr_t)info.text) {
				image->id = info.id;
				return B_OK;
			}
		}
	}

	FATAL("Could not update image ID %" B_PRId32 " after fork()!\n", image->id);
	return B_ENTRY_NOT_FOUND;
}

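//! Appends \a image to the tail of \a queue.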
static void
enqueue_image(image_queue_t* queue, image_t* image)
{
	image->next = NULL;

	image->prev = queue->tail;
	if (queue->tail)
		queue->tail->next = image;

	queue->tail = image;
	if (!queue->head)
		queue->head = image;
}

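//! Unlinks \a image from \a queue and clears its link pointers.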
static void
dequeue_image(image_queue_t* queue, image_t* image)
{
	if (image->next)
		image->next->prev = image->prev;
	else
		queue->tail = image->prev;

	if (image->prev)
		image->prev->next = image->next;
	else
		queue->head = image->next;

	image->prev = NULL;
	image->next = NULL;
}


static image_t*
find_image_in_queue(image_queue_t* queue, const char* name, bool isPath,
	uint32 typeMask)
{
	for (image_t* image = queue->head; image; image = image->next) {
		const char* imageName = isPath ? image->path : image->name;
		int length = isPath ? sizeof(image->path) : sizeof(image->name);

		if (!strncmp(imageName, name, length)
			&& (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
			return image;
		}
	}

	return NULL;
}


static void
update_image_flags_recursively(image_t* image, uint32 flagsToSet,
	uint32 flagsToClear)
{
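	// Breadth-first traversal of the dependency graph. Every loaded image can
	// appear in the queue at most once (RFLAG_VISITED guards against
	// duplicates), so sLoadedImageCount is a safe upper bound for the queue
	// size.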
	image_t* queue[sLoadedImageCount];
	uint32 count = 0;
	uint32 index = 0;
	queue[count++] = image;
	image->flags |= RFLAG_VISITED;

	while (index < count) {
		// pop next image
		image = queue[index++];

		// push dependencies
		for (uint32 i = 0; i < image->num_needed; i++) {
			image_t* needed = image->needed[i];
			if ((needed->flags & RFLAG_VISITED) == 0) {
				queue[count++] = needed;
				needed->flags |= RFLAG_VISITED;
			}
		}
	}

	// update flags
	for (uint32 i = 0; i < count; i++) {
		queue[i]->flags = (queue[i]->flags | flagsToSet)
			& ~(flagsToClear | RFLAG_VISITED);
	}
}


static uint32
topological_sort(image_t* image, uint32 slot, image_t** initList,
	uint32 sortFlag)
{
	if (image->flags & sortFlag)
		return slot;

	image->flags |= sortFlag; /* make sure we don't visit this one */
	for (uint32 i = 0; i < image->num_needed; i++)
		slot = topological_sort(image->needed[i], slot, initList, sortFlag);

	initList[slot] = image;
	return slot + 1;
}
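
/*	Illustration: if an app needs libbe.so and libroot.so, and libbe.so in
	turn needs libroot.so, the post-order recursion above emits libroot.so
	into slot 0, libbe.so into slot 1 and the app into slot 2 -- every image
	ends up behind all of its dependencies, and the sortFlag keeps the shared
	libroot.so from being emitted twice.
*/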


/*!	Finds the load address and address specifier of the given image region.
*/
static void
get_image_region_load_address(image_t* image, uint32 index, long lastDelta,
	addr_t& loadAddress, uint32& addressSpecifier)
{
	if (image->dynamic_ptr != 0) {
		// relocatable image... we can afford to place wherever
		if (index == 0) {
			// but only the first segment gets a free ride
			loadAddress = RLD_PROGRAM_BASE;
			addressSpecifier = B_RANDOMIZED_BASE_ADDRESS;
		} else {
			loadAddress = image->regions[index].vmstart + lastDelta;
			addressSpecifier = B_EXACT_ADDRESS;
		}
	} else {
		// not relocatable, put it where it asks or die trying
		loadAddress = image->regions[index].vmstart;
		addressSpecifier = B_EXACT_ADDRESS;
	}
}


// #pragma mark -


image_t*
create_image(const char* name, const char* path, int regionCount)
{
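	// image_t already embeds one elf_region_t at its end, so only space for
	// the (regionCount - 1) additional regions has to be allocated.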
	size_t allocSize = sizeof(image_t)
		+ (regionCount - 1) * sizeof(elf_region_t);

	image_t* image = (image_t*)malloc(allocSize);
	if (image == NULL) {
		FATAL("no memory for image %s\n", path);
		return NULL;
	}

	memset(image, 0, allocSize);

	strlcpy(image->path, path, sizeof(image->path));

	// Make the last component of the supplied name the image name.
	// If present, DT_SONAME will replace this name.
	const char* lastSlash = strrchr(name, '/');
	if (lastSlash != NULL)
		strlcpy(image->name, lastSlash + 1, sizeof(image->name));
	else
		strlcpy(image->name, name, sizeof(image->name));

	image->ref_count = 1;
	image->num_regions = regionCount;

	return image;
}


void
delete_image_struct(image_t* image)
{
#ifdef DEBUG
	size_t size = sizeof(image_t)
		+ (image->num_regions - 1) * sizeof(elf_region_t);
	memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
#endif
	free(image->needed);
	free(image->versions);

	while (RuntimeLoaderSymbolPatcher* patcher
			= image->defined_symbol_patchers) {
		image->defined_symbol_patchers = patcher->next;
		delete patcher;
	}
	while (RuntimeLoaderSymbolPatcher* patcher
			= image->undefined_symbol_patchers) {
		image->undefined_symbol_patchers = patcher->next;
		delete patcher;
	}

#ifdef DEBUG
	// overwrite images to make sure they aren't accidentally reused anywhere
	memset(image, 0xa5, size);
#endif
	free(image);
}


void
delete_image(image_t* image)
{
	if (image == NULL)
		return;

	_kern_unregister_image(image->id);
		// registered in load_container()

	delete_image_struct(image);
}


void
put_image(image_t* image)
{
	// If all references to the image are gone, add it to the disposable list
	// and remove all dependencies

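	// atomic_add() returns the previous value, so 1 means this was the last
	// reference.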
	if (atomic_add(&image->ref_count, -1) == 1) {
		size_t i;

		dequeue_image(&sLoadedImages, image);
		enqueue_image(&sDisposableImages, image);
		sLoadedImageCount--;

		for (i = 0; i < image->num_needed; i++)
			put_image(image->needed[i]);
	}
}


status_t
map_image(int fd, char const* path, image_t* image)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_RANDOMIZED_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0,
			loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
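	// Illustrative numbers: with B_PAGE_SIZE == MAX_PAGE_SIZE == 0x1000, two
	// one-page segments at 0x600000 and 0x800000 yield length == 0x2000 but
	// reservedSize == 0x201000, so the image is rejected instead of tying up
	// a reservation that is mostly unused gap.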
	if (reservedSize > length + MAX_PAGE_SIZE * 2)
		return B_BAD_DATA;

	// reserve that space and allocate the areas within it
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%" B_PRIu32 "%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the start
		// of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments r/w first -- write access might be needed for
			// relocations. Once we're done with those, we change the
			// protection of read-only segments back to read-only. We map the
			// read-only segments over-committing, since quite likely only a
			// relatively small number of pages needs to be touched, and we
			// want to avoid temporarily committing a lot of memory for them
			// just because we have to map them writable.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP, false,
				fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
		if (i == 0) {
			TLSBlockTemplates::Get().SetBaseAddress(image->dso_tls_id,
				loadAddress);
		}
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}


void
unmap_image(image_t* image)
{
	for (uint32 i = 0; i < image->num_regions; i++) {
		_kern_delete_area(image->regions[i].id);

		image->regions[i].id = -1;
	}
}

/*!	Changes the protection of all read-only segments to really be read-only
	(and executable). The areas have to be mapped read/write first, so that
	relocations can be applied. Data areas of images built in compatibility
	mode (an ABI older than B_HAIKU_ABI_GCC_2_HAIKU) additionally remain
	executable.
*/
void
remap_images()
{
	for (image_t* image = sLoadedImages.head; image != NULL;
			image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			// we only need to do this once, so we remember those we've already
			// mapped
			if ((image->regions[i].flags & RFLAG_REMAPPED) != 0)
				continue;

			status_t result = B_OK;
			if ((image->regions[i].flags & RFLAG_RW) == 0) {
				result = _kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_EXECUTE_AREA);
			} else if (image->abi < B_HAIKU_ABI_GCC_2_HAIKU) {
				result = _kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA);
			}

			if (result == B_OK)
				image->regions[i].flags |= RFLAG_REMAPPED;
		}
	}
}


void
register_image(image_t* image, int fd, const char* path)
{
	struct stat stat;
	image_info info;

	// TODO: set these correctly
	info.id = 0;
	info.type = image->type;
	info.sequence = 0;
	info.init_order = 0;
	info.init_routine = (void (*)())image->init_routine;
	info.term_routine = (void (*)())image->term_routine;

	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
		info.device = stat.st_dev;
		info.node = stat.st_ino;
	} else {
		info.device = -1;
		info.node = -1;
	}

	// We may have split segments into separate regions. Compute the correct
	// segments for the image info.
	addr_t textBase = 0;
	addr_t textEnd = 0;
	addr_t dataBase = 0;
	addr_t dataEnd = 0;
	for (uint32 i = 0; i < image->num_regions; i++) {
		addr_t base = image->regions[i].vmstart;
		addr_t end = base + image->regions[i].vmsize;
		if (image->regions[i].flags & RFLAG_RW) {
			// data
			if (dataBase == 0) {
				dataBase = base;
				dataEnd = end;
			} else {
				dataBase = std::min(dataBase, base);
				dataEnd = std::max(dataEnd, end);
			}
		} else {
			// text
			if (textBase == 0) {
				textBase = base;
				textEnd = end;
			} else {
				textBase = std::min(textBase, base);
				textEnd = std::max(textEnd, end);
			}
		}
	}

	strlcpy(info.name, path, sizeof(info.name));
	info.text = (void*)textBase;
	info.text_size = textEnd - textBase;
	info.data = (void*)dataBase;
	info.data_size = dataEnd - dataBase;
	info.api_version = image->api_version;
	info.abi = image->abi;
	image->id = _kern_register_image(&info, sizeof(image_info));
}


//! After fork, we lazily rebuild the image IDs of all loaded images.
status_t
update_image_ids()
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}
	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}

	gInvalidImageIDs = false;
	return B_OK;
}


image_queue_t&
get_loaded_images()
{
	return sLoadedImages;
}


image_queue_t&
get_disposable_images()
{
	return sDisposableImages;
}


uint32
count_loaded_images()
{
	return sLoadedImageCount;
}


void
enqueue_loaded_image(image_t* image)
{
	enqueue_image(&sLoadedImages, image);
	sLoadedImageCount++;
}


void
dequeue_loaded_image(image_t* image)
{
	dequeue_image(&sLoadedImages, image);
	sLoadedImageCount--;
}


void
dequeue_disposable_image(image_t* image)
{
	dequeue_image(&sDisposableImages, image);
}


image_t*
find_loaded_image_by_name(char const* name, uint32 typeMask)
{
	bool isPath = strchr(name, '/') != NULL;
	return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
}


image_t*
find_loaded_image_by_id(image_id id, bool ignoreDisposable)
{
	if (gInvalidImageIDs) {
		// After fork, we lazily rebuild the image IDs of all loaded images
		update_image_ids();
	}

	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	if (ignoreDisposable)
		return NULL;

	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	return NULL;
}


image_t*
find_loaded_image_by_address(addr_t address)
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			elf_region_t& region = image->regions[i];
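			// The end address is computed as (vmstart - 1 + vmsize) so that
			// a region reaching the very top of the address space does not
			// overflow.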
			if (region.vmstart <= address
				&& region.vmstart - 1 + region.vmsize >= address)
				return image;
		}
	}

	return NULL;
}


void
set_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, flags, 0);
}


void
clear_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, 0, flags);
}


/*!	Returns a topologically sorted image list.

	If \a image is non-NULL, an array containing the image and all its
	transitive dependencies is returned. If \a image is NULL, all loaded images
	are returned. In either case dependencies are listed before images
	depending on them.

	\param image The image specifying the tree of images that shall be sorted.
		If NULL, all loaded images are sorted.
	\param _list On success it will be set to an array of the sorted images.
		The caller is responsible for free()ing it.
	\param sortFlag The image flag that shall be used for sorting. Images that
		already have this flag set are ignored (and their dependencies, unless
		they are also reachable via another path). The flag will be set on all
		returned images.
	\return The number of images in the returned array or an error code on
		failure.
*/
ssize_t
get_sorted_image_list(image_t* image, image_t*** _list, uint32 sortFlag)
{
	image_t** list;

	list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t*));
	if (list == NULL) {
		FATAL("memory shortage in get_sorted_image_list()");
		*_list = NULL;
		return B_NO_MEMORY;
	}

	memset(list, 0, sLoadedImageCount * sizeof(image_t*));

	*_list = list;

	if (image != NULL)
		return topological_sort(image, 0, list, sortFlag);

	// no image given -- sort all loaded images
	uint32 count = 0;
	image = sLoadedImages.head;
	while (image != NULL) {
		count = topological_sort(image, count, list, sortFlag);
		image = image->next;
	}

	return count;
}
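

/*	Usage sketch (illustrative, not part of this file): a caller that wants to
	run initializers dependencies-first could do something like

		image_t** initList;
		ssize_t count = get_sorted_image_list(image, &initList,
			RFLAG_INITIALIZED);
		for (ssize_t i = 0; i < count; i++) {
			// every entry follows all of its dependencies
			run_init_routine(initList[i]);
		}
		free(initList);

	where run_init_routine() is a hypothetical stand-in for the code that
	actually invokes an image's init_routine.
*/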
694