xref: /haiku/src/system/runtime_loader/images.cpp (revision 40afabe6a340b294781b7b56560a121e13ebf184)
/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2011, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Manuel J. Petit. All rights reserved.
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "images.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include <syscalls.h>
#include <vm_defs.h>

#include "add_ons.h"
#include "elf_tls.h"
#include "runtime_loader_private.h"

#include <util/kernel_cpp.h>


// keep in sync with app ldscript
#ifdef __x86_64__
	// runtime_loader potentially occupies 0x200000 - 0x600000 due to large
	// page segment alignment.
#	define RLD_PROGRAM_BASE	0x600000
#	define MAX_PAGE_SIZE	0x200000
#else
#	define RLD_PROGRAM_BASE	0x200000
#	define MAX_PAGE_SIZE	B_PAGE_SIZE
#endif


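/*!	Set when the registered image IDs have become stale (after fork());
	find_loaded_image_by_id() then rebuilds them lazily via update_image_ids().
*/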
bool gInvalidImageIDs;

static image_queue_t sLoadedImages = {0, 0};
static image_queue_t sDisposableImages = {0, 0};
static uint32 sLoadedImageCount = 0;


//! Remaps the image ID of \a image after fork.
static status_t
update_image_id(image_t* image)
{
	int32 cookie = 0;
	image_info info;
	while (_kern_get_next_image_info(B_CURRENT_TEAM, &cookie, &info,
			sizeof(image_info)) == B_OK) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if (image->regions[i].vmstart == (addr_t)info.text) {
				image->id = info.id;
				return B_OK;
			}
		}
	}

	FATAL("Could not update image ID %" B_PRId32 " after fork()!\n", image->id);
	return B_ENTRY_NOT_FOUND;
}


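//! Appends \a image to the tail of the doubly linked \a queue.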
static void
enqueue_image(image_queue_t* queue, image_t* image)
{
	image->next = NULL;

	image->prev = queue->tail;
	if (queue->tail)
		queue->tail->next = image;

	queue->tail = image;
	if (!queue->head)
		queue->head = image;
}


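//! Removes \a image from \a queue and resets its link pointers.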
static void
dequeue_image(image_queue_t* queue, image_t* image)
{
	if (image->next)
		image->next->prev = image->prev;
	else
		queue->tail = image->prev;

	if (image->prev)
		image->prev->next = image->next;
	else
		queue->head = image->next;

	image->prev = NULL;
	image->next = NULL;
}


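/*!	Returns the first image in \a queue whose name (or path, if \a isPath is
	true) matches \a name and whose type is included in \a typeMask.
*/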
static image_t*
find_image_in_queue(image_queue_t* queue, const char* name, bool isPath,
	uint32 typeMask)
{
	for (image_t* image = queue->head; image; image = image->next) {
		const char* imageName = isPath ? image->path : image->name;
		int length = isPath ? sizeof(image->path) : sizeof(image->name);

		if (!strncmp(imageName, name, length)
			&& (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
			return image;
		}
	}

	return NULL;
}


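/*!	Sets \a flagsToSet and clears \a flagsToClear on \a image and all of its
	transitive dependencies. The dependency graph is walked breadth-first,
	using RFLAG_VISITED as the visited marker; the marker is cleared again
	before returning.
*/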
static void
update_image_flags_recursively(image_t* image, uint32 flagsToSet,
	uint32 flagsToClear)
{
	image_t* queue[sLoadedImageCount];
	uint32 count = 0;
	uint32 index = 0;
	queue[count++] = image;
	image->flags |= RFLAG_VISITED;

	while (index < count) {
		// pop next image
		image = queue[index++];

		// push dependencies
		for (uint32 i = 0; i < image->num_needed; i++) {
			image_t* needed = image->needed[i];
			if ((needed->flags & RFLAG_VISITED) == 0) {
				queue[count++] = needed;
				needed->flags |= RFLAG_VISITED;
			}
		}
	}

	// update flags
	for (uint32 i = 0; i < count; i++) {
		queue[i]->flags = (queue[i]->flags | flagsToSet)
			& ~(flagsToClear | RFLAG_VISITED);
	}
}


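/*!	Recursively sorts the dependency tree rooted at \a image in post-order,
	so that every image is preceded in \a initList by the images it needs.
	\a sortFlag doubles as the visited marker; returns the next free slot.
*/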
static uint32
topological_sort(image_t* image, uint32 slot, image_t** initList,
	uint32 sortFlag)
{
	if (image->flags & sortFlag)
		return slot;

	image->flags |= sortFlag; /* make sure we don't visit this one */
	for (uint32 i = 0; i < image->num_needed; i++)
		slot = topological_sort(image->needed[i], slot, initList, sortFlag);

	initList[slot] = image;
	return slot + 1;
}


/*!	Finds the load address and address specifier of the given image region.
*/
static void
get_image_region_load_address(image_t* image, uint32 index, long lastDelta,
	addr_t& loadAddress, uint32& addressSpecifier)
{
	if (image->dynamic_ptr != 0) {
		// relocatable image... we can afford to place wherever
		if (index == 0) {
			// but only the first segment gets a free ride
			loadAddress = RLD_PROGRAM_BASE;
			addressSpecifier = B_RANDOMIZED_BASE_ADDRESS;
		} else {
			loadAddress = image->regions[index].vmstart + lastDelta;
			addressSpecifier = B_EXACT_ADDRESS;
		}
	} else {
		// not relocatable, put it where it asks or die trying
		loadAddress = image->regions[index].vmstart;
		addressSpecifier = B_EXACT_ADDRESS;
	}
}


// #pragma mark -


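/*!	Allocates and zero-initializes an image_t with room for \a regionCount
	regions, sets its path and name (the last component of \a name), and
	initializes the reference count to 1.
*/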
image_t*
create_image(const char* name, const char* path, int regionCount)
{
	size_t allocSize = sizeof(image_t)
		+ (regionCount - 1) * sizeof(elf_region_t);

	image_t* image = (image_t*)malloc(allocSize);
	if (image == NULL) {
		FATAL("no memory for image %s\n", path);
		return NULL;
	}

	memset(image, 0, allocSize);

	strlcpy(image->path, path, sizeof(image->path));

	// Make the last component of the supplied name the image name.
	// If present, DT_SONAME will replace this name.
	const char* lastSlash = strrchr(name, '/');
	if (lastSlash != NULL)
		strlcpy(image->name, lastSlash + 1, sizeof(image->name));
	else
		strlcpy(image->name, name, sizeof(image->name));

	image->ref_count = 1;
	image->num_regions = regionCount;

	return image;
}


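/*!	Frees the dependency list, version table, symbol patcher lists, and the
	image structure itself. In DEBUG builds the freed memory is poisoned with
	0xa5 to catch accidental reuse.
*/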
void
delete_image_struct(image_t* image)
{
#ifdef DEBUG
	size_t size = sizeof(image_t)
		+ (image->num_regions - 1) * sizeof(elf_region_t);
	memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
#endif
	free(image->needed);
	free(image->versions);

	while (RuntimeLoaderSymbolPatcher* patcher
			= image->defined_symbol_patchers) {
		image->defined_symbol_patchers = patcher->next;
		delete patcher;
	}
	while (RuntimeLoaderSymbolPatcher* patcher
			= image->undefined_symbol_patchers) {
		image->undefined_symbol_patchers = patcher->next;
		delete patcher;
	}

#ifdef DEBUG
	// overwrite images to make sure they aren't accidentally reused anywhere
	memset(image, 0xa5, size);
#endif
	free(image);
}


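//! Unregisters \a image from the kernel and frees its structures.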
void
delete_image(image_t* image)
{
	if (image == NULL)
		return;

	_kern_unregister_image(image->id);
		// registered in load_container()

	delete_image_struct(image);
}


void
put_image(image_t* image)
{
	// If all references to the image are gone, add it to the disposable list
	// and remove all dependencies

	if (atomic_add(&image->ref_count, -1) == 1) {
		size_t i;

		dequeue_image(&sLoadedImages, image);
		enqueue_image(&sDisposableImages, image);
		sLoadedImageCount--;

		for (i = 0; i < image->num_needed; i++)
			put_image(image->needed[i]);
	}
}


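/*!	Maps all segments of \a image into the current team's address space.

	The address range needed by all segments is computed and reserved as one
	block; each region is then either created as an anonymous area (RFLAG_ANON)
	or mapped from \a fd. All regions are initially mapped read/write so that
	relocations can be applied -- remap_images() later restores the intended
	protection. For writable segments, the part of the mapping beyond the
	file-backed data is cleared.
*/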
status_t
map_image(int fd, char const* path, image_t* image)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_RANDOMIZED_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0,
			loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + MAX_PAGE_SIZE * 2)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%" B_PRIu32 "%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the start
		// of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments r/w first -- write access might be needed for
			// relocations. Once those are done, the protection of read-only
			// segments is changed back to read-only (see remap_images()). We
			// map these segments over-committing, since quite likely only a
			// relatively small number of pages needs to be touched and we
			// want to avoid temporarily committing a lot of memory just
			// because we have to map them writable.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP, false,
				fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
		if (i == 0) {
			TLSBlockTemplates::Get().SetBaseAddress(image->dso_tls_id,
				loadAddress);
		}
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}


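//! Deletes all areas belonging to \a image.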
void
unmap_image(image_t* image)
{
	for (uint32 i = 0; i < image->num_regions; i++) {
		_kern_delete_area(image->regions[i].id);

		image->regions[i].id = -1;
	}
}


/*!	Changes the protection of all read-only segments to actually be read-only
	(and executable). The areas have to be mapped read/write first, so that
	relocations can be applied. For images using an old pre-Haiku ABI
	(compatibility mode), writable areas are additionally made executable.
*/
void
remap_images()
{
	for (image_t* image = sLoadedImages.head; image != NULL;
			image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			// we only need to do this once, so we remember those we've already
			// mapped
			if ((image->regions[i].flags & RFLAG_REMAPPED) != 0)
				continue;

			status_t result = B_OK;
			if ((image->regions[i].flags & RFLAG_RW) == 0) {
				result = _kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_EXECUTE_AREA);
			} else if (image->abi < B_HAIKU_ABI_GCC_2_HAIKU) {
				result = _kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA);
			}

			if (result == B_OK)
				image->regions[i].flags |= RFLAG_REMAPPED;
		}
	}
}


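/*!	Builds an image_info for \a image -- the text and data ranges are computed
	by merging the read-only and writable regions, respectively -- and
	registers it with the kernel. The resulting ID is stored in image->id.
*/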
void
register_image(image_t* image, int fd, const char* path)
{
	struct stat stat;
	image_info info;

	// TODO: set these correctly
	info.id = 0;
	info.type = image->type;
	info.sequence = 0;
	info.init_order = 0;
	info.init_routine = (void (*)())image->init_routine;
	info.term_routine = (void (*)())image->term_routine;

	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
		info.device = stat.st_dev;
		info.node = stat.st_ino;
	} else {
		info.device = -1;
		info.node = -1;
	}

	// We may have split segments into separate regions. Compute the correct
	// segments for the image info.
	addr_t textBase = 0;
	addr_t textEnd = 0;
	addr_t dataBase = 0;
	addr_t dataEnd = 0;
	for (uint32 i = 0; i < image->num_regions; i++) {
		addr_t base = image->regions[i].vmstart;
		addr_t end = base + image->regions[i].vmsize;
		if (image->regions[i].flags & RFLAG_RW) {
			// data
			if (dataBase == 0) {
				dataBase = base;
				dataEnd = end;
			} else {
				dataBase = std::min(dataBase, base);
				dataEnd = std::max(dataEnd, end);
			}
		} else {
			// text
			if (textBase == 0) {
				textBase = base;
				textEnd = end;
			} else {
				textBase = std::min(textBase, base);
				textEnd = std::max(textEnd, end);
			}
		}
	}

	strlcpy(info.name, path, sizeof(info.name));
	info.text = (void*)textBase;
	info.text_size = textEnd - textBase;
	info.data = (void*)dataBase;
	info.data_size = dataEnd - dataBase;
	info.api_version = image->api_version;
	info.abi = image->abi;
	image->id = _kern_register_image(&info, sizeof(image_info));
}


//! After fork, we lazily rebuild the image IDs of all loaded images.
status_t
update_image_ids()
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}
	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}

	gInvalidImageIDs = false;
	return B_OK;
}


image_queue_t&
get_loaded_images()
{
	return sLoadedImages;
}


image_queue_t&
get_disposable_images()
{
	return sDisposableImages;
}


uint32
count_loaded_images()
{
	return sLoadedImageCount;
}


void
enqueue_loaded_image(image_t* image)
{
	enqueue_image(&sLoadedImages, image);
	sLoadedImageCount++;
}


void
dequeue_loaded_image(image_t* image)
{
	dequeue_image(&sLoadedImages, image);
	sLoadedImageCount--;
}


void
dequeue_disposable_image(image_t* image)
{
	dequeue_image(&sDisposableImages, image);
}


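//! Finds a loaded image by name, or by full path if \a name contains a '/'.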
image_t*
find_loaded_image_by_name(char const* name, uint32 typeMask)
{
	bool isPath = strchr(name, '/') != NULL;
	return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
}


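/*!	Returns the loaded image with ID \a id. Unless \a ignoreDisposable is
	true, the disposable images are searched as well.
*/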
image_t*
find_loaded_image_by_id(image_id id, bool ignoreDisposable)
{
	if (gInvalidImageIDs) {
		// After fork, we lazily rebuild the image IDs of all loaded images
		update_image_ids();
	}

	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	if (ignoreDisposable)
		return NULL;

	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	return NULL;
}


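//! Returns the loaded image containing \a address, or NULL if there is none.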
image_t*
find_loaded_image_by_address(addr_t address)
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			elf_region_t& region = image->regions[i];
			if (region.vmstart <= address
				&& region.vmstart - 1 + region.vmsize >= address)
				return image;
		}
	}

	return NULL;
}


void
set_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, flags, 0);
}


void
clear_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, 0, flags);
}


/*!	Returns a topologically sorted image list.

	If \a image is non-NULL, an array containing the image and all its
	transitive dependencies is returned. If \a image is NULL, all loaded images
	are returned. In either case dependencies are listed before images
	depending on them.

	\param image The image specifying the tree of images that shall be sorted.
		If NULL, all loaded images are sorted.
	\param _list On success it will be set to an array of the sorted images.
		The caller is responsible for free()ing it.
	\param sortFlag The image flag that shall be used for sorting. Images that
		already have this flag set are ignored (and their dependencies, unless
		they are also reachable via another path). The flag will be set on all
		returned images.
	\return The number of images in the returned array or an error code on
		failure.
*/
ssize_t
get_sorted_image_list(image_t* image, image_t*** _list, uint32 sortFlag)
{
	image_t** list;

	list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t*));
	if (list == NULL) {
		FATAL("memory shortage in get_sorted_image_list()");
		*_list = NULL;
		return B_NO_MEMORY;
	}

	memset(list, 0, sLoadedImageCount * sizeof(image_t*));

	*_list = list;

	if (image != NULL)
		return topological_sort(image, 0, list, sortFlag);

	// no image given -- sort all loaded images
	uint32 count = 0;
	image = sLoadedImages.head;
	while (image != NULL) {
		count = topological_sort(image, count, list, sortFlag);
		image = image->next;
	}

	return count;
}