xref: /haiku/src/system/runtime_loader/images.cpp (revision 14b32de1d5efe99b4c6d4ef8c25df47eb009cf0f)
/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2011, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Manuel J. Petit. All rights reserved.
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "images.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include <syscalls.h>
#include <vm_defs.h>

#include "add_ons.h"
#include "runtime_loader_private.h"


// keep in sync with app ldscript
#ifdef __x86_64__
	// runtime_loader potentially occupies 0x200000 - 0x600000 due to large
	// page segment alignment.
#	define RLD_PROGRAM_BASE	0x600000
#	define MAX_PAGE_SIZE	0x200000
#else
#	define RLD_PROGRAM_BASE	0x200000
#	define MAX_PAGE_SIZE	B_PAGE_SIZE
#endif


bool gInvalidImageIDs;

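// The lists of loaded and disposable images. An image moves from the loaded
// to the disposable list once its reference count drops to zero.
// sLoadedImageCount tracks the length of sLoadedImages.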
static image_queue_t sLoadedImages = {0, 0};
static image_queue_t sDisposableImages = {0, 0};
static uint32 sLoadedImageCount = 0;


//! Remaps the image ID of \a image after fork.
static status_t
update_image_id(image_t* image)
{
	int32 cookie = 0;
	image_info info;
	while (_kern_get_next_image_info(B_CURRENT_TEAM, &cookie, &info,
			sizeof(image_info)) == B_OK) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if (image->regions[i].vmstart == (addr_t)info.text) {
				image->id = info.id;
				return B_OK;
			}
		}
	}

	FATAL("Could not update image ID %" B_PRId32 " after fork()!\n", image->id);
	return B_ENTRY_NOT_FOUND;
}


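//! Appends \a image to the tail of \a queue.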
static void
enqueue_image(image_queue_t* queue, image_t* image)
{
	image->next = NULL;

	image->prev = queue->tail;
	if (queue->tail)
		queue->tail->next = image;

	queue->tail = image;
	if (!queue->head)
		queue->head = image;
}


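//! Removes \a image from the doubly linked list \a queue.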
static void
dequeue_image(image_queue_t* queue, image_t* image)
{
	if (image->next)
		image->next->prev = image->prev;
	else
		queue->tail = image->prev;

	if (image->prev)
		image->prev->next = image->next;
	else
		queue->head = image->next;

	image->prev = NULL;
	image->next = NULL;
}


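/*!	Finds an image in \a queue whose name (or path, if \a isPath is true)
	matches \a name and whose type is included in \a typeMask. Returns NULL
	if no image matches.
*/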
static image_t*
find_image_in_queue(image_queue_t* queue, const char* name, bool isPath,
	uint32 typeMask)
{
	for (image_t* image = queue->head; image; image = image->next) {
		const char* imageName = isPath ? image->path : image->name;
		int length = isPath ? sizeof(image->path) : sizeof(image->name);

		if (!strncmp(imageName, name, length)
			&& (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
			return image;
		}
	}

	return NULL;
}


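/*!	Sets \a flagsToSet and clears \a flagsToClear on \a image and all of its
	transitive dependencies. Uses an iterative breadth-first traversal with
	RFLAG_VISITED as the marker, so that shared dependencies are visited only
	once; the marker is cleared again in the final update pass.
*/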
static void
update_image_flags_recursively(image_t* image, uint32 flagsToSet,
	uint32 flagsToClear)
{
	image_t* queue[sLoadedImageCount];
	uint32 count = 0;
	uint32 index = 0;
	queue[count++] = image;
	image->flags |= RFLAG_VISITED;

	while (index < count) {
		// pop next image
		image = queue[index++];

		// push dependencies
		for (uint32 i = 0; i < image->num_needed; i++) {
			image_t* needed = image->needed[i];
			if ((needed->flags & RFLAG_VISITED) == 0) {
				queue[count++] = needed;
				needed->flags |= RFLAG_VISITED;
			}
		}
	}

	// update flags
	for (uint32 i = 0; i < count; i++) {
		queue[i]->flags = (queue[i]->flags | flagsToSet)
			& ~(flagsToClear | RFLAG_VISITED);
	}
}


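/*!	Adds \a image and its yet unvisited dependencies to \a initList via a
	post-order depth-first traversal, so that every image is preceded by all
	of its dependencies. \a sortFlag marks visited images and remains set.
	For example, if A needs B and B needs C, the resulting order is C, B, A.
	Returns the next free slot in \a initList.
*/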
static uint32
topological_sort(image_t* image, uint32 slot, image_t** initList,
	uint32 sortFlag)
{
	if (image->flags & sortFlag)
		return slot;

	image->flags |= sortFlag; /* make sure we don't visit this one */
	for (uint32 i = 0; i < image->num_needed; i++)
		slot = topological_sort(image->needed[i], slot, initList, sortFlag);

	initList[slot] = image;
	return slot + 1;
}


/*!	Finds the load address and address specifier of the given image region.
*/
static void
get_image_region_load_address(image_t* image, uint32 index, long lastDelta,
	addr_t& loadAddress, uint32& addressSpecifier)
{
	if (image->dynamic_ptr != 0) {
		// relocatable image... we can afford to place wherever
		if (index == 0) {
			// but only the first segment gets a free ride
			loadAddress = RLD_PROGRAM_BASE;
			addressSpecifier = B_RANDOMIZED_BASE_ADDRESS;
		} else {
			loadAddress = image->regions[index].vmstart + lastDelta;
			addressSpecifier = B_EXACT_ADDRESS;
		}
	} else {
		// not relocatable, put it where it asks or die trying
		loadAddress = image->regions[index].vmstart;
		addressSpecifier = B_EXACT_ADDRESS;
	}
}


// #pragma mark -


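/*!	Allocates and initializes a new image_t with room for \a regionCount
	regions. Since image_t already contains one elf_region_t, only
	regionCount - 1 additional elements need to be allocated. The image name
	is derived from the last path component of \a name, unless a DT_SONAME
	entry later replaces it.
*/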
image_t*
create_image(const char* name, const char* path, int regionCount)
{
	size_t allocSize = sizeof(image_t)
		+ (regionCount - 1) * sizeof(elf_region_t);

	image_t* image = (image_t*)malloc(allocSize);
	if (image == NULL) {
		FATAL("no memory for image %s\n", path);
		return NULL;
	}

	memset(image, 0, allocSize);

	strlcpy(image->path, path, sizeof(image->path));

	// Make the last component of the supplied name the image name.
	// If present, DT_SONAME will replace this name.
	const char* lastSlash = strrchr(name, '/');
	if (lastSlash != NULL)
		strlcpy(image->name, lastSlash + 1, sizeof(image->name));
	else
		strlcpy(image->name, name, sizeof(image->name));

	image->ref_count = 1;
	image->num_regions = regionCount;

	return image;
}


void
delete_image_struct(image_t* image)
{
#ifdef DEBUG
	size_t size = sizeof(image_t)
		+ (image->num_regions - 1) * sizeof(elf_region_t);
	memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
#endif
	free(image->needed);
	free(image->versions);

	while (RuntimeLoaderSymbolPatcher* patcher
			= image->defined_symbol_patchers) {
		image->defined_symbol_patchers = patcher->next;
		delete patcher;
	}
	while (RuntimeLoaderSymbolPatcher* patcher
			= image->undefined_symbol_patchers) {
		image->undefined_symbol_patchers = patcher->next;
		delete patcher;
	}

#ifdef DEBUG
	// overwrite images to make sure they aren't accidentally reused anywhere
	memset(image, 0xa5, size);
#endif
	free(image);
}


void
delete_image(image_t* image)
{
	if (image == NULL)
		return;

	_kern_unregister_image(image->id);
		// registered in load_container()

	delete_image_struct(image);
}


void
put_image(image_t* image)
{
	// If all references to the image are gone, add it to the disposable list
	// and remove all dependencies

	if (atomic_add(&image->ref_count, -1) == 1) {
		size_t i;

		dequeue_image(&sLoadedImages, image);
		enqueue_image(&sDisposableImages, image);
		sLoadedImageCount--;

		for (i = 0; i < image->num_needed; i++)
			put_image(image->needed[i]);
	}
}


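/*!	Maps all segments of \a image into the team's address space, reading the
	file contents from \a fd. First computes how much address space the
	segments need, reserves one range of that size (the first region
	determines placement and randomization), and then creates or maps an area
	for each region inside the reservation. On success the regions' delta and
	vmstart fields reflect the actual load addresses.
*/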
status_t
map_image(int fd, char const* path, image_t* image)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_RANDOMIZED_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0,
			loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + MAX_PAGE_SIZE * 2)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%" B_PRIu32 "%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the start
		// of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments r/w first -- write access might be needed for
			// relocations. When we're done with those, we change the
			// protection of read-only segments back to read-only. We map
			// those segments over-committing, since quite likely only a
			// relatively small number of pages needs to be touched and we
			// want to avoid a lot of memory being committed for them
			// temporarily, just because we have to write-map them.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP, false,
				fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}


void
unmap_image(image_t* image)
{
	for (uint32 i = 0; i < image->num_regions; i++) {
		_kern_delete_area(image->regions[i].id);

		image->regions[i].id = -1;
	}
}


/*!	Changes the protection of all read-only segments to actually be read-only.
	The areas have to be mapped read/write first, so that relocations can be
	applied.
*/
void
remap_images()
{
	for (image_t* image = sLoadedImages.head; image != NULL;
			image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if ((image->regions[i].flags & RFLAG_RW) == 0
				&& (image->regions[i].flags & RFLAG_REMAPPED) == 0) {
				// we only need to do this once, so we remember those we've
				// already mapped
				if (_kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_EXECUTE_AREA) == B_OK) {
					image->regions[i].flags |= RFLAG_REMAPPED;
				}
			}
		}
	}
}


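/*!	Registers \a image with the kernel. Since segments may have been split
	into separate regions, the text and data ranges reported in the image
	info are recomputed as the union of the read-only respectively read/write
	regions. The kernel-assigned ID is stored in image->id.
*/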
void
register_image(image_t* image, int fd, const char* path)
{
	struct stat stat;
	image_info info;

	// TODO: set these correctly
	info.id = 0;
	info.type = image->type;
	info.sequence = 0;
	info.init_order = 0;
	info.init_routine = (void (*)())image->init_routine;
	info.term_routine = (void (*)())image->term_routine;

	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
		info.device = stat.st_dev;
		info.node = stat.st_ino;
	} else {
		info.device = -1;
		info.node = -1;
	}

	// We may have split segments into separate regions. Compute the correct
	// segments for the image info.
	addr_t textBase = 0;
	addr_t textEnd = 0;
	addr_t dataBase = 0;
	addr_t dataEnd = 0;
	for (uint32 i = 0; i < image->num_regions; i++) {
		addr_t base = image->regions[i].vmstart;
		addr_t end = base + image->regions[i].vmsize;
		if (image->regions[i].flags & RFLAG_RW) {
			// data
			if (dataBase == 0) {
				dataBase = base;
				dataEnd = end;
			} else {
				dataBase = std::min(dataBase, base);
				dataEnd = std::max(dataEnd, end);
			}
		} else {
			// text
			if (textBase == 0) {
				textBase = base;
				textEnd = end;
			} else {
				textBase = std::min(textBase, base);
				textEnd = std::max(textEnd, end);
			}
		}
	}

	strlcpy(info.name, path, sizeof(info.name));
	info.text = (void*)textBase;
	info.text_size = textEnd - textBase;
	info.data = (void*)dataBase;
	info.data_size = dataEnd - dataBase;
	info.api_version = image->api_version;
	info.abi = image->abi;
	image->id = _kern_register_image(&info, sizeof(image_info));
}


//! After fork, we lazily rebuild the image IDs of all loaded images.
status_t
update_image_ids()
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}
	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}

	gInvalidImageIDs = false;
	return B_OK;
}


image_queue_t&
get_loaded_images()
{
	return sLoadedImages;
}


image_queue_t&
get_disposable_images()
{
	return sDisposableImages;
}


uint32
count_loaded_images()
{
	return sLoadedImageCount;
}


void
enqueue_loaded_image(image_t* image)
{
	enqueue_image(&sLoadedImages, image);
	sLoadedImageCount++;
}


void
dequeue_loaded_image(image_t* image)
{
	dequeue_image(&sLoadedImages, image);
	sLoadedImageCount--;
}


void
dequeue_disposable_image(image_t* image)
{
	dequeue_image(&sDisposableImages, image);
}


image_t*
find_loaded_image_by_name(char const* name, uint32 typeMask)
{
	bool isPath = strchr(name, '/') != NULL;
	return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
}


image_t*
find_loaded_image_by_id(image_id id, bool ignoreDisposable)
{
	if (gInvalidImageIDs) {
		// After fork, we lazily rebuild the image IDs of all loaded images
		update_image_ids();
	}

	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	if (ignoreDisposable)
		return NULL;

	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	return NULL;
}


image_t*
find_loaded_image_by_address(addr_t address)
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			elf_region_t& region = image->regions[i];
			// Note the "- 1 +" when computing the region end: it avoids
			// overflow in case the region extends to the very end of the
			// address space.
			if (region.vmstart <= address
				&& region.vmstart - 1 + region.vmsize >= address)
				return image;
		}
	}

	return NULL;
}


void
set_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, flags, 0);
}


void
clear_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, 0, flags);
}


/*!	Returns a topologically sorted image list.

	If \a image is non-NULL, an array containing the image and all its
	transitive dependencies is returned. If \a image is NULL, all loaded images
	are returned. In either case dependencies are listed before images
	depending on them.

	\param image The image specifying the tree of images that shall be sorted.
		If NULL, all loaded images are sorted.
	\param _list On success it will be set to an array of the sorted images.
		The caller is responsible for free()ing it.
	\param sortFlag The image flag that shall be used for sorting. Images that
		already have this flag set are ignored (and their dependencies, unless
		they are also reachable via another path). The flag will be set on all
		returned images.
	\return The number of images in the returned array or an error code on
		failure.
*/
ssize_t
get_sorted_image_list(image_t* image, image_t*** _list, uint32 sortFlag)
{
	image_t** list;

	list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t*));
	if (list == NULL) {
		FATAL("memory shortage in get_sorted_image_list()");
		*_list = NULL;
		return B_NO_MEMORY;
	}

	memset(list, 0, sLoadedImageCount * sizeof(image_t*));

	*_list = list;

	if (image != NULL)
		return topological_sort(image, 0, list, sortFlag);

	// no image given -- sort all loaded images
	uint32 count = 0;
	image = sLoadedImages.head;
	while (image != NULL) {
		count = topological_sort(image, count, list, sortFlag);
		image = image->next;
	}

	return count;
}
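

// A minimal usage sketch (illustrative only, not part of the original file):
// a caller that wants to process an image and its dependencies in
// dependency-first order -- assuming a sort flag such as RFLAG_INITIALIZED --
// could do something like:
//
//	image_t** list;
//	ssize_t count = get_sorted_image_list(image, &list, RFLAG_INITIALIZED);
//	if (count > 0) {
//		for (ssize_t i = 0; i < count; i++)
//			/* process list[i]; its dependencies were processed before it */;
//	}
//	free(list);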
679