xref: /haiku/src/system/runtime_loader/images.cpp (revision dfc8a217db488098641462dfc334dcc0f7d62456)
/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Manuel J. Petit. All rights reserved.
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "images.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include <syscalls.h>
#include <vm_defs.h>

#include "add_ons.h"
#include "runtime_loader_private.h"


#define RLD_PROGRAM_BASE 0x00200000
	/* keep in sync with app ldscript */


bool gInvalidImageIDs;

static image_queue_t sLoadedImages = {0, 0};
static image_queue_t sDisposableImages = {0, 0};
static uint32 sLoadedImageCount = 0;


//! Remaps the image ID of \a image after fork.
static status_t
update_image_id(image_t* image)
{
	int32 cookie = 0;
	image_info info;
	while (_kern_get_next_image_info(B_CURRENT_TEAM, &cookie, &info,
			sizeof(image_info)) == B_OK) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if (image->regions[i].vmstart == (addr_t)info.text) {
				image->id = info.id;
				return B_OK;
			}
		}
	}

	FATAL("Could not update image ID %ld after fork()!\n", image->id);
	return B_ENTRY_NOT_FOUND;
}
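

// Illustrative sketch (not part of the loader, hypothetical ID): after
// fork() the child's images keep their load addresses, but the kernel has
// registered them under new IDs -- which is why the lookup above matches on
// the text address rather than on the stale ID.
//
//	if (fork() == 0) {
//		// child: gInvalidImageIDs is set; the next lookup by ID calls
//		// update_image_ids(), which rebuilds every image->id via the
//		// address match above.
//		image_t* self = find_loaded_image_by_id(someOldId, true);
//	}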


static void
enqueue_image(image_queue_t* queue, image_t* image)
{
	image->next = NULL;

	image->prev = queue->tail;
	if (queue->tail)
		queue->tail->next = image;

	queue->tail = image;
	if (!queue->head)
		queue->head = image;
}


static void
dequeue_image(image_queue_t* queue, image_t* image)
{
	if (image->next)
		image->next->prev = image->prev;
	else
		queue->tail = image->prev;

	if (image->prev)
		image->prev->next = image->next;
	else
		queue->head = image->next;

	image->prev = NULL;
	image->next = NULL;
}


static image_t*
find_image_in_queue(image_queue_t* queue, const char* name, bool isPath,
	uint32 typeMask)
{
	for (image_t* image = queue->head; image; image = image->next) {
		const char* imageName = isPath ? image->path : image->name;
		int length = isPath ? sizeof(image->path) : sizeof(image->name);

		if (!strncmp(imageName, name, length)
			&& (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
			return image;
		}
	}

	return NULL;
}


static void
update_image_flags_recursively(image_t* image, uint32 flagsToSet,
	uint32 flagsToClear)
{
	image_t* queue[sLoadedImageCount];
	uint32 count = 0;
	uint32 index = 0;
	queue[count++] = image;
	image->flags |= RFLAG_VISITED;

	while (index < count) {
		// pop next image
		image = queue[index++];

		// push dependencies
		for (uint32 i = 0; i < image->num_needed; i++) {
			image_t* needed = image->needed[i];
			if ((needed->flags & RFLAG_VISITED) == 0) {
				queue[count++] = needed;
				needed->flags |= RFLAG_VISITED;
			}
		}
	}

	// update flags
	for (uint32 i = 0; i < count; i++) {
		queue[i]->flags = (queue[i]->flags | flagsToSet)
			& ~(flagsToClear | RFLAG_VISITED);
	}
}
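

// Note: the traversal above is an iterative breadth-first walk. The worklist
// is a variable-length array (a GCC extension in C++) sized to
// sLoadedImageCount, which bounds the number of distinct images reachable
// from any root. The RFLAG_VISITED marker keeps each image from being queued
// twice in diamond-shaped or cyclic dependency graphs, and is cleared again
// in the final loop.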


static uint32
topological_sort(image_t* image, uint32 slot, image_t** initList,
	uint32 sortFlag)
{
	if (image->flags & sortFlag)
		return slot;

	image->flags |= sortFlag; /* make sure we don't visit this one */
	for (uint32 i = 0; i < image->num_needed; i++)
		slot = topological_sort(image->needed[i], slot, initList, sortFlag);

	initList[slot] = image;
	return slot + 1;
}
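

// Worked example (hypothetical image names): given app -> libbe.so ->
// libroot.so, plus a direct app -> libroot.so edge,
// topological_sort(app, 0, list, flag) recurses depth-first and fills the
// slots post-order:
//
//	list[0] = libroot.so	// the deepest dependency lands first
//	list[1] = libbe.so
//	list[2] = app			// the root lands last
//
// so dependencies always precede the images that need them -- exactly the
// order required for running initializers.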


/*!	Finds the load address and address specifier of the given image region.
*/
static void
get_image_region_load_address(image_t* image, uint32 index, int32 lastDelta,
	bool fixed, addr_t& loadAddress, uint32& addressSpecifier)
{
	if (image->dynamic_ptr != 0 && !fixed) {
		// relocatable image... we can afford to place wherever
		if (index == 0) {
			// but only the first segment gets a free ride
			loadAddress = RLD_PROGRAM_BASE;
			addressSpecifier = B_BASE_ADDRESS;
		} else {
			loadAddress = image->regions[index].vmstart + lastDelta;
			addressSpecifier = B_EXACT_ADDRESS;
		}
	} else {
		// not relocatable, put it where it asks or die trying
		loadAddress = image->regions[index].vmstart;
		addressSpecifier = B_EXACT_ADDRESS;
	}
}
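

// Worked example (made-up numbers): for a relocatable image whose ELF headers
// put region 0 at vmstart 0x0 and region 1 at vmstart 0x1000, region 0 is
// requested at RLD_PROGRAM_BASE (0x200000) with B_BASE_ADDRESS, i.e. "here or
// anywhere above". If it actually lands at 0x240000, the caller passes
// lastDelta = 0x240000 - 0x0 = 0x240000, so region 1 is pinned at
// 0x1000 + 0x240000 = 0x241000 with B_EXACT_ADDRESS, preserving the relative
// layout of the segments.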


// #pragma mark -


image_t*
create_image(const char* name, const char* path, int regionCount)
{
	size_t allocSize = sizeof(image_t)
		+ (regionCount - 1) * sizeof(elf_region_t);

	image_t* image = (image_t*)malloc(allocSize);
	if (image == NULL) {
		FATAL("no memory for image %s\n", path);
		return NULL;
	}

	memset(image, 0, allocSize);

	strlcpy(image->path, path, sizeof(image->path));

	// Make the last component of the supplied name the image name.
	// If present, DT_SONAME will replace this name.
	const char* lastSlash = strrchr(name, '/');
	if (lastSlash != NULL)
		strlcpy(image->name, lastSlash + 1, sizeof(image->name));
	else
		strlcpy(image->name, name, sizeof(image->name));

	image->ref_count = 1;
	image->num_regions = regionCount;

	return image;
}
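

// The single allocation above relies on image_t ending in a one-element
// elf_region_t array (the pre-C99 "struct hack"), so regionCount - 1 extra
// elements are appended to the struct. With regionCount == 2, for instance,
// the block holds sizeof(image_t) plus one extra elf_region_t, making both
// regions[0] and regions[1] valid.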


void
delete_image_struct(image_t* image)
{
#ifdef DEBUG
	size_t size = sizeof(image_t)
		+ (image->num_regions - 1) * sizeof(elf_region_t);
	memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
#endif
	free(image->needed);
	free(image->versions);

	while (RuntimeLoaderSymbolPatcher* patcher
			= image->defined_symbol_patchers) {
		image->defined_symbol_patchers = patcher->next;
		delete patcher;
	}
	while (RuntimeLoaderSymbolPatcher* patcher
			= image->undefined_symbol_patchers) {
		image->undefined_symbol_patchers = patcher->next;
		delete patcher;
	}

#ifdef DEBUG
	// overwrite the image to make sure it isn't accidentally reused anywhere
	memset(image, 0xa5, size);
#endif
	free(image);
}


void
delete_image(image_t* image)
{
	if (image == NULL)
		return;

	_kern_unregister_image(image->id);
		// registered in load_container()

	delete_image_struct(image);
}


void
put_image(image_t* image)
{
	// If all references to the image are gone, add it to the disposable list
	// and remove all dependencies

	if (atomic_add(&image->ref_count, -1) == 1) {
		size_t i;

		dequeue_image(&sLoadedImages, image);
		enqueue_image(&sDisposableImages, image);
		sLoadedImageCount--;

		for (i = 0; i < image->num_needed; i++)
			put_image(image->needed[i]);
	}
}
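

// Illustration (hypothetical images): if app holds the last reference to
// libfoo.so, and libfoo.so in turn holds the last reference to libbar.so,
// a single put_image(libfoo) moves libfoo.so to the disposable queue and
// recurses into put_image(libbar), cascading the release down the whole
// dependency chain.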


status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0, fixed,
			loadAddress, regionAddressSpecifier);
			// Pass 0 as lastDelta for the first region: the value is unused
			// for index 0, and computing it would read regions[-1] and the
			// still uninitialized loadAddress.
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + 8 * 1024)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%lu%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, fixed, loadAddress,
			addressSpecifier);
			// again, lastDelta is meaningless for the first region

		// If the image position is arbitrary, we must let it point to the
		// start of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments r/w first -- write access might be needed for
			// relocations. When we're done with those, we change the
			// protection of read-only segments back to read-only. We map
			// those segments over-committing, since quite likely only a
			// relatively small number of pages needs to be touched, and we
			// want to avoid committing a lot of memory for them temporarily,
			// just because we have to map them writable.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP, false,
				fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}
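

// Worked example of the reservation math (made-up numbers, 4 KiB pages):
// two regions, region 0 with vmsize 0x3000 placed at 0x200000 and region 1
// with vmsize 0x2000 placed at 0x204000. Then length = 0x3000 + 0x2000 =
// 0x5000, while reservedSize = TO_PAGE_SIZE(0x204000 + 0x2000) - 0x200000 =
// 0x6000. The gap between the segments is 0x1000 <= 8 KiB, so the layout
// passes the sanity check; a hole larger than 8 KiB would make the image be
// rejected with B_BAD_DATA.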


void
unmap_image(image_t* image)
{
	for (uint32 i = 0; i < image->num_regions; i++) {
		_kern_delete_area(image->regions[i].id);

		image->regions[i].id = -1;
	}
}


/*!	This function will change the protection of all read-only segments to
	really be read-only (and executable).
	The areas have to be read/write first, so that they can be relocated.
*/
void
remap_images()
{
	for (image_t* image = sLoadedImages.head; image != NULL;
			image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if ((image->regions[i].flags & RFLAG_RW) == 0
				&& (image->regions[i].flags & RFLAG_REMAPPED) == 0) {
				// we only need to do this once, so we remember the regions
				// we've already remapped
				if (_kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_EXECUTE_AREA) == B_OK) {
					image->regions[i].flags |= RFLAG_REMAPPED;
				}
			}
		}
	}
}


void
register_image(image_t* image, int fd, const char* path)
{
	struct stat stat;
	image_info info;

	// TODO: set these correctly
	info.id = 0;
	info.type = image->type;
	info.sequence = 0;
	info.init_order = 0;
	info.init_routine = (void (*)())image->init_routine;
	info.term_routine = (void (*)())image->term_routine;

	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
		info.device = stat.st_dev;
		info.node = stat.st_ino;
	} else {
		info.device = -1;
		info.node = -1;
	}

	// We may have split segments into separate regions. Compute the correct
	// segments for the image info.
	addr_t textBase = 0;
	addr_t textEnd = 0;
	addr_t dataBase = 0;
	addr_t dataEnd = 0;
	for (uint32 i = 0; i < image->num_regions; i++) {
		addr_t base = image->regions[i].vmstart;
		addr_t end = base + image->regions[i].vmsize;
		if (image->regions[i].flags & RFLAG_RW) {
			// data
			if (dataBase == 0) {
				dataBase = base;
				dataEnd = end;
			} else {
				dataBase = std::min(dataBase, base);
				dataEnd = std::max(dataEnd, end);
			}
		} else {
			// text
			if (textBase == 0) {
				textBase = base;
				textEnd = end;
			} else {
				textBase = std::min(textBase, base);
				textEnd = std::max(textEnd, end);
			}
		}
	}

	strlcpy(info.name, path, sizeof(info.name));
	info.text = (void*)textBase;
	info.text_size = textEnd - textBase;
	info.data = (void*)dataBase;
	info.data_size = dataEnd - dataBase;
	info.api_version = image->api_version;
	info.abi = image->abi;
	image->id = _kern_register_image(&info, sizeof(image_info));
}
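

// Example of the coalescing above (made-up addresses): two read-only regions
// at [0x200000, 0x203000) and [0x204000, 0x205000) plus one RFLAG_RW region
// at [0x205000, 0x207000) are reported to the kernel as a single text segment
// (text = 0x200000, text_size = 0x5000) and a single data segment
// (data = 0x205000, data_size = 0x2000).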


//! After fork, we lazily rebuild the image IDs of all loaded images.
status_t
update_image_ids()
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}
	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}

	gInvalidImageIDs = false;
	return B_OK;
}


image_queue_t&
get_loaded_images()
{
	return sLoadedImages;
}


image_queue_t&
get_disposable_images()
{
	return sDisposableImages;
}


uint32
count_loaded_images()
{
	return sLoadedImageCount;
}


void
enqueue_loaded_image(image_t* image)
{
	enqueue_image(&sLoadedImages, image);
	sLoadedImageCount++;
}


void
dequeue_loaded_image(image_t* image)
{
	dequeue_image(&sLoadedImages, image);
	sLoadedImageCount--;
}


void
dequeue_disposable_image(image_t* image)
{
	dequeue_image(&sDisposableImages, image);
}


image_t*
find_loaded_image_by_name(char const* name, uint32 typeMask)
{
	bool isPath = strchr(name, '/') != NULL;
	return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
}


image_t*
find_loaded_image_by_id(image_id id, bool ignoreDisposable)
{
	if (gInvalidImageIDs) {
		// After fork, we lazily rebuild the image IDs of all loaded images
		update_image_ids();
	}

	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	if (ignoreDisposable)
		return NULL;

	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	return NULL;
}


void
set_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, flags, 0);
}


void
clear_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, 0, flags);
}


/*!	Returns a topologically sorted image list.

	If \a image is non-NULL, an array containing the image and all its
	transitive dependencies is returned. If \a image is NULL, all loaded images
	are returned. In either case dependencies are listed before images
	depending on them.

	\param image The image specifying the tree of images that shall be sorted.
		If NULL, all loaded images are sorted.
	\param _list On success it will be set to an array of the sorted images.
		The caller is responsible for free()ing it.
	\param sortFlag The image flag that shall be used for sorting. Images that
		already have this flag set are ignored (and their dependencies, unless
		they are also reachable via another path). The flag will be set on all
		returned images.
	\return The number of images in the returned array or an error code on
		failure.
*/
ssize_t
get_sorted_image_list(image_t* image, image_t*** _list, uint32 sortFlag)
{
	image_t** list;

	list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t*));
	if (list == NULL) {
		FATAL("memory shortage in get_sorted_image_list()\n");
		*_list = NULL;
		return B_NO_MEMORY;
	}

	memset(list, 0, sLoadedImageCount * sizeof(image_t*));

	*_list = list;

	if (image != NULL)
		return topological_sort(image, 0, list, sortFlag);

	// no image given -- sort all loaded images
	uint32 count = 0;
	image = sLoadedImages.head;
	while (image != NULL) {
		count = topological_sort(image, count, list, sortFlag);
		image = image->next;
	}

	return count;
}
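

// Hypothetical usage sketch: running initializers in dependency order. The
// flag name RFLAG_INITIALIZED and the helper run_init_routine() are only
// assumptions for illustration; any otherwise unused image flag works, since
// topological_sort() both tests and sets it.
//
//	image_t** initList;
//	ssize_t count = get_sorted_image_list(rootImage, &initList,
//		RFLAG_INITIALIZED);
//	for (ssize_t i = 0; i < count; i++) {
//		// dependencies come first, so each image's init code can rely on
//		// the images it needs having been initialized already
//		run_init_routine(initList[i]);
//	}
//	free(initList);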