/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Manuel J. Petit. All rights reserved.
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "images.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include <syscalls.h>
#include <vm_defs.h>

#include "add_ons.h"
#include "runtime_loader_private.h"


#define RLD_PROGRAM_BASE 0x00200000
	/* keep in sync with app ldscript */


bool gInvalidImageIDs;

static image_queue_t sLoadedImages = {0, 0};
static image_queue_t sDisposableImages = {0, 0};
static uint32 sLoadedImageCount = 0;


//! Remaps the image ID of \a image after fork.
static status_t
update_image_id(image_t* image)
{
	int32 cookie = 0;
	image_info info;
	while (_kern_get_next_image_info(B_CURRENT_TEAM, &cookie, &info,
			sizeof(image_info)) == B_OK) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if (image->regions[i].vmstart == (addr_t)info.text) {
				image->id = info.id;
				return B_OK;
			}
		}
	}

	FATAL("Could not update image ID %ld after fork()!\n", image->id);
	return B_ENTRY_NOT_FOUND;
}


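//! Appends \a image to the tail of \a queue.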
static void
enqueue_image(image_queue_t* queue, image_t* image)
{
	image->next = NULL;

	image->prev = queue->tail;
	if (queue->tail)
		queue->tail->next = image;

	queue->tail = image;
	if (!queue->head)
		queue->head = image;
}


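//! Removes \a image from \a queue.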
static void
dequeue_image(image_queue_t* queue, image_t* image)
{
	if (image->next)
		image->next->prev = image->prev;
	else
		queue->tail = image->prev;

	if (image->prev)
		image->prev->next = image->next;
	else
		queue->head = image->next;

	image->prev = NULL;
	image->next = NULL;
}


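/*!	Returns the first image in \a queue whose name (or path, if \a isPath is
	\c true) matches \a name and whose type is contained in \a typeMask, or
	\c NULL if there is no such image.
*/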
static image_t*
find_image_in_queue(image_queue_t* queue, const char* name, bool isPath,
	uint32 typeMask)
{
	for (image_t* image = queue->head; image; image = image->next) {
		const char* imageName = isPath ? image->path : image->name;
		int length = isPath ? sizeof(image->path) : sizeof(image->name);

		if (!strncmp(imageName, name, length)
			&& (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
			return image;
		}
	}

	return NULL;
}


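/*!	Sets \a flagsToSet and clears \a flagsToClear on \a image and all images
	it (transitively) depends on. Uses a breadth-first traversal; the
	RFLAG_VISITED flag makes sure every image is processed only once.
*/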
static void
update_image_flags_recursively(image_t* image, uint32 flagsToSet,
	uint32 flagsToClear)
{
	image_t* queue[sLoadedImageCount];
	uint32 count = 0;
	uint32 index = 0;
	queue[count++] = image;
	image->flags |= RFLAG_VISITED;

	while (index < count) {
		// pop next image
		image = queue[index++];

		// push dependencies
		for (uint32 i = 0; i < image->num_needed; i++) {
			image_t* needed = image->needed[i];
			if ((needed->flags & RFLAG_VISITED) == 0) {
				queue[count++] = needed;
				needed->flags |= RFLAG_VISITED;
			}
		}
	}

	// update flags
	for (uint32 i = 0; i < count; i++) {
		queue[i]->flags = (queue[i]->flags | flagsToSet)
			& ~(flagsToClear | RFLAG_VISITED);
	}
}


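/*!	Recursively adds \a image and its dependencies to \a initList in
	topologically sorted order (dependencies first), starting at index
	\a slot. \a sortFlag marks images that have already been visited.
	Returns the next free slot in \a initList.
*/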
static uint32
topological_sort(image_t* image, uint32 slot, image_t** initList,
	uint32 sortFlag)
{
	uint32 i;

	if (image->flags & sortFlag)
		return slot;

	image->flags |= sortFlag; /* make sure we don't visit this one */
	for (i = 0; i < image->num_needed; i++)
		slot = topological_sort(image->needed[i], slot, initList, sortFlag);

	initList[slot] = image;
	return slot + 1;
}


/*!	Finds the load address and address specifier of the given image region.
*/
static void
get_image_region_load_address(image_t* image, uint32 index, int32 lastDelta,
	bool fixed, addr_t& loadAddress, uint32& addressSpecifier)
{
	if (image->dynamic_ptr != 0 && !fixed) {
		// relocatable image... we can afford to place wherever
		if (index == 0) {
			// but only the first segment gets a free ride
			loadAddress = RLD_PROGRAM_BASE;
			addressSpecifier = B_BASE_ADDRESS;
		} else {
			loadAddress = image->regions[index].vmstart + lastDelta;
			addressSpecifier = B_EXACT_ADDRESS;
		}
	} else {
		// not relocatable, put it where it asks or die trying
		loadAddress = image->regions[index].vmstart;
		addressSpecifier = B_EXACT_ADDRESS;
	}
}


// #pragma mark -


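/*!	Allocates and initializes a new image_t with room for \a regionCount
	regions. The image name is set to the last component of \a name; it may
	later be replaced by the DT_SONAME entry. Returns \c NULL when out of
	memory.
*/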
image_t*
create_image(const char* name, const char* path, int regionCount)
{
	size_t allocSize = sizeof(image_t)
		+ (regionCount - 1) * sizeof(elf_region_t);

	image_t* image = (image_t*)malloc(allocSize);
	if (image == NULL) {
		FATAL("no memory for image %s\n", path);
		return NULL;
	}

	memset(image, 0, allocSize);

	strlcpy(image->path, path, sizeof(image->path));

	// Make the last component of the supplied name the image name.
	// If present, DT_SONAME will replace this name.
	const char* lastSlash = strrchr(name, '/');
	if (lastSlash != NULL)
		strlcpy(image->name, lastSlash + 1, sizeof(image->name));
	else
		strlcpy(image->name, name, sizeof(image->name));

	image->ref_count = 1;
	image->num_regions = regionCount;

	return image;
}


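/*!	Frees \a image and the resources it owns: the needed images array, the
	version infos, and the symbol patcher lists. In DEBUG builds the memory
	is poisoned with 0xa5 first, so accidental reuse is caught more easily.
*/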
void
delete_image_struct(image_t* image)
{
#ifdef DEBUG
	size_t size = sizeof(image_t)
		+ (image->num_regions - 1) * sizeof(elf_region_t);
	memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
#endif
	free(image->needed);
	free(image->versions);

	while (RuntimeLoaderSymbolPatcher* patcher
			= image->defined_symbol_patchers) {
		image->defined_symbol_patchers = patcher->next;
		delete patcher;
	}
	while (RuntimeLoaderSymbolPatcher* patcher
			= image->undefined_symbol_patchers) {
		image->undefined_symbol_patchers = patcher->next;
		delete patcher;
	}

#ifdef DEBUG
	// overwrite the image to make sure it isn't accidentally reused anywhere
	memset(image, 0xa5, size);
#endif
	free(image);
}


void
delete_image(image_t* image)
{
	if (image == NULL)
		return;

	_kern_unregister_image(image->id);
		// registered in load_container()

	delete_image_struct(image);
}


void
put_image(image_t* image)
{
	// If all references to the image are gone, add it to the disposable list
	// and remove all dependencies

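	// atomic_add() returns the previous value; 1 means we have just released
	// the last reference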
	if (atomic_add(&image->ref_count, -1) == 1) {
		size_t i;

		dequeue_image(&sLoadedImages, image);
		enqueue_image(&sDisposableImages, image);
		sLoadedImageCount--;

		for (i = 0; i < image->num_needed; i++)
			put_image(image->needed[i]);
	}
}


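/*!	Maps all regions of \a image from the file given by \a fd into the
	team's address space. A single address range covering all regions is
	reserved first, so that they keep their relative positions. If \a fixed
	is \c true, the regions are mapped at the exact addresses specified in
	the ELF program headers.
*/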
status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0, fixed,
			loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + 8 * 1024)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%lu%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, fixed, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the start
		// of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments r/w first -- write access might be needed for
			// relocations. When we're done with those, we change the
			// protection of read-only segments back to read-only. We map
			// those segments over-committing, since quite likely only a
			// relatively small number of pages needs to be touched, and we
			// want to avoid a lot of memory being committed for them
			// temporarily, just because we have to map them writable.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP, false,
				fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}


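//! Deletes the areas of all regions of \a image.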
void
unmap_image(image_t* image)
{
	for (uint32 i = 0; i < image->num_regions; i++) {
		_kern_delete_area(image->regions[i].id);

		image->regions[i].id = -1;
	}
}


/*!	Changes the protection of all read-only segments to really be read-only.
	The areas have to be mapped read/write first, so that relocations can be
	applied to them.
*/
void
remap_images()
{
	for (image_t* image = sLoadedImages.head; image != NULL;
			image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if ((image->regions[i].flags & RFLAG_RW) == 0
				&& (image->regions[i].flags & RFLAG_REMAPPED) == 0) {
				// we only need to do this once, so we remember those we've
				// already remapped
				if (_kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_EXECUTE_AREA) == B_OK) {
					image->regions[i].flags |= RFLAG_REMAPPED;
				}
			}
		}
	}
}


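/*!	Registers \a image with the kernel. The text and data segments of the
	image_info are computed from the mapped regions, since segments may have
	been split into separate regions.
*/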
void
register_image(image_t* image, int fd, const char* path)
{
	struct stat stat;
	image_info info;

	// TODO: set these correctly
	info.id = 0;
	info.type = image->type;
	info.sequence = 0;
	info.init_order = 0;
	info.init_routine = (void (*)())image->init_routine;
	info.term_routine = (void (*)())image->term_routine;

	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
		info.device = stat.st_dev;
		info.node = stat.st_ino;
	} else {
		info.device = -1;
		info.node = -1;
	}

	// We may have split segments into separate regions. Compute the correct
	// segments for the image info.
	addr_t textBase = 0;
	addr_t textEnd = 0;
	addr_t dataBase = 0;
	addr_t dataEnd = 0;
	for (uint32 i = 0; i < image->num_regions; i++) {
		addr_t base = image->regions[i].vmstart;
		addr_t end = base + image->regions[i].vmsize;
		if (image->regions[i].flags & RFLAG_RW) {
			// data
			if (dataBase == 0) {
				dataBase = base;
				dataEnd = end;
			} else {
				dataBase = std::min(dataBase, base);
				dataEnd = std::max(dataEnd, end);
			}
		} else {
			// text
			if (textBase == 0) {
				textBase = base;
				textEnd = end;
			} else {
				textBase = std::min(textBase, base);
				textEnd = std::max(textEnd, end);
			}
		}
	}

	strlcpy(info.name, path, sizeof(info.name));
	info.text = (void*)textBase;
	info.text_size = textEnd - textBase;
	info.data = (void*)dataBase;
	info.data_size = dataEnd - dataBase;
	info.api_version = image->api_version;
	info.abi = image->abi;
	image->id = _kern_register_image(&info, sizeof(image_info));
}


//! After fork, we lazily rebuild the image IDs of all loaded images.
status_t
update_image_ids()
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}
	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}

	gInvalidImageIDs = false;
	return B_OK;
}


image_queue_t&
get_loaded_images()
{
	return sLoadedImages;
}


image_queue_t&
get_disposable_images()
{
	return sDisposableImages;
}


uint32
count_loaded_images()
{
	return sLoadedImageCount;
}


void
enqueue_loaded_image(image_t* image)
{
	enqueue_image(&sLoadedImages, image);
	sLoadedImageCount++;
}


void
dequeue_loaded_image(image_t* image)
{
	dequeue_image(&sLoadedImages, image);
	sLoadedImageCount--;
}


void
dequeue_disposable_image(image_t* image)
{
	dequeue_image(&sDisposableImages, image);
}


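/*!	Finds a loaded image by \a name and \a typeMask. \a name is treated as
	a path if it contains a '/'.
*/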
image_t*
find_loaded_image_by_name(char const* name, uint32 typeMask)
{
	bool isPath = strchr(name, '/') != NULL;
	return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
}


image_t*
find_loaded_image_by_id(image_id id, bool ignoreDisposable)
{
	if (gInvalidImageIDs) {
		// After fork, we lazily rebuild the image IDs of all loaded images
		update_image_ids();
	}

	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	if (ignoreDisposable)
		return NULL;

	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	return NULL;
}


void
set_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, flags, 0);
}


void
clear_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, 0, flags);
}


ssize_t
get_sorted_image_list(image_t* image, image_t*** _list, uint32 sortFlag)
{
	image_t** list;

	list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t*));
	if (list == NULL) {
		FATAL("memory shortage in get_sorted_image_list()\n");
		*_list = NULL;
		return B_NO_MEMORY;
	}

	memset(list, 0, sLoadedImageCount * sizeof(image_t*));

	*_list = list;
	return topological_sort(image, 0, list, sortFlag);
}
632