xref: /haiku/src/system/runtime_loader/images.cpp (revision 239222b2369c39dc52df52b0a7cdd6cc0a91bc92)
/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Manuel J. Petit. All rights reserved.
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "images.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <syscalls.h>
#include <vm_defs.h>

#include "add_ons.h"
#include "runtime_loader_private.h"


#define RLD_PROGRAM_BASE 0x00200000
	/* keep in sync with app ldscript */


bool gInvalidImageIDs;

static image_queue_t sLoadedImages = {0, 0};
static image_queue_t sDisposableImages = {0, 0};
static uint32 sLoadedImageCount = 0;


//! Remaps the image ID of \a image after fork.
static status_t
update_image_id(image_t* image)
{
	int32 cookie = 0;
	image_info info;
	while (_kern_get_next_image_info(B_CURRENT_TEAM, &cookie, &info,
			sizeof(image_info)) == B_OK) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if (image->regions[i].vmstart == (addr_t)info.text) {
				image->id = info.id;
				return B_OK;
			}
		}
	}

	FATAL("Could not update image ID %ld after fork()!\n", image->id);
	return B_ENTRY_NOT_FOUND;
}


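//! Appends \a image to the tail of \a queue.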
static void
enqueue_image(image_queue_t* queue, image_t* image)
{
	image->next = NULL;

	image->prev = queue->tail;
	if (queue->tail)
		queue->tail->next = image;

	queue->tail = image;
	if (!queue->head)
		queue->head = image;
}


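//! Removes \a image from \a queue and resets its link pointers.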
static void
dequeue_image(image_queue_t* queue, image_t* image)
{
	if (image->next)
		image->next->prev = image->prev;
	else
		queue->tail = image->prev;

	if (image->prev)
		image->prev->next = image->next;
	else
		queue->head = image->next;

	image->prev = NULL;
	image->next = NULL;
}


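/*!	Returns the first image in \a queue whose type is in \a typeMask and
	whose path (if \a isPath is \c true) or name matches \a name, or NULL
	if there is no match.
*/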
static image_t*
find_image_in_queue(image_queue_t* queue, const char* name, bool isPath,
	uint32 typeMask)
{
	for (image_t* image = queue->head; image; image = image->next) {
		const char* imageName = isPath ? image->path : image->name;
		int length = isPath ? sizeof(image->path) : sizeof(image->name);

		if (!strncmp(imageName, name, length)
			&& (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
			return image;
		}
	}

	return NULL;
}


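/*!	Sets \a flagsToSet and clears \a flagsToClear on \a image and all of its
	transitive dependencies. The dependency graph is traversed breadth-first,
	with RFLAG_VISITED marking images already in the queue; the marker is
	cleared again when the flags are updated.
*/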
static void
update_image_flags_recursively(image_t* image, uint32 flagsToSet,
	uint32 flagsToClear)
{
	image_t* queue[sLoadedImageCount];
	uint32 count = 0;
	uint32 index = 0;
	queue[count++] = image;
	image->flags |= RFLAG_VISITED;

	while (index < count) {
		// pop next image
		image = queue[index++];

		// push dependencies
		for (uint32 i = 0; i < image->num_needed; i++) {
			image_t* needed = image->needed[i];
			if ((needed->flags & RFLAG_VISITED) == 0) {
				queue[count++] = needed;
				needed->flags |= RFLAG_VISITED;
			}
		}
	}

	// update flags
	for (uint32 i = 0; i < count; i++) {
		queue[i]->flags = (queue[i]->flags | flagsToSet)
			& ~(flagsToClear | RFLAG_VISITED);
	}
}


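/*!	Recursively places \a image and its not yet visited dependencies into
	\a initList, dependencies first, so that every image follows the images
	it needs. \a sortFlag is set on each visited image and is left set; the
	caller is expected to clear it again (e.g. via
	clear_image_flags_recursively()). Returns the next free \a slot in
	\a initList.
*/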
static uint32
topological_sort(image_t* image, uint32 slot, image_t** initList,
	uint32 sortFlag)
{
	uint32 i;

	if (image->flags & sortFlag)
		return slot;

	image->flags |= sortFlag; /* make sure we don't visit this one */
	for (i = 0; i < image->num_needed; i++)
		slot = topological_sort(image->needed[i], slot, initList, sortFlag);

	initList[slot] = image;
	return slot + 1;
}


/*!	Finds the load address and address specifier of the given image region.
*/
static void
get_image_region_load_address(image_t* image, uint32 index, int32 lastDelta,
	bool fixed, addr_t& loadAddress, uint32& addressSpecifier)
{
	if (image->dynamic_ptr != 0 && !fixed) {
		// relocatable image... we can afford to place wherever
		if (index == 0) {
			// but only the first segment gets a free ride
			loadAddress = RLD_PROGRAM_BASE;
			addressSpecifier = B_BASE_ADDRESS;
		} else {
			loadAddress = image->regions[index].vmstart + lastDelta;
			addressSpecifier = B_EXACT_ADDRESS;
		}
	} else {
		// not relocatable, put it where it asks or die trying
		loadAddress = image->regions[index].vmstart;
		addressSpecifier = B_EXACT_ADDRESS;
	}
}


// #pragma mark -


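/*!	Allocates an image_t with room for \a regionCount regions and
	initializes it: the path is copied, the image name is the last component
	of \a name (later replaced by DT_SONAME, if present), and the reference
	count starts at 1. Returns NULL if memory is short.
*/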
image_t*
create_image(const char* name, const char* path, int regionCount)
{
	size_t allocSize = sizeof(image_t)
		+ (regionCount - 1) * sizeof(elf_region_t);

	image_t* image = (image_t*)malloc(allocSize);
	if (image == NULL) {
		FATAL("no memory for image %s\n", path);
		return NULL;
	}

	memset(image, 0, allocSize);

	strlcpy(image->path, path, sizeof(image->path));

	// Make the last component of the supplied name the image name.
	// If present, DT_SONAME will replace this name.
	const char* lastSlash = strrchr(name, '/');
	if (lastSlash != NULL)
		strlcpy(image->name, lastSlash + 1, sizeof(image->name));
	else
		strlcpy(image->name, name, sizeof(image->name));

	image->ref_count = 1;
	image->num_regions = regionCount;

	return image;
}


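/*!	Frees \a image together with the resources it owns: the needed and
	versions arrays and both symbol patcher lists. In DEBUG builds the freed
	memory is overwritten first, so accidental reuse is easier to spot.
*/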
void
delete_image_struct(image_t* image)
{
#ifdef DEBUG
	size_t size = sizeof(image_t)
		+ (image->num_regions - 1) * sizeof(elf_region_t);
	memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
#endif
	free(image->needed);
	free(image->versions);

	while (RuntimeLoaderSymbolPatcher* patcher
			= image->defined_symbol_patchers) {
		image->defined_symbol_patchers = patcher->next;
		delete patcher;
	}
	while (RuntimeLoaderSymbolPatcher* patcher
			= image->undefined_symbol_patchers) {
		image->undefined_symbol_patchers = patcher->next;
		delete patcher;
	}

#ifdef DEBUG
	// overwrite images to make sure they aren't accidentally reused anywhere
	memset(image, 0xa5, size);
#endif
	free(image);
}


void
delete_image(image_t* image)
{
	if (image == NULL)
		return;

	_kern_unregister_image(image->id);
		// registered in load_container()

	delete_image_struct(image);
}


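/*!	Releases one reference to \a image. Once the last reference is gone, the
	image is moved from the loaded to the disposable queue, and the
	references it holds on its dependencies are released recursively.
*/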
void
put_image(image_t* image)
{
	// If all references to the image are gone, add it to the disposable list
	// and remove all dependencies

	if (atomic_add(&image->ref_count, -1) == 1) {
		size_t i;

		dequeue_image(&sLoadedImages, image);
		enqueue_image(&sDisposableImages, image);
		sLoadedImageCount--;

		for (i = 0; i < image->num_needed; i++)
			put_image(image->needed[i]);
	}
}


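/*!	Maps all regions of \a image into the current team's address space.
	First the complete address range is computed and reserved, so the
	regions keep their relative distances; then each region is either
	created as an anonymous area (RFLAG_ANON) or mapped privately from
	\a fd. Writable regions get their trailing bytes beyond the file data
	cleared. On success the regions' delta and vmstart fields describe the
	final load addresses.
*/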
status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		// the delta to the previous region only exists from the second
		// region on
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0, fixed,
			loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + 8 * 1024)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%lu%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, fixed, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the
		// start of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_READ_AREA | B_WRITE_AREA,
				REGION_PRIVATE_MAP, false, fd,
				PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}


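//! Deletes the areas of all regions of \a image.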
void
unmap_image(image_t* image)
{
	for (uint32 i = 0; i < image->num_regions; i++) {
		_kern_delete_area(image->regions[i].id);

		image->regions[i].id = -1;
	}
}


/*!	This function will change the protection of all read-only segments to
	really be read-only.
	The areas have to be read/write first, so that they can be relocated.
*/
void
remap_images()
{
	for (image_t* image = sLoadedImages.head; image != NULL;
			image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if ((image->regions[i].flags & RFLAG_RW) == 0
				&& (image->regions[i].flags & RFLAG_REMAPPED) == 0) {
				// we only need to do this once, so we remember those
				// we've already mapped
				if (_kern_set_area_protection(image->regions[i].id,
						B_READ_AREA | B_EXECUTE_AREA) == B_OK) {
					image->regions[i].flags |= RFLAG_REMAPPED;
				}
			}
		}
	}
}


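/*!	Registers \a image with the kernel: an image_info is filled in from the
	image's first two regions and the stat of \a fd, and the ID returned by
	_kern_register_image() is stored in \a image.
*/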
void
register_image(image_t* image, int fd, const char* path)
{
	struct stat stat;
	image_info info;

	// TODO: set these correctly
	info.id = 0;
	info.type = image->type;
	info.sequence = 0;
	info.init_order = 0;
	info.init_routine = (void (*)())image->init_routine;
	info.term_routine = (void (*)())image->term_routine;

	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
		info.device = stat.st_dev;
		info.node = stat.st_ino;
	} else {
		info.device = -1;
		info.node = -1;
	}

	strlcpy(info.name, path, sizeof(info.name));
	info.text = (void *)image->regions[0].vmstart;
	info.text_size = image->regions[0].vmsize;
	info.data = (void *)image->regions[1].vmstart;
	info.data_size = image->regions[1].vmsize;
	info.api_version = image->api_version;
	info.abi = image->abi;
	image->id = _kern_register_image(&info, sizeof(image_info));
}


//! After fork, we lazily rebuild the image IDs of all loaded images.
status_t
update_image_ids()
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}
	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}

	gInvalidImageIDs = false;
	return B_OK;
}


image_queue_t&
get_loaded_images()
{
	return sLoadedImages;
}


image_queue_t&
get_disposable_images()
{
	return sDisposableImages;
}


uint32
count_loaded_images()
{
	return sLoadedImageCount;
}


void
enqueue_loaded_image(image_t* image)
{
	enqueue_image(&sLoadedImages, image);
	sLoadedImageCount++;
}


void
dequeue_loaded_image(image_t* image)
{
	dequeue_image(&sLoadedImages, image);
	sLoadedImageCount--;
}


void
dequeue_disposable_image(image_t* image)
{
	dequeue_image(&sDisposableImages, image);
}


image_t*
find_loaded_image_by_name(char const* name, uint32 typeMask)
{
	bool isPath = strchr(name, '/') != NULL;
	return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
}


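/*!	Returns the loaded image with ID \a id, or NULL if there is none.
	Disposable images are considered as well, unless \a ignoreDisposable is
	\c true. If a fork() has invalidated the image IDs, they are rebuilt
	first.
*/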
image_t*
find_loaded_image_by_id(image_id id, bool ignoreDisposable)
{
	if (gInvalidImageIDs) {
		// After fork, we lazily rebuild the image IDs of all loaded images
		update_image_ids();
	}

	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	if (ignoreDisposable)
		return NULL;

	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	return NULL;
}


void
set_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, flags, 0);
}


void
clear_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, 0, flags);
}


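/*!	Creates a topologically sorted list of \a image and its dependencies in
	\a *_list, dependencies first, marking visited images with \a sortFlag
	(a caller-chosen RFLAG_* bit). Ownership of the list passes to the
	caller, who must free() it. Returns the number of images in the list,
	or B_NO_MEMORY on allocation failure.
*/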
ssize_t
get_sorted_image_list(image_t* image, image_t*** _list, uint32 sortFlag)
{
	image_t** list;

	list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t*));
	if (list == NULL) {
		FATAL("memory shortage in get_sorted_image_list()\n");
		*_list = NULL;
		return B_NO_MEMORY;
	}

	memset(list, 0, sLoadedImageCount * sizeof(image_t*));

	*_list = list;
	return topological_sort(image, 0, list, sortFlag);
}
591