xref: /haiku/src/system/runtime_loader/elf.cpp (revision ed6250c95736c0b55da79d6e9dd01369532260c0)
1 /*
2  * Copyright 2003-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2002, Manuel J. Petit. All rights reserved.
6  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include "runtime_loader_private.h"
12 #include "vm.h"
13 
14 #include <OS.h>
15 
16 #include <string.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 
20 #include <arch/cpu.h>
21 #include <elf32.h>
22 #include <runtime_loader.h>
23 #include <sem.h>
24 #include <syscalls.h>
25 #include <tracing_config.h>
26 #include <user_runtime.h>
27 #include <vm_types.h>
28 
29 
30 //#define TRACE_RLD
31 #ifdef TRACE_RLD
32 #	define TRACE(x) dprintf x
33 #else
34 #	define TRACE(x) ;
35 #endif
36 
37 
38 // ToDo: implement better locking strategy
39 // ToDo: implement lazy binding
40 
41 #define	PAGE_MASK (B_PAGE_SIZE - 1)
42 
43 #define	PAGE_OFFSET(x) ((x) & (PAGE_MASK))
44 #define	PAGE_BASE(x) ((x) & ~(PAGE_MASK))
45 #define TO_PAGE_SIZE(x) (((x) + (PAGE_MASK)) & ~(PAGE_MASK))
46 
47 #define RLD_PROGRAM_BASE 0x00200000
48 	/* keep in sync with app ldscript */
49 
50 enum {
51 	RFLAG_RW					= 0x0001,
52 	RFLAG_ANON					= 0x0002,
53 
54 	RFLAG_TERMINATED			= 0x0200,
55 	RFLAG_INITIALIZED			= 0x0400,
56 	RFLAG_SYMBOLIC				= 0x0800,
57 	RFLAG_RELOCATED				= 0x1000,
58 	RFLAG_PROTECTED				= 0x2000,
59 	RFLAG_DEPENDENCIES_LOADED	= 0x4000,
60 	RFLAG_REMAPPED				= 0x8000,
61 
62 	RFLAG_VISITED				= 0x10000
63 		// temporarily set in the symbol resolution code
64 };
65 
66 
67 #define IMAGE_TYPE_TO_MASK(type)	(1 << ((type) - 1))
68 #define ALL_IMAGE_TYPES				(IMAGE_TYPE_TO_MASK(B_APP_IMAGE) \
69 									| IMAGE_TYPE_TO_MASK(B_LIBRARY_IMAGE) \
70 									| IMAGE_TYPE_TO_MASK(B_ADD_ON_IMAGE) \
71 									| IMAGE_TYPE_TO_MASK(B_SYSTEM_IMAGE))
72 #define APP_OR_LIBRARY_TYPE			(IMAGE_TYPE_TO_MASK(B_APP_IMAGE) \
73 									| IMAGE_TYPE_TO_MASK(B_LIBRARY_IMAGE))
74 
75 typedef void (*init_term_function)(image_id);
76 
77 static image_queue_t sLoadedImages = {0, 0};
78 static image_queue_t sDisposableImages = {0, 0};
79 static uint32 sLoadedImageCount = 0;
80 static image_t *sProgramImage;
81 static KMessage sErrorMessage;
82 
83 #ifdef BEOS_STYLE_SYMBOLS_RESOLUTION
84 static bool sResolveSymbolsBeOSStyle = true;
85 #else
86 static bool sResolveSymbolsBeOSStyle = false;
87 #endif
88 
89 // a recursive lock
90 static sem_id rld_sem;
91 static thread_id rld_sem_owner;
92 static int32 rld_sem_count;
93 
94 
95 #ifdef TRACE_RLD
96 #	define FATAL(x...) dprintf("runtime_loader: " x);
97 
98 void
99 dprintf(const char *format, ...)
100 {
101 	char buffer[1024];
102 
103 	va_list list;
104 	va_start(list, format);
105 
106 	vsnprintf(buffer, sizeof(buffer), format, list);
107 	_kern_debug_output(buffer);
108 
109 	va_end(list);
110 }
111 #else
112 #	define FATAL(x...) printf("runtime_loader: " x);
113 #endif
114 
115 
116 #ifdef RUNTIME_LOADER_TRACING
117 
118 void
119 ktrace_printf(const char *format, ...)
120 {
121 	va_list list;
122 	va_start(list, format);
123 
124 	char buffer[1024];
125 	vsnprintf(buffer, sizeof(buffer), format, list);
126 	_kern_ktrace_output(buffer);
127 
128 	va_end(list);
129 }
130 
131 #define KTRACE(x...)	ktrace_printf(x)
132 
133 #else
134 #	define KTRACE(x...)
135 #endif	// RUNTIME_LOADER_TRACING
136 
137 
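/**	rld_lock() and rld_unlock() implement the recursive lock noted above:
 *	a thread only acquires the semaphore on its first, outermost rld_lock()
 *	and releases it again when the matching outermost rld_unlock() drops the
 *	nesting count back to zero.
 */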
138 static void
139 rld_unlock()
140 {
141 	if (rld_sem_count-- == 1) {
142 		rld_sem_owner = -1;
143 		release_sem(rld_sem);
144 	}
145 }
146 
147 
148 static void
149 rld_lock()
150 {
151 	thread_id self = find_thread(NULL);
152 	if (self != rld_sem_owner) {
153 		acquire_sem(rld_sem);
154 		rld_sem_owner = self;
155 	}
156 	rld_sem_count++;
157 }
158 
159 
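/**	enqueue_image() appends an image to the tail of the given doubly linked
 *	image queue; dequeue_image() below unlinks it again.
 */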
160 static void
161 enqueue_image(image_queue_t *queue, image_t *image)
162 {
163 	image->next = 0;
164 
165 	image->prev = queue->tail;
166 	if (queue->tail)
167 		queue->tail->next = image;
168 
169 	queue->tail = image;
170 	if (!queue->head)
171 		queue->head = image;
172 }
173 
174 
175 static void
176 dequeue_image(image_queue_t *queue, image_t *image)
177 {
178 	if (image->next)
179 		image->next->prev = image->prev;
180 	else
181 		queue->tail = image->prev;
182 
183 	if (image->prev)
184 		image->prev->next = image->next;
185 	else
186 		queue->head = image->next;
187 
188 	image->prev = 0;
189 	image->next = 0;
190 }
191 
192 
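/**	Computes the classic System V ELF hash of a symbol name. The result is
 *	used to pick a bucket in an image's DT_HASH table, e.g.
 *		bucket = elf_hash((uint8 *)name) % HASHTABSIZE(image);
 *	as done in find_symbol() below.
 */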
193 static uint32
194 elf_hash(const uint8 *name)
195 {
196 	uint32 hash = 0;
197 	uint32 temp;
198 
199 	while (*name) {
200 		hash = (hash << 4) + *name++;
201 		if ((temp = hash & 0xf0000000)) {
202 			hash ^= temp >> 24;
203 		}
204 		hash &= ~temp;
205 	}
206 	return hash;
207 }
208 
209 
210 static inline bool
211 report_errors()
212 {
213 	return gProgramArgs->error_port >= 0;
214 }
215 
216 
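/**	Returns the first image in the queue whose name (or path, if isPath is
 *	true) matches and whose type is included in the given type mask, or NULL
 *	if there is no such image.
 */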
217 static image_t *
218 find_image_in_queue(image_queue_t *queue, const char *name, bool isPath,
219 	uint32 typeMask)
220 {
221 	image_t *image;
222 
223 	for (image = queue->head; image; image = image->next) {
224 		const char *imageName = isPath ? image->path : image->name;
225 		int length = isPath ? sizeof(image->path) : sizeof(image->name);
226 
227 		if (!strncmp(imageName, name, length)
228 			&& (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
229 			return image;
230 		}
231 	}
232 
233 	return NULL;
234 }
235 
236 
237 static image_t *
238 find_image(char const *name, uint32 typeMask)
239 {
240 	bool isPath = (strchr(name, '/') != NULL);
241 	return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
242 }
243 
244 
245 static image_t *
246 find_loaded_image_by_id(image_id id)
247 {
248 	image_t *image;
249 
250 	for (image = sLoadedImages.head; image; image = image->next) {
251 		if (image->id == id)
252 			return image;
253 	}
254 
255 	// For the termination routine, we need to look into the list of
256 	// disposable images as well
257 	for (image = sDisposableImages.head; image; image = image->next) {
258 		if (image->id == id)
259 			return image;
260 	}
261 
262 	return NULL;
263 }
264 
265 
266 static const char *
267 get_program_path()
268 {
269 	for (image_t *image = sLoadedImages.head; image; image = image->next) {
270 		if (image->type == B_APP_IMAGE)
271 			return image->path;
272 	}
273 
274 	return NULL;
275 }
276 
277 
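/**	Performs basic sanity checks on the ELF header (magic, 32 bit class,
 *	program header table present with a sane entry size) and returns the
 *	total sizes of the program and section header tables to the caller.
 */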
278 static status_t
279 parse_elf_header(struct Elf32_Ehdr *eheader, int32 *_pheaderSize,
280 	int32 *_sheaderSize)
281 {
282 	if (memcmp(eheader->e_ident, ELF_MAGIC, 4) != 0)
283 		return B_NOT_AN_EXECUTABLE;
284 
285 	if (eheader->e_ident[4] != ELFCLASS32)
286 		return B_NOT_AN_EXECUTABLE;
287 
288 	if (eheader->e_phoff == 0)
289 		return B_NOT_AN_EXECUTABLE;
290 
291 	if (eheader->e_phentsize < sizeof(struct Elf32_Phdr))
292 		return B_NOT_AN_EXECUTABLE;
293 
294 	*_pheaderSize = eheader->e_phentsize * eheader->e_phnum;
295 	*_sheaderSize = eheader->e_shentsize * eheader->e_shnum;
296 
297 	if (*_pheaderSize <= 0 || *_sheaderSize <= 0)
298 		return B_NOT_AN_EXECUTABLE;
299 
300 	return B_OK;
301 }
302 
303 
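/**	Counts the regions needed for the PT_LOAD program headers: one per
 *	segment, plus an extra one when the segment's memory size spills onto
 *	pages beyond those backed by the file (i.e. a separate BSS region).
 */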
304 static int32
305 count_regions(char const *buff, int phnum, int phentsize)
306 {
307 	struct Elf32_Phdr *pheaders;
308 	int32 count = 0;
309 	int i;
310 
311 	for (i = 0; i < phnum; i++) {
312 		pheaders = (struct Elf32_Phdr *)(buff + i * phentsize);
313 
314 		switch (pheaders->p_type) {
315 			case PT_NULL:
316 				/* NOP header */
317 				break;
318 			case PT_LOAD:
319 				count += 1;
320 				if (pheaders->p_memsz != pheaders->p_filesz) {
321 					addr_t A = TO_PAGE_SIZE(pheaders->p_vaddr + pheaders->p_memsz);
322 					addr_t B = TO_PAGE_SIZE(pheaders->p_vaddr + pheaders->p_filesz);
323 
324 					if (A != B)
325 						count += 1;
326 				}
327 				break;
328 			case PT_DYNAMIC:
329 				/* will be handled at some other place */
330 				break;
331 			case PT_INTERP:
332 				/* should check here for an appropriate interpreter */
333 				break;
334 			case PT_NOTE:
335 				/* unsupported */
336 				break;
337 			case PT_SHLIB:
338 				/* undefined semantics */
339 				break;
340 			case PT_PHDR:
341 				/* we don't use it */
342 				break;
343 			default:
344 				FATAL("unhandled pheader type 0x%lx\n", pheaders->p_type);
345 				return B_BAD_DATA;
346 		}
347 	}
348 
349 	return count;
350 }
351 
352 
353 /*
354  * create_image() & delete_image()
355  *
356  * 	Create and destroy image_t structures. In DEBUG builds, the destroyer
357  * 	fills the memory buffers with garbage before freeing them.
358  */
359 
360 static image_t *
361 create_image(const char *name, const char *path, int num_regions)
362 {
363 	size_t allocSize = sizeof(image_t) + (num_regions - 1) * sizeof(elf_region_t);
364 	const char *lastSlash;
365 
366 	image_t *image = (image_t*)malloc(allocSize);
367 	if (image == NULL) {
368 		FATAL("no memory for image %s\n", path);
369 		return NULL;
370 	}
371 
372 	memset(image, 0, allocSize);
373 
374 	strlcpy(image->path, path, sizeof(image->path));
375 
376 	// Make the last component of the supplied name the image name.
377 	// If present, DT_SONAME will replace this name.
378 	if ((lastSlash = strrchr(name, '/')))
379 		strlcpy(image->name, lastSlash + 1, sizeof(image->name));
380 	else
381 		strlcpy(image->name, name, sizeof(image->name));
382 
383 	image->ref_count = 1;
384 	image->num_regions = num_regions;
385 
386 	return image;
387 }
388 
389 
390 static void
391 delete_image_struct(image_t *image)
392 {
393 #ifdef DEBUG
394 	size_t size = sizeof(image_t) + (image->num_regions - 1) * sizeof(elf_region_t);
395 	memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
396 #endif
397 	free(image->needed);
398 
399 #ifdef DEBUG
400 	memset(image, 0xa5, size);
401 #endif
402 	free(image);
403 }
404 
405 
406 static void
407 delete_image(image_t *image)
408 {
409 	if (image == NULL)
410 		return;
411 
412 	_kern_unregister_image(image->id);
413 		// registered in load_container()
414 
415 	delete_image_struct(image);
416 }
417 
418 
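/**	Fills in image->regions[] from the PT_LOAD program headers, using the
 *	same splitting rule as count_regions(): a segment whose memory size
 *	extends onto pages beyond its file size gets a second, anonymous (BSS)
 *	region. The address of the PT_DYNAMIC segment is recorded in
 *	image->dynamic_ptr.
 */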
419 static status_t
420 parse_program_headers(image_t *image, char *buff, int phnum, int phentsize)
421 {
422 	struct Elf32_Phdr *pheader;
423 	int regcount;
424 	int i;
425 
426 	regcount = 0;
427 	for (i = 0; i < phnum; i++) {
428 		pheader = (struct Elf32_Phdr *)(buff + i * phentsize);
429 
430 		switch (pheader->p_type) {
431 			case PT_NULL:
432 				/* NOP header */
433 				break;
434 			case PT_LOAD:
435 				if (pheader->p_memsz == pheader->p_filesz) {
436 					/*
437 					 * everything in one area
438 					 */
439 					image->regions[regcount].start = pheader->p_vaddr;
440 					image->regions[regcount].size = pheader->p_memsz;
441 					image->regions[regcount].vmstart = PAGE_BASE(pheader->p_vaddr);
442 					image->regions[regcount].vmsize = TO_PAGE_SIZE(pheader->p_memsz
443 						+ PAGE_OFFSET(pheader->p_vaddr));
444 					image->regions[regcount].fdstart = pheader->p_offset;
445 					image->regions[regcount].fdsize = pheader->p_filesz;
446 					image->regions[regcount].delta = 0;
447 					image->regions[regcount].flags = 0;
448 					if (pheader->p_flags & PF_WRITE) {
449 						// this is a writable segment
450 						image->regions[regcount].flags |= RFLAG_RW;
451 					}
452 				} else {
453 					/*
454 					 * may require splitting
455 					 */
456 					addr_t A = TO_PAGE_SIZE(pheader->p_vaddr + pheader->p_memsz);
457 					addr_t B = TO_PAGE_SIZE(pheader->p_vaddr + pheader->p_filesz);
458 
459 					image->regions[regcount].start = pheader->p_vaddr;
460 					image->regions[regcount].size = pheader->p_filesz;
461 					image->regions[regcount].vmstart = PAGE_BASE(pheader->p_vaddr);
462 					image->regions[regcount].vmsize = TO_PAGE_SIZE(pheader->p_filesz
463 						+ PAGE_OFFSET(pheader->p_vaddr));
464 					image->regions[regcount].fdstart = pheader->p_offset;
465 					image->regions[regcount].fdsize = pheader->p_filesz;
466 					image->regions[regcount].delta = 0;
467 					image->regions[regcount].flags = 0;
468 					if (pheader->p_flags & PF_WRITE) {
469 						// this is a writable segment
470 						image->regions[regcount].flags |= RFLAG_RW;
471 					}
472 
473 					if (A != B) {
474 						/*
475 						 * yeah, it requires splitting
476 						 */
477 						regcount += 1;
478 						image->regions[regcount].start = pheader->p_vaddr;
479 						image->regions[regcount].size = pheader->p_memsz - pheader->p_filesz;
480 						image->regions[regcount].vmstart = image->regions[regcount-1].vmstart + image->regions[regcount-1].vmsize;
481 						image->regions[regcount].vmsize = TO_PAGE_SIZE(pheader->p_memsz + PAGE_OFFSET(pheader->p_vaddr))
482 							- image->regions[regcount-1].vmsize;
483 						image->regions[regcount].fdstart = 0;
484 						image->regions[regcount].fdsize = 0;
485 						image->regions[regcount].delta = 0;
486 						image->regions[regcount].flags = RFLAG_ANON;
487 						if (pheader->p_flags & PF_WRITE) {
488 							// this is a writable segment
489 							image->regions[regcount].flags |= RFLAG_RW;
490 						}
491 					}
492 				}
493 				regcount += 1;
494 				break;
495 			case PT_DYNAMIC:
496 				image->dynamic_ptr = pheader->p_vaddr;
497 				break;
498 			case PT_INTERP:
499 				/* should check here for an appropriate interpreter */
500 				break;
501 			case PT_NOTE:
502 				/* unsupported */
503 				break;
504 			case PT_SHLIB:
505 				/* undefined semantics */
506 				break;
507 			case PT_PHDR:
508 				/* we don't use it */
509 				break;
510 			default:
511 				FATAL("unhandled pheader type 0x%lx\n", pheader->p_type);
512 				return B_BAD_DATA;
513 		}
514 	}
515 
516 	return B_OK;
517 }
518 
519 
520 static bool
521 assert_dynamic_loadable(image_t *image)
522 {
523 	uint32 i;
524 
525 	if (!image->dynamic_ptr)
526 		return true;
527 
528 	for (i = 0; i < image->num_regions; i++) {
529 		if (image->dynamic_ptr >= image->regions[i].start
530 			&& image->dynamic_ptr < image->regions[i].start + image->regions[i].size)
531 			return true;
532 	}
533 
534 	return false;
535 }
536 
537 
538 /**	Changes the protection of all read-only segments to really be read-only
539  *	(and executable). The areas have to be mapped read/write first, so that
540  *	relocations can be applied to them.
541  */
542 
543 static void
544 remap_images(void)
545 {
546 	image_t *image;
547 	uint32 i;
548 
549 	for (image = sLoadedImages.head; image != NULL; image = image->next) {
550 		for (i = 0; i < image->num_regions; i++) {
551 			if ((image->regions[i].flags & RFLAG_RW) == 0
552 				&& (image->regions[i].flags & RFLAG_REMAPPED) == 0) {
553 				// we only need to do this once, so we remember those we've already mapped
554 				if (_kern_set_area_protection(image->regions[i].id,
555 						B_READ_AREA | B_EXECUTE_AREA) == B_OK)
556 					image->regions[i].flags |= RFLAG_REMAPPED;
557 			}
558 		}
559 	}
560 }
561 
562 
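/**	Maps all regions of the image into the current team's address space.
 *	A relocatable image gets its first region placed by the kernel (based at
 *	RLD_PROGRAM_BASE); the remaining regions follow at their original
 *	relative offsets. Anonymous (BSS) regions become plain areas, the other
 *	regions are privately mapped from the file, and the tail of a writable
 *	region beyond its file contents is zeroed out.
 */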
563 static status_t
564 map_image(int fd, char const *path, image_t *image, bool fixed)
565 {
566 	status_t status = B_OK;
567 	const char *baseName;
568 	uint32 i;
569 
570 	(void)(fd);
571 
572 	// cut the file name from the path as base name for the created areas
573 	baseName = strrchr(path, '/');
574 	if (baseName != NULL)
575 		baseName++;
576 	else
577 		baseName = path;
578 
579 	for (i = 0; i < image->num_regions; i++) {
580 		char regionName[B_OS_NAME_LENGTH];
581 		addr_t loadAddress;
582 		uint32 addressSpecifier;
583 
584 		// for BeOS compatibility: if we load an old BeOS executable, we
585 		// have to relocate it, if possible - we recognize it because the
586 		// vmstart is set to 0 (hopefully always)
587 		if (fixed && image->regions[i].vmstart == 0)
588 			fixed = false;
589 
590 		snprintf(regionName, sizeof(regionName), "%s_seg%lu%s",
591 			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");
592 
593 		if (image->dynamic_ptr && !fixed) {
594 			// relocatable image... we can afford to place wherever
595 			if (i == 0) {
596 				// but only the first segment gets a free ride
597 				loadAddress = RLD_PROGRAM_BASE;
598 				addressSpecifier = B_BASE_ADDRESS;
599 			} else {
600 				loadAddress = image->regions[i].vmstart + image->regions[i-1].delta;
601 				addressSpecifier = B_EXACT_ADDRESS;
602 			}
603 		} else {
604 			// not relocatable, put it where it asks or die trying
605 			loadAddress = image->regions[i].vmstart;
606 			addressSpecifier = B_EXACT_ADDRESS;
607 		}
608 
609 		if (image->regions[i].flags & RFLAG_ANON) {
610 			image->regions[i].id = _kern_create_area(regionName, (void **)&loadAddress,
611 				addressSpecifier, image->regions[i].vmsize, B_NO_LOCK,
612 				B_READ_AREA | B_WRITE_AREA);
613 
614 			if (image->regions[i].id < 0) {
615 				status = image->regions[i].id;
616 				goto error;
617 			}
618 
619 			image->regions[i].delta = loadAddress - image->regions[i].vmstart;
620 			image->regions[i].vmstart = loadAddress;
621 		} else {
622 			image->regions[i].id = sys_vm_map_file(regionName, (void **)&loadAddress,
623 				addressSpecifier, image->regions[i].vmsize, B_READ_AREA | B_WRITE_AREA,
624 				REGION_PRIVATE_MAP, path, PAGE_BASE(image->regions[i].fdstart));
625 
626 			if (image->regions[i].id < 0) {
627 				status = image->regions[i].id;
628 				goto error;
629 			}
630 
631 			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
632 				(void *)loadAddress, image->regions[i].vmsize,
633 				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));
634 
635 			image->regions[i].delta = loadAddress - image->regions[i].vmstart;
636 			image->regions[i].vmstart = loadAddress;
637 
638 			// handle trailer bits in data segment
639 			if (image->regions[i].flags & RFLAG_RW) {
640 				addr_t startClearing;
641 				addr_t toClear;
642 
643 				startClearing = image->regions[i].vmstart
644 					+ PAGE_OFFSET(image->regions[i].start)
645 					+ image->regions[i].size;
646 				toClear = image->regions[i].vmsize
647 					- PAGE_OFFSET(image->regions[i].start)
648 					- image->regions[i].size;
649 
650 				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n", startClearing, toClear));
651 				memset((void *)startClearing, 0, toClear);
652 			}
653 		}
654 	}
655 
656 	if (image->dynamic_ptr)
657 		image->dynamic_ptr += image->regions[0].delta;
658 
659 	return B_OK;
660 
661 error:
662 	return status;
663 }
664 
665 
666 static void
667 unmap_image(image_t *image)
668 {
669 	uint32 i;
670 
671 	for (i = 0; i < image->num_regions; i++) {
672 		_kern_delete_area(image->regions[i].id);
673 
674 		image->regions[i].id = -1;
675 	}
676 }
677 
678 
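/**	Counts the DT_NEEDED entries of the dynamic segment and records the
 *	pointers and sizes needed later (hash table, symbol and string tables,
 *	relocation tables, PLT relocations, init/term routines, DT_SONAME).
 *	Returns false if one of the mandatory tables (hash, symbols, strings)
 *	is missing.
 */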
679 static bool
680 parse_dynamic_segment(image_t *image)
681 {
682 	struct Elf32_Dyn *d;
683 	int i;
684 	int sonameOffset = -1;
685 
686 	image->symhash = 0;
687 	image->syms = 0;
688 	image->strtab = 0;
689 
690 	d = (struct Elf32_Dyn *)image->dynamic_ptr;
691 	if (!d)
692 		return true;
693 
694 	for (i = 0; d[i].d_tag != DT_NULL; i++) {
695 		switch (d[i].d_tag) {
696 			case DT_NEEDED:
697 				image->num_needed += 1;
698 				break;
699 			case DT_HASH:
700 				image->symhash = (uint32 *)(d[i].d_un.d_ptr + image->regions[0].delta);
701 				break;
702 			case DT_STRTAB:
703 				image->strtab = (char *)(d[i].d_un.d_ptr + image->regions[0].delta);
704 				break;
705 			case DT_SYMTAB:
706 				image->syms = (struct Elf32_Sym *)(d[i].d_un.d_ptr + image->regions[0].delta);
707 				break;
708 			case DT_REL:
709 				image->rel = (struct Elf32_Rel *)(d[i].d_un.d_ptr + image->regions[0].delta);
710 				break;
711 			case DT_RELSZ:
712 				image->rel_len = d[i].d_un.d_val;
713 				break;
714 			case DT_RELA:
715 				image->rela = (struct Elf32_Rela *)(d[i].d_un.d_ptr + image->regions[0].delta);
716 				break;
717 			case DT_RELASZ:
718 				image->rela_len = d[i].d_un.d_val;
719 				break;
720 			// TK: procedure linkage table
721 			case DT_JMPREL:
722 				image->pltrel = (struct Elf32_Rel *)(d[i].d_un.d_ptr + image->regions[0].delta);
723 				break;
724 			case DT_PLTRELSZ:
725 				image->pltrel_len = d[i].d_un.d_val;
726 				break;
727 			case DT_INIT:
728 				image->init_routine = (d[i].d_un.d_ptr + image->regions[0].delta);
729 				break;
730 			case DT_FINI:
731 				image->term_routine = (d[i].d_un.d_ptr + image->regions[0].delta);
732 				break;
733 			case DT_SONAME:
734 				sonameOffset = d[i].d_un.d_val;
735 				break;
736 			default:
737 				continue;
738 		}
739 	}
740 
741 	// let's make sure we found all the required sections
742 	if (!image->symhash || !image->syms || !image->strtab)
743 		return false;
744 
745 	if (sonameOffset >= 0)
746 		strlcpy(image->name, STRING(image, sonameOffset), sizeof(image->name));
747 
748 	return true;
749 }
750 
751 
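/**	Looks up a defined, exported (global or weak) symbol by name in a single
 *	image, using the image's ELF hash table. Symbols whose ELF type
 *	contradicts the requested B_SYMBOL_TYPE_* constant are skipped.
 */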
752 static struct Elf32_Sym *
753 find_symbol(image_t *image, const char *name, int32 type)
754 {
755 	uint32 hash, i;
756 
757 	// note: "type" is only used to skip symbols of a mismatching type below
758 	(void)type;
759 
760 	if (image->dynamic_ptr == NULL)
761 		return NULL;
762 
763 	hash = elf_hash((uint8 *)name) % HASHTABSIZE(image);
764 
765 	for (i = HASHBUCKETS(image)[hash]; i != STN_UNDEF; i = HASHCHAINS(image)[i]) {
766 		struct Elf32_Sym *symbol = &image->syms[i];
767 
768 		if (symbol->st_shndx != SHN_UNDEF
769 			&& ((ELF32_ST_BIND(symbol->st_info)== STB_GLOBAL)
770 				|| (ELF32_ST_BIND(symbol->st_info) == STB_WEAK))
771 			&& !strcmp(SYMNAME(image, symbol), name)) {
772 			// check if the type matches
773 			if ((type == B_SYMBOL_TYPE_TEXT && ELF32_ST_TYPE(symbol->st_info) != STT_FUNC)
774 				|| (type == B_SYMBOL_TYPE_DATA && ELF32_ST_TYPE(symbol->st_info) != STT_OBJECT))
775 				continue;
776 
777 			return symbol;
778 		}
779 	}
780 
781 	return NULL;
782 }
783 
784 
785 static struct Elf32_Sym*
786 find_symbol_recursively_impl(image_t* image, const char* name,
787 	image_t** foundInImage)
788 {
789 	image->flags |= RFLAG_VISITED;
790 
791 	struct Elf32_Sym *symbol;
792 
793 	// look up the symbol in this image
794 	if (image->dynamic_ptr) {
795 		symbol = find_symbol(image, name, B_SYMBOL_TYPE_ANY);
796 		if (symbol) {
797 			*foundInImage = image;
798 			return symbol;
799 		}
800 	}
801 
802 	// recursively search dependencies
803 	for (uint32 i = 0; i < image->num_needed; i++) {
804 		if (!(image->needed[i]->flags & RFLAG_VISITED)) {
805 			symbol = find_symbol_recursively_impl(image->needed[i], name,
806 				foundInImage);
807 			if (symbol)
808 				return symbol;
809 		}
810 	}
811 
812 	return NULL;
813 }
814 
815 
816 static void
817 clear_image_flag_recursively(image_t* image, uint32 flag)
818 {
819 	image->flags &= ~flag;
820 
821 	for (uint32 i = 0; i < image->num_needed; i++) {
822 		if (image->needed[i]->flags & flag)
823 			clear_image_flag_recursively(image->needed[i], flag);
824 	}
825 }
826 
827 
828 static struct Elf32_Sym*
829 find_symbol_recursively(image_t* image, const char* name,
830 	image_t** foundInImage)
831 {
832 	struct Elf32_Sym* symbol = find_symbol_recursively_impl(image, name,
833 		foundInImage);
834 	clear_image_flag_recursively(image, RFLAG_VISITED);
835 	return symbol;
836 }
837 
838 
839 static struct Elf32_Sym*
840 find_symbol_in_loaded_images(const char* name, image_t** foundInImage)
841 {
842 	return find_symbol_recursively(sLoadedImages.head, name, foundInImage);
843 }
844 
845 
846 static struct Elf32_Sym*
847 find_undefined_symbol(image_t* rootImage, image_t* image, const char* name,
848 	image_t** foundInImage)
849 {
850 	// If not simulating BeOS style symbol resolution, undefined symbols are
851 	// searched recursively, starting from the root image. (Note, breadth-first
852 	// search might be better than the depth-first search used here.)
853 	if (!sResolveSymbolsBeOSStyle)
854 		return find_symbol_recursively(rootImage, name, foundInImage);
855 
856 	// BeOS style symbol resolution: It is sufficient to check the direct
857 	// dependencies. The linker would have complained, if the symbol wasn't
858 	// there.
859 	for (uint32 i = 0; i < image->num_needed; i++) {
860 		if (image->needed[i]->dynamic_ptr) {
861 			struct Elf32_Sym *symbol = find_symbol(image->needed[i], name,
862 				B_SYMBOL_TYPE_ANY);
863 			if (symbol) {
864 				*foundInImage = image->needed[i];
865 				return symbol;
866 			}
867 		}
868 	}
869 
870 	return NULL;
871 }
872 
873 
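/**	Computes the address a relocation should use for the given symbol:
 *	undefined symbols are looked up in the other loaded images (see
 *	find_undefined_symbol()), absolute and regular symbols are the symbol
 *	value adjusted by the image's load delta, and common symbols are not
 *	supported yet.
 */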
874 int
875 resolve_symbol(image_t *rootImage, image_t *image, struct Elf32_Sym *sym,
876 	addr_t *sym_addr)
877 {
878 	struct Elf32_Sym *sym2;
879 	char *symname;
880 	image_t *shimg;
881 
882 	switch (sym->st_shndx) {
883 		case SHN_UNDEF:
884 			// get the symbol name
885 			symname = SYMNAME(image, sym);
886 
887 			// it's undefined, must be outside this image, try the other images
888 			sym2 = find_undefined_symbol(rootImage, image, symname, &shimg);
889 			if (!sym2) {
890 				printf("elf_resolve_symbol: could not resolve symbol '%s'\n", symname);
891 				return B_MISSING_SYMBOL;
892 			}
893 
894 			// make sure they're the same type
895 			if (ELF32_ST_TYPE(sym->st_info) != STT_NOTYPE
896 				&& ELF32_ST_TYPE(sym->st_info) != ELF32_ST_TYPE(sym2->st_info)) {
897 				printf("elf_resolve_symbol: found symbol '%s' in shared image but wrong type\n", symname);
898 				return B_MISSING_SYMBOL;
899 			}
900 
901 			if (ELF32_ST_BIND(sym2->st_info) != STB_GLOBAL
902 				&& ELF32_ST_BIND(sym2->st_info) != STB_WEAK) {
903 				printf("elf_resolve_symbol: found symbol '%s' but not exported\n", symname);
904 				return B_MISSING_SYMBOL;
905 			}
906 
907 			*sym_addr = sym2->st_value + shimg->regions[0].delta;
908 			return B_NO_ERROR;
909 
910 		case SHN_ABS:
911 			*sym_addr = sym->st_value + image->regions[0].delta;
912 			return B_NO_ERROR;
913 
914 		case SHN_COMMON:
915 			// ToDo: finish this
916 			printf("elf_resolve_symbol: COMMON symbol, finish me!\n");
917 			return B_ERROR; //ERR_NOT_IMPLEMENTED_YET;
918 
919 		default:
920 			// standard symbol
921 			*sym_addr = sym->st_value + image->regions[0].delta;
922 			return B_NO_ERROR;
923 	}
924 }
925 
926 
927 static void
928 register_image(image_t *image, int fd, const char *path)
929 {
930 	struct stat stat;
931 	image_info info;
932 
933 	// ToDo: set these correctly
934 	info.id = 0;
935 	info.type = image->type;
936 	info.sequence = 0;
937 	info.init_order = 0;
938 	info.init_routine = (void (*)())image->init_routine;
939 	info.term_routine = (void (*)())image->term_routine;
940 
941 	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
942 		info.device = stat.st_dev;
943 		info.node = stat.st_ino;
944 	} else {
945 		info.device = -1;
946 		info.node = -1;
947 	}
948 
949 	strlcpy(info.name, path, sizeof(info.name));
950 	info.text = (void *)image->regions[0].vmstart;
951 	info.text_size = image->regions[0].vmsize;
952 	info.data = (void *)image->regions[1].vmstart;
953 	info.data_size = image->regions[1].vmsize;
954 	image->id = _kern_register_image(&info, sizeof(image_info));
955 }
956 
957 
958 static status_t
959 relocate_image(image_t *rootImage, image_t *image)
960 {
961 	status_t status = arch_relocate_image(rootImage, image);
962 	if (status < B_OK) {
963 		FATAL("troubles relocating: 0x%lx (image: %s)\n", status, image->name);
964 		return status;
965 	}
966 
967 	_kern_image_relocated(image->id);
968 	return B_OK;
969 }
970 
971 
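/**	Loads the ELF object with the given name as an image of the given type,
 *	unless a matching image is already loaded (add-ons are always reloaded;
 *	an already loaded image just gets another reference). This reads and
 *	checks the headers, creates the image_t, maps it, parses its dynamic
 *	segment and registers it with the kernel. Dependencies are not loaded
 *	here; see load_dependencies().
 */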
972 static status_t
973 load_container(char const *name, image_type type, const char *rpath, image_t **_image)
974 {
975 	int32 pheaderSize, sheaderSize;
976 	char path[PATH_MAX];
977 	ssize_t length;
978 	char ph_buff[4096];
979 	int32 numRegions;
980 	image_t *found;
981 	image_t *image;
982 	status_t status;
983 	int fd;
984 
985 	struct Elf32_Ehdr eheader;
986 
987 	// Have we already loaded that image? Don't check for add-ons -- we always
988 	// reload them.
989 	if (type != B_ADD_ON_IMAGE) {
990 		found = find_image(name, APP_OR_LIBRARY_TYPE);
991 		if (found) {
992 			atomic_add(&found->ref_count, 1);
993 			*_image = found;
994 			KTRACE("rld: load_container(\"%s\", type: %d, rpath: \"%s\") "
995 				"already loaded", name, type, rpath);
996 			return B_OK;
997 		}
998 	}
999 
1000 	KTRACE("rld: load_container(\"%s\", type: %d, rpath: \"%s\")", name, type,
1001 		rpath);
1002 
1003 	strlcpy(path, name, sizeof(path));
1004 
1005 	// Try to load explicit image path first
1006 	fd = open_executable(path, type, rpath, get_program_path());
1007 	if (fd < 0) {
1008 		FATAL("cannot open file %s\n", path);
1009 		KTRACE("rld: load_container(\"%s\"): failed to open file", name);
1010 		return fd;
1011 	}
1012 
1013 	// normalize the image path
1014 	status = _kern_normalize_path(path, true, path);
1015 	if (status != B_OK)
1016 		goto err1;
1017 
1018 	// Test again if this image has been registered already - this time
1019 	// we can check the normalized path, not just the name as above;
1020 	// otherwise symbolic links could cause an image to be loaded twice.
1021 	if (type != B_ADD_ON_IMAGE) {
1022 		found = find_image(path, APP_OR_LIBRARY_TYPE);
1023 		if (found) {
1024 			atomic_add(&found->ref_count, 1);
1025 			*_image = found;
1026 			KTRACE("rld: load_container(\"%s\"): already loaded after all",
1027 				name);
1028 			return B_OK;
1029 		}
1030 	}
1031 
1032 	length = _kern_read(fd, 0, &eheader, sizeof(eheader));
1033 	if (length != sizeof(eheader)) {
1034 		status = B_NOT_AN_EXECUTABLE;
1035 		FATAL("troubles reading ELF header\n");
1036 		goto err1;
1037 	}
1038 
1039 	status = parse_elf_header(&eheader, &pheaderSize, &sheaderSize);
1040 	if (status < B_OK) {
1041 		FATAL("incorrect ELF header\n");
1042 		goto err1;
1043 	}
1044 
1045 	// ToDo: what to do about this restriction??
1046 	if (pheaderSize > (int)sizeof(ph_buff)) {
1047 		FATAL("Cannot handle program headers bigger than %lu\n", sizeof(ph_buff));
1048 		status = B_UNSUPPORTED;
1049 		goto err1;
1050 	}
1051 
1052 	length = _kern_read(fd, eheader.e_phoff, ph_buff, pheaderSize);
1053 	if (length != pheaderSize) {
1054 		FATAL("Could not read program headers: %s\n", strerror(length));
1055 		status = B_BAD_DATA;
1056 		goto err1;
1057 	}
1058 
1059 	numRegions = count_regions(ph_buff, eheader.e_phnum, eheader.e_phentsize);
1060 	if (numRegions <= 0) {
1061 		FATAL("Troubles parsing Program headers, numRegions = %ld\n", numRegions);
1062 		status = B_BAD_DATA;
1063 		goto err1;
1064 	}
1065 
1066 	image = create_image(name, path, numRegions);
1067 	if (image == NULL) {
1068 		FATAL("Failed to allocate image_t object\n");
1069 		status = B_NO_MEMORY;
1070 		goto err1;
1071 	}
1072 
1073 	status = parse_program_headers(image, ph_buff, eheader.e_phnum, eheader.e_phentsize);
1074 	if (status < B_OK)
1075 		goto err2;
1076 
1077 	if (!assert_dynamic_loadable(image)) {
1078 		FATAL("Dynamic segment must be loadable (implementation restriction)\n");
1079 		status = B_UNSUPPORTED;
1080 		goto err2;
1081 	}
1082 
1083 	status = map_image(fd, path, image, type == B_APP_IMAGE);
1084 	if (status < B_OK) {
1085 		FATAL("Could not map image: %s\n", strerror(status));
1086 		status = B_ERROR;
1087 		goto err2;
1088 	}
1089 
1090 	if (!parse_dynamic_segment(image)) {
1091 		FATAL("Troubles handling dynamic section\n");
1092 		status = B_BAD_DATA;
1093 		goto err3;
1094 	}
1095 
1096 	if (eheader.e_entry != NULL)
1097 		image->entry_point = eheader.e_entry + image->regions[0].delta;
1098 
1099 	image->type = type;
1100 	register_image(image, fd, path);
1101 
1102 	_kern_close(fd);
1103 
1104 	enqueue_image(&sLoadedImages, image);
1105 	sLoadedImageCount++;
1106 
1107 	*_image = image;
1108 
1109 	KTRACE("rld: load_container(\"%s\"): done: id: %ld", name, image->id);
1110 
1111 	return B_OK;
1112 
1113 err3:
1114 	unmap_image(image);
1115 err2:
1116 	delete_image_struct(image);
1117 err1:
1118 	_kern_close(fd);
1119 
1120 	KTRACE("rld: load_container(\"%s\"): failed: %s", name,
1121 		strerror(status));
1122 
1123 	return status;
1124 }
1125 
1126 
1127 static const char *
1128 find_dt_rpath(image_t *image)
1129 {
1130 	int i;
1131 	struct Elf32_Dyn *d = (struct Elf32_Dyn *)image->dynamic_ptr;
1132 
1133 	for (i = 0; d[i].d_tag != DT_NULL; i++) {
1134 		if (d[i].d_tag == DT_RPATH)
1135 			return STRING(image, d[i].d_un.d_val);
1136 	}
1137 
1138 	return NULL;
1139 }
1140 
1141 
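/**	Loads all DT_NEEDED dependencies of the given image (at most once per
 *	image) via load_container() and stores them in image->needed. When error
 *	reporting is enabled, all missing libraries are collected in
 *	sErrorMessage instead of bailing out at the first failure.
 */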
1142 static status_t
1143 load_dependencies(image_t *image)
1144 {
1145 	struct Elf32_Dyn *d = (struct Elf32_Dyn *)image->dynamic_ptr;
1146 	bool reportErrors = report_errors();
1147 	status_t status = B_OK;
1148 	uint32 i, j;
1149 	const char *rpath;
1150 
1151 	if (!d || (image->flags & RFLAG_DEPENDENCIES_LOADED))
1152 		return B_OK;
1153 
1154 	image->flags |= RFLAG_DEPENDENCIES_LOADED;
1155 
1156 	if (image->num_needed == 0)
1157 		return B_OK;
1158 
1159 	KTRACE("rld: load_dependencies(\"%s\", id: %ld)", image->name,
1160 		image->id);
1161 
1162 	image->needed = (image_t**)malloc(image->num_needed * sizeof(image_t *));
1163 	if (image->needed == NULL) {
1164 		FATAL("failed to allocate needed struct\n");
1165 		KTRACE("rld: load_dependencies(\"%s\", id: %ld) failed: no memory",
1166 			image->name, image->id);
1167 		return B_NO_MEMORY;
1168 	}
1169 
1170 	memset(image->needed, 0, image->num_needed * sizeof(image_t *));
1171 	rpath = find_dt_rpath(image);
1172 
1173 	for (i = 0, j = 0; d[i].d_tag != DT_NULL; i++) {
1174 		switch (d[i].d_tag) {
1175 			case DT_NEEDED:
1176 			{
1177 				int32 neededOffset = d[i].d_un.d_val;
1178 				const char *name = STRING(image, neededOffset);
1179 
1180 				status_t loadStatus = load_container(name, B_LIBRARY_IMAGE,
1181 					rpath, &image->needed[j]);
1182 				if (loadStatus < B_OK) {
1183 					status = loadStatus;
1184 					// correct the error code in case the file could not be found
1185 					if (status == B_ENTRY_NOT_FOUND) {
1186 						status = B_MISSING_LIBRARY;
1187 
1188 						if (reportErrors)
1189 							sErrorMessage.AddString("missing library", name);
1190 					}
1191 
1192 					// Collect all missing libraries in case we report back
1193 					if (!reportErrors) {
1194 						KTRACE("rld: load_dependencies(\"%s\", id: %ld) "
1195 							"failed: %s", image->name, image->id,
1196 							strerror(status));
1197 						return status;
1198 					}
1199 				}
1200 
1201 				j += 1;
1202 				break;
1203 			}
1204 
1205 			default:
1206 				// ignore any other tag
1207 				continue;
1208 		}
1209 	}
1210 
1211 	if (status < B_OK) {
1212 		KTRACE("rld: load_dependencies(\"%s\", id: %ld) "
1213 			"failed: %s", image->name, image->id,
1214 			strerror(status));
1215 		return status;
1216 	}
1217 
1218 	if (j != image->num_needed) {
1219 		FATAL("Internal error at load_dependencies()\n");
1220 		KTRACE("rld: load_dependencies(\"%s\", id: %ld) "
1221 			"failed: internal error", image->name, image->id);
1222 		return B_ERROR;
1223 	}
1224 
1225 	KTRACE("rld: load_dependencies(\"%s\", id: %ld) done", image->name,
1226 		image->id);
1227 
1228 	return B_OK;
1229 }
1230 
1231 
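/**	Depth-first post-order traversal of the dependency graph: an image is
 *	appended to the list only after all of its not yet visited dependencies
 *	have been added, so dependencies end up before the images that need
 *	them. The given sortFlag (e.g. RFLAG_INITIALIZED) marks images that have
 *	already been visited, which also keeps them out of later sorts.
 */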
1232 static uint32
1233 topological_sort(image_t *image, uint32 slot, image_t **initList,
1234 	uint32 sortFlag)
1235 {
1236 	uint32 i;
1237 
1238 	if (image->flags & sortFlag)
1239 		return slot;
1240 
1241 	image->flags |= sortFlag; /* make sure we don't visit this one */
1242 	for (i = 0; i < image->num_needed; i++)
1243 		slot = topological_sort(image->needed[i], slot, initList, sortFlag);
1244 
1245 	initList[slot] = image;
1246 	return slot + 1;
1247 }
1248 
1249 
1250 static ssize_t
1251 get_sorted_image_list(image_t *image, image_t ***_list, uint32 sortFlag)
1252 {
1253 	image_t **list;
1254 
1255 	list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t *));
1256 	if (list == NULL) {
1257 		FATAL("memory shortage in get_sorted_image_list()\n");
1258 		*_list = NULL;
1259 		return B_NO_MEMORY;
1260 	}
1261 
1262 	memset(list, 0, sLoadedImageCount * sizeof(image_t *));
1263 
1264 	*_list = list;
1265 	return topological_sort(image, 0, list, sortFlag);
1266 }
1267 
1268 
1269 static status_t
1270 relocate_dependencies(image_t *image)
1271 {
1272 	ssize_t count, i;
1273 	image_t **list;
1274 
1275 	count = get_sorted_image_list(image, &list, RFLAG_RELOCATED);
1276 	if (count < B_OK)
1277 		return count;
1278 
	for (i = 0; i < count; i++) {
		status_t status = relocate_image(image, list[i]);
		if (status < B_OK) {
			// don't leak the sorted image list when a relocation fails
			free(list);
			return status;
		}
	}
1284 
1285 	free(list);
1286 	return B_OK;
1287 }
1288 
1289 
1290 static void
1291 init_dependencies(image_t *image, bool initHead)
1292 {
1293 	image_t **initList;
1294 	ssize_t count, i;
1295 
1296 	count = get_sorted_image_list(image, &initList, RFLAG_INITIALIZED);
1297 	if (count <= 0)
1298 		return;
1299 
1300 	if (!initHead) {
1301 		// this removes the "calling" image
1302 		image->flags &= ~RFLAG_INITIALIZED;
1303 		initList[--count] = NULL;
1304 	}
1305 
1306 	TRACE(("%ld: init dependencies\n", find_thread(NULL)));
1307 	for (i = 0; i < count; i++) {
1308 		image = initList[i];
1309 
1310 		TRACE(("%ld:  init: %s\n", find_thread(NULL), image->name));
1311 
1312 		if (image->init_routine != NULL)
1313 			((init_term_function)image->init_routine)(image->id);
1314 	}
1315 	TRACE(("%ld:  init done.\n", find_thread(NULL)));
1316 
1317 	free(initList);
1318 }
1319 
1320 
1321 static void
1322 put_image(image_t *image)
1323 {
1324 	// If all references to the image are gone, add it to the disposable list
1325 	// and remove all dependencies
1326 
1327 	if (atomic_add(&image->ref_count, -1) == 1) {
1328 		size_t i;
1329 
1330 		dequeue_image(&sLoadedImages, image);
1331 		enqueue_image(&sDisposableImages, image);
1332 		sLoadedImageCount--;
1333 
1334 		for (i = 0; i < image->num_needed; i++) {
1335 			put_image(image->needed[i]);
1336 		}
1337 	}
1338 }
1339 
1340 
1341 //	#pragma mark - libroot.so exported functions
1342 
1343 
1344 image_id
1345 load_program(char const *path, void **_entry)
1346 {
1347 	status_t status;
1348 	image_t *image;
1349 
1350 	KTRACE("rld: load_program(\"%s\")", path);
1351 
1352 	rld_lock();
1353 		// for now, just do stupid simple global locking
1354 
1355 	TRACE(("rld: load %s\n", path));
1356 
1357 	status = load_container(path, B_APP_IMAGE, NULL, &sProgramImage);
1358 	if (status < B_OK)
1359 		goto err;
1360 
1361 	for (image = sLoadedImages.head; image != NULL; image = image->next) {
1362 		status = load_dependencies(image);
1363 		if (status < B_OK)
1364 			goto err;
1365 	}
1366 
1367 	status = relocate_dependencies(sProgramImage);
1368 	if (status < B_OK)
1369 		goto err;
1370 
1371 	// We patch any exported __gRuntimeLoader symbols to point to our private API
1372 	{
1373 		struct Elf32_Sym *symbol = find_symbol_in_loaded_images(
1374 			"__gRuntimeLoader", &image);
1375 		if (symbol != NULL) {
1376 			void **_export = (void **)(symbol->st_value + image->regions[0].delta);
1377 			*_export = &gRuntimeLoader;
1378 		}
1379 	}
1380 
1381 	init_dependencies(sLoadedImages.head, true);
1382 	remap_images();
1383 		// ToDo: once setup_system_time() is fixed, move this one line higher!
1384 
1385 	// Since the images are initialized now, we no longer should use our
1386 	// getenv(), but use the one from libroot.so
1387 	{
1388 		struct Elf32_Sym *symbol = find_symbol_in_loaded_images("getenv",
1389 			&image);
1390 		if (symbol != NULL)
1391 			gGetEnv = (char* (*)(const char*))
1392 				(symbol->st_value + image->regions[0].delta);
1393 	}
1394 
1395 	if (sProgramImage->entry_point == NULL) {
1396 		status = B_NOT_AN_EXECUTABLE;
1397 		goto err;
1398 	}
1399 
1400 	*_entry = (void *)(sProgramImage->entry_point);
1401 
1402 	rld_unlock();
1403 
1404 	KTRACE("rld: load_program(\"%s\") done: entry: %p, id: %ld", path,
1405 		*_entry, sProgramImage->id);
1406 
1407 	return sProgramImage->id;
1408 
1409 err:
1410 	KTRACE("rld: load_program(\"%s\") failed: %s", path, strerror(status));
1411 
1412 	delete_image(sProgramImage);
1413 
1414 	if (report_errors()) {
1415 		// send error message
1416 		sErrorMessage.AddInt32("error", status);
1417 		sErrorMessage.SetDeliveryInfo(gProgramArgs->error_token,
1418 			-1, 0, find_thread(NULL));
1419 
1420 		_kern_write_port_etc(gProgramArgs->error_port, 'KMSG',
1421 			sErrorMessage.Buffer(), sErrorMessage.ContentSize(), 0, 0);
1422 	}
1423 	_kern_loading_app_failed(status);
1424 	rld_unlock();
1425 
1426 	return status;
1427 }
1428 
1429 
1430 image_id
1431 load_library(char const *path, uint32 flags, bool addOn)
1432 {
1433 	image_t *image = NULL;
1434 	image_t *iter;
1435 	image_type type = (addOn ? B_ADD_ON_IMAGE : B_LIBRARY_IMAGE);
1436 	status_t status;
1437 
1438 	if (path == NULL)
1439 		return B_BAD_VALUE;
1440 
1441 	// ToDo: implement flags
1442 	(void)flags;
1443 
1444 	KTRACE("rld: load_library(\"%s\", 0x%lx, %d)", path, flags, addOn);
1445 
1446 	rld_lock();
1447 		// for now, just do stupid simple global locking
1448 
1449 	// have we already loaded this library?
1450 	// Checking it at this stage saves loading its dependencies again
1451 	if (!addOn) {
1452 		image = find_image(path, APP_OR_LIBRARY_TYPE);
1453 		if (image) {
1454 			atomic_add(&image->ref_count, 1);
1455 			rld_unlock();
1456 			KTRACE("rld: load_library(\"%s\"): already loaded: %ld", path,
1457 				image->id);
1458 			return image->id;
1459 		}
1460 	}
1461 
1462 	status = load_container(path, type, NULL, &image);
1463 	if (status < B_OK) {
1464 		rld_unlock();
1465 		KTRACE("rld: load_library(\"%s\") failed to load container: %s", path,
1466 			strerror(status));
1467 		return status;
1468 	}
1469 
1470 	for (iter = sLoadedImages.head; iter; iter = iter->next) {
1471 		status = load_dependencies(iter);
1472 		if (status < B_OK)
1473 			goto err;
1474 	}
1475 
1476 	status = relocate_dependencies(image);
1477 	if (status < B_OK)
1478 		goto err;
1479 
1480 	remap_images();
1481 	init_dependencies(image, true);
1482 
1483 	rld_unlock();
1484 
1485 	KTRACE("rld: load_library(\"%s\") done: id: %ld", path, image->id);
1486 
1487 	return image->id;
1488 
1489 err:
1490 	KTRACE("rld: load_library(\"%s\") failed: %s", path, strerror(status));
1491 
1492 	dequeue_image(&sLoadedImages, image);
1493 	sLoadedImageCount--;
1494 	delete_image(image);
1495 	rld_unlock();
1496 	return status;
1497 }
1498 
1499 
1500 status_t
1501 unload_library(image_id imageID, bool addOn)
1502 {
1503 	status_t status = B_BAD_IMAGE_ID;
1504 	image_t *image;
1505 	image_type type = addOn ? B_ADD_ON_IMAGE : B_LIBRARY_IMAGE;
1506 
1507 	if (imageID < B_OK)
1508 		return B_BAD_IMAGE_ID;
1509 
1510 	rld_lock();
1511 		// for now, just do stupid simple global locking
1512 
1513 	// we only check images that have been already initialized
1514 	// we only check images that have already been initialized
1515 	for (image = sLoadedImages.head; image; image = image->next) {
1516 		if (image->id == imageID) {
1517 			// unload image
1518 			if (type == image->type) {
1519 				put_image(image);
1520 				status = B_OK;
1521 			} else
1522 				status = B_BAD_VALUE;
1523 			break;
1524 		}
1525 	}
1526 
1527 	if (status == B_OK) {
1528 		while ((image = sDisposableImages.head) != NULL) {
1529 			// call image fini here...
1530 			if (gRuntimeLoader.call_atexit_hooks_for_range) {
1531 				gRuntimeLoader.call_atexit_hooks_for_range(
1532 					image->regions[0].vmstart, image->regions[0].vmsize);
1533 			}
1534 
1535 			if (image->term_routine)
1536 				((init_term_function)image->term_routine)(image->id);
1537 
1538 			dequeue_image(&sDisposableImages, image);
1539 			unmap_image(image);
1540 
1541 			delete_image(image);
1542 		}
1543 	}
1544 
1545 	rld_unlock();
1546 	return status;
1547 }
1548 
1549 
1550 status_t
1551 get_nth_symbol(image_id imageID, int32 num, char *nameBuffer, int32 *_nameLength,
1552 	int32 *_type, void **_location)
1553 {
1554 	int32 count = 0, j;
1555 	uint32 i;
1556 	image_t *image;
1557 
1558 	rld_lock();
1559 
1560 	// get the image from those who have been already initialized
1561 	// get the image from those that have already been initialized
1562 	if (image == NULL) {
1563 		rld_unlock();
1564 		return B_BAD_IMAGE_ID;
1565 	}
1566 
1567 	// iterate through all hash buckets and chains until we've reached the requested symbol
1568 	for (i = 0; i < HASHTABSIZE(image); i++) {
1569 		for (j = HASHBUCKETS(image)[i]; j != STN_UNDEF; j = HASHCHAINS(image)[j]) {
1570 			struct Elf32_Sym *symbol = &image->syms[j];
1571 
1572 			if (count == num) {
1573 				strlcpy(nameBuffer, SYMNAME(image, symbol), *_nameLength);
1574 				*_nameLength = strlen(SYMNAME(image, symbol));
1575 
1576 				if (_type != NULL) {
1577 					// ToDo: check with the return types of that BeOS function
1578 					if (ELF32_ST_TYPE(symbol->st_info) == STT_FUNC)
1579 						*_type = B_SYMBOL_TYPE_TEXT;
1580 					else if (ELF32_ST_TYPE(symbol->st_info) == STT_OBJECT)
1581 						*_type = B_SYMBOL_TYPE_DATA;
1582 					else
1583 						*_type = B_SYMBOL_TYPE_ANY;
1584 				}
1585 
1586 				if (_location != NULL)
1587 					*_location = (void *)(symbol->st_value + image->regions[0].delta);
1588 				goto out;
1589 			}
1590 			count++;
1591 		}
1592 	}
1593 out:
1594 	rld_unlock();
1595 
1596 	if (num != count)
1597 		return B_BAD_INDEX;
1598 
1599 	return B_OK;
1600 }
1601 
1602 
1603 status_t
1604 get_symbol(image_id imageID, char const *symbolName, int32 symbolType, void **_location)
1605 {
1606 	status_t status = B_OK;
1607 	image_t *image;
1608 
1609 	if (imageID < B_OK)
1610 		return B_BAD_IMAGE_ID;
1611 	if (symbolName == NULL)
1612 		return B_BAD_VALUE;
1613 
1614 	rld_lock();
1615 		// for now, just do stupid simple global locking
1616 
1617 	// get the image from those who have been already initialized
1618 	// get the image from those that have already been initialized
1619 	if (image != NULL) {
1620 		struct Elf32_Sym *symbol;
1621 
1622 		// get the symbol in the image
1623 		symbol = find_symbol(image, symbolName, symbolType);
1624 		if (symbol) {
1625 			if (_location != NULL)
1626 				*_location = (void *)(symbol->st_value + image->regions[0].delta);
1627 		} else
1628 			status = B_ENTRY_NOT_FOUND;
1629 	} else
1630 		status = B_BAD_IMAGE_ID;
1631 
1632 	rld_unlock();
1633 	return status;
1634 }
1635 
1636 
1637 status_t
1638 get_next_image_dependency(image_id id, uint32 *cookie, const char **_name)
1639 {
1640 	uint32 i, j, searchIndex = *cookie;
1641 	struct Elf32_Dyn *dynamicSection;
1642 	image_t *image;
1643 
1644 	if (_name == NULL)
1645 		return B_BAD_VALUE;
1646 
1647 	rld_lock();
1648 
1649 	image = find_loaded_image_by_id(id);
1650 	if (image == NULL) {
1651 		rld_unlock();
1652 		return B_BAD_IMAGE_ID;
1653 	}
1654 
1655 	dynamicSection = (struct Elf32_Dyn *)image->dynamic_ptr;
1656 	if (dynamicSection == NULL || image->num_needed <= searchIndex) {
1657 		rld_unlock();
1658 		return B_ENTRY_NOT_FOUND;
1659 	}
1660 
1661 	for (i = 0, j = 0; dynamicSection[i].d_tag != DT_NULL; i++) {
1662 		if (dynamicSection[i].d_tag != DT_NEEDED)
1663 			continue;
1664 
1665 		if (j++ == searchIndex) {
1666 			int32 neededOffset = dynamicSection[i].d_un.d_val;
1667 
1668 			*_name = STRING(image, neededOffset);
1669 			*cookie = searchIndex + 1;
1670 			rld_unlock();
1671 			return B_OK;
1672 		}
1673 	}
1674 
1675 	rld_unlock();
1676 	return B_ENTRY_NOT_FOUND;
1677 }
1678 
1679 
1680 //	#pragma mark - runtime_loader private exports
1681 
1682 
1683 /** Read and verify the ELF header */
1684 
1685 status_t
1686 elf_verify_header(void *header, int32 length)
1687 {
1688 	int32 programSize, sectionSize;
1689 
1690 	if (length < (int32)sizeof(struct Elf32_Ehdr))
1691 		return B_NOT_AN_EXECUTABLE;
1692 
1693 	return parse_elf_header((struct Elf32_Ehdr *)header, &programSize, &sectionSize);
1694 }
1695 
1696 
1697 void
1698 terminate_program(void)
1699 {
1700 	image_t **termList;
1701 	ssize_t count, i;
1702 
1703 	count = get_sorted_image_list(sProgramImage, &termList, RFLAG_TERMINATED);
1704 	if (count < B_OK)
1705 		return;
1706 
1707 	TRACE(("%ld: terminate dependencies\n", find_thread(NULL)));
1708 	for (i = count; i-- > 0;) {
1709 		image_t *image = termList[i];
1710 
1711 		TRACE(("%ld:  term: %s\n", find_thread(NULL), image->name));
1712 
1713 		if (image->term_routine)
1714 			((init_term_function)image->term_routine)(image->id);
1715 	}
1716 	TRACE(("%ld:  term done.\n", find_thread(NULL)));
1717 
1718 	free(termList);
1719 }
1720 
1721 
1722 void
1723 rldelf_init(void)
1724 {
1725 	rld_sem = create_sem(1, "rld_lock");
1726 	rld_sem_owner = -1;
1727 	rld_sem_count = 0;
1728 
1729 	// create the debug area
1730 	{
1731 		int32 size = TO_PAGE_SIZE(sizeof(runtime_loader_debug_area));
1732 
1733 		runtime_loader_debug_area *area;
1734 		area_id areaID = _kern_create_area(RUNTIME_LOADER_DEBUG_AREA_NAME,
1735 			(void **)&area, B_ANY_ADDRESS, size, B_NO_LOCK,
1736 			B_READ_AREA | B_WRITE_AREA);
1737 		if (areaID < B_OK) {
1738 			FATAL("Failed to create debug area.\n");
1739 			_kern_loading_app_failed(areaID);
1740 		}
1741 
1742 		area->loaded_images = &sLoadedImages;
1743 	}
1744 
1745 	// initialize error message if needed
1746 	if (report_errors()) {
1747 		void *buffer = malloc(1024);
1748 		if (buffer == NULL)
1749 			return;
1750 
1751 		sErrorMessage.SetTo(buffer, 1024, 'Rler');
1752 	}
1753 }
1754 
1755 
1756 status_t
1757 elf_reinit_after_fork()
1758 {
1759 	rld_sem = create_sem(1, "rld_lock");
1760 	if (rld_sem < 0)
1761 		return rld_sem;
1762 
1763 	return B_OK;
1764 }
1765