xref: /haiku/src/system/libroot/posix/malloc/hoard2/wrapper.cpp (revision 9a6a20d4689307142a7ed26a1437ba47e244e73f)
/*
 * Copyright 2002-2007, Haiku Inc.
 * Distributed under the terms of the MIT License.
 */

/* Hoard: A Fast, Scalable, and Memory-Efficient Allocator
 * 		for Shared-Memory Multiprocessors
 * Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
 *
 * Copyright (c) 1998-2000, The University of Texas at Austin.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Library General Public License as
 * published by the Free Software Foundation, http://www.fsf.org.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 */

#include "config.h"
#include "threadheap.h"
#include "processheap.h"
#include "arch-specific.h"

#include <image.h>

#include <errno.h>
#include <string.h>

#include <errno_private.h>
#include <user_thread.h>

#include "tracing_config.h"

using namespace BPrivate;


#if USER_MALLOC_TRACING
#	define KTRACE(format...)	ktrace_printf(format)
#else
#	define KTRACE(format...)	do {} while (false)
#endif


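// When HEAP_LEAK_CHECK is enabled, every live allocation is kept in a
// global singly linked list (sUsedList) so that __dump_allocated() can
// walk all outstanding blocks later. The list is guarded by sUsedLock.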
#if HEAP_LEAK_CHECK
static block* sUsedList = NULL;
static hoardLockType sUsedLock = MUTEX_INITIALIZER("");


/*!
	Finds the closest symbol that comes before the given address.
*/
static status_t
get_symbol_for_address(void* address, char *imageBuffer, size_t imageBufferSize,
	char* buffer, size_t bufferSize, int32& offset)
{
	offset = -1;

	image_info info;
	int32 cookie = 0;
	while (get_next_image_info(0, &cookie, &info) == B_OK) {
		if (((addr_t)info.text > (addr_t)address
				|| (addr_t)info.text + info.text_size < (addr_t)address)
			&& ((addr_t)info.data > (addr_t)address
				|| (addr_t)info.data + info.data_size < (addr_t)address))
			continue;

		char name[256];
		int32 index = 0;
		int32 nameLength = sizeof(name);
		int32 symbolType;
		void* location;
		while (get_nth_image_symbol(info.id, index, name, &nameLength,
				&symbolType, &location) == B_OK) {
			if ((addr_t)address >= (addr_t)location) {
				// see if this is better than what we have
				int32 newOffset = (addr_t)address - (addr_t)location;

				if (offset == -1 || offset > newOffset) {
					const char* imageName = strrchr(info.name, '/');
					if (imageName != NULL)
						strlcpy(imageBuffer, imageName + 1, imageBufferSize);
					else
						strlcpy(imageBuffer, info.name, imageBufferSize);

					strlcpy(buffer, name, bufferSize);
					offset = newOffset;
				}
			}

			nameLength = sizeof(name);
			index++;
		}
	}

	return offset != -1 ? B_OK : B_ENTRY_NOT_FOUND;
}


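// Prints one allocation record: the user pointer (the block header sits
// immediately before it), the allocated size, and a symbolized call stack.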
static void
dump_block(block* b)
{
	printf("  %p, %ld bytes: call stack", b + 1, b->getAllocatedSize());

	for (int i = 0; i < HEAP_CALL_STACK_SIZE; i++) {
		if (b->getCallStack(i) != NULL) {
			char image[256];
			char name[256];
			int32 offset;
			if (get_symbol_for_address(b->getCallStack(i), image, sizeof(image),
					name, sizeof(name), offset) != B_OK) {
				strcpy(name, "???");
				offset = 0;
			}

			printf(": %p (%s:%s+0x%lx)", b->getCallStack(i), image, name, offset);
		}
	}
	putchar('\n');
}


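// Dumps every allocation still in the used list. A hypothetical usage
// sketch: build libroot with HEAP_LEAK_CHECK enabled, then at a point of
// interest in the target program do
//
//	extern "C" void __dump_allocated(void);
//	...
//	__dump_allocated();	// prints every block still in sUsedList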
extern "C" void __dump_allocated(void);

extern "C" void
__dump_allocated(void)
{
	hoardLock(sUsedLock);

	puts("allocated:\n");

	block* b = sUsedList;
	while (b != NULL) {
		dump_block(b);

		b = b->getNext();
	}

	hoardUnlock(sUsedLock);
}


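// Records a fresh allocation in the used list. On x86 only, the caller's
// stack frames are walked via the saved frame pointers to capture up to
// HEAP_CALL_STACK_SIZE return addresses in the block header; on other
// architectures neither the call stack nor the size is recorded.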
static void
add_address(void* address, size_t size)
{
	block *b = (block *)address - 1;

#ifdef __i386__
	// set call stack
	struct stack_frame {
		struct stack_frame*	previous;
		void*				return_address;
	};

	stack_frame* frame = (stack_frame*)get_stack_frame();

	for (int i = 0; i < HEAP_CALL_STACK_SIZE; i++) {
		if (frame != NULL) {
			b->setCallStack(i, frame->return_address);
			frame = frame->previous;
		} else
			b->setCallStack(i, NULL);
	}

	b->setAllocatedSize(size);
#endif

	hoardLock(sUsedLock);

	b->setNext(sUsedList);
	sUsedList = b;

	hoardUnlock(sUsedLock);
}


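// Unlinks a block from the used list when it is freed. Since the list is
// singly linked, freeing anything but the head requires a linear search.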
static void
remove_address(void* address)
{
	block* b = (block *)address - 1;
	hoardLock(sUsedLock);

	if (sUsedList == b) {
		// we're lucky, it's the first block in the list
		sUsedList = b->getNext();
	} else {
		// search for block in the used list (very slow!)
		block* last = sUsedList;
		while (last != NULL && last->getNext() != b) {
			last = last->getNext();
		}

		if (last == NULL) {
			printf("freed block not in used list!\n");
			dump_block(b);
		} else
			last->setNext(b->getNext());
	}

	hoardUnlock(sUsedLock);
}

#endif	// HEAP_LEAK_CHECK

#if HEAP_WALL

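// With HEAP_WALL enabled, every allocation is padded on both sides:
//
//   [ size_t size | 0x88 ... ]     user data     [ 0x66 ... ]
//   |<--  HEAP_WALL_SIZE  -->|                   |<-- HEAP_WALL_SIZE -->|
//
// set_wall() stores the full (padded) size in the first size_t of the
// front wall and returns the user pointer; check_wall() verifies both
// patterns on free and drops into the debugger on the first mismatch.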
static void*
set_wall(void* addr, size_t size)
{
	size_t *start = (size_t*)addr;

	start[0] = size;
	memset(start + 1, 0x88, HEAP_WALL_SIZE - sizeof(size_t));
	memset((uint8*)addr + size - HEAP_WALL_SIZE, 0x66, HEAP_WALL_SIZE);

	return (uint8*)addr + HEAP_WALL_SIZE;
}


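// Validates the guard patterns around an allocation and invokes the
// debugger on the first clobbered byte. Returns the original block start
// (including the front wall), i.e. what the heap actually handed out.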
static void*
check_wall(uint8* buffer)
{
	buffer -= HEAP_WALL_SIZE;
	size_t size = *(size_t*)buffer;

	if (threadHeap::objectSize(buffer) < size)
		debugger("invalid size");

	for (size_t i = 0; i < HEAP_WALL_SIZE; i++) {
		if (i >= sizeof(size_t) && buffer[i] != 0x88) {
			debug_printf("allocation %p, size %ld front wall clobbered at byte %ld.\n",
				buffer + HEAP_WALL_SIZE, size - 2 * HEAP_WALL_SIZE, i);
			debugger("front wall clobbered");
		}
		if (buffer[i + size - HEAP_WALL_SIZE] != 0x66) {
			debug_printf("allocation %p, size %ld back wall clobbered at byte %ld.\n",
				buffer + HEAP_WALL_SIZE, size - 2 * HEAP_WALL_SIZE, i);
			debugger("back wall clobbered");
		}
	}

	return buffer;
}

#endif	// HEAP_WALL

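// The process-wide heap is constructed exactly once, via placement new
// into memory obtained directly from hoardSbrk() -- the allocator cannot
// use malloc() to bootstrap itself. The function-local statics make the
// initialization happen lazily on first use.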
inline static processHeap *
getAllocator(void)
{
	static char *buffer = (char *)hoardSbrk(sizeof(processHeap));
	static processHeap *theAllocator = new (buffer) processHeap();

	return theAllocator;
}


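// fork() hooks: before forking, the parent acquires every thread heap
// lock plus the process heap lock so the child inherits a consistent
// heap image. Afterwards the child reinitializes the locks rather than
// unlocking them, while the parent simply releases them again.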
extern "C" void
__heap_before_fork(void)
{
	static processHeap *pHeap = getAllocator();
	for (int i = 0; i < pHeap->getMaxThreadHeaps(); i++)
		pHeap->getHeap(i).lock();

	static_cast<hoardHeap*>(pHeap)->lock();
}

void __init_after_fork(void);

extern "C" void
__heap_after_fork_child(void)
{
	__init_after_fork();
	static processHeap *pHeap = getAllocator();
	for (int i = 0; i < pHeap->getMaxThreadHeaps(); i++)
		pHeap->getHeap(i).initLock();

	pHeap->initLock();
}


extern "C" void
__heap_after_fork_parent(void)
{
	static processHeap *pHeap = getAllocator();
	for (int i = 0; i < pHeap->getMaxThreadHeaps(); i++)
		pHeap->getHeap(i).unlock();

	static_cast<hoardHeap*>(pHeap)->unlock();
}


extern "C" void
__heap_thread_init(void)
{
}


extern "C" void
__heap_thread_exit(void)
{
}


//	#pragma mark - public functions


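// malloc() routes the request to the calling thread's heap (selected by
// getHeapIndex()) and defers signals for the duration of the allocation
// so that a signal handler cannot re-enter the heap mid-operation.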
extern "C" void *
malloc(size_t size)
{
	static processHeap *pHeap = getAllocator();

#if HEAP_WALL
	size += 2 * HEAP_WALL_SIZE;
#endif

	defer_signals();

	void *addr = pHeap->getHeap(pHeap->getHeapIndex()).malloc(size);
	if (addr == NULL) {
		undefer_signals();
		__set_errno(B_NO_MEMORY);
		KTRACE("malloc(%lu) -> NULL", size);
		return NULL;
	}

#if HEAP_LEAK_CHECK
	add_address(addr, size);
#endif

	undefer_signals();

#if HEAP_WALL
	addr = set_wall(addr, size);
#endif

	KTRACE("malloc(%lu) -> %p", size, addr);

	return addr;
}


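// calloc() first checks that nelem * elsize did not overflow (the
// division test below detects wrap-around), then allocates and zeroes
// the block.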
extern "C" void *
calloc(size_t nelem, size_t elsize)
{
	static processHeap *pHeap = getAllocator();
	size_t size = nelem * elsize;
	void *ptr = NULL;

	if ((nelem > 0) && ((size / nelem) != elsize))
		goto nomem;

#if HEAP_WALL
	size += 2 * HEAP_WALL_SIZE;

	if (nelem == 0 || elsize == 0)
		goto ok;
	if (size < (nelem * size) && size < (elsize * size))
		goto nomem;

ok:
#endif

	defer_signals();

	ptr = pHeap->getHeap(pHeap->getHeapIndex()).malloc(size);
	if (ptr == NULL) {
		undefer_signals();
	nomem:
		__set_errno(B_NO_MEMORY);
		KTRACE("calloc(%lu, %lu) -> NULL", nelem, elsize);
		return NULL;
	}

#if HEAP_LEAK_CHECK
	add_address(ptr, size);
#endif

	undefer_signals();

#if HEAP_WALL
	ptr = set_wall(ptr, size);
	size -= 2 * HEAP_WALL_SIZE;
#endif

	// Zero out the malloc'd block.
	memset(ptr, 0, size);
	KTRACE("calloc(%lu, %lu) -> %p", nelem, elsize, ptr);
	return ptr;
}


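// free() checks the guard walls (if enabled), removes the block from the
// leak-check list, and hands the pointer back to the process heap. In
// the non-wall build a NULL pointer is passed straight through to
// pHeap->free().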
extern "C" void
free(void *ptr)
{
	static processHeap *pHeap = getAllocator();

#if HEAP_WALL
	if (ptr == NULL)
		return;
	KTRACE("free(%p)", ptr);
	ptr = check_wall((uint8*)ptr);
#else
	KTRACE("free(%p)", ptr);
#endif

	defer_signals();

#if HEAP_LEAK_CHECK
	if (ptr != NULL)
		remove_address(ptr);
#endif
	pHeap->free(ptr);

	undefer_signals();
}


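// memalign() and the aligned allocators below all funnel into the thread
// heap's memalign(). The wall code does not support them, presumably
// because the front guard would shift the user pointer off the requested
// alignment.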
extern "C" void *
memalign(size_t alignment, size_t size)
{
	static processHeap *pHeap = getAllocator();

#if HEAP_WALL
	debug_printf("memalign() is not yet supported by the wall code.\n");
	return NULL;
#endif

	defer_signals();

	void *addr = pHeap->getHeap(pHeap->getHeapIndex()).memalign(alignment,
		size);
	if (addr == NULL) {
		undefer_signals();
		__set_errno(B_NO_MEMORY);
		KTRACE("memalign(%lu, %lu) -> NULL", alignment, size);
		return NULL;
	}

#if HEAP_LEAK_CHECK
	add_address(addr, size);
#endif

	undefer_signals();

	KTRACE("memalign(%lu, %lu) -> %p", alignment, size, addr);
	return addr;
}


extern "C" void *
aligned_alloc(size_t alignment, size_t size)
{
	if (size % alignment != 0) {
		__set_errno(B_BAD_VALUE);
		return NULL;
	}
	return memalign(alignment, size);
}


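// posix_memalign() reports errors via its return value instead of errno,
// as POSIX requires; the only validation done here is that the alignment
// is a multiple of sizeof(void *).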
extern "C" int
posix_memalign(void **_pointer, size_t alignment, size_t size)
{
	if ((alignment & (sizeof(void *) - 1)) != 0 || _pointer == NULL)
		return B_BAD_VALUE;

#if HEAP_WALL
	debug_printf("posix_memalign() is not yet supported by the wall code.\n");
	return -1;
#endif
	static processHeap *pHeap = getAllocator();
	defer_signals();
	void *pointer = pHeap->getHeap(pHeap->getHeapIndex()).memalign(alignment,
		size);
	if (pointer == NULL) {
		undefer_signals();
		KTRACE("posix_memalign(%p, %lu, %lu) -> NULL", _pointer, alignment,
			size);
		return B_NO_MEMORY;
	}

#if HEAP_LEAK_CHECK
	add_address(pointer, size);
#endif

	undefer_signals();

	*_pointer = pointer;
	KTRACE("posix_memalign(%p, %lu, %lu) -> %p", _pointer, alignment, size,
		pointer);
	return 0;
}


extern "C" void *
valloc(size_t size)
{
	return memalign(B_PAGE_SIZE, size);
}


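// realloc() first tries to reuse the existing block: the heap rounds
// requests up to a size class, so the object may already be big enough.
// Only when it is not does a fresh allocate-copy-free cycle happen.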
extern "C" void *
realloc(void *ptr, size_t size)
{
	if (ptr == NULL)
		return malloc(size);

	if (size == 0) {
		free(ptr);
		return NULL;
	}

	// If the existing object can hold the new size,
	// just return it.

#if HEAP_WALL
	size += 2 * HEAP_WALL_SIZE;
	ptr = (uint8*)ptr - HEAP_WALL_SIZE;
#endif

	size_t objSize = threadHeap::objectSize(ptr);
	if (objSize >= size) {
#if HEAP_WALL
		check_wall((uint8*)ptr + HEAP_WALL_SIZE);
		ptr = set_wall(ptr, size);
#endif
		KTRACE("realloc(%p, %lu) -> %p", ptr, size, ptr);
		return ptr;
	}

#if HEAP_WALL
	size -= 2 * HEAP_WALL_SIZE;
	objSize -= 2 * HEAP_WALL_SIZE;
	ptr = (uint8*)ptr + HEAP_WALL_SIZE;
#endif

	// Allocate a new block of the requested size.
	void *buffer = malloc(size);
	if (buffer == NULL) {
		// Allocation failed; leave the old block untouched and return.
		__set_errno(B_NO_MEMORY);
		KTRACE("realloc(%p, %lu) -> NULL", ptr, size);
		return NULL;
	}

	// Copy the contents of the original object
	// up to the size of the new block.

	size_t minSize = (objSize < size) ? objSize : size;
	memcpy(buffer, ptr, minSize);

	// Free the old block.
	free(ptr);

	// Return a pointer to the new one.
	KTRACE("realloc(%p, %lu) -> %p", ptr, size, buffer);
	return buffer;
}


extern "C" size_t
malloc_usable_size(void *ptr)
{
	if (ptr == NULL)
		return 0;
	return threadHeap::objectSize(ptr);
}


//	#pragma mark - BeOS specific extensions


struct mstats {
	size_t bytes_total;
	size_t chunks_used;
	size_t bytes_used;
	size_t chunks_free;
	size_t bytes_free;
};


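// mstats() is a BeOS-era extension: it aggregates per-size-class usage
// statistics from the process heap into a single snapshot structure.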
extern "C" struct mstats mstats(void);

extern "C" struct mstats
mstats(void)
{
	// Note: the static stats structure is not thread-safe,
	// but that doesn't matter much here.
	processHeap *heap = getAllocator();
	static struct mstats stats;

	int allocated = 0;
	int used = 0;
	int chunks = 0;

	for (int i = 0; i < hoardHeap::SIZE_CLASSES; i++) {
		int classUsed, classAllocated;
		heap->getStats(i, classUsed, classAllocated);

		if (classUsed > 0)
			chunks++;

		allocated += classAllocated;
		used += classUsed;
	}

	stats.bytes_total = allocated;
	stats.chunks_used = chunks;
	stats.bytes_used = used;
	stats.chunks_free = hoardHeap::SIZE_CLASSES - chunks;
	stats.bytes_free = allocated - used;

	return stats;
}
618