xref: /haiku/src/system/kernel/heap.cpp (revision b6b0567fbd186f8ce8a0c90bdc7a7b5b4c649678)
1 /*
2  * Copyright 2008, Michael Lotz, mmlr@mlotz.ch.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
6  * Distributed under the terms of the MIT License.
7  *
8  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
9  * Distributed under the terms of the NewOS License.
10  */
11 
12 #include <arch/debug.h>
13 #include <debug.h>
14 #include <elf.h>
15 #include <heap.h>
16 #include <int.h>
17 #include <kernel.h>
18 #include <lock.h>
19 #include <malloc.h>
20 #include <signal.h>
21 #include <string.h>
22 #include <team.h>
23 #include <thread.h>
24 #include <tracing.h>
25 #include <util/AutoLock.h>
26 #include <vm.h>
27 
28 
29 //#define TRACE_HEAP
30 #ifdef TRACE_HEAP
31 #	define TRACE(x) dprintf x
32 #else
33 #	define TRACE(x) ;
34 #endif
35 
36 
37 #if KERNEL_HEAP_LEAK_CHECK
38 typedef struct heap_leak_check_info_s {
39 	addr_t		caller;
40 	size_t		size;
41 	thread_id	thread;
42 	team_id		team;
43 } heap_leak_check_info;
44 
45 struct caller_info {
46 	addr_t		caller;
47 	uint32		count;
48 	uint32		size;
49 };
50 
51 static const int32 kCallerInfoTableSize = 1024;
52 static caller_info sCallerInfoTable[kCallerInfoTableSize];
53 static int32 sCallerInfoCount = 0;
54 #endif
55 
56 typedef struct heap_page_s heap_page;
57 
58 typedef struct heap_area_s {
59 	area_id			area;
60 
61 	addr_t			base;
62 	size_t			size;
63 
64 	uint32			page_count;
65 	uint32			free_page_count;
66 
67 	heap_page *		free_pages;
68 	heap_page *		page_table;
69 
70 	heap_area_s *	prev;
71 	heap_area_s *	next;
72 	heap_area_s *	all_next;
73 } heap_area;
74 
75 typedef struct heap_page_s {
76 	heap_area *		area;
77 	uint16			index;
78 	uint16			bin_index : 5;
79 	uint16			free_count : 10;
80 	uint16			in_use : 1;
81 	heap_page_s *	next;
82 	heap_page_s *	prev;
83 	union {
84 		uint16			empty_index;
85 		uint16			allocation_id; // used for bin == bin_count allocations
86 	};
87 	addr_t *		free_list;
88 } heap_page;
89 
90 typedef struct heap_bin_s {
91 	mutex		lock;
92 	uint32		element_size;
93 	uint16		max_free_count;
94 	heap_page *	page_list; // sorted so that the desired page is always first
95 } heap_bin;
96 
97 struct heap_allocator_s {
98 	rw_lock		area_lock;
99 	mutex		page_lock;
100 
101 	const char *name;
102 	uint32		bin_count;
103 	uint32		page_size;
104 
105 	uint32		total_pages;
106 	uint32		total_free_pages;
107 	uint32		empty_areas;
108 
109 #if KERNEL_HEAP_LEAK_CHECK
110 	addr_t		(*get_caller)();
111 #endif
112 
113 	heap_bin *	bins;
114 	heap_area *	areas; // sorted so that the desired area is always first
115 	heap_area *	all_areas; // all areas including full ones
116 };
117 
118 static const uint32 kAreaAllocationMagic = 'AAMG';
119 typedef struct area_allocation_info_s {
120 	area_id		area;
121 	void *		base;
122 	uint32		magic;
123 	size_t		size;
124 	size_t		allocation_size;
125 	size_t		allocation_alignment;
126 	void *		allocation_base;
127 } area_allocation_info;
128 
129 struct DeferredFreeListEntry : SinglyLinkedListLinkImpl<DeferredFreeListEntry> {
130 };
131 
132 typedef SinglyLinkedList<DeferredFreeListEntry> DeferredFreeList;
133 typedef SinglyLinkedList<DeferredDeletable> DeferredDeletableList;
134 
135 // Heap class configuration
136 #define HEAP_CLASS_COUNT 3
137 static heap_class sHeapClasses[HEAP_CLASS_COUNT] = {
138 	{
139 		"small",					/* name */
140 		50,							/* initial percentage */
141 		B_PAGE_SIZE / 8,			/* max allocation size */
142 		B_PAGE_SIZE,				/* page size */
143 		8,							/* min bin size */
144 		4,							/* bin alignment */
145 		8,							/* min count per page */
146 		16							/* max waste per page */
147 	},
148 	{
149 		"medium",					/* name */
150 		30,							/* initial percentage */
151 		B_PAGE_SIZE * 2,			/* max allocation size */
152 		B_PAGE_SIZE * 8,			/* page size */
153 		B_PAGE_SIZE / 8,			/* min bin size */
154 		32,							/* bin alignment */
155 		4,							/* min count per page */
156 		64							/* max waste per page */
157 	},
158 	{
159 		"large",					/* name */
160 		20,							/* initial percentage */
161 		HEAP_AREA_USE_THRESHOLD,	/* max allocation size */
162 		B_PAGE_SIZE * 16,			/* page size */
163 		B_PAGE_SIZE * 2,			/* min bin size */
164 		128,						/* bin alignment */
165 		1,							/* min count per page */
166 		256							/* max waste per page */
167 	}
168 };
169 
170 static heap_allocator *sHeaps[HEAP_CLASS_COUNT];
171 static uint32 *sLastGrowRequest[HEAP_CLASS_COUNT];
172 static uint32 *sLastHandledGrowRequest[HEAP_CLASS_COUNT];
173 static heap_allocator *sGrowHeap = NULL;
174 static thread_id sHeapGrowThread = -1;
175 static sem_id sHeapGrowSem = -1;
176 static sem_id sHeapGrownNotify = -1;
177 static bool sAddGrowHeap = false;
178 
179 static DeferredFreeList sDeferredFreeList;
180 static DeferredDeletableList sDeferredDeletableList;
181 static spinlock sDeferredFreeListLock;
182 
183 
184 
185 // #pragma mark - Tracing
186 
187 #if KERNEL_HEAP_TRACING
188 namespace KernelHeapTracing {
189 
190 class Allocate : public AbstractTraceEntry {
191 	public:
192 		Allocate(addr_t address, size_t size)
193 			:	fAddress(address),
194 				fSize(size)
195 		{
196 			Initialized();
197 		}
198 
199 		virtual void AddDump(TraceOutput &out)
200 		{
201 			out.Print("heap allocate: 0x%08lx (%lu bytes)", fAddress, fSize);
202 		}
203 
204 	private:
205 		addr_t	fAddress;
206 		size_t	fSize;
207 };
208 
209 
210 class Reallocate : public AbstractTraceEntry {
211 	public:
212 		Reallocate(addr_t oldAddress, addr_t newAddress, size_t newSize)
213 			:	fOldAddress(oldAddress),
214 				fNewAddress(newAddress),
215 				fNewSize(newSize)
216 		{
217 			Initialized();
218 		};
219 
220 		virtual void AddDump(TraceOutput &out)
221 		{
222 			out.Print("heap reallocate: 0x%08lx -> 0x%08lx (%lu bytes)",
223 				fOldAddress, fNewAddress, fNewSize);
224 		}
225 
226 	private:
227 		addr_t	fOldAddress;
228 		addr_t	fNewAddress;
229 		size_t	fNewSize;
230 };
231 
232 
233 class Free : public AbstractTraceEntry {
234 	public:
235 		Free(addr_t address)
236 			:	fAddress(address)
237 		{
238 			Initialized();
239 		};
240 
241 		virtual void AddDump(TraceOutput &out)
242 		{
243 			out.Print("heap free: 0x%08lx", fAddress);
244 		}
245 
246 	private:
247 		addr_t	fAddress;
248 };
249 
250 
251 } // namespace KernelHeapTracing
252 
253 #	define T(x)	if (!gKernelStartup) new(std::nothrow) KernelHeapTracing::x;
254 #else
255 #	define T(x)	;
256 #endif
257 
258 
259 // #pragma mark - Debug functions
260 
261 
262 #if KERNEL_HEAP_LEAK_CHECK
263 static addr_t
264 get_caller()
265 {
266 	// Find the first return address outside of the allocator code. Note, that
267 	// this makes certain assumptions about how the code for the functions
268 	// ends up in the kernel object.
269 	addr_t returnAddresses[5];
270 	int32 depth = arch_debug_get_stack_trace(returnAddresses, 5, 0, 1, false);
271 	for (int32 i = 0; i < depth; i++) {
272 		if (returnAddresses[i] < (addr_t)&get_caller
273 			|| returnAddresses[i] > (addr_t)&malloc_referenced_release) {
274 			return returnAddresses[i];
275 		}
276 	}
277 
278 	return 0;
279 }
280 #endif
281 
282 
283 static void
284 dump_page(heap_page *page)
285 {
286 	uint32 count = 0;
287 	for (addr_t *temp = page->free_list; temp != NULL; temp = (addr_t *)*temp)
288 		count++;
289 
290 	kprintf("\t\tpage %p: bin_index: %u; free_count: %u; empty_index: %u; "
291 		"free_list %p (%lu entr%s)\n", page, page->bin_index, page->free_count,
292 		page->empty_index, page->free_list, count, count == 1 ? "y" : "ies");
293 }
294 
295 
296 static void
297 dump_bin(heap_bin *bin)
298 {
299 	kprintf("\telement_size: %lu; max_free_count: %u; page_list %p;\n",
300 		bin->element_size, bin->max_free_count, bin->page_list);
301 
302 	for (heap_page *temp = bin->page_list; temp != NULL; temp = temp->next)
303 		dump_page(temp);
304 }
305 
306 
307 static void
308 dump_bin_list(heap_allocator *heap)
309 {
310 	for (uint32 i = 0; i < heap->bin_count; i++)
311 		dump_bin(&heap->bins[i]);
312 	kprintf("\n");
313 }
314 
315 
316 static void
317 dump_allocator_areas(heap_allocator *heap)
318 {
319 	heap_area *area = heap->all_areas;
320 	while (area) {
321 		kprintf("\tarea %p: area: %ld; base: 0x%08lx; size: %lu; page_count: "
322 			"%lu; free_pages: %p (%lu entr%s)\n", area, area->area, area->base,
323 			area->size, area->page_count, area->free_pages,
324 			area->free_page_count, area->free_page_count == 1 ? "y" : "ies");
325 		area = area->all_next;
326 	}
327 
328 	kprintf("\n");
329 }
330 
331 
332 static void
333 dump_allocator(heap_allocator *heap, bool areas, bool bins)
334 {
335 	kprintf("allocator %p: name: %s; page_size: %lu; bin_count: %lu; pages: "
336 		"%lu; free_pages: %lu; empty_areas: %lu\n", heap, heap->name,
337 		heap->page_size, heap->bin_count, heap->total_pages,
338 		heap->total_free_pages, heap->empty_areas);
339 
340 	if (areas)
341 		dump_allocator_areas(heap);
342 	if (bins)
343 		dump_bin_list(heap);
344 }
345 
346 
347 static int
348 dump_heap_list(int argc, char **argv)
349 {
350 	if (argc == 2) {
351 		uint64 heapAddress;
352 
353 		if (strcmp(argv[1], "grow") == 0) {
354 			// only dump dedicated grow heap info
355 			kprintf("dedicated grow heap:\n");
356 			dump_allocator(sGrowHeap, true, true);
357 		} else if (strcmp(argv[1], "stats") == 0) {
358 			for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++)
359 				dump_allocator(sHeaps[i], false, false);
360 		} else if (evaluate_debug_expression(argv[1], &heapAddress, true)) {
361 			dump_allocator((heap_allocator*)(addr_t)heapAddress, true, true);
362 		} else
363 			print_debugger_command_usage(argv[0]);
364 
365 		return 0;
366 	}
367 
368 	for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++)
369 		dump_allocator(sHeaps[i], true, true);
370 
371 	return 0;
372 }
373 
374 
375 #if !KERNEL_HEAP_LEAK_CHECK
376 
377 static int
378 dump_allocations(int argc, char **argv)
379 {
380 	uint64 heapAddress = 0;
381 	bool statsOnly = false;
382 	for (int32 i = 1; i < argc; i++) {
383 		if (strcmp(argv[i], "stats") == 0)
384 			statsOnly = true;
385 		else if (!evaluate_debug_expression(argv[i], &heapAddress, true)) {
386 			print_debugger_command_usage(argv[0]);
387 			return 0;
388 		}
389 	}
390 
391 	size_t totalSize = 0;
392 	uint32 totalCount = 0;
393 	for (uint32 classIndex = 0; classIndex < HEAP_CLASS_COUNT; classIndex++) {
394 		heap_allocator *heap = sHeaps[classIndex];
395 		if (heapAddress != 0)
396 			heap = (heap_allocator *)(addr_t)heapAddress;
397 
398 		// go through all the pages in all the areas
399 		heap_area *area = heap->all_areas;
400 		while (area) {
401 			for (uint32 i = 0; i < area->page_count; i++) {
402 				heap_page *page = &area->page_table[i];
403 				if (!page->in_use)
404 					continue;
405 
406 				addr_t base = area->base + i * heap->page_size;
407 				if (page->bin_index < heap->bin_count) {
408 					// page is used by a small allocation bin
409 					uint32 elementCount = page->empty_index;
410 					size_t elementSize
411 						= heap->bins[page->bin_index].element_size;
412 					for (uint32 j = 0; j < elementCount;
413 							j++, base += elementSize) {
414 						// walk the free list to see if this element is in use
415 						bool elementInUse = true;
416 						for (addr_t *temp = page->free_list; temp != NULL;
417 								temp = (addr_t *)*temp) {
418 							if ((addr_t)temp == base) {
419 								elementInUse = false;
420 								break;
421 							}
422 						}
423 
424 						if (!elementInUse)
425 							continue;
426 
427 						if (!statsOnly) {
428 							kprintf("address: 0x%08lx; size: %lu bytes\n",
429 								base, elementSize);
430 						}
431 
432 						totalSize += elementSize;
433 						totalCount++;
434 					}
435 				} else {
436 					// page is used by a big allocation, find the page count
437 					uint32 pageCount = 1;
438 					while (i + pageCount < area->page_count
439 						&& area->page_table[i + pageCount].in_use
440 						&& area->page_table[i + pageCount].bin_index
441 							== heap->bin_count
442 						&& area->page_table[i + pageCount].allocation_id
443 							== page->allocation_id)
444 						pageCount++;
445 
446 					size_t size = pageCount * heap->page_size;
447 
448 					if (!statsOnly) {
449 						kprintf("address: 0x%08lx; size: %lu bytes\n", base,
450 							size);
451 					}
452 
453 					totalSize += size;
454 					totalCount++;
455 
456 					// skip the allocated pages
457 					i += pageCount - 1;
458 				}
459 			}
460 
461 			area = area->all_next;
462 		}
463 
464 		if (heapAddress != 0)
465 			break;
466 	}
467 
468 	kprintf("total allocations: %lu; total bytes: %lu\n", totalCount, totalSize);
469 	return 0;
470 }
471 
472 #else // !KERNEL_HEAP_LEAK_CHECK
473 
474 static int
475 dump_allocations(int argc, char **argv)
476 {
477 	team_id team = -1;
478 	thread_id thread = -1;
479 	addr_t caller = 0;
480 	bool statsOnly = false;
481 	for (int32 i = 1; i < argc; i++) {
482 		if (strcmp(argv[i], "team") == 0)
483 			team = strtoul(argv[++i], NULL, 0);
484 		else if (strcmp(argv[i], "thread") == 0)
485 			thread = strtoul(argv[++i], NULL, 0);
486 		else if (strcmp(argv[i], "caller") == 0)
487 			caller = strtoul(argv[++i], NULL, 0);
488 		else if (strcmp(argv[i], "stats") == 0)
489 			statsOnly = true;
490 		else {
491 			print_debugger_command_usage(argv[0]);
492 			return 0;
493 		}
494 	}
495 
496 	size_t totalSize = 0;
497 	uint32 totalCount = 0;
498 	for (uint32 classIndex = 0; classIndex < HEAP_CLASS_COUNT; classIndex++) {
499 		heap_allocator *heap = sHeaps[classIndex];
500 
501 		// go through all the pages in all the areas
502 		heap_area *area = heap->all_areas;
503 		while (area) {
504 			heap_leak_check_info *info = NULL;
505 			for (uint32 i = 0; i < area->page_count; i++) {
506 				heap_page *page = &area->page_table[i];
507 				if (!page->in_use)
508 					continue;
509 
510 				addr_t base = area->base + i * heap->page_size;
511 				if (page->bin_index < heap->bin_count) {
512 					// page is used by a small allocation bin
513 					uint32 elementCount = page->empty_index;
514 					size_t elementSize
515 						= heap->bins[page->bin_index].element_size;
516 					for (uint32 j = 0; j < elementCount;
517 							j++, base += elementSize) {
518 						// walk the free list to see if this element is in use
519 						bool elementInUse = true;
520 						for (addr_t *temp = page->free_list; temp != NULL;
521 								temp = (addr_t *)*temp) {
522 							if ((addr_t)temp == base) {
523 								elementInUse = false;
524 								break;
525 							}
526 						}
527 
528 						if (!elementInUse)
529 							continue;
530 
531 						info = (heap_leak_check_info *)(base + elementSize
532 							- sizeof(heap_leak_check_info));
533 
534 						if ((team == -1 || info->team == team)
535 							&& (thread == -1 || info->thread == thread)
536 							&& (caller == 0 || info->caller == caller)) {
537 							// interesting...
538 							if (!statsOnly) {
539 								kprintf("team: % 6ld; thread: % 6ld; "
540 									"address: 0x%08lx; size: %lu bytes; "
541 									"caller: %#lx\n", info->team, info->thread,
542 									base, info->size, info->caller);
543 							}
544 
545 							totalSize += info->size;
546 							totalCount++;
547 						}
548 					}
549 				} else {
550 					// page is used by a big allocation, find the page count
551 					uint32 pageCount = 1;
552 					while (i + pageCount < area->page_count
553 						&& area->page_table[i + pageCount].in_use
554 						&& area->page_table[i + pageCount].bin_index
555 							== heap->bin_count
556 						&& area->page_table[i + pageCount].allocation_id
557 							== page->allocation_id)
558 						pageCount++;
559 
560 					info = (heap_leak_check_info *)(base + pageCount
561 						* heap->page_size - sizeof(heap_leak_check_info));
562 
563 					if ((team == -1 || info->team == team)
564 						&& (thread == -1 || info->thread == thread)
565 						&& (caller == 0 || info->caller == caller)) {
566 						// interesting...
567 						if (!statsOnly) {
568 							kprintf("team: % 6ld; thread: % 6ld;"
569 								" address: 0x%08lx; size: %lu bytes;"
570 								" caller: %#lx\n", info->team, info->thread,
571 								base, info->size, info->caller);
572 						}
573 
574 						totalSize += info->size;
575 						totalCount++;
576 					}
577 
578 					// skip the allocated pages
579 					i += pageCount - 1;
580 				}
581 			}
582 
583 			area = area->all_next;
584 		}
585 	}
586 
587 	kprintf("total allocations: %lu; total bytes: %lu\n", totalCount, totalSize);
588 	return 0;
589 }
590 
591 
592 static caller_info*
593 get_caller_info(addr_t caller)
594 {
595 	// find the caller info
596 	for (int32 i = 0; i < sCallerInfoCount; i++) {
597 		if (caller == sCallerInfoTable[i].caller)
598 			return &sCallerInfoTable[i];
599 	}
600 
601 	// not found, add a new entry, if there are free slots
602 	if (sCallerInfoCount >= kCallerInfoTableSize)
603 		return NULL;
604 
605 	caller_info* info = &sCallerInfoTable[sCallerInfoCount++];
606 	info->caller = caller;
607 	info->count = 0;
608 	info->size = 0;
609 
610 	return info;
611 }
612 
613 
614 static int
615 caller_info_compare_size(const void* _a, const void* _b)
616 {
617 	const caller_info* a = (const caller_info*)_a;
618 	const caller_info* b = (const caller_info*)_b;
619 	return (int)(b->size - a->size);
620 }
621 
622 
623 static int
624 caller_info_compare_count(const void* _a, const void* _b)
625 {
626 	const caller_info* a = (const caller_info*)_a;
627 	const caller_info* b = (const caller_info*)_b;
628 	return (int)(b->count - a->count);
629 }
630 
631 
632 static bool
633 analyze_allocation_callers(heap_allocator *heap)
634 {
635 	// go through all the pages in all the areas
636 	heap_area *area = heap->all_areas;
637 	while (area) {
638 		heap_leak_check_info *info = NULL;
639 		for (uint32 i = 0; i < area->page_count; i++) {
640 			heap_page *page = &area->page_table[i];
641 			if (!page->in_use)
642 				continue;
643 
644 			addr_t base = area->base + i * heap->page_size;
645 			if (page->bin_index < heap->bin_count) {
646 				// page is used by a small allocation bin
647 				uint32 elementCount = page->empty_index;
648 				size_t elementSize = heap->bins[page->bin_index].element_size;
649 				for (uint32 j = 0; j < elementCount; j++, base += elementSize) {
650 					// walk the free list to see if this element is in use
651 					bool elementInUse = true;
652 					for (addr_t *temp = page->free_list; temp != NULL;
653 						temp = (addr_t *)*temp) {
654 						if ((addr_t)temp == base) {
655 							elementInUse = false;
656 							break;
657 						}
658 					}
659 
660 					if (!elementInUse)
661 						continue;
662 
663 					info = (heap_leak_check_info *)(base + elementSize
664 						- sizeof(heap_leak_check_info));
665 
666 					caller_info *callerInfo = get_caller_info(info->caller);
667 					if (callerInfo == NULL) {
668 						kprintf("out of space for caller infos\n");
669 						return 0;
670 					}
671 
672 					callerInfo->count++;
673 					callerInfo->size += info->size;
674 				}
675 			} else {
676 				// page is used by a big allocation, find the page count
677 				uint32 pageCount = 1;
678 				while (i + pageCount < area->page_count
679 					&& area->page_table[i + pageCount].in_use
680 					&& area->page_table[i + pageCount].bin_index
681 						== heap->bin_count
682 					&& area->page_table[i + pageCount].allocation_id
683 						== page->allocation_id) {
684 					pageCount++;
685 				}
686 
687 				info = (heap_leak_check_info *)(base + pageCount
688 					* heap->page_size - sizeof(heap_leak_check_info));
689 
690 				caller_info *callerInfo = get_caller_info(info->caller);
691 				if (callerInfo == NULL) {
692 					kprintf("out of space for caller infos\n");
693 					return false;
694 				}
695 
696 				callerInfo->count++;
697 				callerInfo->size += info->size;
698 
699 				// skip the allocated pages
700 				i += pageCount - 1;
701 			}
702 		}
703 
704 		area = area->all_next;
705 	}
706 
707 	return true;
708 }
709 
710 
711 static int
712 dump_allocations_per_caller(int argc, char **argv)
713 {
714 	bool sortBySize = true;
715 	heap_allocator *heap = NULL;
716 
717 	for (int32 i = 1; i < argc; i++) {
718 		if (strcmp(argv[i], "-c") == 0) {
719 			sortBySize = false;
720 		} else if (strcmp(argv[i], "-h") == 0) {
721 			uint64 heapAddress;
722 			if (++i >= argc
723 				|| !evaluate_debug_expression(argv[i], &heapAddress, true)) {
724 				print_debugger_command_usage(argv[0]);
725 				return 0;
726 			}
727 
728 			heap = (heap_allocator*)(addr_t)heapAddress;
729 		} else {
730 			print_debugger_command_usage(argv[0]);
731 			return 0;
732 		}
733 	}
734 
735 	sCallerInfoCount = 0;
736 
737 	if (heap != NULL) {
738 		if (!analyze_allocation_callers(heap))
739 			return 0;
740 	} else {
741 		for (uint32 classIndex = 0; classIndex < HEAP_CLASS_COUNT;
742 				classIndex++) {
743 			if (!analyze_allocation_callers(sHeaps[classIndex]))
744 				return 0;
745 		}
746 	}
747 
748 	// sort the array
749 	qsort(sCallerInfoTable, sCallerInfoCount, sizeof(caller_info),
750 		sortBySize ? &caller_info_compare_size : &caller_info_compare_count);
751 
752 	kprintf("%ld different callers, sorted by %s...\n\n", sCallerInfoCount,
753 		sortBySize ? "size" : "count");
754 
755 	kprintf("     count        size      caller\n");
756 	kprintf("----------------------------------\n");
757 	for (int32 i = 0; i < sCallerInfoCount; i++) {
758 		caller_info& info = sCallerInfoTable[i];
759 		kprintf("%10ld  %10ld  %#08lx", info.count, info.size, info.caller);
760 
761 		const char *symbol;
762 		const char *imageName;
763 		bool exactMatch;
764 		addr_t baseAddress;
765 
766 		if (elf_debug_lookup_symbol_address(info.caller, &baseAddress, &symbol,
767 				&imageName, &exactMatch) == B_OK) {
768 			kprintf("  %s + 0x%lx (%s)%s\n", symbol,
769 				info.caller - baseAddress, imageName,
770 				exactMatch ? "" : " (nearest)");
771 		} else
772 			kprintf("\n");
773 	}
774 
775 	return 0;
776 }
777 
778 #endif // KERNEL_HEAP_LEAK_CHECK
779 
780 
781 #if PARANOID_HEAP_VALIDATION
782 static void
783 heap_validate_heap(heap_allocator *heap)
784 {
785 	ReadLocker areaReadLocker(heap->area_lock);
786 	for (uint32 i = 0; i < heap->bin_count; i++)
787 		mutex_lock(&heap->bins[i].lock);
788 	MutexLocker pageLocker(heap->page_lock);
789 
790 	uint32 totalPageCount = 0;
791 	uint32 totalFreePageCount = 0;
792 	heap_area *area = heap->all_areas;
793 	while (area != NULL) {
794 		// validate the free pages list
795 		uint32 freePageCount = 0;
796 		heap_page *lastPage = NULL;
797 		heap_page *page = area->free_pages;
798 		while (page) {
799 			if ((addr_t)page < (addr_t)&area->page_table[0]
800 				|| (addr_t)page >= (addr_t)&area->page_table[area->page_count])
801 				panic("free page is not part of the page table\n");
802 
803 			if (page->index >= area->page_count)
804 				panic("free page has invalid index\n");
805 
806 			if ((addr_t)&area->page_table[page->index] != (addr_t)page)
807 				panic("free page index does not lead to target page\n");
808 
809 			if (page->prev != lastPage)
810 				panic("free page entry has invalid prev link\n");
811 
812 			if (page->in_use)
813 				panic("free page marked as in use\n");
814 
815 			lastPage = page;
816 			page = page->next;
817 			freePageCount++;
818 		}
819 
820 		totalPageCount += freePageCount;
821 		totalFreePageCount += freePageCount;
822 		if (area->free_page_count != freePageCount)
823 			panic("free page count doesn't match free page list\n");
824 
825 		// validate the page table
826 		uint32 usedPageCount = 0;
827 		for (uint32 i = 0; i < area->page_count; i++) {
828 			if (area->page_table[i].in_use)
829 				usedPageCount++;
830 		}
831 
832 		totalPageCount += usedPageCount;
833 		if (freePageCount + usedPageCount != area->page_count) {
834 			panic("free pages and used pages do not add up (%lu + %lu != %lu)\n",
835 				freePageCount, usedPageCount, area->page_count);
836 		}
837 
838 		area = area->all_next;
839 	}
840 
841 	// validate the areas
842 	area = heap->areas;
843 	heap_area *lastArea = NULL;
844 	uint32 lastFreeCount = 0;
845 	while (area != NULL) {
846 		if (area->free_page_count < lastFreeCount)
847 			panic("size ordering of area list broken\n");
848 
849 		if (area->prev != lastArea)
850 			panic("area list entry has invalid prev link\n");
851 
852 		lastArea = area;
853 		lastFreeCount = area->free_page_count;
854 		area = area->next;
855 	}
856 
857 	lastArea = NULL;
858 	area = heap->all_areas;
859 	while (area != NULL) {
860 		if (lastArea != NULL && lastArea->base < area->base)
861 			panic("base ordering of all_areas list broken\n");
862 
863 		lastArea = area;
864 		area = area->all_next;
865 	}
866 
867 	// validate the bins
868 	for (uint32 i = 0; i < heap->bin_count; i++) {
869 		heap_bin *bin = &heap->bins[i];
870 		heap_page *lastPage = NULL;
871 		heap_page *page = bin->page_list;
872 		lastFreeCount = 0;
873 		while (page) {
874 			area = heap->all_areas;
875 			while (area) {
876 				if (area == page->area)
877 					break;
878 				area = area->all_next;
879 			}
880 
881 			if (area == NULL) {
882 				panic("page area not present in area list\n");
883 				page = page->next;
884 				continue;
885 			}
886 
887 			if ((addr_t)page < (addr_t)&area->page_table[0]
888 				|| (addr_t)page >= (addr_t)&area->page_table[area->page_count])
889 				panic("used page is not part of the page table\n");
890 
891 			if (page->index >= area->page_count)
892 				panic("used page has invalid index\n");
893 
894 			if ((addr_t)&area->page_table[page->index] != (addr_t)page)
895 				panic("used page index does not lead to target page\n");
896 
897 			if (page->prev != lastPage) {
898 				panic("used page entry has invalid prev link (%p vs %p bin "
899 					"%lu)\n", page->prev, lastPage, i);
900 			}
901 
902 			if (!page->in_use)
903 				panic("used page marked as not in use\n");
904 
905 			if (page->bin_index != i) {
906 				panic("used page with bin index %u in page list of bin %lu\n",
907 					page->bin_index, i);
908 			}
909 
910 			if (page->free_count < lastFreeCount)
911 				panic("ordering of bin page list broken\n");
912 
913 			// validate the free list
914 			uint32 freeSlotsCount = 0;
915 			addr_t *element = page->free_list;
916 			addr_t pageBase = area->base + page->index * heap->page_size;
917 			while (element) {
918 				if ((addr_t)element < pageBase
919 					|| (addr_t)element >= pageBase + heap->page_size)
920 					panic("free list entry out of page range\n");
921 
922 				if (((addr_t)element - pageBase) % bin->element_size != 0)
923 					panic("free list entry not on a element boundary\n");
924 
925 				element = (addr_t *)*element;
926 				freeSlotsCount++;
927 			}
928 
929 			uint32 slotCount = bin->max_free_count;
930 			if (page->empty_index > slotCount) {
931 				panic("empty index beyond slot count (%u with %lu slots)\n",
932 					page->empty_index, slotCount);
933 			}
934 
935 			freeSlotsCount += (slotCount - page->empty_index);
936 			if (freeSlotsCount > slotCount)
937 				panic("more free slots than fit into the page\n");
938 
939 			lastPage = page;
940 			lastFreeCount = page->free_count;
941 			page = page->next;
942 		}
943 	}
944 
945 	pageLocker.Unlock();
946 	for (uint32 i = 0; i < heap->bin_count; i++)
947 		mutex_unlock(&heap->bins[i].lock);
948 	areaReadLocker.Unlock();
949 }
950 #endif // PARANOID_HEAP_VALIDATION
951 
952 
953 // #pragma mark - Heap functions
954 
955 
956 static void
957 heap_add_area(heap_allocator *heap, area_id areaID, addr_t base, size_t size)
958 {
959 	heap_area *area = (heap_area *)base;
960 	area->area = areaID;
961 
962 	base += sizeof(heap_area);
963 	size -= sizeof(heap_area);
964 
965 	uint32 pageCount = size / heap->page_size;
966 	size_t pageTableSize = pageCount * sizeof(heap_page);
967 	area->page_table = (heap_page *)base;
968 	base += pageTableSize;
969 	size -= pageTableSize;
970 
971 	// the rest is now actually usable memory (rounded to the next page)
972 	area->base = ROUNDUP(base, B_PAGE_SIZE);
973 	area->size = size & ~(B_PAGE_SIZE - 1);
974 
975 	// now we know the real page count
976 	pageCount = area->size / heap->page_size;
977 	area->page_count = pageCount;
978 
979 	// zero out the page table and fill in page indexes
980 	memset((void *)area->page_table, 0, pageTableSize);
981 	for (uint32 i = 0; i < pageCount; i++) {
982 		area->page_table[i].area = area;
983 		area->page_table[i].index = i;
984 	}
985 
986 	// add all pages up into the free pages list
987 	for (uint32 i = 1; i < pageCount; i++) {
988 		area->page_table[i - 1].next = &area->page_table[i];
989 		area->page_table[i].prev = &area->page_table[i - 1];
990 	}
991 	area->free_pages = &area->page_table[0];
992 	area->free_page_count = pageCount;
993 	area->page_table[0].prev = NULL;
994 	area->next = NULL;
995 
996 	WriteLocker areaWriteLocker(heap->area_lock);
997 	MutexLocker pageLocker(heap->page_lock);
998 	if (heap->areas == NULL) {
999 		// it's the only (empty) area in that heap
1000 		area->prev = NULL;
1001 		heap->areas = area;
1002 	} else {
1003 		// link in this area as the last one as it is completely empty
1004 		heap_area *lastArea = heap->areas;
1005 		while (lastArea->next != NULL)
1006 			lastArea = lastArea->next;
1007 
1008 		lastArea->next = area;
1009 		area->prev = lastArea;
1010 	}
1011 
1012 	// insert this area in the all_areas list so it stays ordered by base
1013 	if (heap->all_areas == NULL || heap->all_areas->base < area->base) {
1014 		area->all_next = heap->all_areas;
1015 		heap->all_areas = area;
1016 	} else {
1017 		heap_area *insert = heap->all_areas;
1018 		while (insert->all_next && insert->all_next->base > area->base)
1019 			insert = insert->all_next;
1020 
1021 		area->all_next = insert->all_next;
1022 		insert->all_next = area;
1023 	}
1024 
1025 	heap->total_pages += area->page_count;
1026 	heap->total_free_pages += area->free_page_count;
1027 
1028 	if (areaID >= 0) {
1029 		// this later on deletable area is yet empty - the empty count will be
1030 		// decremented as soon as this area is used for the first time
1031 		heap->empty_areas++;
1032 	}
1033 
1034 	pageLocker.Unlock();
1035 	areaWriteLocker.Unlock();
1036 
1037 	dprintf("heap_add_area: area %ld added to %s heap %p - usable range 0x%08lx "
1038 		"- 0x%08lx\n", area->area, heap->name, heap, area->base,
1039 		area->base + area->size);
1040 }
1041 
1042 
1043 static status_t
1044 heap_remove_area(heap_allocator *heap, heap_area *area)
1045 {
1046 	if (area->free_page_count != area->page_count) {
1047 		panic("tried removing heap area that has still pages in use");
1048 		return B_ERROR;
1049 	}
1050 
1051 	if (area->prev == NULL && area->next == NULL) {
1052 		panic("tried removing the last non-full heap area");
1053 		return B_ERROR;
1054 	}
1055 
1056 	if (heap->areas == area)
1057 		heap->areas = area->next;
1058 	if (area->prev != NULL)
1059 		area->prev->next = area->next;
1060 	if (area->next != NULL)
1061 		area->next->prev = area->prev;
1062 
1063 	if (heap->all_areas == area)
1064 		heap->all_areas = area->all_next;
1065 	else {
1066 		heap_area *previous = heap->all_areas;
1067 		while (previous) {
1068 			if (previous->all_next == area) {
1069 				previous->all_next = area->all_next;
1070 				break;
1071 			}
1072 
1073 			previous = previous->all_next;
1074 		}
1075 
1076 		if (previous == NULL)
1077 			panic("removing heap area that is not in all list");
1078 	}
1079 
1080 	heap->total_pages -= area->page_count;
1081 	heap->total_free_pages -= area->free_page_count;
1082 
1083 	dprintf("heap_remove_area: area %ld with range 0x%08lx - 0x%08lx removed "
1084 		"from %s heap %p\n", area->area, area->base, area->base + area->size,
1085 		heap->name, heap);
1086 
1087 	return B_OK;
1088 }
1089 
1090 
1091 heap_allocator *
1092 heap_create_allocator(const char *name, addr_t base, size_t size,
1093 	const heap_class *heapClass)
1094 {
1095 	heap_allocator *heap = (heap_allocator *)base;
1096 	base += sizeof(heap_allocator);
1097 	size -= sizeof(heap_allocator);
1098 
1099 	heap->name = name;
1100 	heap->page_size = heapClass->page_size;
1101 	heap->total_pages = heap->total_free_pages = heap->empty_areas = 0;
1102 	heap->areas = heap->all_areas = NULL;
1103 	heap->bins = (heap_bin *)base;
1104 
1105 #if KERNEL_HEAP_LEAK_CHECK
1106 	heap->get_caller = &get_caller;
1107 #endif
1108 
1109 	heap->bin_count = 0;
1110 	size_t binSize = 0, lastSize = 0;
1111 	uint32 count = heap->page_size / heapClass->min_bin_size;
1112 	for (; count >= heapClass->min_count_per_page; count--, lastSize = binSize) {
1113 		binSize = (heap->page_size / count) & ~(heapClass->bin_alignment - 1);
1114 		if (binSize == lastSize)
1115 			continue;
1116 		if (heap->page_size - count * binSize > heapClass->max_waste_per_page)
1117 			continue;
1118 
1119 		heap_bin *bin = &heap->bins[heap->bin_count];
1120 		mutex_init(&bin->lock, "heap bin lock");
1121 		bin->element_size = binSize;
1122 		bin->max_free_count = heap->page_size / binSize;
1123 		bin->page_list = NULL;
1124 		heap->bin_count++;
1125 
1126 		if (heap->bin_count >= 32)
1127 			panic("heap configuration invalid - max bin count reached\n");
1128 	};
1129 
1130 	base += heap->bin_count * sizeof(heap_bin);
1131 	size -= heap->bin_count * sizeof(heap_bin);
1132 
1133 	rw_lock_init(&heap->area_lock, "heap area rw lock");
1134 	mutex_init(&heap->page_lock, "heap page lock");
1135 
1136 	heap_add_area(heap, -1, base, size);
1137 	return heap;
1138 }
1139 
1140 
1141 static inline void
1142 heap_free_pages_added(heap_allocator *heap, heap_area *area, uint32 pageCount)
1143 {
1144 	area->free_page_count += pageCount;
1145 	heap->total_free_pages += pageCount;
1146 
1147 	if (area->free_page_count == pageCount) {
1148 		// we need to add ourselfs to the area list of the heap
1149 		area->prev = NULL;
1150 		area->next = heap->areas;
1151 		if (area->next)
1152 			area->next->prev = area;
1153 		heap->areas = area;
1154 	} else {
1155 		// we might need to move back in the area list
1156 		if (area->next && area->next->free_page_count < area->free_page_count) {
1157 			// move ourselfs so the list stays ordered
1158 			heap_area *insert = area->next;
1159 			while (insert->next
1160 				&& insert->next->free_page_count < area->free_page_count)
1161 				insert = insert->next;
1162 
1163 			if (area->prev)
1164 				area->prev->next = area->next;
1165 			if (area->next)
1166 				area->next->prev = area->prev;
1167 			if (heap->areas == area)
1168 				heap->areas = area->next;
1169 
1170 			area->prev = insert;
1171 			area->next = insert->next;
1172 			if (area->next)
1173 				area->next->prev = area;
1174 			insert->next = area;
1175 		}
1176 	}
1177 
1178 	if (area->free_page_count == area->page_count && area->area >= 0)
1179 		heap->empty_areas++;
1180 }
1181 
1182 
1183 static inline void
1184 heap_free_pages_removed(heap_allocator *heap, heap_area *area, uint32 pageCount)
1185 {
1186 	if (area->free_page_count == area->page_count && area->area >= 0) {
1187 		// this area was completely empty
1188 		heap->empty_areas--;
1189 	}
1190 
1191 	area->free_page_count -= pageCount;
1192 	heap->total_free_pages -= pageCount;
1193 
1194 	if (area->free_page_count == 0) {
1195 		// the area is now full so we remove it from the area list
1196 		heap->areas = area->next;
1197 		if (area->next)
1198 			area->next->prev = NULL;
1199 		area->next = area->prev = NULL;
1200 	}
1201 }
1202 
1203 
1204 static inline void
1205 heap_link_page(heap_page *page, heap_page **list)
1206 {
1207 	page->prev = NULL;
1208 	page->next = *list;
1209 	if (page->next)
1210 		page->next->prev = page;
1211 	*list = page;
1212 }
1213 
1214 
1215 static inline void
1216 heap_unlink_page(heap_page *page, heap_page **list)
1217 {
1218 	if (page->prev)
1219 		page->prev->next = page->next;
1220 	if (page->next)
1221 		page->next->prev = page->prev;
1222 	if (list && *list == page) {
1223 		*list = page->next;
1224 		if (page->next)
1225 			page->next->prev = NULL;
1226 	}
1227 }
1228 
1229 
1230 static heap_page *
1231 heap_allocate_contiguous_pages(heap_allocator *heap, uint32 pageCount)
1232 {
1233 	MutexLocker pageLocker(heap->page_lock);
1234 	heap_area *area = heap->areas;
1235 	while (area) {
1236 		bool found = false;
1237 		int32 first = -1;
1238 		for (uint32 i = 0; i < area->page_count; i++) {
1239 			if (area->page_table[i].in_use) {
1240 				first = -1;
1241 				continue;
1242 			}
1243 
1244 			if (first > 0) {
1245 				if ((i + 1 - first) == pageCount) {
1246 					found = true;
1247 					break;
1248 				}
1249 			} else
1250 				first = i;
1251 		}
1252 
1253 		if (!found) {
1254 			area = area->next;
1255 			continue;
1256 		}
1257 
1258 		for (uint32 i = first; i < first + pageCount; i++) {
1259 			heap_page *page = &area->page_table[i];
1260 			page->in_use = 1;
1261 			page->bin_index = heap->bin_count;
1262 
1263 			heap_unlink_page(page, &area->free_pages);
1264 
1265 			page->next = page->prev = NULL;
1266 			page->free_list = NULL;
1267 			page->allocation_id = (uint16)first;
1268 		}
1269 
1270 		heap_free_pages_removed(heap, area, pageCount);
1271 		return &area->page_table[first];
1272 	}
1273 
1274 	return NULL;
1275 }
1276 
1277 
1278 static void *
1279 heap_raw_alloc(heap_allocator *heap, size_t size, uint32 binIndex)
1280 {
1281 	TRACE(("raw_alloc: heap %p; size %lu; bin %lu\n", heap, size, binIndex));
1282 	if (binIndex < heap->bin_count) {
1283 		heap_bin *bin = &heap->bins[binIndex];
1284 		MutexLocker binLocker(bin->lock);
1285 
1286 		heap_page *page = bin->page_list;
1287 		if (page == NULL) {
1288 			MutexLocker pageLocker(heap->page_lock);
1289 			heap_area *area = heap->areas;
1290 			if (area == NULL) {
1291 				TRACE(("heap %p: no free pages to allocate %lu bytes\n", heap,
1292 					size));
1293 				return NULL;
1294 			}
1295 
1296 			// by design there are only areas in the list that still have
1297 			// free pages available
1298 			page = area->free_pages;
1299 			area->free_pages = page->next;
1300 			if (page->next)
1301 				page->next->prev = NULL;
1302 
1303 			heap_free_pages_removed(heap, area, 1);
1304 
1305 			if (page->in_use)
1306 				panic("got an in use page from the free pages list\n");
1307 			page->in_use = 1;
1308 
1309 			pageLocker.Unlock();
1310 
1311 			page->bin_index = binIndex;
1312 			page->free_count = bin->max_free_count;
1313 			page->empty_index = 0;
1314 			page->free_list = NULL;
1315 			page->next = page->prev = NULL;
1316 			bin->page_list = page;
1317 		}
1318 
1319 		// we have a page where we have a free slot
1320 		void *address = NULL;
1321 		if (page->free_list) {
1322 			// there's a previously freed entry we can use
1323 			address = page->free_list;
1324 			page->free_list = (addr_t *)*page->free_list;
1325 		} else {
1326 			// the page hasn't been fully allocated so use the next empty_index
1327 			address = (void *)(page->area->base + page->index * heap->page_size
1328 				+ page->empty_index * bin->element_size);
1329 			page->empty_index++;
1330 		}
1331 
1332 		page->free_count--;
1333 		if (page->free_count == 0) {
1334 			// the page is now full so we remove it from the page_list
1335 			bin->page_list = page->next;
1336 			if (page->next)
1337 				page->next->prev = NULL;
1338 			page->next = page->prev = NULL;
1339 		}
1340 
1341 		binLocker.Unlock();
1342 
1343 #if KERNEL_HEAP_LEAK_CHECK
1344 		heap_leak_check_info *info = (heap_leak_check_info *)((addr_t)address
1345 			+ bin->element_size - sizeof(heap_leak_check_info));
1346 		info->size = size - sizeof(heap_leak_check_info);
1347 		info->thread = (gKernelStartup ? 0 : thread_get_current_thread_id());
1348 		info->team = (gKernelStartup ? 0 : team_get_current_team_id());
1349 		info->caller = heap->get_caller();
1350 #endif
1351 		return address;
1352 	}
1353 
1354 	uint32 pageCount = (size + heap->page_size - 1) / heap->page_size;
1355 	heap_page *firstPage = heap_allocate_contiguous_pages(heap, pageCount);
1356 	if (firstPage == NULL) {
1357 		TRACE(("heap %p: found no contiguous pages to allocate %ld bytes\n",
1358 			heap, size));
1359 		return NULL;
1360 	}
1361 
1362 #if KERNEL_HEAP_LEAK_CHECK
1363 	heap_leak_check_info *info = (heap_leak_check_info *)(firstPage->area->base
1364 		+ (firstPage->index + pageCount) * heap->page_size
1365 		- sizeof(heap_leak_check_info));
1366 	info->size = size - sizeof(heap_leak_check_info);
1367 	info->thread = (gKernelStartup ? 0 : thread_get_current_thread_id());
1368 	info->team = (gKernelStartup ? 0 : team_get_current_team_id());
1369 	info->caller = heap->get_caller();
1370 #endif
1371 	return (void *)(firstPage->area->base + firstPage->index * heap->page_size);
1372 }
1373 
1374 
1375 #if DEBUG
1376 static bool
1377 is_valid_alignment(size_t number)
1378 {
1379 	// this cryptic line accepts zero and all powers of two
1380 	return ((~number + 1) | ((number << 1) - 1)) == ~0UL;
1381 }
1382 #endif
1383 
1384 
1385 inline bool
1386 heap_should_grow(heap_allocator *heap)
1387 {
1388 	// suggest growing if there is less than 20% of a grow size available
1389 	return heap->total_free_pages * heap->page_size < HEAP_GROW_SIZE / 5;
1390 }
1391 
1392 
1393 void *
1394 heap_memalign(heap_allocator *heap, size_t alignment, size_t size)
1395 {
1396 	TRACE(("memalign(alignment = %lu, size = %lu)\n", alignment, size));
1397 
1398 #if DEBUG
1399 	if (!is_valid_alignment(alignment))
1400 		panic("memalign() with an alignment which is not a power of 2\n");
1401 #endif
1402 
1403 #if KERNEL_HEAP_LEAK_CHECK
1404 	size += sizeof(heap_leak_check_info);
1405 #endif
1406 
1407 	if (alignment != 0) {
1408 		// TODO: The alignment is done by ensuring that the element size
1409 		// of the target bin is aligned with the requested alignment. This
1410 		// has the problem that it wastes space because a better (smaller) bin
1411 		// could possibly be selected. We should pick the best bin and check
1412 		// if there is an aligned block in the free list or if a new (page
1413 		// aligned) page has to be allocated anyway.
1414 		size = ROUNDUP(size, alignment);
1415 	}
1416 
1417 	uint32 binIndex;
1418 	for (binIndex = 0; binIndex < heap->bin_count; binIndex++) {
1419 		if (size <= heap->bins[binIndex].element_size)
1420 			break;
1421 	}
1422 
1423 	void *address = heap_raw_alloc(heap, size, binIndex);
1424 
1425 #if KERNEL_HEAP_LEAK_CHECK
1426 	size -= sizeof(heap_leak_check_info);
1427 #endif
1428 
1429 	TRACE(("memalign(): asked to allocate %lu bytes, returning pointer %p\n",
1430 		size, address));
1431 
1432 	T(Allocate((addr_t)address, size));
1433 	if (address == NULL)
1434 		return address;
1435 
1436 #if PARANOID_KERNEL_MALLOC
1437 	memset(address, 0xcc, size);
1438 #endif
1439 
1440 #if PARANOID_KERNEL_FREE
1441 	// make sure 0xdeadbeef is cleared if we do not overwrite the memory
1442 	// and the user does not clear it
1443 	if (((uint32 *)address)[1] == 0xdeadbeef)
1444 		((uint32 *)address)[1] = 0xcccccccc;
1445 #endif
1446 
1447 	return address;
1448 }
1449 
1450 
1451 status_t
1452 heap_free(heap_allocator *heap, void *address)
1453 {
1454 	if (address == NULL)
1455 		return B_OK;
1456 
1457 	ReadLocker areaReadLocker(heap->area_lock);
1458 	heap_area *area = heap->all_areas;
1459 	while (area) {
1460 		// since the all_areas list is ordered by base with the biggest
1461 		// base at the top, we need only find the first area with a base
1462 		// smaller than our address to become our only candidate for freeing
1463 		if (area->base <= (addr_t)address) {
1464 			if ((addr_t)address >= area->base + area->size) {
1465 				// none of the other areas can contain the address as the list
1466 				// is ordered
1467 				return B_ENTRY_NOT_FOUND;
1468 			}
1469 
1470 			// this area contains the allocation, we're done searching
1471 			break;
1472 		}
1473 
1474 		area = area->all_next;
1475 	}
1476 
1477 	if (area == NULL) {
1478 		// this address does not belong to us
1479 		return B_ENTRY_NOT_FOUND;
1480 	}
1481 
1482 	TRACE(("free(): asked to free pointer %p\n", address));
1483 
1484 	heap_page *page = &area->page_table[((addr_t)address - area->base)
1485 		/ heap->page_size];
1486 
1487 	TRACE(("free(): page %p: bin_index %d, free_count %d\n", page,
1488 		page->bin_index, page->free_count));
1489 
1490 	if (page->bin_index > heap->bin_count) {
1491 		panic("free(): page %p: invalid bin_index %d\n", page, page->bin_index);
1492 		return B_ERROR;
1493 	}
1494 
1495 	if (page->bin_index < heap->bin_count) {
1496 		// small allocation
1497 		heap_bin *bin = &heap->bins[page->bin_index];
1498 
1499 #if PARANOID_KERNEL_FREE
1500 		if (((uint32 *)address)[1] == 0xdeadbeef) {
1501 			// This block looks like it was freed already, walk the free list
1502 			// on this page to make sure this address doesn't exist.
1503 			MutexLocker binLocker(bin->lock);
1504 			for (addr_t *temp = page->free_list; temp != NULL;
1505 					temp = (addr_t *)*temp) {
1506 				if (temp == address) {
1507 					panic("free(): address %p already exists in page free "
1508 						"list\n", address);
1509 					return B_ERROR;
1510 				}
1511 			}
1512 		}
1513 
1514 		// the first 4 bytes are overwritten with the next free list pointer
1515 		// later
1516 		uint32 *dead = (uint32 *)address;
1517 		for (uint32 i = 1; i < bin->element_size / sizeof(uint32); i++)
1518 			dead[i] = 0xdeadbeef;
1519 #endif
1520 
1521 		MutexLocker binLocker(bin->lock);
1522 		if (((addr_t)address - area->base - page->index
1523 			* heap->page_size) % bin->element_size != 0) {
1524 			panic("free(): passed invalid pointer %p supposed to be in bin for "
1525 				"element size %ld\n", address, bin->element_size);
1526 			return B_ERROR;
1527 		}
1528 
1529 		// add the address to the page free list
1530 		*(addr_t *)address = (addr_t)page->free_list;
1531 		page->free_list = (addr_t *)address;
1532 		page->free_count++;
1533 
1534 		if (page->free_count == bin->max_free_count) {
1535 			// we are now empty, remove the page from the bin list
1536 			MutexLocker pageLocker(heap->page_lock);
1537 			heap_unlink_page(page, &bin->page_list);
1538 			page->in_use = 0;
1539 			heap_link_page(page, &area->free_pages);
1540 			heap_free_pages_added(heap, area, 1);
1541 		} else if (page->free_count == 1) {
1542 			// we need to add ourselfs to the page list of the bin
1543 			heap_link_page(page, &bin->page_list);
1544 		} else {
1545 			// we might need to move back in the free pages list
1546 			if (page->next && page->next->free_count < page->free_count) {
1547 				// move ourselfs so the list stays ordered
1548 				heap_page *insert = page->next;
1549 				while (insert->next
1550 					&& insert->next->free_count < page->free_count)
1551 					insert = insert->next;
1552 
1553 				heap_unlink_page(page, &bin->page_list);
1554 
1555 				page->prev = insert;
1556 				page->next = insert->next;
1557 				if (page->next)
1558 					page->next->prev = page;
1559 				insert->next = page;
1560 			}
1561 		}
1562 	} else {
1563 		// large allocation, just return the pages to the page free list
1564 		uint32 allocationID = page->allocation_id;
1565 		uint32 maxPages = area->page_count - page->index;
1566 		uint32 pageCount = 0;
1567 
1568 		MutexLocker pageLocker(heap->page_lock);
1569 		for (uint32 i = 0; i < maxPages; i++) {
1570 			// loop until we find the end of this allocation
1571 			if (!page[i].in_use || page[i].bin_index != heap->bin_count
1572 				|| page[i].allocation_id != allocationID)
1573 				break;
1574 
1575 			// this page still belongs to the same allocation
1576 			page[i].in_use = 0;
1577 			page[i].allocation_id = 0;
1578 
1579 			// return it to the free list
1580 			heap_link_page(&page[i], &area->free_pages);
1581 			pageCount++;
1582 		}
1583 
1584 		heap_free_pages_added(heap, area, pageCount);
1585 	}
1586 
1587 	T(Free((addr_t)address));
1588 	areaReadLocker.Unlock();
1589 
1590 	if (heap->empty_areas > 1) {
1591 		WriteLocker areaWriteLocker(heap->area_lock);
1592 		MutexLocker pageLocker(heap->page_lock);
1593 
1594 		area = heap->areas;
1595 		while (area != NULL && heap->empty_areas > 1) {
1596 			if (area->area >= 0
1597 				&& area->free_page_count == area->page_count
1598 				&& heap_remove_area(heap, area) == B_OK) {
1599 				delete_area(area->area);
1600 				heap->empty_areas--;
1601 			}
1602 
1603 			area = area->next;
1604 		}
1605 	}
1606 
1607 	return B_OK;
1608 }
1609 
1610 
1611 #if KERNEL_HEAP_LEAK_CHECK
1612 extern "C" void
1613 heap_set_get_caller(heap_allocator* heap, addr_t (*getCaller)())
1614 {
1615 	heap->get_caller = getCaller;
1616 }
1617 #endif
1618 
1619 
1620 static status_t
1621 heap_realloc(heap_allocator *heap, void *address, void **newAddress,
1622 	size_t newSize)
1623 {
1624 	ReadLocker areaReadLocker(heap->area_lock);
1625 	heap_area *area = heap->all_areas;
1626 	while (area) {
1627 		// since the all_areas list is ordered by base with the biggest
1628 		// base at the top, we need only find the first area with a base
1629 		// smaller than our address to become our only candidate for
1630 		// reallocating
1631 		if (area->base <= (addr_t)address) {
1632 			if ((addr_t)address >= area->base + area->size) {
1633 				// none of the other areas can contain the address as the list
1634 				// is ordered
1635 				return B_ENTRY_NOT_FOUND;
1636 			}
1637 
1638 			// this area contains the allocation, we're done searching
1639 			break;
1640 		}
1641 
1642 		area = area->all_next;
1643 	}
1644 
1645 	if (area == NULL) {
1646 		// this address does not belong to us
1647 		return B_ENTRY_NOT_FOUND;
1648 	}
1649 
1650 	TRACE(("realloc(address = %p, newSize = %lu)\n", address, newSize));
1651 
1652 	heap_page *page = &area->page_table[((addr_t)address - area->base)
1653 		/ heap->page_size];
1654 	if (page->bin_index > heap->bin_count) {
1655 		panic("realloc(): page %p: invalid bin_index %d\n", page,
1656 			page->bin_index);
1657 		return B_ERROR;
1658 	}
1659 
1660 	// find out the size of the old allocation first
1661 	size_t minSize = 0;
1662 	size_t maxSize = 0;
1663 	if (page->bin_index < heap->bin_count) {
1664 		// this was a small allocation
1665 		heap_bin *bin = &heap->bins[page->bin_index];
1666 		maxSize = bin->element_size;
1667 		if (page->bin_index > 0)
1668 			minSize = heap->bins[page->bin_index - 1].element_size + 1;
1669 	} else {
1670 		// this was a large allocation
1671 		uint32 allocationID = page->allocation_id;
1672 		uint32 maxPages = area->page_count - page->index;
1673 		maxSize = heap->page_size;
1674 
1675 		MutexLocker pageLocker(heap->page_lock);
1676 		for (uint32 i = 1; i < maxPages; i++) {
1677 			if (!page[i].in_use || page[i].bin_index != heap->bin_count
1678 				|| page[i].allocation_id != allocationID)
1679 				break;
1680 
1681 			minSize += heap->page_size;
1682 			maxSize += heap->page_size;
1683 		}
1684 	}
1685 
1686 	areaReadLocker.Unlock();
1687 
1688 #if KERNEL_HEAP_LEAK_CHECK
1689 	newSize += sizeof(heap_leak_check_info);
1690 #endif
1691 
1692 	// does the new allocation simply fit in the old allocation?
1693 	if (newSize > minSize && newSize <= maxSize) {
1694 #if KERNEL_HEAP_LEAK_CHECK
1695 		// update the size info (the info is at the end so stays where it is)
1696 		heap_leak_check_info *info = (heap_leak_check_info *)((addr_t)address
1697 			+ maxSize - sizeof(heap_leak_check_info));
1698 		info->size = newSize - sizeof(heap_leak_check_info);
1699 		newSize -= sizeof(heap_leak_check_info);
1700 #endif
1701 
1702 		T(Reallocate((addr_t)address, (addr_t)address, newSize));
1703 		*newAddress = address;
1704 		return B_OK;
1705 	}
1706 
1707 #if KERNEL_HEAP_LEAK_CHECK
1708 	// new leak check info will be created with the malloc below
1709 	newSize -= sizeof(heap_leak_check_info);
1710 #endif
1711 
1712 	// if not, allocate a new chunk of memory
1713 	*newAddress = memalign(0, newSize);
1714 	T(Reallocate((addr_t)address, (addr_t)*newAddress, newSize));
1715 	if (*newAddress == NULL) {
1716 		// we tried but it didn't work out, but still the operation is done
1717 		return B_OK;
1718 	}
1719 
1720 	// copy the old data and free the old allocation
1721 	memcpy(*newAddress, address, min_c(maxSize, newSize));
1722 	heap_free(heap, address);
1723 	return B_OK;
1724 }
1725 
1726 
1727 inline uint32
1728 heap_class_for(size_t size)
1729 {
1730 #if KERNEL_HEAP_LEAK_CHECK
1731 	// take the extra info size into account
1732 	size += sizeof(heap_leak_check_info_s);
1733 #endif
1734 
1735 	uint32 index = 0;
1736 	for (; index < HEAP_CLASS_COUNT - 1; index++) {
1737 		if (size <= sHeapClasses[index].max_allocation_size)
1738 			return index;
1739 	}
1740 
1741 	return index;
1742 }
1743 
1744 
1745 static void
1746 deferred_deleter(void *arg, int iteration)
1747 {
1748 	// move entries and deletables to on-stack lists
1749 	InterruptsSpinLocker locker(sDeferredFreeListLock);
1750 	if (sDeferredFreeList.IsEmpty() && sDeferredDeletableList.IsEmpty())
1751 		return;
1752 
1753 	DeferredFreeList entries;
1754 	entries.MoveFrom(&sDeferredFreeList);
1755 
1756 	DeferredDeletableList deletables;
1757 	deletables.MoveFrom(&sDeferredDeletableList);
1758 
1759 	locker.Unlock();
1760 
1761 	// free the entries
1762 	while (DeferredFreeListEntry* entry = entries.RemoveHead())
1763 		free(entry);
1764 
1765 	// delete the deletables
1766 	while (DeferredDeletable* deletable = deletables.RemoveHead())
1767 		delete deletable;
1768 }
1769 
1770 
1771 //	#pragma mark -
1772 
1773 
1774 static status_t
1775 heap_create_new_heap_area(heap_allocator *heap, const char *name, size_t size)
1776 {
1777 	void *address = NULL;
1778 	area_id heapArea = create_area(name, &address,
1779 		B_ANY_KERNEL_BLOCK_ADDRESS, size, B_FULL_LOCK,
1780 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1781 	if (heapArea < B_OK) {
1782 		TRACE(("heap: couldn't allocate heap area \"%s\"\n", name));
1783 		return heapArea;
1784 	}
1785 
1786 	heap_add_area(heap, heapArea, (addr_t)address, size);
1787 #if PARANOID_HEAP_VALIDATION
1788 	heap_validate_heap(heap);
1789 #endif
1790 	return B_OK;
1791 }
1792 
1793 
1794 static int32
1795 heap_grow_thread(void *)
1796 {
1797 	while (true) {
1798 		// wait for a request to grow the heap list
1799 		if (acquire_sem(sHeapGrowSem) < B_OK)
1800 			continue;
1801 
1802 		if (sAddGrowHeap) {
1803 			// the grow heap is going to run full soon, try to allocate a new
1804 			// one to make some room.
1805 			TRACE(("heap_grower: grow heaps will run out of memory soon\n"));
1806 			if (heap_create_new_heap_area(sGrowHeap, "additional grow heap",
1807 					HEAP_DEDICATED_GROW_SIZE) != B_OK)
1808 				dprintf("heap_grower: failed to create new grow heap area\n");
1809 		}
1810 
1811 		for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
1812 			heap_allocator *heap = sHeaps[i];
1813 			if (sLastGrowRequest[i] > sLastHandledGrowRequest[i]
1814 				|| heap_should_grow(heap)) {
1815 				// grow this heap if it is nearly full or if a grow was
1816 				// explicitly requested for this heap (happens when a large
1817 				// allocation cannot be fulfilled due to lack of contiguous
1818 				// pages)
1819 				if (heap_create_new_heap_area(heap, "additional heap",
1820 						HEAP_GROW_SIZE) != B_OK)
1821 					dprintf("heap_grower: failed to create new heap area\n");
1822 				sLastHandledGrowRequest[i] = sLastGrowRequest[i];
1823 			}
1824 		}
1825 
1826 		// notify anyone waiting for this request
1827 		release_sem_etc(sHeapGrownNotify, -1, B_RELEASE_ALL);
1828 	}
1829 
1830 	return 0;
1831 }
1832 
1833 
1834 status_t
1835 heap_init(addr_t base, size_t size)
1836 {
1837 	for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
1838 		size_t partSize = size * sHeapClasses[i].initial_percentage / 100;
1839 		sHeaps[i] = heap_create_allocator(sHeapClasses[i].name, base, partSize,
1840 			&sHeapClasses[i]);
1841 		sLastGrowRequest[i] = sLastHandledGrowRequest[i] = 0;
1842 		base += partSize;
1843 	}
1844 
1845 	// set up some debug commands
1846 	add_debugger_command_etc("heap", &dump_heap_list,
1847 		"Dump infos about the kernel heap(s)",
1848 		"[(\"grow\" | \"stats\" | <heap>)]\n"
1849 		"Dump infos about the kernel heap(s). If \"grow\" is specified, only\n"
1850 		"infos about the dedicated grow heap are printed. If \"stats\" is\n"
1851 		"given as the argument, currently only the heap count is printed.\n"
1852 		"If <heap> is given, it is interpreted as the address of the heap to\n"
1853 		"print infos about.\n", 0);
1854 #if !KERNEL_HEAP_LEAK_CHECK
1855 	add_debugger_command_etc("allocations", &dump_allocations,
1856 		"Dump current heap allocations",
1857 		"[\"stats\"] [<heap>]\n"
1858 		"If no parameters are given, all current alloactions are dumped.\n"
1859 		"If the optional argument \"stats\" is specified, only the allocation\n"
1860 		"counts and no individual allocations are printed\n"
1861 		"If a specific heap address is given, only allocations of this\n"
1862 		"allocator are dumped\n", 0);
1863 #else // !KERNEL_HEAP_LEAK_CHECK
1864 	add_debugger_command_etc("allocations", &dump_allocations,
1865 		"Dump current heap allocations",
1866 		"[(\"team\" | \"thread\") <id>] [ \"caller\" <address> ] [\"stats\"]\n"
1867 		"If no parameters are given, all current alloactions are dumped.\n"
1868 		"If \"team\", \"thread\", and/or \"caller\" is specified as the first\n"
1869 		"argument, only allocations matching the team id, thread id, or \n"
1870 		"caller address given in the second argument are printed.\n"
1871 		"If the optional argument \"stats\" is specified, only the allocation\n"
1872 		"counts and no individual allocations are printed\n", 0);
1873 	add_debugger_command_etc("allocations_per_caller",
1874 		&dump_allocations_per_caller,
1875 		"Dump current heap allocations summed up per caller",
1876 		"[ \"-c\" ] [ -h <heap> ]\n"
1877 		"The current allocations will by summed up by caller (their count and\n"
1878 		"size) printed in decreasing order by size or, if \"-c\" is\n"
1879 		"specified, by allocation count. If given <heap> specifies the\n"
1880 		"address of the heap for which to print the allocations.\n", 0);
1881 #endif // KERNEL_HEAP_LEAK_CHECK
1882 	return B_OK;
1883 }
1884 
1885 
1886 status_t
1887 heap_init_post_sem()
1888 {
1889 	sHeapGrowSem = create_sem(0, "heap_grow_sem");
1890 	if (sHeapGrowSem < 0) {
1891 		panic("heap_init_post_sem(): failed to create heap grow sem\n");
1892 		return B_ERROR;
1893 	}
1894 
1895 	sHeapGrownNotify = create_sem(0, "heap_grown_notify");
1896 	if (sHeapGrownNotify < 0) {
1897 		panic("heap_init_post_sem(): failed to create heap grown notify sem\n");
1898 		return B_ERROR;
1899 	}
1900 
1901 	return B_OK;
1902 }
1903 
1904 
1905 status_t
1906 heap_init_post_thread()
1907 {
1908 	void *address = NULL;
1909 	area_id growHeapArea = create_area("dedicated grow heap", &address,
1910 		B_ANY_KERNEL_BLOCK_ADDRESS, HEAP_DEDICATED_GROW_SIZE, B_FULL_LOCK,
1911 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1912 	if (growHeapArea < B_OK) {
1913 		panic("heap_init_post_thread(): couldn't allocate dedicate grow heap "
1914 			"area");
1915 		return growHeapArea;
1916 	}
1917 
1918 	sGrowHeap = heap_create_allocator("grow", (addr_t)address,
1919 		HEAP_DEDICATED_GROW_SIZE, &sHeapClasses[0]);
1920 	if (sGrowHeap == NULL) {
1921 		panic("heap_init_post_thread(): failed to create dedicated grow heap\n");
1922 		return B_ERROR;
1923 	}
1924 
1925 	sHeapGrowThread = spawn_kernel_thread(heap_grow_thread, "heap grower",
1926 		B_URGENT_PRIORITY, NULL);
1927 	if (sHeapGrowThread < 0) {
1928 		panic("heap_init_post_thread(): cannot create heap grow thread\n");
1929 		return sHeapGrowThread;
1930 	}
1931 
1932 	// run the deferred deleter roughly once a second
1933 	if (register_kernel_daemon(deferred_deleter, NULL, 10) != B_OK)
1934 		panic("heap_init_post_thread(): failed to init deferred deleter");
1935 
1936 	send_signal_etc(sHeapGrowThread, SIGCONT, B_DO_NOT_RESCHEDULE);
1937 	return B_OK;
1938 }
1939 
1940 
1941 //	#pragma mark - Public API
1942 
1943 
1944 void *
1945 memalign(size_t alignment, size_t size)
1946 {
1947 	if (!gKernelStartup && !are_interrupts_enabled()) {
1948 		panic("memalign(): called with interrupts disabled\n");
1949 		return NULL;
1950 	}
1951 
1952 	if (!gKernelStartup && size > HEAP_AREA_USE_THRESHOLD) {
1953 		// don't even attempt such a huge allocation - use areas instead
1954 		size_t areaSize = size + sizeof(area_allocation_info);
1955 		if (alignment != 0)
1956 			areaSize = ROUNDUP(areaSize, alignment);
1957 
1958 		void *address = NULL;
1959 		areaSize = ROUNDUP(areaSize, B_PAGE_SIZE);
1960 		area_id allocationArea = create_area("memalign area", &address,
1961 			B_ANY_KERNEL_BLOCK_ADDRESS, areaSize, B_FULL_LOCK,
1962 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1963 		if (allocationArea < B_OK) {
1964 			dprintf("heap: failed to create area for huge allocation\n");
1965 			return NULL;
1966 		}
1967 
1968 		area_allocation_info *info = (area_allocation_info *)address;
1969 		info->magic = kAreaAllocationMagic;
1970 		info->area = allocationArea;
1971 		info->base = address;
1972 		info->size = areaSize;
1973 		info->allocation_size = size;
1974 		info->allocation_alignment = alignment;
1975 
1976 		address = (void *)((addr_t)address + sizeof(area_allocation_info));
1977 		if (alignment != 0)
1978 			address = (void *)ROUNDUP((addr_t)address, alignment);
1979 
1980 		TRACE(("heap: allocated area %ld for huge allocation of %lu bytes\n",
1981 			allocationArea, size));
1982 
1983 		info->allocation_base = address;
1984 
1985 #if PARANOID_KERNEL_MALLOC
1986 		memset(address, 0xcc, size);
1987 #endif
1988 		return address;
1989 	}
1990 
1991 	uint32 heapClass = heap_class_for(size);
1992 	heap_allocator *heap = sHeaps[heapClass];
1993 	void *result = heap_memalign(heap, alignment, size);
1994 	if (result == NULL) {
1995 		// request an urgent grow and wait - we don't do it ourselfs here to
1996 		// serialize growing through the grow thread, as otherwise multiple
1997 		// threads hitting this situation (likely when memory ran out) would
1998 		// all add areas
1999 		sLastGrowRequest[heapClass]++;
2000 		switch_sem(sHeapGrowSem, sHeapGrownNotify);
2001 
2002 		// and then try again
2003 		result = heap_memalign(heap, alignment, size);
2004 	} else if (heap_should_grow(heap)) {
2005 		// should grow sometime soon, notify the grower
2006 		release_sem_etc(sHeapGrowSem, 1, B_DO_NOT_RESCHEDULE);
2007 	}
2008 
2009 #if PARANOID_HEAP_VALIDATION
2010 	heap_validate_heap(heap);
2011 #endif
2012 
2013 	if (result == NULL)
2014 		panic("heap: kernel heap has run out of memory\n");
2015 	return result;
2016 }
2017 
2018 
2019 void *
2020 memalign_nogrow(size_t alignment, size_t size)
2021 {
2022 	// use dedicated memory in the grow thread by default
2023 	if (thread_get_current_thread_id() == sHeapGrowThread) {
2024 		void *result = heap_memalign(sGrowHeap, alignment, size);
2025 		if (!sAddGrowHeap && heap_should_grow(sGrowHeap)) {
2026 			// hopefully the heap grower will manage to create a new heap
2027 			// before running out of private memory...
2028 			dprintf("heap: requesting new grow heap\n");
2029 			sAddGrowHeap = true;
2030 			release_sem_etc(sHeapGrowSem, 1, B_DO_NOT_RESCHEDULE);
2031 		}
2032 
2033 		if (result != NULL)
2034 			return result;
2035 	}
2036 
2037 	// try public memory, there might be something available
2038 	heap_allocator *heap = sHeaps[heap_class_for(size)];
2039 	void *result = heap_memalign(heap, alignment, size);
2040 	if (result != NULL)
2041 		return result;
2042 
2043 	// no memory available
2044 	if (thread_get_current_thread_id() == sHeapGrowThread)
2045 		panic("heap: all heaps have run out of memory while growing\n");
2046 	else
2047 		dprintf("heap: all heaps have run out of memory\n");
2048 
2049 	return NULL;
2050 }
2051 
2052 
2053 void *
2054 malloc_nogrow(size_t size)
2055 {
2056 	return memalign_nogrow(0, size);
2057 }
2058 
2059 
2060 void *
2061 malloc(size_t size)
2062 {
2063 	return memalign(0, size);
2064 }
2065 
2066 
2067 void
2068 free(void *address)
2069 {
2070 	if (!gKernelStartup && !are_interrupts_enabled()) {
2071 		panic("free(): called with interrupts disabled\n");
2072 		return;
2073 	}
2074 
2075 	for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
2076 		heap_allocator *heap = sHeaps[i];
2077 		if (heap_free(heap, address) == B_OK) {
2078 #if PARANOID_HEAP_VALIDATION
2079 			heap_validate_heap(heap);
2080 #endif
2081 			return;
2082 		}
2083 	}
2084 
2085 	// maybe it was allocated from the dedicated grow heap
2086 	if (heap_free(sGrowHeap, address) == B_OK)
2087 		return;
2088 
2089 	// or maybe it was a huge allocation using an area
2090 	area_info areaInfo;
2091 	area_id area = area_for(address);
2092 	if (area >= B_OK && get_area_info(area, &areaInfo) == B_OK) {
2093 		area_allocation_info *info = (area_allocation_info *)areaInfo.address;
2094 
2095 		// just make extra sure it was allocated by us
2096 		if (info->magic == kAreaAllocationMagic && info->area == area
2097 			&& info->size == areaInfo.size && info->base == areaInfo.address
2098 			&& info->allocation_size < areaInfo.size) {
2099 			delete_area(area);
2100 			TRACE(("free(): freed huge allocation by deleting area %ld\n",
2101 				area));
2102 			return;
2103 		}
2104 	}
2105 
2106 	panic("free(): free failed for address %p\n", address);
2107 }
2108 
2109 
2110 void *
2111 realloc(void *address, size_t newSize)
2112 {
2113 	if (!gKernelStartup && !are_interrupts_enabled()) {
2114 		panic("realloc(): called with interrupts disabled\n");
2115 		return NULL;
2116 	}
2117 
2118 	if (address == NULL)
2119 		return memalign(0, newSize);
2120 
2121 	if (newSize == 0) {
2122 		free(address);
2123 		return NULL;
2124 	}
2125 
2126 	void *newAddress = NULL;
2127 	for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
2128 		heap_allocator *heap = sHeaps[i];
2129 		if (heap_realloc(heap, address, &newAddress, newSize) == B_OK) {
2130 #if PARANOID_HEAP_VALIDATION
2131 			heap_validate_heap(heap);
2132 #endif
2133 			return newAddress;
2134 		}
2135 	}
2136 
2137 	// maybe it was allocated from the dedicated grow heap
2138 	if (heap_realloc(sGrowHeap, address, &newAddress, newSize) == B_OK)
2139 		return newAddress;
2140 
2141 	// or maybe it was a huge allocation using an area
2142 	area_info areaInfo;
2143 	area_id area = area_for(address);
2144 	if (area >= B_OK && get_area_info(area, &areaInfo) == B_OK) {
2145 		area_allocation_info *info = (area_allocation_info *)areaInfo.address;
2146 
2147 		// just make extra sure it was allocated by us
2148 		if (info->magic == kAreaAllocationMagic && info->area == area
2149 			&& info->size == areaInfo.size && info->base == areaInfo.address
2150 			&& info->allocation_size < areaInfo.size) {
2151 			size_t available = info->size - ((addr_t)info->allocation_base
2152 				- (addr_t)info->base);
2153 
2154 			if (available >= newSize) {
2155 				// there is enough room available for the newSize
2156 				TRACE(("realloc(): new size %ld fits in old area %ld with %ld "
2157 					"available\n", newSize, area, available));
2158 				return address;
2159 			}
2160 
2161 			// have to allocate/copy/free - TODO maybe resize the area instead?
2162 			newAddress = memalign(0, newSize);
2163 			if (newAddress == NULL) {
2164 				dprintf("realloc(): failed to allocate new block of %ld bytes\n",
2165 					newSize);
2166 				return NULL;
2167 			}
2168 
2169 			memcpy(newAddress, address, min_c(newSize, info->allocation_size));
2170 			delete_area(area);
2171 			TRACE(("realloc(): allocated new block %p for size %ld and deleted "
2172 				"old area %ld\n", newAddress, newSize, area));
2173 			return newAddress;
2174 		}
2175 	}
2176 
2177 	panic("realloc(): failed to realloc address %p to size %lu\n", address,
2178 		newSize);
2179 	return NULL;
2180 }
2181 
2182 
2183 void *
2184 calloc(size_t numElements, size_t size)
2185 {
2186 	void *address = memalign(0, numElements * size);
2187 	if (address != NULL)
2188 		memset(address, 0, numElements * size);
2189 
2190 	return address;
2191 }
2192 
2193 
2194 void
2195 deferred_free(void *block)
2196 {
2197 	if (block == NULL)
2198 		return;
2199 
2200 	DeferredFreeListEntry *entry = new(block) DeferredFreeListEntry;
2201 
2202 	InterruptsSpinLocker _(sDeferredFreeListLock);
2203 	sDeferredFreeList.Add(entry);
2204 }
2205 
2206 
2207 void *
2208 malloc_referenced(size_t size)
2209 {
2210 	int32 *referencedData = (int32 *)malloc(size + 4);
2211 	if (referencedData == NULL)
2212 		return NULL;
2213 
2214 	*referencedData = 1;
2215 	return referencedData + 1;
2216 }
2217 
2218 
2219 void *
2220 malloc_referenced_acquire(void *data)
2221 {
2222 	if (data != NULL) {
2223 		int32 *referencedData = (int32 *)data - 1;
2224 		atomic_add(referencedData, 1);
2225 	}
2226 
2227 	return data;
2228 }
2229 
2230 
2231 void
2232 malloc_referenced_release(void *data)
2233 {
2234 	if (data == NULL)
2235 		return;
2236 
2237 	int32 *referencedData = (int32 *)data - 1;
2238 	if (atomic_add(referencedData, -1) < 1)
2239 		free(referencedData);
2240 }
2241 
2242 
2243 DeferredDeletable::~DeferredDeletable()
2244 {
2245 }
2246 
2247 
2248 void
2249 deferred_delete(DeferredDeletable *deletable)
2250 {
2251 	if (deletable == NULL)
2252 		return;
2253 
2254 	InterruptsSpinLocker _(sDeferredFreeListLock);
2255 	sDeferredDeletableList.Add(deletable);
2256 }
2257