xref: /haiku/src/system/kernel/heap.cpp (revision e221c09e508ffc3c62738140c9b6fc4fa211662a)
1 /*
2  * Copyright 2008, Michael Lotz, mmlr@mlotz.ch.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
6  * Distributed under the terms of the MIT License.
7  *
8  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
9  * Distributed under the terms of the NewOS License.
10  */
11 #include <arch/debug.h>
12 #include <debug.h>
13 #include <elf.h>
14 #include <heap.h>
15 #include <int.h>
16 #include <kernel.h>
17 #include <lock.h>
18 #include <malloc.h>
19 #include <signal.h>
20 #include <string.h>
21 #include <team.h>
22 #include <thread.h>
23 #include <tracing.h>
24 #include <util/AutoLock.h>
25 #include <util/DoublyLinkedList.h>
26 #include <vm.h>
27 
28 //#define TRACE_HEAP
29 #ifdef TRACE_HEAP
30 #	define TRACE(x) dprintf x
31 #else
32 #	define TRACE(x) ;
33 #endif
34 
35 // initialize newly allocated memory with something non-zero
36 #define PARANOID_KMALLOC 1
37 // check for double free, and fill freed memory with 0xdeadbeef
38 #define PARANOID_KFREE 1
39 // validate sanity of the heap after each operation (slow!)
40 #define PARANOID_VALIDATION 0
41 // store size, thread and team info at the end of each allocation block
42 #define KERNEL_HEAP_LEAK_CHECK 0
43 
44 #if KERNEL_HEAP_LEAK_CHECK
45 typedef struct heap_leak_check_info_s {
46 	addr_t		caller;
47 	size_t		size;
48 	thread_id	thread;
49 	team_id		team;
50 } heap_leak_check_info;
51 
52 struct caller_info {
53 	addr_t		caller;
54 	uint32		count;
55 	uint32		size;
56 };
57 
58 static const int32 kCallerInfoTableSize = 1024;
59 static caller_info sCallerInfoTable[kCallerInfoTableSize];
60 static int32 sCallerInfoCount = 0;
61 #endif
62 
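// Per-page bookkeeping entry, one for every page of a heap. "index" is the
// page's position in the page table, "bin_index" is the bin the page currently
// serves (== bin_count for pages belonging to a multi-page allocation),
// "free_count" counts the remaining free slots and "in_use" marks allocated
// pages. "empty_index" is the first never-used slot, "free_list" chains slots
// that have been freed again, and pages of one large allocation share an
// "allocation_id".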
63 typedef struct heap_page_s {
64 	uint16			index;
65 	uint16			bin_index : 5;
66 	uint16			free_count : 10;
67 	uint16			in_use : 1;
68 	heap_page_s *	next;
69 	heap_page_s *	prev;
70 	union {
71 		uint16			empty_index;
72 		uint16			allocation_id; // used for bin == bin_count allocations
73 	};
74 	addr_t *		free_list;
75 } heap_page;
76 
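// One bin per supported small allocation size. Pages that still have free
// slots of this element size are kept in "page_list", sorted by ascending
// free_count so that the fullest page is always used first.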
77 typedef struct heap_bin_s {
78 	uint32		element_size;
79 	uint16		max_free_count;
80 	heap_page *	page_list; // sorted so that the desired page is always first
81 } heap_bin;
82 
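// A single heap: its usable page range ("base"/"size"), the bins for small
// allocations, the page table with one heap_page per page and the list of
// completely free pages. Heaps are chained via "next" when the kernel heap
// is grown.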
83 typedef struct heap_allocator_s {
84 	addr_t		base;
85 	size_t		size;
86 	mutex		lock;
87 
88 	uint32		bin_count;
89 	uint32		page_count;
90 	heap_page *	free_pages;
91 
92 	heap_bin *	bins;
93 	heap_page *	page_table;
94 
95 	heap_allocator_s *	next;
96 } heap_allocator;
97 
98 struct DeferredFreeListEntry : DoublyLinkedListLinkImpl<DeferredFreeListEntry> {
99 };
100 typedef DoublyLinkedList<DeferredFreeListEntry> DeferredFreeList;
101 
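// sHeapList chains the general purpose kernel heaps, sGrowHeapList the
// dedicated heaps the grower thread allocates from. The grower waits on
// sHeapGrowSem for grow requests and signals sHeapGrownNotify when done;
// sLastGrowRequest remembers which heap triggered the last request and
// sAddGrowHeap asks for an additional dedicated grow heap.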
102 static heap_allocator *sHeapList = NULL;
103 static heap_allocator *sLastGrowRequest = NULL;
104 static heap_allocator *sGrowHeapList = NULL;
105 static thread_id sHeapGrowThread = -1;
106 static sem_id sHeapGrowSem = -1;
107 static sem_id sHeapGrownNotify = -1;
108 static bool sAddGrowHeap = false;
109 
110 static DeferredFreeList sDeferredFreeList;
111 static spinlock sDeferredFreeListLock;
112 
113 
114 
115 // #pragma mark - Tracing
116 
117 #if KERNEL_HEAP_TRACING
118 namespace KernelHeapTracing {
119 
120 class Allocate : public AbstractTraceEntry {
121 	public:
122 		Allocate(addr_t address, size_t size)
123 			:	fAddress(address),
124 				fSize(size)
125 		{
126 			Initialized();
127 		}
128 
129 		virtual void AddDump(TraceOutput &out)
130 		{
131 			out.Print("heap allocate: 0x%08lx (%lu bytes)", fAddress, fSize);
132 		}
133 
134 	private:
135 		addr_t	fAddress;
136 		size_t	fSize;
137 };
138 
139 
140 class Reallocate : public AbstractTraceEntry {
141 	public:
142 		Reallocate(addr_t oldAddress, addr_t newAddress, size_t newSize)
143 			:	fOldAddress(oldAddress),
144 				fNewAddress(newAddress),
145 				fNewSize(newSize)
146 		{
147 			Initialized();
148 		};
149 
150 		virtual void AddDump(TraceOutput &out)
151 		{
152 			out.Print("heap reallocate: 0x%08lx -> 0x%08lx (%lu bytes)",
153 				fOldAddress, fNewAddress, fNewSize);
154 		}
155 
156 	private:
157 		addr_t	fOldAddress;
158 		addr_t	fNewAddress;
159 		size_t	fNewSize;
160 };
161 
162 
163 class Free : public AbstractTraceEntry {
164 	public:
165 		Free(addr_t address)
166 			:	fAddress(address)
167 		{
168 			Initialized();
169 		};
170 
171 		virtual void AddDump(TraceOutput &out)
172 		{
173 			out.Print("heap free: 0x%08lx", fAddress);
174 		}
175 
176 	private:
177 		addr_t	fAddress;
178 };
179 
180 
181 } // namespace KernelHeapTracing
182 
183 #	define T(x)	if (!kernel_startup) new(std::nothrow) KernelHeapTracing::x;
184 #else
185 #	define T(x)	;
186 #endif
187 
188 
189 // #pragma mark - Debug functions
190 
191 
192 #if KERNEL_HEAP_LEAK_CHECK
193 static addr_t
194 get_caller()
195 {
196 	// Find the first return address outside of the allocator code. Note that
197 	// this makes certain assumptions about how the code for the functions
198 	// ends up in the kernel object.
199 	addr_t returnAddresses[5];
200 	int32 depth = arch_debug_get_stack_trace(returnAddresses, 5, 1, false);
201 	for (int32 i = 0; i < depth; i++) {
202 		if (returnAddresses[i] < (addr_t)&get_caller
203 			|| returnAddresses[i] > (addr_t)&malloc_referenced_release) {
204 			return returnAddresses[i];
205 		}
206 	}
207 
208 	return 0;
209 }
210 #endif
211 
212 
213 static void
214 dump_page(heap_page *page)
215 {
216 	uint32 count = 0;
217 	for (addr_t *temp = page->free_list; temp != NULL; temp = (addr_t *)*temp)
218 		count++;
219 
220 	dprintf("\t\tpage %p: bin_index: %u; free_count: %u; empty_index: %u; free_list %p (%lu entr%s)\n",
221 		page, page->bin_index, page->free_count, page->empty_index,
222 		page->free_list, count, count == 1 ? "y" : "ies");
223 }
224 
225 
226 static void
227 dump_bin(heap_bin *bin)
228 {
229 	dprintf("\telement_size: %lu; max_free_count: %u; page_list %p;\n",
230 		bin->element_size, bin->max_free_count, bin->page_list);
231 
232 	for (heap_page *temp = bin->page_list; temp != NULL; temp = temp->next)
233 		dump_page(temp);
234 }
235 
236 
237 static void
238 dump_allocator(heap_allocator *heap)
239 {
240 	uint32 count = 0;
241 	for (heap_page *page = heap->free_pages; page != NULL; page = page->next)
242 		count++;
243 
244 	dprintf("allocator %p: base: 0x%08lx; size: %lu; bin_count: %lu; free_pages: %p (%lu entr%s)\n", heap,
245 		heap->base, heap->size, heap->bin_count, heap->free_pages, count,
246 		count == 1 ? "y" : "ies");
247 
248 	for (uint32 i = 0; i < heap->bin_count; i++)
249 		dump_bin(&heap->bins[i]);
250 
251 	dprintf("\n");
252 }
253 
254 
255 static int
256 dump_heap_list(int argc, char **argv)
257 {
258 	if (argc == 2) {
259 		if (strcmp(argv[1], "grow") == 0) {
260 			// only dump dedicated grow heap info
261 			dprintf("dedicated grow heap(s):\n");
262 			heap_allocator *heap = sGrowHeapList;
263 			while (heap) {
264 				dump_allocator(heap);
265 				heap = heap->next;
266 			}
267 		} else if (strcmp(argv[1], "stats") == 0) {
268 			uint32 heapCount = 0;
269 			heap_allocator *heap = sHeapList;
270 			while (heap) {
271 				heapCount++;
272 				heap = heap->next;
273 			}
274 
275 			dprintf("current heap count: %lu\n", heapCount);
276 		} else
277 			print_debugger_command_usage(argv[0]);
278 
279 		return 0;
280 	}
281 
282 	heap_allocator *heap = sHeapList;
283 	while (heap) {
284 		dump_allocator(heap);
285 		heap = heap->next;
286 	}
287 
288 	return 0;
289 }
290 
291 
292 #if KERNEL_HEAP_LEAK_CHECK
293 
294 static int
295 dump_allocations(int argc, char **argv)
296 {
297 	team_id team = -1;
298 	thread_id thread = -1;
299 	addr_t caller = 0;
300 	bool statsOnly = false;
301 	for (int32 i = 1; i < argc; i++) {
302 		if (strcmp(argv[i], "team") == 0)
303 			team = strtoul(argv[++i], NULL, 0);
304 		else if (strcmp(argv[i], "thread") == 0)
305 			thread = strtoul(argv[++i], NULL, 0);
306 		else if (strcmp(argv[i], "caller") == 0)
307 			caller = strtoul(argv[++i], NULL, 0);
308 		else if (strcmp(argv[i], "stats") == 0)
309 			statsOnly = true;
310 		else {
311 			print_debugger_command_usage(argv[0]);
312 			return 0;
313 		}
314 	}
315 
316 	size_t totalSize = 0;
317 	uint32 totalCount = 0;
318 	heap_allocator *heap = sHeapList;
319 	while (heap) {
320 		// go through all the pages
321 		heap_leak_check_info *info = NULL;
322 		for (uint32 i = 0; i < heap->page_count; i++) {
323 			heap_page *page = &heap->page_table[i];
324 			if (!page->in_use)
325 				continue;
326 
327 			addr_t base = heap->base + i * B_PAGE_SIZE;
328 			if (page->bin_index < heap->bin_count) {
329 				// page is used by a small allocation bin
330 				uint32 elementCount = page->empty_index;
331 				size_t elementSize = heap->bins[page->bin_index].element_size;
332 				for (uint32 j = 0; j < elementCount; j++, base += elementSize) {
333 					// walk the free list to see if this element is in use
334 					bool elementInUse = true;
335 					for (addr_t *temp = page->free_list; temp != NULL; temp = (addr_t *)*temp) {
336 						if ((addr_t)temp == base) {
337 							elementInUse = false;
338 							break;
339 						}
340 					}
341 
342 					if (!elementInUse)
343 						continue;
344 
345 					info = (heap_leak_check_info *)(base + elementSize
346 						- sizeof(heap_leak_check_info));
347 
348 					if ((team == -1 || info->team == team)
349 						&& (thread == -1 || info->thread == thread)
350 						&& (caller == 0 || info->caller == caller)) {
351 						// interesting...
352 						if (!statsOnly) {
353 							dprintf("team: % 6ld; thread: % 6ld; "
354 								"address: 0x%08lx; size: %lu bytes, "
355 								"caller: %#lx\n", info->team, info->thread,
356 								base, info->size, info->caller);
357 						}
358 
359 						totalSize += info->size;
360 						totalCount++;
361 					}
362 				}
363 			} else {
364 				// page is used by a big allocation, find the page count
365 				uint32 pageCount = 1;
366 				while (i + pageCount < heap->page_count
367 					&& heap->page_table[i + pageCount].in_use
368 					&& heap->page_table[i + pageCount].bin_index == heap->bin_count
369 					&& heap->page_table[i + pageCount].allocation_id == page->allocation_id)
370 					pageCount++;
371 
372 				info = (heap_leak_check_info *)(base + pageCount * B_PAGE_SIZE
373 					- sizeof(heap_leak_check_info));
374 
375 				if ((team == -1 || info->team == team)
376 					&& (thread == -1 || info->thread == thread)
377 					&& (caller == 0 || info->caller == caller)) {
378 					// interesting...
379 					if (!statsOnly) {
380 						dprintf("team: % 6ld; thread: % 6ld; address: 0x%08lx;"
381 							" size: %lu bytes, caller: %#lx\n", info->team,
382 							info->thread, base, info->size, info->caller);
383 					}
384 
385 					totalSize += info->size;
386 					totalCount++;
387 				}
388 
389 				// skip the allocated pages
390 				i += pageCount - 1;
391 			}
392 		}
393 
394 		heap = heap->next;
395 	}
396 
397 	dprintf("total allocations: %lu; total bytes: %lu\n", totalCount, totalSize);
398 	return 0;
399 }
400 
401 
402 static caller_info*
403 get_caller_info(addr_t caller)
404 {
405 	// find the caller info
406 	for (int32 i = 0; i < sCallerInfoCount; i++) {
407 		if (caller == sCallerInfoTable[i].caller)
408 			return &sCallerInfoTable[i];
409 	}
410 
411 	// not found, add a new entry, if there are free slots
412 	if (sCallerInfoCount >= kCallerInfoTableSize)
413 		return NULL;
414 
415 	caller_info* info = &sCallerInfoTable[sCallerInfoCount++];
416 	info->caller = caller;
417 	info->count = 0;
418 	info->size = 0;
419 
420 	return info;
421 }
422 
423 
424 static int
425 caller_info_compare_size(const void* _a, const void* _b)
426 {
427 	const caller_info* a = (const caller_info*)_a;
428 	const caller_info* b = (const caller_info*)_b;
429 	return (int)(b->size - a->size);
430 }
431 
432 
433 static int
434 caller_info_compare_count(const void* _a, const void* _b)
435 {
436 	const caller_info* a = (const caller_info*)_a;
437 	const caller_info* b = (const caller_info*)_b;
438 	return (int)(b->count - a->count);
439 }
440 
441 
442 static int
443 dump_allocations_per_caller(int argc, char **argv)
444 {
445 	bool sortBySize = true;
446 
447 	for (int32 i = 1; i < argc; i++) {
448 		if (strcmp(argv[i], "-c") == 0) {
449 			sortBySize = false;
450 		} else {
451 			print_debugger_command_usage(argv[0]);
452 			return 0;
453 		}
454 	}
455 
456 	sCallerInfoCount = 0;
457 
458 	heap_allocator *heap = sHeapList;
459 	while (heap) {
460 		// go through all the pages
461 		heap_leak_check_info *info = NULL;
462 		for (uint32 i = 0; i < heap->page_count; i++) {
463 			heap_page *page = &heap->page_table[i];
464 			if (!page->in_use)
465 				continue;
466 
467 			addr_t base = heap->base + i * B_PAGE_SIZE;
468 			if (page->bin_index < heap->bin_count) {
469 				// page is used by a small allocation bin
470 				uint32 elementCount = page->empty_index;
471 				size_t elementSize = heap->bins[page->bin_index].element_size;
472 				for (uint32 j = 0; j < elementCount; j++, base += elementSize) {
473 					// walk the free list to see if this element is in use
474 					bool elementInUse = true;
475 					for (addr_t *temp = page->free_list; temp != NULL;
476 							temp = (addr_t *)*temp) {
477 						if ((addr_t)temp == base) {
478 							elementInUse = false;
479 							break;
480 						}
481 					}
482 
483 					if (!elementInUse)
484 						continue;
485 
486 					info = (heap_leak_check_info *)(base + elementSize
487 						- sizeof(heap_leak_check_info));
488 
489 					caller_info* callerInfo = get_caller_info(info->caller);
490 					if (callerInfo == NULL) {
491 						kprintf("out of space for caller infos\n");
492 						return 0;
493 					}
494 
495 					callerInfo->count++;
496 					callerInfo->size += info->size;
497 				}
498 			} else {
499 				// page is used by a big allocation, find the page count
500 				uint32 pageCount = 1;
501 				while (i + pageCount < heap->page_count
502 					&& heap->page_table[i + pageCount].in_use
503 					&& heap->page_table[i + pageCount].bin_index == heap->bin_count
504 					&& heap->page_table[i + pageCount].allocation_id == page->allocation_id)
505 					pageCount++;
506 
507 				info = (heap_leak_check_info *)(base + pageCount * B_PAGE_SIZE
508 					- sizeof(heap_leak_check_info));
509 
510 				caller_info* callerInfo = get_caller_info(info->caller);
511 				if (callerInfo == NULL) {
512 					kprintf("out of space for caller infos\n");
513 					return 0;
514 				}
515 
516 				callerInfo->count++;
517 				callerInfo->size += info->size;
518 
519 				// skip the allocated pages
520 				i += pageCount - 1;
521 			}
522 		}
523 
524 		heap = heap->next;
525 	}
526 
527 	// sort the array
528 	qsort(sCallerInfoTable, sCallerInfoCount, sizeof(caller_info),
529 		sortBySize ? &caller_info_compare_size : &caller_info_compare_count);
530 
531 	kprintf("%ld different callers, sorted by %s...\n\n", sCallerInfoCount,
532 		sortBySize ? "size" : "count");
533 
534 	kprintf("     count        size      caller\n");
535 	kprintf("----------------------------------\n");
536 	for (int32 i = 0; i < sCallerInfoCount; i++) {
537 		caller_info& info = sCallerInfoTable[i];
538 		kprintf("%10ld  %10ld  %#08lx", info.count, info.size, info.caller);
539 
540 		const char* symbol;
541 		const char* imageName;
542 		bool exactMatch;
543 		addr_t baseAddress;
544 
545 		if (elf_debug_lookup_symbol_address(info.caller, &baseAddress, &symbol,
546 				&imageName, &exactMatch) == B_OK) {
547 			kprintf("  %s + 0x%lx (%s)%s\n", symbol,
548 				info.caller - baseAddress, imageName,
549 				exactMatch ? "" : " (nearest)");
550 		} else
551 			kprintf("\n");
552 	}
553 
554 	return 0;
555 }
556 
557 #endif // KERNEL_HEAP_LEAK_CHECK
558 
559 
560 #if PARANOID_VALIDATION
561 static void
562 heap_validate_heap(heap_allocator *heap)
563 {
564 	mutex_lock(&heap->lock);
565 
566 	// validate the free pages list
567 	uint32 freePageCount = 0;
568 	heap_page *lastPage = NULL;
569 	heap_page *page = heap->free_pages;
570 	while (page) {
571 		if ((addr_t)page < (addr_t)&heap->page_table[0]
572 			|| (addr_t)page >= (addr_t)&heap->page_table[heap->page_count])
573 			panic("free page is not part of the page table\n");
574 
575 		if (page->index >= heap->page_count)
576 			panic("free page has invalid index\n");
577 
578 		if ((addr_t)&heap->page_table[page->index] != (addr_t)page)
579 			panic("free page index does not lead to target page\n");
580 
581 		if (page->prev != lastPage)
582 			panic("free page entry has invalid prev link\n");
583 
584 		if (page->in_use)
585 			panic("free page marked as in use\n");
586 
587 		lastPage = page;
588 		page = page->next;
589 		freePageCount++;
590 	}
591 
592 	// validate the page table
593 	uint32 usedPageCount = 0;
594 	for (uint32 i = 0; i < heap->page_count; i++) {
595 		if (heap->page_table[i].in_use)
596 			usedPageCount++;
597 	}
598 
599 	if (freePageCount + usedPageCount != heap->page_count) {
600 		panic("free pages and used pages do not add up (%lu + %lu != %lu)\n",
601 			freePageCount, usedPageCount, heap->page_count);
602 	}
603 
604 	// validate the bins
605 	for (uint32 i = 0; i < heap->bin_count; i++) {
606 		heap_bin *bin = &heap->bins[i];
607 
608 		lastPage = NULL;
609 		page = bin->page_list;
610 		int32 lastFreeCount = 0;
611 		while (page) {
612 			if ((addr_t)page < (addr_t)&heap->page_table[0]
613 				|| (addr_t)page >= (addr_t)&heap->page_table[heap->page_count])
614 				panic("used page is not part of the page table\n");
615 
616 			if (page->index >= heap->page_count)
617 				panic("used page has invalid index\n");
618 
619 			if ((addr_t)&heap->page_table[page->index] != (addr_t)page)
620 				panic("used page index does not lead to target page\n");
621 
622 			if (page->prev != lastPage)
623 				panic("used page entry has invalid prev link (%p vs %p bin %lu)\n",
624 					page->prev, lastPage, i);
625 
626 			if (!page->in_use)
627 				panic("used page marked as not in use\n");
628 
629 			if (page->bin_index != i)
630 				panic("used page with bin index %u in page list of bin %lu\n",
631 					page->bin_index, i);
632 
633 			if (page->free_count < lastFreeCount)
634 				panic("ordering of bin page list broken\n");
635 
636 			// validate the free list
637 			uint32 freeSlotsCount = 0;
638 			addr_t *element = page->free_list;
639 			addr_t pageBase = heap->base + page->index * B_PAGE_SIZE;
640 			while (element) {
641 				if ((addr_t)element < pageBase
642 					|| (addr_t)element >= pageBase + B_PAGE_SIZE)
643 					panic("free list entry out of page range\n");
644 
645 				if (((addr_t)element - pageBase) % bin->element_size != 0)
646 					panic("free list entry not on an element boundary\n");
647 
648 				element = (addr_t *)*element;
649 				freeSlotsCount++;
650 			}
651 
652 			uint32 slotCount = bin->max_free_count;
653 			if (page->empty_index > slotCount)
654 				panic("empty index beyond slot count (%u with %lu slots)\n",
655 					page->empty_index, slotCount);
656 
657 			freeSlotsCount += (slotCount - page->empty_index);
658 			if (freeSlotsCount > slotCount)
659 				panic("more free slots than fit into the page\n");
660 
661 			lastPage = page;
662 			lastFreeCount = page->free_count;
663 			page = page->next;
664 		}
665 	}
666 
667 	mutex_unlock(&heap->lock);
668 }
669 #endif // PARANOID_VALIDATION
670 
671 
672 // #pragma mark - Heap functions
673 
674 
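// Sets up a heap in the given memory block: the heap_allocator itself, the bin
// array and the page table are carved out of the start of the block, the
// remainder (rounded to whole pages) becomes the usable page range and all of
// its pages are linked into the free pages list.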
675 static heap_allocator *
676 heap_attach(addr_t base, size_t size)
677 {
678 	heap_allocator *heap = (heap_allocator *)base;
679 	base += sizeof(heap_allocator);
680 	size -= sizeof(heap_allocator);
681 
682 	size_t binSizes[] = { 8, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 1024, 2048, B_PAGE_SIZE };
683 	uint32 binCount = sizeof(binSizes) / sizeof(binSizes[0]);
684 	heap->bin_count = binCount;
685 	heap->bins = (heap_bin *)base;
686 	base += binCount * sizeof(heap_bin);
687 	size -= binCount * sizeof(heap_bin);
688 
689 	for (uint32 i = 0; i < binCount; i++) {
690 		heap_bin *bin = &heap->bins[i];
691 		bin->element_size = binSizes[i];
692 		bin->max_free_count = B_PAGE_SIZE / binSizes[i];
693 		bin->page_list = NULL;
694 	}
695 
696 	uint32 pageCount = size / B_PAGE_SIZE;
697 	size_t pageTableSize = pageCount * sizeof(heap_page);
698 	heap->page_table = (heap_page *)base;
699 	base += pageTableSize;
700 	size -= pageTableSize;
701 
702 	// the rest is now actually usable memory (rounded to the next page)
703 	heap->base = (addr_t)(base + B_PAGE_SIZE - 1) / B_PAGE_SIZE * B_PAGE_SIZE;
704 	heap->size = (size_t)(size / B_PAGE_SIZE) * B_PAGE_SIZE;
705 
706 	// now we know the real page count
707 	pageCount = heap->size / B_PAGE_SIZE;
708 	heap->page_count = pageCount;
709 
710 	// zero out the heap alloc table at the base of the heap
711 	memset((void *)heap->page_table, 0, pageTableSize);
712 	for (uint32 i = 0; i < pageCount; i++)
713 		heap->page_table[i].index = i;
714 
715 	// link all pages into the free pages list
716 	for (uint32 i = 1; i < pageCount; i++) {
717 		heap->page_table[i - 1].next = &heap->page_table[i];
718 		heap->page_table[i].prev = &heap->page_table[i - 1];
719 	}
720 	heap->free_pages = &heap->page_table[0];
721 	heap->page_table[0].prev = NULL;
722 
723 	mutex_init(&heap->lock, "heap_mutex");
724 
725 	heap->next = NULL;
726 	dprintf("heap_attach: attached to %p - usable range 0x%08lx - 0x%08lx\n",
727 		heap, heap->base, heap->base + heap->size);
728 	return heap;
729 }
730 
731 
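// The following two helpers add a page to the head of a doubly linked page
// list and remove it from one again (used for both a bin's page_list and the
// heap's free_pages list).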
732 static inline void
733 heap_link_page(heap_page *page, heap_page **list)
734 {
735 	page->prev = NULL;
736 	page->next = *list;
737 	if (page->next)
738 		page->next->prev = page;
739 	*list = page;
740 }
741 
742 
743 static inline void
744 heap_unlink_page(heap_page *page, heap_page **list)
745 {
746 	if (page->prev)
747 		page->prev->next = page->next;
748 	if (page->next)
749 		page->next->prev = page->prev;
750 	if (list && *list == page) {
751 		*list = page->next;
752 		if (page->next)
753 			page->next->prev = NULL;
754 	}
755 }
756 
757 
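// Backend of the allocation path: small requests (binIndex < bin_count) are
// served from a slot of a page belonging to that bin, taking a fresh page from
// the free list if necessary; larger requests claim a contiguous run of free
// pages. Returns NULL if this heap cannot satisfy the request. The heap lock
// must be held by the caller.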
758 static void *
759 heap_raw_alloc(heap_allocator *heap, size_t size, uint32 binIndex)
760 {
761 	heap_bin *bin = NULL;
762 	if (binIndex < heap->bin_count)
763 		bin = &heap->bins[binIndex];
764 
765 	if (bin && bin->page_list != NULL) {
766 		// we have a page where we have a free slot
767 		void *address = NULL;
768 		heap_page *page = bin->page_list;
769 		if (page->free_list) {
770 			// there's a previously freed entry we can use
771 			address = page->free_list;
772 			page->free_list = (addr_t *)*page->free_list;
773 		} else {
774 			// the page hasn't been fully allocated so use the next empty_index
775 			address = (void *)(heap->base + page->index * B_PAGE_SIZE
776 				+ page->empty_index * bin->element_size);
777 			page->empty_index++;
778 		}
779 
780 		page->free_count--;
781 		if (page->free_count == 0) {
782 			// the page is now full so we remove it from the page_list
783 			bin->page_list = page->next;
784 			if (page->next)
785 				page->next->prev = NULL;
786 			page->next = page->prev = NULL;
787 		}
788 
789 #if KERNEL_HEAP_LEAK_CHECK
790 		heap_leak_check_info *info = (heap_leak_check_info *)((addr_t)address
791 			+ bin->element_size - sizeof(heap_leak_check_info));
792 		info->size = size - sizeof(heap_leak_check_info);
793 		info->thread = (kernel_startup ? 0 : thread_get_current_thread_id());
794 		info->team = (kernel_startup ? 0 : team_get_current_team_id());
795 		info->caller = get_caller();
796 #endif
797 		return address;
798 	}
799 
800 	// we don't have anything free right away, we must allocate a new page
801 	if (heap->free_pages == NULL) {
802 		// there are no free pages anymore, we ran out of memory
803 		TRACE(("heap %p: no free pages to allocate %lu bytes\n", heap, size));
804 		return NULL;
805 	}
806 
807 	if (bin) {
808 		// small allocation, just grab the next free page
809 		heap_page *page = heap->free_pages;
810 		heap->free_pages = page->next;
811 		if (page->next)
812 			page->next->prev = NULL;
813 
814 		page->in_use = 1;
815 		page->bin_index = binIndex;
816 		page->free_count = bin->max_free_count - 1;
817 		page->empty_index = 1;
818 		page->free_list = NULL;
819 		page->next = page->prev = NULL;
820 
821 		if (page->free_count > 0) {
822 			// by design there are no other pages in the bin's page list
823 			bin->page_list = page;
824 		}
825 
826 #if KERNEL_HEAP_LEAK_CHECK
827 		heap_leak_check_info *info = (heap_leak_check_info *)(heap->base
828 			+ page->index * B_PAGE_SIZE + bin->element_size
829 			- sizeof(heap_leak_check_info));
830 		info->size = size - sizeof(heap_leak_check_info);
831 		info->thread = (kernel_startup ? 0 : thread_get_current_thread_id());
832 		info->team = (kernel_startup ? 0 : team_get_current_team_id());
833 		info->caller = get_caller();
834 #endif
835 
836 		// we return the first slot in this page
837 		return (void *)(heap->base + page->index * B_PAGE_SIZE);
838 	}
839 
840 	// large allocation, we must search for contiguous slots
841 	bool found = false;
842 	int32 first = -1;
843 	for (uint32 i = 0; i < heap->page_count; i++) {
844 		if (heap->page_table[i].in_use) {
845 			first = -1;
846 			continue;
847 		}
848 
849 		if (first >= 0) {
850 			if ((1 + i - first) * B_PAGE_SIZE >= size) {
851 				found = true;
852 				break;
853 			}
854 		} else
855 			first = i;
856 	}
857 
858 	if (!found) {
859 		TRACE(("heap %p: found no contiguous pages to allocate %ld bytes\n", heap, size));
860 		return NULL;
861 	}
862 
863 	uint32 pageCount = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
864 	for (uint32 i = first; i < first + pageCount; i++) {
865 		heap_page *page = &heap->page_table[i];
866 		page->in_use = 1;
867 		page->bin_index = binIndex;
868 
869 		heap_unlink_page(page, &heap->free_pages);
870 
871 		page->next = page->prev = NULL;
872 		page->free_list = NULL;
873 		page->allocation_id = (uint16)first;
874 	}
875 
876 #if KERNEL_HEAP_LEAK_CHECK
877 	heap_leak_check_info *info = (heap_leak_check_info *)(heap->base
878 		+ (first + pageCount) * B_PAGE_SIZE - sizeof(heap_leak_check_info));
879 	info->size = size - sizeof(heap_leak_check_info);
880 	info->thread = (kernel_startup ? 0 : thread_get_current_thread_id());
881 	info->team = (kernel_startup ? 0 : team_get_current_team_id());
882 	info->caller = get_caller();
883 #endif
884 	return (void *)(heap->base + first * B_PAGE_SIZE);
885 }
886 
887 
888 #if DEBUG
889 static bool
890 is_valid_alignment(size_t number)
891 {
892 	// this cryptic line accepts zero and all powers of two
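	// (on 32 bit, e.g. number = 8: ~8 + 1 = 0xfffffff8, (8 << 1) - 1 = 0x0f,
	//  ORed together = 0xffffffff == ~0; for number = 6: ~6 + 1 = 0xfffffffa,
	//  (6 << 1) - 1 = 0x0b, ORed together leaves bit 2 clear, so it fails)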
893 	return ((~number + 1) | ((number << 1) - 1)) == ~0UL;
894 }
895 #endif
896 
897 
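// Allocates "size" bytes with the requested alignment from this heap. The
// alignment is only achieved indirectly by rounding the size up to the
// alignment, relying on the bins being aligned to their element size. If this
// is the last heap, *shouldGrow is set when fewer than three free pages
// remain.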
898 static void *
899 heap_memalign(heap_allocator *heap, size_t alignment, size_t size,
900 	bool *shouldGrow)
901 {
902 	TRACE(("memalign(alignment = %lu, size = %lu)\n", alignment, size));
903 
904 #if DEBUG
905 	if (!is_valid_alignment(alignment))
906 		panic("memalign() with an alignment which is not a power of 2\n");
907 #endif
908 
909 	mutex_lock(&heap->lock);
910 
911 #if KERNEL_HEAP_LEAK_CHECK
912 	size += sizeof(heap_leak_check_info);
913 #endif
914 
915 	// TODO: this code "aligns" the buffer only because the bins are always
916 	//	aligned on their bin size
917 	if (size < alignment)
918 		size = alignment;
919 
920 	uint32 binIndex;
921 	for (binIndex = 0; binIndex < heap->bin_count; binIndex++) {
922 		if (size <= heap->bins[binIndex].element_size)
923 			break;
924 	}
925 
926 	void *address = heap_raw_alloc(heap, size, binIndex);
927 
928 	TRACE(("memalign(): asked to allocate %lu bytes, returning pointer %p\n", size, address));
929 
930 	if (heap->next == NULL && shouldGrow) {
931 		// suggest growing if we are the last heap and we have
932 		// less than three free pages left
933 		*shouldGrow = (heap->free_pages == NULL
934 			|| heap->free_pages->next == NULL
935 			|| heap->free_pages->next->next == NULL);
936 	}
937 
938 #if KERNEL_HEAP_LEAK_CHECK
939 	size -= sizeof(heap_leak_check_info);
940 #endif
941 
942 	T(Allocate((addr_t)address, size));
943 	mutex_unlock(&heap->lock);
944 	if (address == NULL)
945 		return address;
946 
947 #if PARANOID_KFREE
948 	// make sure 0xdeadbeef is cleared if we do not overwrite the memory
949 	// and the user does not clear it
950 	if (((uint32 *)address)[1] == 0xdeadbeef)
951 		((uint32 *)address)[1] = 0xcccccccc;
952 #endif
953 
954 #if PARANOID_KMALLOC
955 	memset(address, 0xcc, size);
956 #endif
957 
958 	return address;
959 }
960 
961 
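// Returns an allocation to this heap. If the address does not fall into the
// heap's range, B_ENTRY_NOT_FOUND is returned so the caller can try the next
// heap in the list. Small allocations go back to their page's free list, large
// allocations return all of their pages to the free pages list.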
962 static status_t
963 heap_free(heap_allocator *heap, void *address)
964 {
965 	if (address == NULL)
966 		return B_OK;
967 
968 	if ((addr_t)address < heap->base
969 		|| (addr_t)address >= heap->base + heap->size) {
970 		// this address does not belong to us
971 		return B_ENTRY_NOT_FOUND;
972 	}
973 
974 	mutex_lock(&heap->lock);
975 
976 	TRACE(("free(): asked to free at ptr = %p\n", address));
977 
978 	heap_page *page = &heap->page_table[((addr_t)address - heap->base) / B_PAGE_SIZE];
979 
980 	TRACE(("free(): page %p: bin_index %d, free_count %d\n", page, page->bin_index, page->free_count));
981 
982 	if (page->bin_index > heap->bin_count) {
983 		panic("free(): page %p: invalid bin_index %d\n", page, page->bin_index);
984 		mutex_unlock(&heap->lock);
985 		return B_ERROR;
986 	}
987 
988 	if (page->bin_index < heap->bin_count) {
989 		// small allocation
990 		heap_bin *bin = &heap->bins[page->bin_index];
991 		if (((addr_t)address - heap->base - page->index * B_PAGE_SIZE) % bin->element_size != 0) {
992 			panic("free(): passed invalid pointer %p supposed to be in bin for element size %ld\n", address, bin->element_size);
993 			mutex_unlock(&heap->lock);
994 			return B_ERROR;
995 		}
996 
997 #if PARANOID_KFREE
998 		if (((uint32 *)address)[1] == 0xdeadbeef) {
999 			// This block looks like it was freed already, walk the free list
1000 			// on this page to make sure this address doesn't exist.
1001 			for (addr_t *temp = page->free_list; temp != NULL; temp = (addr_t *)*temp) {
1002 				if (temp == address) {
1003 					panic("free(): address %p already exists in page free list\n", address);
1004 					mutex_unlock(&heap->lock);
1005 					return B_ERROR;
1006 				}
1007 			}
1008 		}
1009 
1010 		uint32 *dead = (uint32 *)address;
1011 		if (bin->element_size % 4 != 0) {
1012 			panic("free(): didn't expect a bin element size that is not a multiple of 4\n");
1013 			mutex_unlock(&heap->lock);
1014 			return B_ERROR;
1015 		}
1016 
1017 		// the first 4 bytes are overwritten with the next free list pointer later
1018 		for (uint32 i = 1; i < bin->element_size / sizeof(uint32); i++)
1019 			dead[i] = 0xdeadbeef;
1020 #endif
1021 
1022 		// add the address to the page free list
1023 		*(addr_t *)address = (addr_t)page->free_list;
1024 		page->free_list = (addr_t *)address;
1025 		page->free_count++;
1026 
1027 		if (page->free_count == bin->max_free_count) {
1028 			// we are now empty, remove the page from the bin list
1029 			heap_unlink_page(page, &bin->page_list);
1030 			page->in_use = 0;
1031 			heap_link_page(page, &heap->free_pages);
1032 		} else if (page->free_count == 1) {
1033 			// we need to add ourselves to the page list of the bin
1034 			heap_link_page(page, &bin->page_list);
1035 		} else {
1036 			// we might need to move back in the bin's page list
1037 			if (page->next && page->next->free_count < page->free_count) {
1038 				// move ourselves so the list stays ordered
1039 				heap_page *insert = page->next;
1040 				while (insert->next
1041 					&& insert->next->free_count < page->free_count)
1042 					insert = insert->next;
1043 
1044 				heap_unlink_page(page, &bin->page_list);
1045 
1046 				page->prev = insert;
1047 				page->next = insert->next;
1048 				if (page->next)
1049 					page->next->prev = page;
1050 				insert->next = page;
1051 			}
1052 		}
1053 	} else {
1054 		// large allocation, just return the pages to the page free list
1055 		uint32 allocationID = page->allocation_id;
1056 		uint32 maxPages = heap->page_count - page->index;
1057 		for (uint32 i = 0; i < maxPages; i++) {
1058 			// loop until we find the end of this allocation
1059 			if (!page[i].in_use || page[i].bin_index != heap->bin_count
1060 				|| page[i].allocation_id != allocationID)
1061 				break;
1062 
1063 			// this page still belongs to the same allocation
1064 			page[i].in_use = 0;
1065 			page[i].allocation_id = 0;
1066 
1067 			// return it to the free list
1068 			heap_link_page(&page[i], &heap->free_pages);
1069 		}
1070 	}
1071 
1072 	T(Free((addr_t)address));
1073 	mutex_unlock(&heap->lock);
1074 	return B_OK;
1075 }
1076 
1077 
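// Resizes an allocation in place if the new size still fits the old slot (or
// page run); otherwise a new block is allocated via malloc(), the data copied
// over and the old block freed. Returns B_ENTRY_NOT_FOUND if the address does
// not belong to this heap.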
1078 static status_t
1079 heap_realloc(heap_allocator *heap, void *address, void **newAddress,
1080 	size_t newSize)
1081 {
1082 	if ((addr_t)address < heap->base
1083 		|| (addr_t)address >= heap->base + heap->size) {
1084 		// this address does not belong to us
1085 		return B_ENTRY_NOT_FOUND;
1086 	}
1087 
1088 	mutex_lock(&heap->lock);
1089 	TRACE(("realloc(address = %p, newSize = %lu)\n", address, newSize));
1090 
1091 	heap_page *page = &heap->page_table[((addr_t)address - heap->base) / B_PAGE_SIZE];
1092 	if (page->bin_index > heap->bin_count) {
1093 		panic("realloc(): page %p: invalid bin_index %d\n", page, page->bin_index);
1094 		mutex_unlock(&heap->lock);
1095 		return B_ERROR;
1096 	}
1097 
1098 	// find out the size of the old allocation first
1099 	size_t minSize = 0;
1100 	size_t maxSize = 0;
1101 	if (page->bin_index < heap->bin_count) {
1102 		// this was a small allocation
1103 		heap_bin *bin = &heap->bins[page->bin_index];
1104 		maxSize = bin->element_size;
1105 		if (page->bin_index > 0)
1106 			minSize = heap->bins[page->bin_index - 1].element_size + 1;
1107 	} else {
1108 		// this was a large allocation
1109 		uint32 allocationID = page->allocation_id;
1110 		uint32 maxPages = heap->page_count - page->index;
1111 		maxSize = B_PAGE_SIZE;
1112 		for (uint32 i = 1; i < maxPages; i++) {
1113 			if (!page[i].in_use || page[i].bin_index != heap->bin_count
1114 				|| page[i].allocation_id != allocationID)
1115 				break;
1116 
1117 			minSize += B_PAGE_SIZE;
1118 			maxSize += B_PAGE_SIZE;
1119 		}
1120 	}
1121 
1122 	mutex_unlock(&heap->lock);
1123 
1124 #if KERNEL_HEAP_LEAK_CHECK
1125 	newSize += sizeof(heap_leak_check_info);
1126 #endif
1127 
1128 	// does the new allocation simply fit in the old allocation?
1129 	if (newSize > minSize && newSize <= maxSize) {
1130 #if KERNEL_HEAP_LEAK_CHECK
1131 		// update the size info (the info is at the end so stays where it is)
1132 		heap_leak_check_info *info = (heap_leak_check_info *)((addr_t)address
1133 			+ maxSize - sizeof(heap_leak_check_info));
1134 		info->size = newSize - sizeof(heap_leak_check_info);
1135 		newSize -= sizeof(heap_leak_check_info);
1136 #endif
1137 
1138 		T(Reallocate((addr_t)address, (addr_t)address, newSize));
1139 		*newAddress = address;
1140 		return B_OK;
1141 	}
1142 
1143 #if KERNEL_HEAP_LEAK_CHECK
1144 	// new leak check info will be created with the malloc below
1145 	newSize -= sizeof(heap_leak_check_info);
1146 #endif
1147 
1148 	// if not, allocate a new chunk of memory
1149 	*newAddress = malloc(newSize);
1150 	T(Reallocate((addr_t)address, (addr_t)*newAddress, newSize));
1151 	if (*newAddress == NULL) {
1152 		// allocation failed; the old block stays valid and *newAddress is NULL
1153 		return B_OK;
1154 	}
1155 
1156 	// copy the old data and free the old allocation
1157 	memcpy(*newAddress, address, min_c(maxSize, newSize));
1158 	free(address);
1159 	return B_OK;
1160 }
1161 
1162 
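// Kernel daemon hook that drains sDeferredFreeList and actually free()s the
// blocks that were queued via deferred_free().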
1163 static void
1164 deferred_deleter(void *arg, int iteration)
1165 {
1166 	// move entries to on-stack list
1167 	InterruptsSpinLocker locker(sDeferredFreeListLock);
1168 	if (sDeferredFreeList.IsEmpty())
1169 		return;
1170 
1171 	DeferredFreeList entries;
1172 	entries.MoveFrom(&sDeferredFreeList);
1173 
1174 	locker.Unlock();
1175 
1176 	// free the entries
1177 	while (DeferredFreeListEntry* entry = entries.RemoveHead())
1178 		free(entry);
1179 }
1180 
1181 
1182 //	#pragma mark -
1183 
1184 
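// Creates a new kernel area of the given size and attaches a heap to it.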
1185 static heap_allocator *
1186 heap_create_new_heap(const char *name, size_t size)
1187 {
1188 	void *heapAddress = NULL;
1189 	area_id heapArea = create_area(name, &heapAddress,
1190 		B_ANY_KERNEL_BLOCK_ADDRESS, size, B_FULL_LOCK,
1191 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1192 	if (heapArea < B_OK) {
1193 		TRACE(("heap: couldn't allocate heap \"%s\"\n", name));
1194 		return NULL;
1195 	}
1196 
1197 	heap_allocator *newHeap = heap_attach((addr_t)heapAddress, size);
1198 	if (newHeap == NULL) {
1199 		panic("heap: could not attach heap to area!\n");
1200 		delete_area(heapArea);
1201 		return NULL;
1202 	}
1203 
1204 #if PARANOID_VALIDATION
1205 	heap_validate_heap(newHeap);
1206 #endif
1207 	return newHeap;
1208 }
1209 
1210 
1211 static int32
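// The grower thread: it waits on sHeapGrowSem and, when woken up, appends a
// new heap to the heap list (and, if requested via sAddGrowHeap, a new
// dedicated grow heap), then wakes all waiters on sHeapGrownNotify.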
1212 heap_grow_thread(void *)
1213 {
1214 	heap_allocator *heap = sHeapList;
1215 	heap_allocator *growHeap = sGrowHeapList;
1216 	while (true) {
1217 		// wait for a request to grow the heap list
1218 		if (acquire_sem(sHeapGrowSem) < B_OK)
1219 			continue;
1220 
1221 		if (sAddGrowHeap) {
1222 			while (growHeap->next)
1223 				growHeap = growHeap->next;
1224 
1225 			// the last grow heap will run out of memory soon, try to allocate
1226 			// a new one to make some room.
1227 			TRACE(("heap_grower: grow heaps will run out of memory soon\n"));
1228 			heap_allocator *newHeap = heap_create_new_heap(
1229 				"additional grow heap", HEAP_DEDICATED_GROW_SIZE);
1230 			if (newHeap != NULL) {
1231 #if PARANOID_VALIDATION
1232 				heap_validate_heap(newHeap);
1233 #endif
1234 				growHeap->next = newHeap;
1235 				sAddGrowHeap = false;
1236 				TRACE(("heap_grower: new grow heap %p linked in\n", newHeap));
1237 			}
1238 		}
1239 
1240 		// find the last heap
1241 		while (heap->next)
1242 			heap = heap->next;
1243 
1244 		if (sLastGrowRequest != heap) {
1245 			// we have already grown since the latest request, just ignore
1246 			continue;
1247 		}
1248 
1249 		TRACE(("heap_grower: kernel heap will run out of memory soon, allocating new one\n"));
1250 		heap_allocator *newHeap = heap_create_new_heap("additional heap",
1251 			HEAP_GROW_SIZE);
1252 		if (newHeap != NULL) {
1253 #if PARANOID_VALIDATION
1254 			heap_validate_heap(newHeap);
1255 #endif
1256 			heap->next = newHeap;
1257 			TRACE(("heap_grower: new heap linked in\n"));
1258 		}
1259 
1260 		// notify anyone waiting for this request
1261 		release_sem_etc(sHeapGrownNotify, -1, B_RELEASE_ALL);
1262 	}
1263 
1264 	return 0;
1265 }
1266 
1267 
1268 status_t
1269 heap_init(addr_t base, size_t size)
1270 {
1271 	sHeapList = heap_attach(base, size);
1272 
1273 	// set up some debug commands
1274 	add_debugger_command_etc("heap", &dump_heap_list,
1275 		"Dump info about the kernel heap(s)", "[(\"grow\" | \"stats\")]\n"
1276 		"Dump info about the kernel heap(s). If \"grow\" is specified, only\n"
1277 		"info about the dedicated grow heap is printed. If \"stats\" is\n"
1278 		"given as the argument, currently only the heap count is printed\n", 0);
1279 #if KERNEL_HEAP_LEAK_CHECK
1280 	add_debugger_command_etc("allocations", &dump_allocations,
1281 		"Dump current allocations",
1282 		"[(\"team\" | \"thread\") <id>] [ \"caller\" <address> ] [\"stats\"]\n"
1283 		"If no parameters are given, all current allocations are dumped.\n"
1284 		"If \"team\", \"thread\", and/or \"caller\" is specified as the first\n"
1285 		"argument, only allocations matching the team id, thread id, or \n"
1286 		"caller address given in the second argument are printed.\n"
1287 		"If the optional argument \"stats\" is specified, only the allocation\n"
1288 		"counts and no individual allocations are printed\n", 0);
1289 	add_debugger_command_etc("allocations_per_caller",
1290 		&dump_allocations_per_caller,
1291 		"Dump current allocations summed up per caller",
1292 		"[ \"-c\" ]\n"
1293 		"The current allocations will be summed up by caller (their count and\n"
1294 		"size), printed in decreasing order by size or, if \"-c\" is\n"
1295 		"specified, by allocation count.\n", 0);
1296 #endif
1297 	return B_OK;
1298 }
1299 
1300 
1301 status_t
1302 heap_init_post_sem()
1303 {
1304 	sHeapGrowSem = create_sem(0, "heap_grow_sem");
1305 	if (sHeapGrowSem < 0) {
1306 		panic("heap_init_post_sem(): failed to create heap grow sem\n");
1307 		return B_ERROR;
1308 	}
1309 
1310 	sHeapGrownNotify = create_sem(0, "heap_grown_notify");
1311 	if (sHeapGrownNotify < 0) {
1312 		panic("heap_init_post_sem(): failed to create heap grown notify sem\n");
1313 		return B_ERROR;
1314 	}
1315 
1316 	return B_OK;
1317 }
1318 
1319 
1320 status_t
1321 heap_init_post_thread()
1322 {
1323 	sGrowHeapList = heap_create_new_heap("dedicated grow heap",
1324 		HEAP_DEDICATED_GROW_SIZE);
1325 	if (sGrowHeapList == NULL) {
1326 		panic("heap_init_post_thread(): failed to attach dedicated grow heap\n");
1327 		return B_ERROR;
1328 	}
1329 
1330 	sHeapGrowThread = spawn_kernel_thread(heap_grow_thread, "heap grower",
1331 		B_URGENT_PRIORITY, NULL);
1332 	if (sHeapGrowThread < 0) {
1333 		panic("heap_init_post_thread(): cannot create heap grow thread\n");
1334 		return sHeapGrowThread;
1335 	}
1336 
1337 	if (register_kernel_daemon(deferred_deleter, NULL, 50) != B_OK)
1338 		panic("heap_init_post_thread(): failed to init deferred deleter");
1339 
1340 	send_signal_etc(sHeapGrowThread, SIGCONT, B_DO_NOT_RESCHEDULE);
1341 	return B_OK;
1342 }
1343 
1344 
1345 //	#pragma mark - Public API
1346 
1347 
1348 void *
1349 memalign(size_t alignment, size_t size)
1350 {
1351 	if (!kernel_startup && !are_interrupts_enabled()) {
1352 		panic("memalign(): called with interrupts disabled\n");
1353 		return NULL;
1354 	}
1355 
1356 	if (size > (HEAP_GROW_SIZE * 3) / 4) {
1357 		// don't even attempt such a huge allocation
1358 		panic("heap: huge allocation of %lu bytes requested!\n", size);
1359 		return NULL;
1360 	}
1361 
1362 	heap_allocator *heap = sHeapList;
1363 	while (heap) {
1364 		bool shouldGrow = false;
1365 		void *result = heap_memalign(heap, alignment, size, &shouldGrow);
1366 		if (heap->next == NULL && (shouldGrow || result == NULL)) {
1367 			// the last heap will or has run out of memory, notify the grower
1368 			sLastGrowRequest = heap;
1369 			if (result == NULL) {
1370 				// urgent request, post it and wait for the grower
1371 				switch_sem(sHeapGrowSem, sHeapGrownNotify);
1372 				if (heap->next == NULL) {
1373 					// the grower didn't manage to add a new heap
1374 					return NULL;
1375 				}
1376 			} else {
1377 				// not so urgent, just notify the grower
1378 				release_sem_etc(sHeapGrowSem, 1, B_DO_NOT_RESCHEDULE);
1379 			}
1380 		}
1381 
1382 		if (result == NULL) {
1383 			heap = heap->next;
1384 			continue;
1385 		}
1386 
1387 #if PARANOID_VALIDATION
1388 		heap_validate_heap(heap);
1389 #endif
1390 
1391 		return result;
1392 	}
1393 
1394 	panic("heap: kernel heap has run out of memory\n");
1395 	return NULL;
1396 }
1397 
1398 
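// Allocates memory without ever waiting for the heap grower. The grower thread
// itself is served from the dedicated grow heaps first, so that growing the
// kernel heap does not depend on the very heap it is trying to grow.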
1399 void *
1400 malloc_nogrow(size_t size)
1401 {
1402 	// the grow thread allocates from the dedicated grow heaps first
1403 	if (thread_get_current_thread_id() == sHeapGrowThread) {
1404 		bool shouldGrow = false;
1405 		heap_allocator *heap = sGrowHeapList;
1406 		while (heap) {
1407 			void *result = heap_memalign(heap, 0, size, &shouldGrow);
1408 			if (shouldGrow && heap->next == NULL && !sAddGrowHeap) {
1409 				// hopefully the heap grower will manage to create a new heap
1410 				// before running out of private memory...
1411 				dprintf("heap: requesting new grow heap\n");
1412 				sAddGrowHeap = true;
1413 				release_sem_etc(sHeapGrowSem, 1, B_DO_NOT_RESCHEDULE);
1414 			}
1415 
1416 			if (result != NULL)
1417 				return result;
1418 
1419 			heap = heap->next;
1420 		}
1421 	}
1422 
1423 	// try public memory, there might be something available
1424 	heap_allocator *heap = sHeapList;
1425 	while (heap) {
1426 		void *result = heap_memalign(heap, 0, size, NULL);
1427 		if (result != NULL)
1428 			return result;
1429 
1430 		heap = heap->next;
1431 	}
1432 
1433 	// no memory available
1434 	panic("heap: all heaps have run out of memory\n");
1435 	return NULL;
1436 }
1437 
1438 
1439 void *
1440 malloc(size_t size)
1441 {
1442 	return memalign(0, size);
1443 }
1444 
1445 
1446 void
1447 free(void *address)
1448 {
1449 	if (!kernel_startup && !are_interrupts_enabled()) {
1450 		panic("free(): called with interrupts disabled\n");
1451 		return;
1452 	}
1453 
1454 	heap_allocator *heap = sHeapList;
1455 	while (heap) {
1456 		if (heap_free(heap, address) == B_OK) {
1457 #if PARANOID_VALIDATION
1458 			heap_validate_heap(heap);
1459 #endif
1460 			return;
1461 		}
1462 
1463 		heap = heap->next;
1464 	}
1465 
1466 	// maybe it was allocated from a dedicated grow heap
1467 	heap = sGrowHeapList;
1468 	while (heap) {
1469 		if (heap_free(heap, address) == B_OK)
1470 			return;
1471 
1472 		heap = heap->next;
1473 	}
1474 
1475 	panic("free(): free failed for address %p\n", address);
1476 }
1477 
1478 
1479 void *
1480 realloc(void *address, size_t newSize)
1481 {
1482 	if (!kernel_startup && !are_interrupts_enabled()) {
1483 		panic("realloc(): called with interrupts disabled\n");
1484 		return NULL;
1485 	}
1486 
1487 	if (address == NULL)
1488 		return memalign(0, newSize);
1489 
1490 	if (newSize == 0) {
1491 		free(address);
1492 		return NULL;
1493 	}
1494 
1495 	heap_allocator *heap = sHeapList;
1496 	while (heap) {
1497 		void *newAddress = NULL;
1498 		if (heap_realloc(heap, address, &newAddress, newSize) == B_OK) {
1499 #if PARANOID_VALIDATION
1500 			heap_validate_heap(heap);
1501 #endif
1502 			return newAddress;
1503 		}
1504 
1505 		heap = heap->next;
1506 	}
1507 
1508 	panic("realloc(): failed to realloc address %p to size %lu\n", address, newSize);
1509 	return NULL;
1510 }
1511 
1512 
1513 void *
1514 calloc(size_t numElements, size_t size)
1515 {
1516 	void *address = memalign(0, numElements * size);
1517 	if (address != NULL)
1518 		memset(address, 0, numElements * size);
1519 
1520 	return address;
1521 }
1522 
1523 
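// Queues a block for deletion by the kernel daemon, so that memory can be
// "freed" from contexts in which calling free() directly is not allowed (for
// example with interrupts disabled). Illustrative sketch (names are
// placeholders):
//
//	InterruptsSpinLocker locker(someLock);
//	...
//	deferred_free(object);	// free(object) would panic here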
1524 void
1525 deferred_free(void* block)
1526 {
1527 	if (block == NULL)
1528 		return;
1529 
1530 	// TODO: Use SinglyLinkedList, so that we only need sizeof(void*).
1531 	DeferredFreeListEntry* entry = new(block) DeferredFreeListEntry;
1532 
1533 	InterruptsSpinLocker _(sDeferredFreeListLock);
1534 	sDeferredFreeList.Add(entry);
1535 }
1536 
1537 
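// The malloc_referenced*() functions implement a simple reference counted
// allocation: an int32 reference count is stored directly in front of the
// pointer that is handed out; malloc_referenced_acquire() increments the count
// and malloc_referenced_release() decrements it and eventually free()s the
// whole block.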
1538 void*
1539 malloc_referenced(size_t size)
1540 {
1541 	int32* referencedData = (int32*)malloc(size + 4);
1542 	if (referencedData == NULL)
1543 		return NULL;
1544 
1545 	*referencedData = 1;
1546 
1547 	return referencedData + 1;
1548 }
1549 
1550 
1551 void*
1552 malloc_referenced_acquire(void* data)
1553 {
1554 	if (data != NULL) {
1555 		int32* referencedData = (int32*)data - 1;
1556 		atomic_add(referencedData, 1);
1557 	}
1558 
1559 	return data;
1560 }
1561 
1562 
1563 void
1564 malloc_referenced_release(void* data)
1565 {
1566 	if (data == NULL)
1567 		return;
1568 
1569 	int32* referencedData = (int32*)data - 1;
1570 	if (atomic_add(referencedData, -1) < 1)
1571 		free(referencedData);
1572 }
1573