xref: /haiku/src/system/kernel/heap.cpp (revision 4f2fd49bdc6078128b1391191e4edac647044c3d)
1 /*
2  * Copyright 2008, Michael Lotz, mmlr@mlotz.ch.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
6  * Distributed under the terms of the MIT License.
7  *
8  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
9  * Distributed under the terms of the NewOS License.
10  */
11 #include <arch/debug.h>
12 #include <debug.h>
13 #include <elf.h>
14 #include <heap.h>
15 #include <int.h>
16 #include <kernel.h>
17 #include <lock.h>
18 #include <malloc.h>
19 #include <signal.h>
20 #include <string.h>
21 #include <team.h>
22 #include <thread.h>
23 #include <tracing.h>
24 #include <util/AutoLock.h>
25 #include <vm.h>
26 
27 
28 //#define TRACE_HEAP
29 #ifdef TRACE_HEAP
30 #	define TRACE(x) dprintf x
31 #else
32 #	define TRACE(x) ;
33 #endif
34 
35 
36 #if KERNEL_HEAP_LEAK_CHECK
37 typedef struct heap_leak_check_info_s {
38 	addr_t		caller;
39 	size_t		size;
40 	thread_id	thread;
41 	team_id		team;
42 } heap_leak_check_info;
43 
44 struct caller_info {
45 	addr_t		caller;
46 	uint32		count;
47 	uint32		size;
48 };
49 
50 static const int32 kCallerInfoTableSize = 1024;
51 static caller_info sCallerInfoTable[kCallerInfoTableSize];
52 static int32 sCallerInfoCount = 0;
53 #endif
54 
55 typedef struct heap_page_s heap_page;
56 
57 typedef struct heap_area_s {
58 	area_id			area;
59 
60 	addr_t			base;
61 	size_t			size;
62 
63 	uint32			page_count;
64 	uint32			free_page_count;
65 
66 	heap_page *		free_pages;
67 	heap_page *		page_table;
68 
69 	heap_area_s *	prev;
70 	heap_area_s *	next;
71 	heap_area_s *	all_next;
72 } heap_area;
73 
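// Each heap_page describes one heap->page_size sized chunk of a heap_area.
// Pages that serve a bin carry that bin's index and track their free slots
// via free_list and empty_index (the next never used slot); pages of a
// multi-page allocation use bin_index == bin_count and share an
// allocation_id, which is the index of the first page of the allocation.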
74 typedef struct heap_page_s {
75 	heap_area *		area;
76 	uint16			index;
77 	uint16			bin_index : 5;
78 	uint16			free_count : 10;
79 	uint16			in_use : 1;
80 	heap_page_s *	next;
81 	heap_page_s *	prev;
82 	union {
83 		uint16			empty_index;
84 		uint16			allocation_id; // used for bin == bin_count allocations
85 	};
86 	addr_t *		free_list;
87 } heap_page;
88 
89 typedef struct heap_bin_s {
90 	uint32		element_size;
91 	uint16		max_free_count;
92 	heap_page *	page_list; // sorted so that the desired page is always first
93 } heap_bin;
94 
95 struct heap_allocator_s {
96 	mutex		lock;
97 
98 	const char *name;
99 	uint32		bin_count;
100 	uint32		page_size;
101 
102 	uint32		total_pages;
103 	uint32		total_free_pages;
104 	uint32		empty_areas;
105 
106 #if KERNEL_HEAP_LEAK_CHECK
107 	addr_t		(*get_caller)();
108 #endif
109 
110 	heap_bin *	bins;
111 	heap_area *	areas; // sorted so that the desired area is always first
112 	heap_area *	all_areas; // all areas including full ones
113 };
114 
115 static const uint32 kAreaAllocationMagic = 'AAMG';
116 typedef struct area_allocation_info_s {
117 	area_id		area;
118 	void *		base;
119 	uint32		magic;
120 	size_t		size;
121 	size_t		allocation_size;
122 	size_t		allocation_alignment;
123 	void *		allocation_base;
124 } area_allocation_info;
125 
126 struct DeferredFreeListEntry : SinglyLinkedListLinkImpl<DeferredFreeListEntry> {
127 };
128 
129 typedef SinglyLinkedList<DeferredFreeListEntry> DeferredFreeList;
130 typedef SinglyLinkedList<DeferredDeletable> DeferredDeletableList;
131 
132 // Heap class configuration
133 #define HEAP_CLASS_COUNT 3
134 static heap_class sHeapClasses[HEAP_CLASS_COUNT] = {
135 	{
136 		"small",					/* name */
137 		50,							/* initial percentage */
138 		B_PAGE_SIZE / 8,			/* max allocation size */
139 		B_PAGE_SIZE,				/* page size */
140 		8,							/* min bin size */
141 		4,							/* bin alignment */
142 		8,							/* min count per page */
143 		16							/* max waste per page */
144 	},
145 	{
146 		"medium",					/* name */
147 		30,							/* initial percentage */
148 		B_PAGE_SIZE * 2,			/* max allocation size */
149 		B_PAGE_SIZE * 8,			/* page size */
150 		B_PAGE_SIZE / 8,			/* min bin size */
151 		32,							/* bin alignment */
152 		4,							/* min count per page */
153 		64							/* max waste per page */
154 	},
155 	{
156 		"large",					/* name */
157 		20,							/* initial percentage */
158 		HEAP_AREA_USE_THRESHOLD,	/* max allocation size */
159 		B_PAGE_SIZE * 16,			/* page size */
160 		B_PAGE_SIZE * 2,			/* min bin size */
161 		128,						/* bin alignment */
162 		1,							/* min count per page */
163 		256							/* max waste per page */
164 	}
165 };
166 
167 static heap_allocator *sHeaps[HEAP_CLASS_COUNT];
168 static uint32 *sLastGrowRequest[HEAP_CLASS_COUNT];
169 static uint32 *sLastHandledGrowRequest[HEAP_CLASS_COUNT];
170 static heap_allocator *sGrowHeap = NULL;
171 static thread_id sHeapGrowThread = -1;
172 static sem_id sHeapGrowSem = -1;
173 static sem_id sHeapGrownNotify = -1;
174 static bool sAddGrowHeap = false;
175 
176 static DeferredFreeList sDeferredFreeList;
177 static DeferredDeletableList sDeferredDeletableList;
178 static spinlock sDeferredFreeListLock;
179 
180 
181 
182 // #pragma mark - Tracing
183 
184 #if KERNEL_HEAP_TRACING
185 namespace KernelHeapTracing {
186 
187 class Allocate : public AbstractTraceEntry {
188 	public:
189 		Allocate(addr_t address, size_t size)
190 			:	fAddress(address),
191 				fSize(size)
192 		{
193 			Initialized();
194 		}
195 
196 		virtual void AddDump(TraceOutput &out)
197 		{
198 			out.Print("heap allocate: 0x%08lx (%lu bytes)", fAddress, fSize);
199 		}
200 
201 	private:
202 		addr_t	fAddress;
203 		size_t	fSize;
204 };
205 
206 
207 class Reallocate : public AbstractTraceEntry {
208 	public:
209 		Reallocate(addr_t oldAddress, addr_t newAddress, size_t newSize)
210 			:	fOldAddress(oldAddress),
211 				fNewAddress(newAddress),
212 				fNewSize(newSize)
213 		{
214 			Initialized();
215 		};
216 
217 		virtual void AddDump(TraceOutput &out)
218 		{
219 			out.Print("heap reallocate: 0x%08lx -> 0x%08lx (%lu bytes)",
220 				fOldAddress, fNewAddress, fNewSize);
221 		}
222 
223 	private:
224 		addr_t	fOldAddress;
225 		addr_t	fNewAddress;
226 		size_t	fNewSize;
227 };
228 
229 
230 class Free : public AbstractTraceEntry {
231 	public:
232 		Free(addr_t address)
233 			:	fAddress(address)
234 		{
235 			Initialized();
236 		};
237 
238 		virtual void AddDump(TraceOutput &out)
239 		{
240 			out.Print("heap free: 0x%08lx", fAddress);
241 		}
242 
243 	private:
244 		addr_t	fAddress;
245 };
246 
247 
248 } // namespace KernelHeapTracing
249 
250 #	define T(x)	if (!gKernelStartup) new(std::nothrow) KernelHeapTracing::x;
251 #else
252 #	define T(x)	;
253 #endif
254 
255 
256 // #pragma mark - Debug functions
257 
258 
259 #if KERNEL_HEAP_LEAK_CHECK
260 static addr_t
261 get_caller()
262 {
263 	// Find the first return address outside of the allocator code. Note that
264 	// this makes certain assumptions about how the code for the functions
265 	// ends up in the kernel object.
266 	addr_t returnAddresses[5];
267 	int32 depth = arch_debug_get_stack_trace(returnAddresses, 5, 0, 1, false);
268 	for (int32 i = 0; i < depth; i++) {
269 		if (returnAddresses[i] < (addr_t)&get_caller
270 			|| returnAddresses[i] > (addr_t)&malloc_referenced_release) {
271 			return returnAddresses[i];
272 		}
273 	}
274 
275 	return 0;
276 }
277 #endif
278 
279 
280 static void
281 dump_page(heap_page *page)
282 {
283 	uint32 count = 0;
284 	for (addr_t *temp = page->free_list; temp != NULL; temp = (addr_t *)*temp)
285 		count++;
286 
287 	dprintf("\t\tpage %p: bin_index: %u; free_count: %u; empty_index: %u; free_list %p (%lu entr%s)\n",
288 		page, page->bin_index, page->free_count, page->empty_index,
289 		page->free_list, count, count == 1 ? "y" : "ies");
290 }
291 
292 
293 static void
294 dump_bin(heap_bin *bin)
295 {
296 	dprintf("\telement_size: %lu; max_free_count: %u; page_list %p;\n",
297 		bin->element_size, bin->max_free_count, bin->page_list);
298 
299 	for (heap_page *temp = bin->page_list; temp != NULL; temp = temp->next)
300 		dump_page(temp);
301 }
302 
303 
304 static void
305 dump_bin_list(heap_allocator *heap)
306 {
307 	for (uint32 i = 0; i < heap->bin_count; i++)
308 		dump_bin(&heap->bins[i]);
309 	dprintf("\n");
310 }
311 
312 
313 static void
314 dump_allocator_areas(heap_allocator *heap)
315 {
316 	heap_area *area = heap->all_areas;
317 	while (area) {
318 		dprintf("\tarea %p: area: %ld; base: 0x%08lx; size: %lu; page_count: %lu; free_pages: %p (%lu entr%s)\n",
319 			area, area->area, area->base, area->size, area->page_count,
320 			area->free_pages, area->free_page_count,
321 			area->free_page_count == 1 ? "y" : "ies");
322 		area = area->all_next;
323 	}
324 
325 	dprintf("\n");
326 }
327 
328 
329 static void
330 dump_allocator(heap_allocator *heap, bool areas, bool bins)
331 {
332 	dprintf("allocator %p: name: %s; page_size: %lu; bin_count: %lu; pages: %lu; free_pages: %lu; empty_areas: %lu\n", heap,
333 		heap->name, heap->page_size, heap->bin_count, heap->total_pages,
334 		heap->total_free_pages, heap->empty_areas);
335 
336 	if (areas)
337 		dump_allocator_areas(heap);
338 	if (bins)
339 		dump_bin_list(heap);
340 }
341 
342 
343 static int
344 dump_heap_list(int argc, char **argv)
345 {
346 	if (argc == 2) {
347 		uint64 heapAddress;
348 
349 		if (strcmp(argv[1], "grow") == 0) {
350 			// only dump dedicated grow heap info
351 			dprintf("dedicated grow heap:\n");
352 			dump_allocator(sGrowHeap, true, true);
353 		} else if (strcmp(argv[1], "stats") == 0) {
354 			for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++)
355 				dump_allocator(sHeaps[i], false, false);
356 		} else if (evaluate_debug_expression(argv[1], &heapAddress, true)) {
357 			dump_allocator((heap_allocator*)(addr_t)heapAddress, true, true);
358 		} else
359 			print_debugger_command_usage(argv[0]);
360 
361 		return 0;
362 	}
363 
364 	for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++)
365 		dump_allocator(sHeaps[i], true, true);
366 
367 	return 0;
368 }
369 
370 
371 #if KERNEL_HEAP_LEAK_CHECK
372 
373 static int
374 dump_allocations(int argc, char **argv)
375 {
376 	team_id team = -1;
377 	thread_id thread = -1;
378 	addr_t caller = 0;
379 	bool statsOnly = false;
380 	for (int32 i = 1; i < argc; i++) {
381 		if (strcmp(argv[i], "team") == 0)
382 			team = strtoul(argv[++i], NULL, 0);
383 		else if (strcmp(argv[i], "thread") == 0)
384 			thread = strtoul(argv[++i], NULL, 0);
385 		else if (strcmp(argv[i], "caller") == 0)
386 			caller = strtoul(argv[++i], NULL, 0);
387 		else if (strcmp(argv[i], "stats") == 0)
388 			statsOnly = true;
389 		else {
390 			print_debugger_command_usage(argv[0]);
391 			return 0;
392 		}
393 	}
394 
395 	size_t totalSize = 0;
396 	uint32 totalCount = 0;
397 	for (uint32 classIndex = 0; classIndex < HEAP_CLASS_COUNT; classIndex++) {
398 		heap_allocator *heap = sHeaps[classIndex];
399 
400 		// go through all the pages in all the areas
401 		heap_area *area = heap->all_areas;
402 		while (area) {
403 			heap_leak_check_info *info = NULL;
404 			for (uint32 i = 0; i < area->page_count; i++) {
405 				heap_page *page = &area->page_table[i];
406 				if (!page->in_use)
407 					continue;
408 
409 				addr_t base = area->base + i * heap->page_size;
410 				if (page->bin_index < heap->bin_count) {
411 					// page is used by a small allocation bin
412 					uint32 elementCount = page->empty_index;
413 					size_t elementSize = heap->bins[page->bin_index].element_size;
414 					for (uint32 j = 0; j < elementCount; j++, base += elementSize) {
415 						// walk the free list to see if this element is in use
416 						bool elementInUse = true;
417 						for (addr_t *temp = page->free_list; temp != NULL; temp = (addr_t *)*temp) {
418 							if ((addr_t)temp == base) {
419 								elementInUse = false;
420 								break;
421 							}
422 						}
423 
424 						if (!elementInUse)
425 							continue;
426 
427 						info = (heap_leak_check_info *)(base + elementSize
428 							- sizeof(heap_leak_check_info));
429 
430 						if ((team == -1 || info->team == team)
431 							&& (thread == -1 || info->thread == thread)
432 							&& (caller == 0 || info->caller == caller)) {
433 							// interesting...
434 							if (!statsOnly) {
435 								dprintf("team: % 6ld; thread: % 6ld; "
436 									"address: 0x%08lx; size: %lu bytes; "
437 									"caller: %#lx\n", info->team, info->thread,
438 									base, info->size, info->caller);
439 							}
440 
441 							totalSize += info->size;
442 							totalCount++;
443 						}
444 					}
445 				} else {
446 					// page is used by a big allocation, find the page count
447 					uint32 pageCount = 1;
448 					while (i + pageCount < area->page_count
449 						&& area->page_table[i + pageCount].in_use
450 						&& area->page_table[i + pageCount].bin_index == heap->bin_count
451 						&& area->page_table[i + pageCount].allocation_id == page->allocation_id)
452 						pageCount++;
453 
454 					info = (heap_leak_check_info *)(base + pageCount
455 						* heap->page_size - sizeof(heap_leak_check_info));
456 
457 					if ((team == -1 || info->team == team)
458 						&& (thread == -1 || info->thread == thread)
459 						&& (caller == 0 || info->caller == caller)) {
460 						// interesting...
461 						if (!statsOnly) {
462 							dprintf("team: % 6ld; thread: % 6ld;"
463 								" address: 0x%08lx; size: %lu bytes;"
464 								" caller: %#lx\n", info->team, info->thread,
465 								base, info->size, info->caller);
466 						}
467 
468 						totalSize += info->size;
469 						totalCount++;
470 					}
471 
472 					// skip the allocated pages
473 					i += pageCount - 1;
474 				}
475 			}
476 
477 			area = area->all_next;
478 		}
479 	}
480 
481 	dprintf("total allocations: %lu; total bytes: %lu\n", totalCount, totalSize);
482 	return 0;
483 }
484 
485 
486 static caller_info*
487 get_caller_info(addr_t caller)
488 {
489 	// find the caller info
490 	for (int32 i = 0; i < sCallerInfoCount; i++) {
491 		if (caller == sCallerInfoTable[i].caller)
492 			return &sCallerInfoTable[i];
493 	}
494 
495 	// not found, add a new entry if there are free slots
496 	if (sCallerInfoCount >= kCallerInfoTableSize)
497 		return NULL;
498 
499 	caller_info* info = &sCallerInfoTable[sCallerInfoCount++];
500 	info->caller = caller;
501 	info->count = 0;
502 	info->size = 0;
503 
504 	return info;
505 }
506 
507 
508 static int
509 caller_info_compare_size(const void* _a, const void* _b)
510 {
511 	const caller_info* a = (const caller_info*)_a;
512 	const caller_info* b = (const caller_info*)_b;
513 	return (int)(b->size - a->size);
514 }
515 
516 
517 static int
518 caller_info_compare_count(const void* _a, const void* _b)
519 {
520 	const caller_info* a = (const caller_info*)_a;
521 	const caller_info* b = (const caller_info*)_b;
522 	return (int)(b->count - a->count);
523 }
524 
525 
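// Walks all in-use pages (and their elements) of the given heap, much like
// dump_allocations() does, and accumulates the per caller allocation count
// and size in sCallerInfoTable. Returns false if the table runs out of slots.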
526 static bool
527 analyze_allocation_callers(heap_allocator* heap)
528 {
529 	// go through all the pages in all the areas
530 	heap_area *area = heap->all_areas;
531 	while (area) {
532 		heap_leak_check_info *info = NULL;
533 		for (uint32 i = 0; i < area->page_count; i++) {
534 			heap_page *page = &area->page_table[i];
535 			if (!page->in_use)
536 				continue;
537 
538 			addr_t base = area->base + i * heap->page_size;
539 			if (page->bin_index < heap->bin_count) {
540 				// page is used by a small allocation bin
541 				uint32 elementCount = page->empty_index;
542 				size_t elementSize = heap->bins[page->bin_index].element_size;
543 				for (uint32 j = 0; j < elementCount; j++, base += elementSize) {
544 					// walk the free list to see if this element is in use
545 					bool elementInUse = true;
546 					for (addr_t *temp = page->free_list; temp != NULL;
547 						temp = (addr_t *)*temp) {
548 						if ((addr_t)temp == base) {
549 							elementInUse = false;
550 							break;
551 						}
552 					}
553 
554 					if (!elementInUse)
555 						continue;
556 
557 					info = (heap_leak_check_info *)(base + elementSize
558 						- sizeof(heap_leak_check_info));
559 
560 					caller_info* callerInfo = get_caller_info(info->caller);
561 					if (callerInfo == NULL) {
562 						kprintf("out of space for caller infos\n");
563 						return false;
564 					}
565 
566 					callerInfo->count++;
567 					callerInfo->size += info->size;
568 				}
569 			} else {
570 				// page is used by a big allocation, find the page count
571 				uint32 pageCount = 1;
572 				while (i + pageCount < area->page_count
573 					&& area->page_table[i + pageCount].in_use
574 					&& area->page_table[i + pageCount].bin_index
575 						== heap->bin_count
576 					&& area->page_table[i + pageCount].allocation_id
577 						== page->allocation_id) {
578 					pageCount++;
579 				}
580 
581 				info = (heap_leak_check_info *)(base + pageCount
582 					* heap->page_size - sizeof(heap_leak_check_info));
583 
584 				caller_info* callerInfo = get_caller_info(info->caller);
585 				if (callerInfo == NULL) {
586 					kprintf("out of space for caller infos\n");
587 					return false;
588 				}
589 
590 				callerInfo->count++;
591 				callerInfo->size += info->size;
592 
593 				// skip the allocated pages
594 				i += pageCount - 1;
595 			}
596 		}
597 
598 		area = area->all_next;
599 	}
600 
601 	return true;
602 }
603 
604 
605 static int
606 dump_allocations_per_caller(int argc, char **argv)
607 {
608 	bool sortBySize = true;
609 	heap_allocator* heap = NULL;
610 
611 	for (int32 i = 1; i < argc; i++) {
612 		if (strcmp(argv[i], "-c") == 0) {
613 			sortBySize = false;
614 		} else if (strcmp(argv[i], "-h") == 0) {
615 			uint64 heapAddress;
616 			if (++i >= argc
617 				|| !evaluate_debug_expression(argv[i], &heapAddress, true)) {
618 				print_debugger_command_usage(argv[0]);
619 				return 0;
620 			}
621 
622 			heap = (heap_allocator*)(addr_t)heapAddress;
623 		} else {
624 			print_debugger_command_usage(argv[0]);
625 			return 0;
626 		}
627 	}
628 
629 	sCallerInfoCount = 0;
630 
631 	if (heap != NULL) {
632 		if (!analyze_allocation_callers(heap))
633 			return 0;
634 	} else {
635 		for (uint32 classIndex = 0; classIndex < HEAP_CLASS_COUNT;
636 				classIndex++) {
637 			if (!analyze_allocation_callers(sHeaps[classIndex]))
638 				return 0;
639 		}
640 	}
641 
642 	// sort the array
643 	qsort(sCallerInfoTable, sCallerInfoCount, sizeof(caller_info),
644 		sortBySize ? &caller_info_compare_size : &caller_info_compare_count);
645 
646 	kprintf("%ld different callers, sorted by %s...\n\n", sCallerInfoCount,
647 		sortBySize ? "size" : "count");
648 
649 	kprintf("     count        size      caller\n");
650 	kprintf("----------------------------------\n");
651 	for (int32 i = 0; i < sCallerInfoCount; i++) {
652 		caller_info& info = sCallerInfoTable[i];
653 		kprintf("%10ld  %10ld  %#08lx", info.count, info.size, info.caller);
654 
655 		const char* symbol;
656 		const char* imageName;
657 		bool exactMatch;
658 		addr_t baseAddress;
659 
660 		if (elf_debug_lookup_symbol_address(info.caller, &baseAddress, &symbol,
661 				&imageName, &exactMatch) == B_OK) {
662 			kprintf("  %s + 0x%lx (%s)%s\n", symbol,
663 				info.caller - baseAddress, imageName,
664 				exactMatch ? "" : " (nearest)");
665 		} else
666 			kprintf("\n");
667 	}
668 
669 	return 0;
670 }
671 
672 #endif // KERNEL_HEAP_LEAK_CHECK
673 
674 
675 #if PARANOID_HEAP_VALIDATION
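// Thorough consistency check used with PARANOID_HEAP_VALIDATION: verifies the
// free page list and page table of every area, the ordering of the area and
// all_areas lists as well as every bin's page list including the per page
// free lists, and panics on the first inconsistency found.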
676 static void
677 heap_validate_heap(heap_allocator *heap)
678 {
679 	mutex_lock(&heap->lock);
680 
681 	uint32 totalPageCount = 0;
682 	uint32 totalFreePageCount = 0;
683 	heap_area *area = heap->all_areas;
684 	while (area != NULL) {
685 		// validate the free pages list
686 		uint32 freePageCount = 0;
687 		heap_page *lastPage = NULL;
688 		heap_page *page = area->free_pages;
689 		while (page) {
690 			if ((addr_t)page < (addr_t)&area->page_table[0]
691 				|| (addr_t)page >= (addr_t)&area->page_table[area->page_count])
692 				panic("free page is not part of the page table\n");
693 
694 			if (page->index >= area->page_count)
695 				panic("free page has invalid index\n");
696 
697 			if ((addr_t)&area->page_table[page->index] != (addr_t)page)
698 				panic("free page index does not lead to target page\n");
699 
700 			if (page->prev != lastPage)
701 				panic("free page entry has invalid prev link\n");
702 
703 			if (page->in_use)
704 				panic("free page marked as in use\n");
705 
706 			lastPage = page;
707 			page = page->next;
708 			freePageCount++;
709 		}
710 
711 		totalPageCount += freePageCount;
712 		totalFreePageCount += freePageCount;
713 		if (area->free_page_count != freePageCount)
714 			panic("free page count doesn't match free page list\n");
715 
716 		// validate the page table
717 		uint32 usedPageCount = 0;
718 		for (uint32 i = 0; i < area->page_count; i++) {
719 			if (area->page_table[i].in_use)
720 				usedPageCount++;
721 		}
722 
723 		totalPageCount += usedPageCount;
724 		if (freePageCount + usedPageCount != area->page_count) {
725 			panic("free pages and used pages do not add up (%lu + %lu != %lu)\n",
726 				freePageCount, usedPageCount, area->page_count);
727 		}
728 
729 		area = area->all_next;
730 	}
731 
732 	// validate the areas
733 	area = heap->areas;
734 	heap_area *lastArea = NULL;
735 	uint32 lastFreeCount = 0;
736 	while (area != NULL) {
737 		if (area->free_page_count < lastFreeCount)
738 			panic("size ordering of area list broken\n");
739 
740 		if (area->prev != lastArea)
741 			panic("area list entry has invalid prev link\n");
742 
743 		lastArea = area;
744 		lastFreeCount = area->free_page_count;
745 		area = area->next;
746 	}
747 
748 	lastArea = NULL;
749 	area = heap->all_areas;
750 	while (area != NULL) {
751 		if (lastArea != NULL && lastArea->base < area->base)
752 			panic("base ordering of all_areas list broken\n");
753 
754 		lastArea = area;
755 		area = area->all_next;
756 	}
757 
758 	// validate the bins
759 	for (uint32 i = 0; i < heap->bin_count; i++) {
760 		heap_bin *bin = &heap->bins[i];
761 
762 		heap_page *lastPage = NULL;
763 		heap_page *page = bin->page_list;
764 		lastFreeCount = 0;
765 		while (page) {
766 			area = heap->all_areas;
767 			while (area) {
768 				if (area == page->area)
769 					break;
770 				area = area->all_next;
771 			}
772 
773 			if (area == NULL) {
774 				panic("page area not present in area list\n");
775 				page = page->next;
776 				continue;
777 			}
778 
779 			if ((addr_t)page < (addr_t)&area->page_table[0]
780 				|| (addr_t)page >= (addr_t)&area->page_table[area->page_count])
781 				panic("used page is not part of the page table\n");
782 
783 			if (page->index >= area->page_count)
784 				panic("used page has invalid index\n");
785 
786 			if ((addr_t)&area->page_table[page->index] != (addr_t)page)
787 				panic("used page index does not lead to target page\n");
788 
789 			if (page->prev != lastPage)
790 				panic("used page entry has invalid prev link (%p vs %p bin %lu)\n",
791 					page->prev, lastPage, i);
792 
793 			if (!page->in_use)
794 				panic("used page marked as not in use\n");
795 
796 			if (page->bin_index != i)
797 				panic("used page with bin index %u in page list of bin %lu\n",
798 					page->bin_index, i);
799 
800 			if (page->free_count < lastFreeCount)
801 				panic("ordering of bin page list broken\n");
802 
803 			// validate the free list
804 			uint32 freeSlotsCount = 0;
805 			addr_t *element = page->free_list;
806 			addr_t pageBase = area->base + page->index * heap->page_size;
807 			while (element) {
808 				if ((addr_t)element < pageBase
809 					|| (addr_t)element >= pageBase + heap->page_size)
810 					panic("free list entry out of page range\n");
811 
812 				if (((addr_t)element - pageBase) % bin->element_size != 0)
813 					panic("free list entry not on an element boundary\n");
814 
815 				element = (addr_t *)*element;
816 				freeSlotsCount++;
817 			}
818 
819 			uint32 slotCount = bin->max_free_count;
820 			if (page->empty_index > slotCount)
821 				panic("empty index beyond slot count (%u with %lu slots)\n",
822 					page->empty_index, slotCount);
823 
824 			freeSlotsCount += (slotCount - page->empty_index);
825 			if (freeSlotsCount > slotCount)
826 				panic("more free slots than fit into the page\n");
827 
828 			lastPage = page;
829 			lastFreeCount = page->free_count;
830 			page = page->next;
831 		}
832 	}
833 
834 	mutex_unlock(&heap->lock);
835 }
836 #endif // PARANOID_HEAP_VALIDATION
837 
838 
839 // #pragma mark - Heap functions
840 
841 
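// Makes the given memory range available to the heap: the heap_area header
// and the page table are placed at its start, the rest is page aligned and
// split into heap->page_size sized pages that are all linked into the area's
// free page list. The new (completely empty) area is appended to the heap's
// area list and inserted into the all_areas list, which is kept sorted by
// descending base address.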
842 static void
843 heap_add_area(heap_allocator *heap, area_id areaID, addr_t base, size_t size)
844 {
845 	mutex_lock(&heap->lock);
846 
847 	heap_area *area = (heap_area *)base;
848 	area->area = areaID;
849 
850 	base += sizeof(heap_area);
851 	size -= sizeof(heap_area);
852 
853 	uint32 pageCount = size / heap->page_size;
854 	size_t pageTableSize = pageCount * sizeof(heap_page);
855 	area->page_table = (heap_page *)base;
856 	base += pageTableSize;
857 	size -= pageTableSize;
858 
859 	// the rest is now actually usable memory (rounded to the next page)
860 	area->base = ROUNDUP(base, B_PAGE_SIZE);
861 	area->size = size & ~(B_PAGE_SIZE - 1);
862 
863 	// now we know the real page count
864 	pageCount = area->size / heap->page_size;
865 	area->page_count = pageCount;
866 
867 	// zero out the page table and fill in page indexes
868 	memset((void *)area->page_table, 0, pageTableSize);
869 	for (uint32 i = 0; i < pageCount; i++) {
870 		area->page_table[i].area = area;
871 		area->page_table[i].index = i;
872 	}
873 
874 	// link all pages into the area's free pages list
875 	for (uint32 i = 1; i < pageCount; i++) {
876 		area->page_table[i - 1].next = &area->page_table[i];
877 		area->page_table[i].prev = &area->page_table[i - 1];
878 	}
879 	area->free_pages = &area->page_table[0];
880 	area->free_page_count = pageCount;
881 	area->page_table[0].prev = NULL;
882 
883 	area->next = NULL;
884 	if (heap->areas == NULL) {
885 		// it's the only (empty) area in that heap
886 		area->prev = NULL;
887 		heap->areas = area;
888 	} else {
889 		// link in this area as the last one as it is completely empty
890 		heap_area *lastArea = heap->areas;
891 		while (lastArea->next != NULL)
892 			lastArea = lastArea->next;
893 
894 		lastArea->next = area;
895 		area->prev = lastArea;
896 	}
897 
898 	// insert this area in the all_areas list so it stays ordered by base
899 	if (heap->all_areas == NULL || heap->all_areas->base < area->base) {
900 		area->all_next = heap->all_areas;
901 		heap->all_areas = area;
902 	} else {
903 		heap_area *insert = heap->all_areas;
904 		while (insert->all_next && insert->all_next->base > area->base)
905 			insert = insert->all_next;
906 
907 		area->all_next = insert->all_next;
908 		insert->all_next = area;
909 	}
910 
911 	heap->total_pages += area->page_count;
912 	heap->total_free_pages += area->free_page_count;
913 
914 	if (areaID >= B_OK) {
915 		// this area can be deleted later and is still empty - the empty
916 		// count will be decremented once the area is used for the first time
917 		heap->empty_areas++;
918 	}
919 
920 	mutex_unlock(&heap->lock);
921 
922 	dprintf("heap_add_area: area %ld added to %s heap %p - usable range 0x%08lx - 0x%08lx\n",
923 		area->area, heap->name, heap, area->base, area->base + area->size);
924 }
925 
926 
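// Unlinks a completely unused area from the heap's area and all_areas lists
// and adjusts the page counters. Deleting the underlying area is left to the
// caller, which must do so only after the heap lock has been released.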
927 static status_t
928 heap_remove_area(heap_allocator *heap, heap_area *area, bool locked)
929 {
930 	if (!locked)
931 		mutex_lock(&heap->lock);
932 
933 	if (area->free_page_count != area->page_count) {
934 		panic("tried removing heap area that has still pages in use");
935 		return B_ERROR;
936 	}
937 
938 	if (area->prev == NULL && area->next == NULL) {
939 		panic("tried removing the last non-full heap area");
940 		return B_ERROR;
941 	}
942 
943 	if (heap->areas == area)
944 		heap->areas = area->next;
945 	if (area->prev != NULL)
946 		area->prev->next = area->next;
947 	if (area->next != NULL)
948 		area->next->prev = area->prev;
949 
950 	if (heap->all_areas == area)
951 		heap->all_areas = area->all_next;
952 	else {
953 		heap_area *previous = heap->all_areas;
954 		while (previous) {
955 			if (previous->all_next == area) {
956 				previous->all_next = area->all_next;
957 				break;
958 			}
959 
960 			previous = previous->all_next;
961 		}
962 
963 		if (previous == NULL)
964 			panic("removing heap area that is not in all list");
965 	}
966 
967 	heap->total_pages -= area->page_count;
968 	heap->total_free_pages -= area->free_page_count;
969 
970 	if (!locked)
971 		mutex_unlock(&heap->lock);
972 
973 	dprintf("heap_remove_area: area %ld with range 0x%08lx - 0x%08lx removed from %s heap %p\n",
974 		area->area, area->base, area->base + area->size, heap->name, heap);
975 	return B_OK;
976 }
977 
978 
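// Sets up a heap_allocator in the given memory range: the allocator header
// and the bin array are placed at its start. The bin sizes are derived from
// the heap_class constraints (min_bin_size, bin_alignment, min_count_per_page
// and max_waste_per_page); the remaining memory becomes the initial,
// non-deletable area (area id -1) via heap_add_area().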
979 heap_allocator *
980 heap_create_allocator(const char *name, addr_t base, size_t size,
981 	const heap_class *heapClass)
982 {
983 	heap_allocator *heap = (heap_allocator *)base;
984 	base += sizeof(heap_allocator);
985 	size -= sizeof(heap_allocator);
986 
987 	heap->name = name;
988 	heap->page_size = heapClass->page_size;
989 	heap->total_pages = heap->total_free_pages = heap->empty_areas = 0;
990 	heap->areas = heap->all_areas = NULL;
991 	heap->bins = (heap_bin *)base;
992 
993 #if KERNEL_HEAP_LEAK_CHECK
994 	heap->get_caller = &get_caller;
995 #endif
996 
997 	heap->bin_count = 0;
998 	size_t binSize = 0, lastSize = 0;
999 	uint32 count = heap->page_size / heapClass->min_bin_size;
1000 	for (; count >= heapClass->min_count_per_page; count--, lastSize = binSize) {
1001 		binSize = (heap->page_size / count) & ~(heapClass->bin_alignment - 1);
1002 		if (binSize == lastSize)
1003 			continue;
1004 		if (heap->page_size - count * binSize > heapClass->max_waste_per_page)
1005 			continue;
1006 
1007 		heap_bin *bin = &heap->bins[heap->bin_count];
1008 		bin->element_size = binSize;
1009 		bin->max_free_count = heap->page_size / binSize;
1010 		bin->page_list = NULL;
1011 		heap->bin_count++;
1012 
1013 		if (heap->bin_count >= 32)
1014 			panic("heap configuration invalid - max bin count reached\n");
1015 	}
1016 
1017 	base += heap->bin_count * sizeof(heap_bin);
1018 	size -= heap->bin_count * sizeof(heap_bin);
1019 
1020 	mutex_init(&heap->lock, "heap_mutex");
1021 
1022 	heap_add_area(heap, -1, base, size);
1023 	return heap;
1024 }
1025 
1026 
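// Bookkeeping for pages that were returned to an area: updates the free page
// counters and (re)inserts or moves the area within the heap's area list so
// that it stays sorted by ascending free page count. If a deletable area just
// became completely empty and another empty area already exists, the area is
// removed from the heap and its id is returned so the caller can delete it
// once the heap lock has been dropped; otherwise -1 is returned.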
1027 static inline area_id
1028 heap_free_pages_added(heap_allocator *heap, heap_area *area, uint32 pageCount)
1029 {
1030 	area->free_page_count += pageCount;
1031 	heap->total_free_pages += pageCount;
1032 
1033 	if (area->free_page_count == pageCount) {
1034 		// we need to add ourselves to the area list of the heap
1035 		area->prev = NULL;
1036 		area->next = heap->areas;
1037 		if (area->next)
1038 			area->next->prev = area;
1039 		heap->areas = area;
1040 	} else {
1041 		// we might need to move back in the area list
1042 		if (area->next && area->next->free_page_count < area->free_page_count) {
1043 			// move ourselves so the list stays ordered
1044 			heap_area *insert = area->next;
1045 			while (insert->next
1046 				&& insert->next->free_page_count < area->free_page_count)
1047 				insert = insert->next;
1048 
1049 			if (area->prev)
1050 				area->prev->next = area->next;
1051 			if (area->next)
1052 				area->next->prev = area->prev;
1053 			if (heap->areas == area)
1054 				heap->areas = area->next;
1055 
1056 			area->prev = insert;
1057 			area->next = insert->next;
1058 			if (area->next)
1059 				area->next->prev = area;
1060 			insert->next = area;
1061 		}
1062 	}
1063 
1064 	// can and should we free this area?
1065 	if (area->free_page_count == area->page_count && area->area >= B_OK) {
1066 		if (heap->empty_areas > 0) {
1067 			// we already have at least another empty area, just free this one
1068 			if (heap_remove_area(heap, area, true) == B_OK) {
1069 				// we cannot delete the area here, because it would result
1070 				// in calls to free, which would necessarily deadlock as we
1071 				// are locked at this point
1072 				return area->area;
1073 			}
1074 		}
1075 
1076 		heap->empty_areas++;
1077 	}
1078 
1079 	return -1;
1080 }
1081 
1082 
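// Counterpart of heap_free_pages_added(): updates the free page counters when
// pages are taken from an area, drops the heap's empty area count if the
// (deletable) area was completely unused before, and takes the area off the
// heap's area list once it has no free pages left.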
1083 static inline void
1084 heap_free_pages_removed(heap_allocator *heap, heap_area *area, uint32 pageCount)
1085 {
1086 	if (area->free_page_count == area->page_count && area->area >= B_OK) {
1087 		// this area was completely empty
1088 		heap->empty_areas--;
1089 	}
1090 
1091 	area->free_page_count -= pageCount;
1092 	heap->total_free_pages -= pageCount;
1093 
1094 	if (area->free_page_count == 0) {
1095 		// the area is now full so we remove it from the area list
1096 		heap->areas = area->next;
1097 		if (area->next)
1098 			area->next->prev = NULL;
1099 		area->next = area->prev = NULL;
1100 	}
1101 }
1102 
1103 
1104 static inline void
1105 heap_link_page(heap_page *page, heap_page **list)
1106 {
1107 	page->prev = NULL;
1108 	page->next = *list;
1109 	if (page->next)
1110 		page->next->prev = page;
1111 	*list = page;
1112 }
1113 
1114 
1115 static inline void
1116 heap_unlink_page(heap_page *page, heap_page **list)
1117 {
1118 	if (page->prev)
1119 		page->prev->next = page->next;
1120 	if (page->next)
1121 		page->next->prev = page->prev;
1122 	if (list && *list == page) {
1123 		*list = page->next;
1124 		if (page->next)
1125 			page->next->prev = NULL;
1126 	}
1127 }
1128 
1129 
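// Searches the areas of the heap for a run of pageCount contiguous free
// pages. On success all pages of the run are marked in use with
// bin_index == bin_count and a common allocation_id (the index of the first
// page), they are unlinked from the area's free page list and the first page
// is returned; NULL is returned if no suitable run exists.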
1130 static heap_page *
1131 heap_allocate_contiguous_pages(heap_allocator *heap, uint32 pageCount)
1132 {
1133 	heap_area *area = heap->areas;
1134 	while (area) {
1135 		bool found = false;
1136 		int32 first = -1;
1137 		for (uint32 i = 0; i < area->page_count; i++) {
1138 			if (area->page_table[i].in_use) {
1139 				first = -1;
1140 				continue;
1141 			}
1142 
1143 			if (first < 0)
1144 				first = i;
1145 
1146 			if ((i + 1 - first) == pageCount) {
1147 				found = true;
1148 				break;
1149 			}
1150 		}
1151 
1152 		if (!found) {
1153 			area = area->next;
1154 			continue;
1155 		}
1156 
1157 		for (uint32 i = first; i < first + pageCount; i++) {
1158 			heap_page *page = &area->page_table[i];
1159 			page->in_use = 1;
1160 			page->bin_index = heap->bin_count;
1161 
1162 			heap_unlink_page(page, &area->free_pages);
1163 
1164 			page->next = page->prev = NULL;
1165 			page->free_list = NULL;
1166 			page->allocation_id = (uint16)first;
1167 		}
1168 
1169 		heap_free_pages_removed(heap, area, pageCount);
1170 		return &area->page_table[first];
1171 	}
1172 
1173 	return NULL;
1174 }
1175 
1176 
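// Core allocation path, called with the heap lock held. Sizes that fit a bin
// are served from the first page in that bin's page list (pulling a fresh
// page from the first area if the list is empty), using a slot from the
// page's free list or, if that is empty, the next never used slot at
// empty_index. Larger requests are rounded up to whole pages and served by
// heap_allocate_contiguous_pages(). With KERNEL_HEAP_LEAK_CHECK enabled the
// leak info is stored at the end of the allocated element or page run.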
1177 static void *
1178 heap_raw_alloc(heap_allocator *heap, size_t size, uint32 binIndex)
1179 {
1180 	if (binIndex < heap->bin_count) {
1181 		heap_bin *bin = &heap->bins[binIndex];
1182 		heap_page *page = bin->page_list;
1183 		if (page == NULL) {
1184 			heap_area *area = heap->areas;
1185 			if (area == NULL) {
1186 				TRACE(("heap %p: no free pages to allocate %lu bytes\n", heap,
1187 					size));
1188 				return NULL;
1189 			}
1190 
1191 			// by design there are only areas in the list that still have
1192 			// free pages available
1193 			page = area->free_pages;
1194 			area->free_pages = page->next;
1195 			if (page->next)
1196 				page->next->prev = NULL;
1197 
1198 			heap_free_pages_removed(heap, area, 1);
1199 			page->in_use = 1;
1200 			page->bin_index = binIndex;
1201 			page->free_count = bin->max_free_count;
1202 			page->empty_index = 0;
1203 			page->free_list = NULL;
1204 			page->next = page->prev = NULL;
1205 			bin->page_list = page;
1206 		}
1207 
1208 		// we have a page where we have a free slot
1209 		void *address = NULL;
1210 		if (page->free_list) {
1211 			// there's a previously freed entry we can use
1212 			address = page->free_list;
1213 			page->free_list = (addr_t *)*page->free_list;
1214 		} else {
1215 			// the page hasn't been fully allocated so use the next empty_index
1216 			address = (void *)(page->area->base + page->index * heap->page_size
1217 				+ page->empty_index * bin->element_size);
1218 			page->empty_index++;
1219 		}
1220 
1221 		page->free_count--;
1222 		if (page->free_count == 0) {
1223 			// the page is now full so we remove it from the page_list
1224 			bin->page_list = page->next;
1225 			if (page->next)
1226 				page->next->prev = NULL;
1227 			page->next = page->prev = NULL;
1228 		}
1229 
1230 #if KERNEL_HEAP_LEAK_CHECK
1231 		heap_leak_check_info *info = (heap_leak_check_info *)((addr_t)address
1232 			+ bin->element_size - sizeof(heap_leak_check_info));
1233 		info->size = size - sizeof(heap_leak_check_info);
1234 		info->thread = (gKernelStartup ? 0 : thread_get_current_thread_id());
1235 		info->team = (gKernelStartup ? 0 : team_get_current_team_id());
1236 		info->caller = heap->get_caller();
1237 #endif
1238 		return address;
1239 	}
1240 
1241 	uint32 pageCount = (size + heap->page_size - 1) / heap->page_size;
1242 	heap_page *firstPage = heap_allocate_contiguous_pages(heap, pageCount);
1243 	if (firstPage == NULL) {
1244 		TRACE(("heap %p: found no contiguous pages to allocate %ld bytes\n",
1245 			heap, size));
1246 		return NULL;
1247 	}
1248 
1249 #if KERNEL_HEAP_LEAK_CHECK
1250 	heap_leak_check_info *info = (heap_leak_check_info *)(firstPage->area->base
1251 		+ (firstPage->index + pageCount) * heap->page_size
1252 		- sizeof(heap_leak_check_info));
1253 	info->size = size - sizeof(heap_leak_check_info);
1254 	info->thread = (gKernelStartup ? 0 : thread_get_current_thread_id());
1255 	info->team = (gKernelStartup ? 0 : team_get_current_team_id());
1256 	info->caller = heap->get_caller();
1257 #endif
1258 	return (void *)(firstPage->area->base + firstPage->index * heap->page_size);
1259 }
1260 
1261 
1262 #if DEBUG
1263 static bool
1264 is_valid_alignment(size_t number)
1265 {
1266 	// this cryptic line accepts zero and all powers of two
1267 	return ((~number + 1) | ((number << 1) - 1)) == ~0UL;
1268 }
1269 #endif
1270 
1271 
1272 inline bool
1273 heap_should_grow(heap_allocator *heap)
1274 {
1275 	// suggest growing if there is less than 20% of a grow size available
1276 	return heap->total_free_pages * heap->page_size < HEAP_GROW_SIZE / 5;
1277 }
1278 
1279 
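// Allocates size bytes from the given heap. A non-zero alignment is handled
// by rounding the size up to that alignment before picking a bin (see the
// TODO below for the shortcomings of this approach). The smallest bin whose
// element size fits the request is used; anything larger than the biggest
// bin is served with whole pages by heap_raw_alloc().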
1280 void *
1281 heap_memalign(heap_allocator *heap, size_t alignment, size_t size)
1282 {
1283 	TRACE(("memalign(alignment = %lu, size = %lu)\n", alignment, size));
1284 
1285 #if DEBUG
1286 	if (!is_valid_alignment(alignment))
1287 		panic("memalign() with an alignment which is not a power of 2\n");
1288 #endif
1289 
1290 	mutex_lock(&heap->lock);
1291 
1292 #if KERNEL_HEAP_LEAK_CHECK
1293 	size += sizeof(heap_leak_check_info);
1294 #endif
1295 
1296 	if (alignment != 0) {
1297 		// TODO: The alignment is done by ensuring that the element size
1298 		// of the target bin is aligned with the requested alignment. This
1299 		// has the problem that it wastes space because a better (smaller) bin
1300 		// could possibly be selected. We should pick the best bin and check
1301 		// if there is an aligned block in the free list or if a new (page
1302 		// aligned) page has to be allocated anyway.
1303 		size = ROUNDUP(size, alignment);
1304 	}
1305 
1306 	uint32 binIndex;
1307 	for (binIndex = 0; binIndex < heap->bin_count; binIndex++) {
1308 		if (size <= heap->bins[binIndex].element_size)
1309 			break;
1310 	}
1311 
1312 	void *address = heap_raw_alloc(heap, size, binIndex);
1313 
1314 	TRACE(("memalign(): asked to allocate %lu bytes, returning pointer %p\n", size, address));
1315 
1316 #if KERNEL_HEAP_LEAK_CHECK
1317 	size -= sizeof(heap_leak_check_info);
1318 #endif
1319 
1320 	T(Allocate((addr_t)address, size));
1321 	mutex_unlock(&heap->lock);
1322 	if (address == NULL)
1323 		return address;
1324 
1325 #if PARANOID_KERNEL_FREE
1326 	// make sure 0xdeadbeef is cleared if we do not overwrite the memory
1327 	// and the user does not clear it
1328 	if (((uint32 *)address)[1] == 0xdeadbeef)
1329 		((uint32 *)address)[1] = 0xcccccccc;
1330 #endif
1331 
1332 #if PARANOID_KERNEL_MALLOC
1333 	memset(address, 0xcc, size);
1334 #endif
1335 
1336 	return address;
1337 }
1338 
1339 
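// Frees an allocation belonging to this heap; returns B_ENTRY_NOT_FOUND
// otherwise so the caller can try the other heaps. Small allocations are
// pushed onto their page's free list (with PARANOID_KERNEL_FREE additionally
// checking for double frees and filling the block with 0xdeadbeef); large
// allocations return all of their pages to the area's free page list. If
// heap_free_pages_added() decides that an area has become superfluous, that
// area is deleted after the heap lock has been dropped.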
1340 status_t
1341 heap_free(heap_allocator *heap, void *address)
1342 {
1343 	if (address == NULL)
1344 		return B_OK;
1345 
1346 	mutex_lock(&heap->lock);
1347 
1348 	heap_area *area = heap->all_areas;
1349 	while (area) {
1350 		// since the all_areas list is ordered by base with the biggest
1351 		// base at the top, we need only find the first area with a base
1352 		// smaller than our address to become our only candidate for freeing
1353 		if (area->base <= (addr_t)address) {
1354 			if ((addr_t)address >= area->base + area->size) {
1355 				// the only candidate area doesn't contain the address,
1356 				// set it to NULL so we return below (none of the other areas
1357 				// can contain the address as the list is ordered)
1358 				area = NULL;
1359 			}
1360 
1361 			break;
1362 		}
1363 
1364 		area = area->all_next;
1365 	}
1366 
1367 	if (area == NULL) {
1368 		// this address does not belong to us
1369 		mutex_unlock(&heap->lock);
1370 		return B_ENTRY_NOT_FOUND;
1371 	}
1372 
1373 	TRACE(("free(): asked to free pointer %p\n", address));
1374 
1375 	heap_page *page = &area->page_table[((addr_t)address - area->base)
1376 		/ heap->page_size];
1377 
1378 	TRACE(("free(): page %p: bin_index %d, free_count %d\n", page, page->bin_index, page->free_count));
1379 
1380 	if (page->bin_index > heap->bin_count) {
1381 		panic("free(): page %p: invalid bin_index %d\n", page, page->bin_index);
1382 		mutex_unlock(&heap->lock);
1383 		return B_ERROR;
1384 	}
1385 
1386 	area_id areaToDelete = -1;
1387 	if (page->bin_index < heap->bin_count) {
1388 		// small allocation
1389 		heap_bin *bin = &heap->bins[page->bin_index];
1390 		if (((addr_t)address - area->base - page->index
1391 			* heap->page_size) % bin->element_size != 0) {
1392 			panic("free(): passed invalid pointer %p supposed to be in bin for element size %ld\n", address, bin->element_size);
1393 			mutex_unlock(&heap->lock);
1394 			return B_ERROR;
1395 		}
1396 
1397 #if PARANOID_KERNEL_FREE
1398 		if (((uint32 *)address)[1] == 0xdeadbeef) {
1399 			// This block looks like it was freed already, walk the free list
1400 			// on this page to make sure this address doesn't exist.
1401 			for (addr_t *temp = page->free_list; temp != NULL; temp = (addr_t *)*temp) {
1402 				if (temp == address) {
1403 					panic("free(): address %p already exists in page free list\n", address);
1404 					mutex_unlock(&heap->lock);
1405 					return B_ERROR;
1406 				}
1407 			}
1408 		}
1409 
1410 		uint32 *dead = (uint32 *)address;
1411 		if (bin->element_size % 4 != 0) {
1412 			panic("free(): didn't expect a bin element size that is not a multiple of 4\n");
1413 			mutex_unlock(&heap->lock);
1414 			return B_ERROR;
1415 		}
1416 
1417 		// the first 4 bytes are overwritten with the next free list pointer later
1418 		for (uint32 i = 1; i < bin->element_size / sizeof(uint32); i++)
1419 			dead[i] = 0xdeadbeef;
1420 #endif
1421 
1422 		// add the address to the page free list
1423 		*(addr_t *)address = (addr_t)page->free_list;
1424 		page->free_list = (addr_t *)address;
1425 		page->free_count++;
1426 
1427 		if (page->free_count == bin->max_free_count) {
1428 			// we are now empty, remove the page from the bin list
1429 			heap_unlink_page(page, &bin->page_list);
1430 			page->in_use = 0;
1431 			heap_link_page(page, &area->free_pages);
1432 			areaToDelete = heap_free_pages_added(heap, area, 1);
1433 		} else if (page->free_count == 1) {
1434 			// we need to add ourselves to the page list of the bin
1435 			heap_link_page(page, &bin->page_list);
1436 		} else {
1437 			// we might need to move back in the bin's page list
1438 			if (page->next && page->next->free_count < page->free_count) {
1439 				// move ourselves so the list stays ordered
1440 				heap_page *insert = page->next;
1441 				while (insert->next
1442 					&& insert->next->free_count < page->free_count)
1443 					insert = insert->next;
1444 
1445 				heap_unlink_page(page, &bin->page_list);
1446 
1447 				page->prev = insert;
1448 				page->next = insert->next;
1449 				if (page->next)
1450 					page->next->prev = page;
1451 				insert->next = page;
1452 			}
1453 		}
1454 	} else {
1455 		// large allocation, just return the pages to the page free list
1456 		uint32 allocationID = page->allocation_id;
1457 		uint32 maxPages = area->page_count - page->index;
1458 		uint32 pageCount = 0;
1459 		for (uint32 i = 0; i < maxPages; i++) {
1460 			// loop until we find the end of this allocation
1461 			if (!page[i].in_use || page[i].bin_index != heap->bin_count
1462 				|| page[i].allocation_id != allocationID)
1463 				break;
1464 
1465 			// this page still belongs to the same allocation
1466 			page[i].in_use = 0;
1467 			page[i].allocation_id = 0;
1468 
1469 			// return it to the free list
1470 			heap_link_page(&page[i], &area->free_pages);
1471 			pageCount++;
1472 		}
1473 
1474 		areaToDelete = heap_free_pages_added(heap, area, pageCount);
1475 	}
1476 
1477 	T(Free((addr_t)address));
1478 	mutex_unlock(&heap->lock);
1479 
1480 	if (areaToDelete >= B_OK) {
1481 		// adding the free pages caused an area to become empty and freeable;
1482 		// we can delete it now that we no longer hold the heap lock
1483 		delete_area(areaToDelete);
1484 	}
1485 
1486 	return B_OK;
1487 }
1488 
1489 
1490 #if KERNEL_HEAP_LEAK_CHECK
1491 extern "C" void
1492 heap_set_get_caller(heap_allocator* heap, addr_t (*getCaller)())
1493 {
1494 	heap->get_caller = getCaller;
1495 }
1496 #endif
1497 
1498 
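// Resizes an allocation that belongs to this heap (returns B_ENTRY_NOT_FOUND
// otherwise). The size range the existing block can hold is derived from its
// bin element size, respectively from the page count of a large allocation;
// if newSize still fits, the block is reused in place, otherwise a new block
// is allocated, the data is copied over and the old block is freed.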
1499 static status_t
1500 heap_realloc(heap_allocator *heap, void *address, void **newAddress,
1501 	size_t newSize)
1502 {
1503 	mutex_lock(&heap->lock);
1504 	heap_area *area = heap->all_areas;
1505 	while (area) {
1506 		if ((addr_t)address >= area->base
1507 			&& (addr_t)address < area->base + area->size)
1508 			break;
1509 
1510 		area = area->all_next;
1511 	}
1512 
1513 	if (area == NULL) {
1514 		// this address does not belong to us
1515 		mutex_unlock(&heap->lock);
1516 		return B_ENTRY_NOT_FOUND;
1517 	}
1518 
1519 	TRACE(("realloc(address = %p, newSize = %lu)\n", address, newSize));
1520 
1521 	heap_page *page = &area->page_table[((addr_t)address - area->base)
1522 		/ heap->page_size];
1523 	if (page->bin_index > heap->bin_count) {
1524 		panic("realloc(): page %p: invalid bin_index %d\n", page, page->bin_index);
1525 		mutex_unlock(&heap->lock);
1526 		return B_ERROR;
1527 	}
1528 
1529 	// find out the size of the old allocation first
1530 	size_t minSize = 0;
1531 	size_t maxSize = 0;
1532 	if (page->bin_index < heap->bin_count) {
1533 		// this was a small allocation
1534 		heap_bin *bin = &heap->bins[page->bin_index];
1535 		maxSize = bin->element_size;
1536 		if (page->bin_index > 0)
1537 			minSize = heap->bins[page->bin_index - 1].element_size + 1;
1538 	} else {
1539 		// this was a large allocation
1540 		uint32 allocationID = page->allocation_id;
1541 		uint32 maxPages = area->page_count - page->index;
1542 		maxSize = heap->page_size;
1543 		for (uint32 i = 1; i < maxPages; i++) {
1544 			if (!page[i].in_use || page[i].bin_index != heap->bin_count
1545 				|| page[i].allocation_id != allocationID)
1546 				break;
1547 
1548 			minSize += heap->page_size;
1549 			maxSize += heap->page_size;
1550 		}
1551 	}
1552 
1553 	mutex_unlock(&heap->lock);
1554 
1555 #if KERNEL_HEAP_LEAK_CHECK
1556 	newSize += sizeof(heap_leak_check_info);
1557 #endif
1558 
1559 	// does the new allocation simply fit in the old allocation?
1560 	if (newSize > minSize && newSize <= maxSize) {
1561 #if KERNEL_HEAP_LEAK_CHECK
1562 		// update the size info (the info is at the end so stays where it is)
1563 		heap_leak_check_info *info = (heap_leak_check_info *)((addr_t)address
1564 			+ maxSize - sizeof(heap_leak_check_info));
1565 		info->size = newSize - sizeof(heap_leak_check_info);
1566 		newSize -= sizeof(heap_leak_check_info);
1567 #endif
1568 
1569 		T(Reallocate((addr_t)address, (addr_t)address, newSize));
1570 		*newAddress = address;
1571 		return B_OK;
1572 	}
1573 
1574 #if KERNEL_HEAP_LEAK_CHECK
1575 	// new leak check info will be created with the malloc below
1576 	newSize -= sizeof(heap_leak_check_info);
1577 #endif
1578 
1579 	// if not, allocate a new chunk of memory
1580 	*newAddress = memalign(0, newSize);
1581 	T(Reallocate((addr_t)address, (addr_t)*newAddress, newSize));
1582 	if (*newAddress == NULL) {
1583 		// the allocation failed, but the address was ours, so we're done
1584 		return B_OK;
1585 	}
1586 
1587 	// copy the old data and free the old allocation
1588 	memcpy(*newAddress, address, min_c(maxSize, newSize));
1589 	heap_free(heap, address);
1590 	return B_OK;
1591 }
1592 
1593 
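// Maps an allocation size to the index of the heap class (small, medium or
// large) whose max_allocation_size can accommodate it, including the leak
// check info overhead when that feature is enabled.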
1594 inline uint32
1595 heap_class_for(size_t size)
1596 {
1597 #if KERNEL_HEAP_LEAK_CHECK
1598 	// take the extra info size into account
1599 	size += sizeof(heap_leak_check_info_s);
1600 #endif
1601 
1602 	uint32 index = 0;
1603 	for (; index < HEAP_CLASS_COUNT - 1; index++) {
1604 		if (size <= sHeapClasses[index].max_allocation_size)
1605 			return index;
1606 	}
1607 
1608 	return index;
1609 }
1610 
1611 
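// Kernel daemon callback: moves the pending entries of the deferred free and
// deferred deletable lists onto local lists while holding the spinlock and
// then frees respectively deletes them with the lock released.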
1612 static void
1613 deferred_deleter(void *arg, int iteration)
1614 {
1615 	// move entries and deletables to on-stack lists
1616 	InterruptsSpinLocker locker(sDeferredFreeListLock);
1617 	if (sDeferredFreeList.IsEmpty() && sDeferredDeletableList.IsEmpty())
1618 		return;
1619 
1620 	DeferredFreeList entries;
1621 	entries.MoveFrom(&sDeferredFreeList);
1622 
1623 	DeferredDeletableList deletables;
1624 	deletables.MoveFrom(&sDeferredDeletableList);
1625 
1626 	locker.Unlock();
1627 
1628 	// free the entries
1629 	while (DeferredFreeListEntry* entry = entries.RemoveHead())
1630 		free(entry);
1631 
1632 	// delete the deletables
1633 	while (DeferredDeletable* deletable = deletables.RemoveHead())
1634 		delete deletable;
1635 }
1636 
1637 
1638 //	#pragma mark -
1639 
1640 
1641 static status_t
1642 heap_create_new_heap_area(heap_allocator *heap, const char *name, size_t size)
1643 {
1644 	void *address = NULL;
1645 	area_id heapArea = create_area(name, &address,
1646 		B_ANY_KERNEL_BLOCK_ADDRESS, size, B_FULL_LOCK,
1647 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1648 	if (heapArea < B_OK) {
1649 		TRACE(("heap: couldn't allocate heap area \"%s\"\n", name));
1650 		return heapArea;
1651 	}
1652 
1653 	heap_add_area(heap, heapArea, (addr_t)address, size);
1654 #if PARANOID_HEAP_VALIDATION
1655 	heap_validate_heap(heap);
1656 #endif
1657 	return B_OK;
1658 }
1659 
1660 
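// Dedicated grower thread: waits on sHeapGrowSem, adds another area to the
// dedicated grow heap when sAddGrowHeap was set, grows every class heap that
// explicitly requested it (sLastGrowRequest) or that is running low on free
// pages, and finally wakes up all threads waiting on sHeapGrownNotify.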
1661 static int32
1662 heap_grow_thread(void *)
1663 {
1664 	while (true) {
1665 		// wait for a request to grow one of the heaps
1666 		if (acquire_sem(sHeapGrowSem) < B_OK)
1667 			continue;
1668 
1669 		if (sAddGrowHeap) {
1670 			// the grow heap is going to run full soon, try to allocate a new
1671 			// one to make some room.
1672 			TRACE(("heap_grower: grow heaps will run out of memory soon\n"));
1673 			if (heap_create_new_heap_area(sGrowHeap, "additional grow heap",
1674 				HEAP_DEDICATED_GROW_SIZE) != B_OK)
1675 				dprintf("heap_grower: failed to create new grow heap area\n");
1676 		}
1677 
1678 		for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
1679 			heap_allocator *heap = sHeaps[i];
1680 			if (sLastGrowRequest[i] > sLastHandledGrowRequest[i]
1681 				|| heap_should_grow(heap)) {
1682 				// grow this heap if it is nearly full or if a grow was
1683 				// explicitly requested for this heap (happens when a large
1684 				// allocation cannot be fulfilled due to lack of contiguous
1685 				// pages)
1686 				if (heap_create_new_heap_area(heap, "additional heap",
1687 					HEAP_GROW_SIZE) != B_OK)
1688 					dprintf("heap_grower: failed to create new heap area\n");
1689 				sLastHandledGrowRequest[i] = sLastGrowRequest[i];
1690 			}
1691 		}
1692 
1693 		// notify anyone waiting for this request
1694 		release_sem_etc(sHeapGrownNotify, -1, B_RELEASE_ALL);
1695 	}
1696 
1697 	return 0;
1698 }
1699 
1700 
1701 status_t
1702 heap_init(addr_t base, size_t size)
1703 {
1704 	for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
1705 		size_t partSize = size * sHeapClasses[i].initial_percentage / 100;
1706 		sHeaps[i] = heap_create_allocator(sHeapClasses[i].name, base, partSize,
1707 			&sHeapClasses[i]);
1708 		sLastGrowRequest[i] = sLastHandledGrowRequest[i] = 0;
1709 		base += partSize;
1710 	}
1711 
1712 	// set up some debug commands
1713 	add_debugger_command_etc("heap", &dump_heap_list,
1714 		"Dump info about the kernel heap(s)",
1715 		"[(\"grow\" | \"stats\" | <heap>)]\n"
1716 		"Dump info about the kernel heap(s). If \"grow\" is specified, only\n"
1717 		"info about the dedicated grow heap is printed. If \"stats\" is\n"
1718 		"given as the argument, only a summary of each heap is printed.\n"
1719 		"If <heap> is given, it is interpreted as the address of the heap to\n"
1720 		"print info about.\n", 0);
1721 #if KERNEL_HEAP_LEAK_CHECK
1722 	add_debugger_command_etc("allocations", &dump_allocations,
1723 		"Dump current allocations",
1724 		"[(\"team\" | \"thread\") <id>] [ \"caller\" <address> ] [\"stats\"]\n"
1725 		"If no parameters are given, all current allocations are dumped.\n"
1726 		"If \"team\", \"thread\", and/or \"caller\" is specified as the first\n"
1727 		"argument, only allocations matching the team id, thread id, or \n"
1728 		"caller address given in the second argument are printed.\n"
1729 		"If the optional argument \"stats\" is specified, only the allocation\n"
1730 		"counts and no individual allocations are printed\n", 0);
1731 	add_debugger_command_etc("allocations_per_caller",
1732 		&dump_allocations_per_caller,
1733 		"Dump current allocations summed up per caller",
1734 		"[ \"-c\" ] [ -h <heap> ]\n"
1735 		"The current allocations will be summed up by caller (their count and\n"
1736 		"size) and printed in decreasing order by size or, if \"-c\" is\n"
1737 		"specified, by allocation count. If given, <heap> specifies the\n"
1738 		"address of the heap for which to print the allocations.\n", 0);
1739 #endif
1740 	return B_OK;
1741 }
1742 
1743 
1744 status_t
1745 heap_init_post_sem()
1746 {
1747 	sHeapGrowSem = create_sem(0, "heap_grow_sem");
1748 	if (sHeapGrowSem < 0) {
1749 		panic("heap_init_post_sem(): failed to create heap grow sem\n");
1750 		return B_ERROR;
1751 	}
1752 
1753 	sHeapGrownNotify = create_sem(0, "heap_grown_notify");
1754 	if (sHeapGrownNotify < 0) {
1755 		panic("heap_init_post_sem(): failed to create heap grown notify sem\n");
1756 		return B_ERROR;
1757 	}
1758 
1759 	return B_OK;
1760 }
1761 
1762 
1763 status_t
1764 heap_init_post_thread()
1765 {
1766 	void *address = NULL;
1767 	area_id growHeapArea = create_area("dedicated grow heap", &address,
1768 		B_ANY_KERNEL_BLOCK_ADDRESS, HEAP_DEDICATED_GROW_SIZE, B_FULL_LOCK,
1769 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1770 	if (growHeapArea < B_OK) {
1771 		panic("heap_init_post_thread(): couldn't allocate dedicated grow heap area");
1772 		return growHeapArea;
1773 	}
1774 
1775 	sGrowHeap = heap_create_allocator("grow", (addr_t)address,
1776 		HEAP_DEDICATED_GROW_SIZE, &sHeapClasses[0]);
1777 	if (sGrowHeap == NULL) {
1778 		panic("heap_init_post_thread(): failed to create dedicated grow heap\n");
1779 		return B_ERROR;
1780 	}
1781 
1782 	sHeapGrowThread = spawn_kernel_thread(heap_grow_thread, "heap grower",
1783 		B_URGENT_PRIORITY, NULL);
1784 	if (sHeapGrowThread < 0) {
1785 		panic("heap_init_post_thread(): cannot create heap grow thread\n");
1786 		return sHeapGrowThread;
1787 	}
1788 
1789 	// run the deferred deleter roughly once a second
1790 	if (register_kernel_daemon(deferred_deleter, NULL, 10) != B_OK)
1791 		panic("heap_init_post_thread(): failed to init deferred deleter");
1792 
1793 	send_signal_etc(sHeapGrowThread, SIGCONT, B_DO_NOT_RESCHEDULE);
1794 	return B_OK;
1795 }
1796 
1797 
1798 //	#pragma mark - Public API
1799 
1800 
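// Public allocator entry point. Allocations above HEAP_AREA_USE_THRESHOLD get
// their own area, prefixed with an area_allocation_info header; everything
// else goes to the heap class matching the requested size. If that heap is
// out of memory, a grow request is queued for the grower thread and the
// allocation is retried once the request has been handled.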
1801 void *
1802 memalign(size_t alignment, size_t size)
1803 {
1804 	if (!gKernelStartup && !are_interrupts_enabled()) {
1805 		panic("memalign(): called with interrupts disabled\n");
1806 		return NULL;
1807 	}
1808 
1809 	if (!gKernelStartup && size > HEAP_AREA_USE_THRESHOLD) {
1810 		// don't even attempt such a huge allocation - use areas instead
1811 		size_t areaSize = size + sizeof(area_allocation_info);
1812 		if (alignment != 0)
1813 			areaSize = ROUNDUP(areaSize, alignment);
1814 
1815 		void *address = NULL;
1816 		areaSize = ROUNDUP(areaSize, B_PAGE_SIZE);
1817 		area_id allocationArea = create_area("memalign area", &address,
1818 			B_ANY_KERNEL_BLOCK_ADDRESS, areaSize, B_FULL_LOCK,
1819 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1820 		if (allocationArea < B_OK) {
1821 			dprintf("heap: failed to create area for huge allocation\n");
1822 			return NULL;
1823 		}
1824 
1825 		area_allocation_info *info = (area_allocation_info *)address;
1826 		info->magic = kAreaAllocationMagic;
1827 		info->area = allocationArea;
1828 		info->base = address;
1829 		info->size = areaSize;
1830 		info->allocation_size = size;
1831 		info->allocation_alignment = alignment;
1832 
1833 		address = (void *)((addr_t)address + sizeof(area_allocation_info));
1834 		if (alignment != 0)
1835 			address = (void *)ROUNDUP((addr_t)address, alignment);
1836 
1837 		TRACE(("heap: allocated area %ld for huge allocation of %lu bytes\n",
1838 			allocationArea, size));
1839 
1840 		info->allocation_base = address;
1841 
1842 #if PARANOID_KERNEL_MALLOC
1843 		memset(address, 0xcc, size);
1844 #endif
1845 		return address;
1846 	}
1847 
1848 	uint32 heapClass = heap_class_for(size);
1849 	heap_allocator *heap = sHeaps[heapClass];
1850 	void *result = heap_memalign(heap, alignment, size);
1851 	if (result == NULL) {
1852 		// request an urgent grow and wait - we don't do it ourselves here to
1853 		// serialize growing through the grow thread, as otherwise multiple
1854 		// threads hitting this situation (likely when memory ran out) would
1855 		// all add areas
1856 		sLastGrowRequest[heapClass]++;
1857 		switch_sem(sHeapGrowSem, sHeapGrownNotify);
1858 
1859 		// and then try again
1860 		result = heap_memalign(heap, alignment, size);
1861 	} else if (heap_should_grow(heap)) {
1862 		// should grow sometime soon, notify the grower
1863 		release_sem_etc(sHeapGrowSem, 1, B_DO_NOT_RESCHEDULE);
1864 	}
1865 
1866 #if PARANOID_HEAP_VALIDATION
1867 	heap_validate_heap(heap);
1868 #endif
1869 
1870 	if (result == NULL)
1871 		panic("heap: kernel heap has run out of memory\n");
1872 	return result;
1873 }
1874 
1875 
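// Allocation path that never waits for the grow thread. When called by the
// grow thread itself, the dedicated grow heap is tried first; after that only
// memory that is already available in the public heaps is handed out, with no
// growing and no waiting.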
1876 void *
1877 malloc_nogrow(size_t size)
1878 {
1879 	// use dedicated memory in the grow thread by default
1880 	if (thread_get_current_thread_id() == sHeapGrowThread) {
1881 		void *result = heap_memalign(sGrowHeap, 0, size);
1882 		if (!sAddGrowHeap && heap_should_grow(sGrowHeap)) {
1883 			// hopefully the heap grower will manage to create a new heap
1884 			// before running out of private memory...
1885 			dprintf("heap: requesting new grow heap\n");
1886 			sAddGrowHeap = true;
1887 			release_sem_etc(sHeapGrowSem, 1, B_DO_NOT_RESCHEDULE);
1888 		}
1889 
1890 		if (result != NULL)
1891 			return result;
1892 	}
1893 
1894 	// try public memory, there might be something available
1895 	heap_allocator *heap = sHeaps[heap_class_for(size)];
1896 	void *result = heap_memalign(heap, 0, size);
1897 	if (result != NULL)
1898 		return result;
1899 
1900 	// no memory available
1901 	if (thread_get_current_thread_id() == sHeapGrowThread)
1902 		panic("heap: all heaps have run out of memory while growing\n");
1903 	else
1904 		dprintf("heap: all heaps have run out of memory\n");
1905 
1906 	return NULL;
1907 }
1908 
1909 
1910 void *
1911 malloc(size_t size)
1912 {
1913 	return memalign(0, size);
1914 }
1915 
1916 
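// Tries each heap class in turn, then the dedicated grow heap, and finally
// checks whether the address belongs to an area-backed huge allocation by
// validating its area_allocation_info header before deleting the area.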
1917 void
1918 free(void *address)
1919 {
1920 	if (!gKernelStartup && !are_interrupts_enabled()) {
1921 		panic("free(): called with interrupts disabled\n");
1922 		return;
1923 	}
1924 
1925 	for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
1926 		heap_allocator *heap = sHeaps[i];
1927 		if (heap_free(heap, address) == B_OK) {
1928 #if PARANOID_HEAP_VALIDATION
1929 			heap_validate_heap(heap);
1930 #endif
1931 			return;
1932 		}
1933 	}
1934 
1935 	// maybe it was allocated from the dedicated grow heap
1936 	if (heap_free(sGrowHeap, address) == B_OK)
1937 		return;
1938 
1939 	// or maybe it was a huge allocation using an area
1940 	area_info areaInfo;
1941 	area_id area = area_for(address);
1942 	if (area >= B_OK && get_area_info(area, &areaInfo) == B_OK) {
1943 		area_allocation_info *info = (area_allocation_info *)areaInfo.address;
1944 
1945 		// just make extra sure it was allocated by us
1946 		if (info->magic == kAreaAllocationMagic && info->area == area
1947 			&& info->size == areaInfo.size && info->base == areaInfo.address
1948 			&& info->allocation_size < areaInfo.size) {
1949 			delete_area(area);
1950 			TRACE(("free(): freed huge allocation by deleting area %ld\n", area));
1951 			return;
1952 		}
1953 	}
1954 
1955 	panic("free(): free failed for address %p\n", address);
1956 }
1957 
1958 
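// Dispatches like free(): heap classes first, then the grow heap, then
// area-backed huge allocations. For the latter the existing area is reused
// when it is still large enough; otherwise the data is copied into a fresh
// allocation and the old area is deleted.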
1959 void *
1960 realloc(void *address, size_t newSize)
1961 {
1962 	if (!gKernelStartup && !are_interrupts_enabled()) {
1963 		panic("realloc(): called with interrupts disabled\n");
1964 		return NULL;
1965 	}
1966 
1967 	if (address == NULL)
1968 		return memalign(0, newSize);
1969 
1970 	if (newSize == 0) {
1971 		free(address);
1972 		return NULL;
1973 	}
1974 
1975 	void *newAddress = NULL;
1976 	for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
1977 		heap_allocator *heap = sHeaps[i];
1978 		if (heap_realloc(heap, address, &newAddress, newSize) == B_OK) {
1979 #if PARANOID_HEAP_VALIDATION
1980 			heap_validate_heap(heap);
1981 #endif
1982 			return newAddress;
1983 		}
1984 	}
1985 
1986 	// maybe it was allocated from the dedicated grow heap
1987 	if (heap_realloc(sGrowHeap, address, &newAddress, newSize) == B_OK)
1988 		return newAddress;
1989 
1990 	// or maybe it was a huge allocation using an area
1991 	area_info areaInfo;
1992 	area_id area = area_for(address);
1993 	if (area >= B_OK && get_area_info(area, &areaInfo) == B_OK) {
1994 		area_allocation_info *info = (area_allocation_info *)areaInfo.address;
1995 
1996 		// just make extra sure it was allocated by us
1997 		if (info->magic == kAreaAllocationMagic && info->area == area
1998 			&& info->size == areaInfo.size && info->base == areaInfo.address
1999 			&& info->allocation_size < areaInfo.size) {
2000 			size_t available = info->size - ((addr_t)info->allocation_base
2001 				- (addr_t)info->base);
2002 
2003 			if (available >= newSize) {
2004 				// there is enough room available for the newSize
2005 				TRACE(("realloc(): new size %ld fits in old area %ld with %ld available\n",
2006 					newSize, area, available));
2007 				return address;
2008 			}
2009 
2010 			// have to allocate/copy/free - TODO maybe resize the area instead?
2011 			newAddress = memalign(0, newSize);
2012 			if (newAddress == NULL) {
2013 			dprintf("realloc(): failed to allocate new block of %lu bytes\n",
2014 					newSize);
2015 				return NULL;
2016 			}
2017 
2018 			memcpy(newAddress, address, min_c(newSize, info->allocation_size));
2019 			delete_area(area);
2020 			TRACE(("realloc(): allocated new block %p for size %ld and deleted old area %ld\n",
2021 				newAddress, newSize, area));
2022 			return newAddress;
2023 		}
2024 	}
2025 
2026 	panic("realloc(): failed to realloc address %p to size %lu\n", address, newSize);
2027 	return NULL;
2028 }
2029 
2030 
2031 void *
2032 calloc(size_t numElements, size_t size)
2033 {
	// guard against overflow in numElements * size
	if (size != 0 && numElements > (~(size_t)0) / size)
		return NULL;
2034 	void *address = memalign(0, numElements * size);
2035 	if (address != NULL)
2036 		memset(address, 0, numElements * size);
2037 
2038 	return address;
2039 }
2040 
2041 
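// Queues a block for freeing by the deferred_deleter daemon (registered in
// heap_init_post_thread()) instead of freeing it right away. The list entry
// is constructed in place inside the block itself, so no additional memory is
// needed; presumably this exists for contexts where calling free() directly
// is not safe.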
2042 void
2043 deferred_free(void* block)
2044 {
2045 	if (block == NULL)
2046 		return;
2047 
2048 	DeferredFreeListEntry* entry = new(block) DeferredFreeListEntry;
2049 
2050 	InterruptsSpinLocker _(sDeferredFreeListLock);
2051 	sDeferredFreeList.Add(entry);
2052 }
2053 
2054 
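// Minimal reference counting on top of malloc(): an int32 count is stored
// directly in front of the data pointer that is handed back to the caller.
// Illustrative usage (a sketch, not taken from an actual caller):
//
//	void* data = malloc_referenced(size);	// count == 1
//	malloc_referenced_acquire(data);		// count == 2
//	malloc_referenced_release(data);		// count == 1
//	malloc_referenced_release(data);		// last reference gone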
2055 void*
2056 malloc_referenced(size_t size)
2057 {
2058 	int32* referencedData = (int32*)malloc(size + sizeof(int32));
2059 	if (referencedData == NULL)
2060 		return NULL;
2061 
2062 	*referencedData = 1;
2063 
2064 	return referencedData + 1;
2065 }
2066 
2067 
2068 void*
2069 malloc_referenced_acquire(void* data)
2070 {
2071 	if (data != NULL) {
2072 		int32* referencedData = (int32*)data - 1;
2073 		atomic_add(referencedData, 1);
2074 	}
2075 
2076 	return data;
2077 }
2078 
2079 
2080 void
2081 malloc_referenced_release(void* data)
2082 {
2083 	if (data == NULL)
2084 		return;
2085 
2086 	int32* referencedData = (int32*)data - 1;
2087 	if (atomic_add(referencedData, -1) < 1)
2088 		free(referencedData);
2089 }
2090 
2091 
2092 DeferredDeletable::~DeferredDeletable()
2093 {
2094 }
2095 
2096 
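// Counterpart of deferred_free() for DeferredDeletable objects: the object is
// queued under the same spinlock and presumably destroyed later by the
// deferred_deleter daemon.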
2097 void
2098 deferred_delete(DeferredDeletable* deletable)
2099 {
2100 	if (deletable == NULL)
2101 		return;
2102 
2103 	InterruptsSpinLocker _(sDeferredFreeListLock);
2104 	sDeferredDeletableList.Add(deletable);
2105 }
2106