1 /*
2 * Copyright 2008-2009, Michael Lotz, mmlr@mlotz.ch.
3 * Distributed under the terms of the MIT License.
4 *
5 * Copyright 2002-2011, Axel Dörfler, axeld@pinc-software.de.
6 * Distributed under the terms of the MIT License.
7 *
8 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
9 * Distributed under the terms of the NewOS License.
10 */
11
12
13 #include "malloc_debug_api.h"
14
15 #include <malloc.h>
16 #include <stdio.h>
17 #include <string.h>
18 #include <stdlib.h>
19
20 #include <errno.h>
21 #include <sys/mman.h>
22
23 #include <errno_private.h>
24 #include <locks.h>
25 #include <syscalls.h>
26
27 #include <Debug.h>
28 #include <OS.h>
29
30
31 //#define TRACE_HEAP
32 #ifdef TRACE_HEAP
33 # define INFO(x) debug_printf x
34 #else
35 # define INFO(x) ;
36 #endif
37
38 #undef ASSERT
39 #define ASSERT(x) if (!(x)) panic("assert failed: %s", #x);
40
41
42 static void *debug_heap_memalign(size_t alignment, size_t size);
43
44
45 static bool sDebuggerCalls = true;
46 static bool sReuseMemory = true;
47 static bool sParanoidValidation = false;
48 static thread_id sWallCheckThread = -1;
49 static bool sStopWallChecking = false;
50 static bool sUseGuardPage = false;
51
52 #if __cplusplus >= 201103L
53 #include <cstddef>
54 using namespace std;
55 static size_t sDefaultAlignment = alignof(max_align_t);
56 #else
57 static size_t sDefaultAlignment = 8;
58 #endif
59
60
61 void
62 panic(const char *format, ...)
63 {
64 char buffer[1024];
65
66 va_list args;
67 va_start(args, format);
68 vsnprintf(buffer, sizeof(buffer), format, args);
69 va_end(args);
70
71 if (sDebuggerCalls)
72 debugger(buffer);
73 else
74 debug_printf("%s", buffer);
75 }
76
77
78 #define ROUNDUP(x, y) (((x) + (y) - 1) / (y) * (y))
79
80 #define HEAP_INITIAL_SIZE (1 * 1024 * 1024)
81 #define HEAP_GROW_SIZE (2 * 1024 * 1024)
82 #define HEAP_AREA_USE_THRESHOLD (1 * 1024 * 1024)
83
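// Debug layout of every allocation served by these heaps: the usable slot
// ends with a heap_leak_check_info holding the requested size and the
// allocating thread, and an addr_t "wall" word equal to its own address is
// written directly behind the user data. heap_validate_walls() later checks
// that this word is unchanged to detect writes past the allocation.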
84 typedef struct heap_leak_check_info_s {
85 size_t size;
86 thread_id thread;
87 } heap_leak_check_info;
88
89 typedef struct heap_page_s heap_page;
90
91 typedef struct heap_area_s {
92 area_id area;
93
94 addr_t base;
95 size_t size;
96
97 uint32 page_count;
98 uint32 free_page_count;
99
100 heap_page * free_pages;
101 heap_page * page_table;
102
103 heap_area_s * prev;
104 heap_area_s * next;
105 heap_area_s * all_next;
106 } heap_area;
107
108 typedef struct heap_page_s {
109 heap_area * area;
110 uint16 index;
111 uint16 bin_index : 5;
112 uint16 free_count : 10;
113 uint16 in_use : 1;
114 heap_page_s * next;
115 heap_page_s * prev;
116 union {
117 uint16 empty_index;
118 uint16 allocation_id; // used for bin == bin_count allocations
119 };
120 addr_t * free_list;
121 } heap_page;
122
123 typedef struct heap_bin_s {
124 mutex lock;
125 uint32 element_size;
126 uint16 max_free_count;
127 heap_page * page_list; // sorted so that the desired page is always first
128 } heap_bin;
129
130 typedef struct heap_allocator_s {
131 rw_lock area_lock;
132 mutex page_lock;
133
134 const char *name;
135 uint32 bin_count;
136 uint32 page_size;
137
138 uint32 total_pages;
139 uint32 total_free_pages;
140 uint32 empty_areas;
141
142 heap_bin * bins;
143 heap_area * areas; // sorted so that the desired area is always first
144 heap_area * all_areas; // all areas including full ones
145 } heap_allocator;
146
147 static const uint32 kAreaAllocationMagic = 'AAMG';
148 typedef struct area_allocation_info_s {
149 area_id area;
150 void * base;
151 uint32 magic;
152 size_t size;
153 thread_id thread;
154 size_t allocation_size;
155 size_t allocation_alignment;
156 void * allocation_base;
157 } area_allocation_info;
158
159 typedef struct heap_class_s {
160 const char *name;
161 uint32 initial_percentage;
162 size_t max_allocation_size;
163 size_t page_size;
164 size_t min_bin_size;
165 size_t bin_alignment;
166 uint32 min_count_per_page;
167 size_t max_waste_per_page;
168 } heap_class;
169
170 // Heap class configuration
171 #define HEAP_CLASS_COUNT 3
172 static const heap_class sHeapClasses[HEAP_CLASS_COUNT] = {
173 {
174 "small", /* name */
175 50, /* initial percentage */
176 B_PAGE_SIZE / 8, /* max allocation size */
177 B_PAGE_SIZE, /* page size */
178 8, /* min bin size */
179 4, /* bin alignment */
180 8, /* min count per page */
181 16 /* max waste per page */
182 },
183 {
184 "medium", /* name */
185 30, /* initial percentage */
186 B_PAGE_SIZE * 2, /* max allocation size */
187 B_PAGE_SIZE * 8, /* page size */
188 B_PAGE_SIZE / 8, /* min bin size */
189 32, /* bin alignment */
190 4, /* min count per page */
191 64 /* max waste per page */
192 },
193 {
194 "large", /* name */
195 20, /* initial percentage */
196 HEAP_AREA_USE_THRESHOLD, /* max allocation size */
197 B_PAGE_SIZE * 16, /* page size */
198 B_PAGE_SIZE * 2, /* min bin size */
199 128, /* bin alignment */
200 1, /* min count per page */
201 256 /* max waste per page */
202 }
203 };
204
205 static heap_allocator *sHeaps[HEAP_CLASS_COUNT];
206
207
208 // #pragma mark - Debug functions
209
210
211 static void
212 dump_page(heap_page *page)
213 {
214 uint32 count = 0;
215 for (addr_t *temp = page->free_list; temp != NULL; temp = (addr_t *)*temp)
216 count++;
217
218 printf("\t\tpage %p: bin_index: %u; free_count: %u; empty_index: %u; "
219 "free_list %p (%" B_PRIu32 " entr%s)\n", page, page->bin_index,
220 page->free_count, page->empty_index, page->free_list, count,
221 count == 1 ? "y" : "ies");
222 }
223
224
225 static void
226 dump_bin(heap_bin *bin)
227 {
228 printf("\telement_size: %" B_PRIu32 "; max_free_count: %u; page_list %p;\n",
229 bin->element_size, bin->max_free_count, bin->page_list);
230
231 for (heap_page *temp = bin->page_list; temp != NULL; temp = temp->next)
232 dump_page(temp);
233 }
234
235
236 static void
237 dump_bin_list(heap_allocator *heap)
238 {
239 for (uint32 i = 0; i < heap->bin_count; i++)
240 dump_bin(&heap->bins[i]);
241 printf("\n");
242 }
243
244
245 static void
246 dump_allocator_areas(heap_allocator *heap)
247 {
248 heap_area *area = heap->all_areas;
249 while (area) {
250 printf("\tarea %p: area: %" B_PRId32 "; base: 0x%08lx; size: %" B_PRIuSIZE "; "
251 "page_count: %" B_PRIu32 "; free_pages: %p (%" B_PRIu32 " entr%s)\n",
252 area, area->area, area->base, area->size, area->page_count,
253 area->free_pages, area->free_page_count,
254 area->free_page_count == 1 ? "y" : "ies");
255 area = area->all_next;
256 }
257
258 printf("\n");
259 }
260
261
262 static void
263 dump_allocator(heap_allocator *heap, bool areas, bool bins)
264 {
265 printf("allocator %p: name: %s; page_size: %" B_PRIu32 "; bin_count: %"
266 B_PRIu32 "; pages: %" B_PRIu32 "; free_pages: %" B_PRIu32 "; "
267 "empty_areas: %" B_PRIu32 "\n", heap, heap->name, heap->page_size,
268 heap->bin_count, heap->total_pages, heap->total_free_pages,
269 heap->empty_areas);
270
271 if (areas)
272 dump_allocator_areas(heap);
273 if (bins)
274 dump_bin_list(heap);
275 }
276
277
278 static void
279 dump_allocations(bool statsOnly, thread_id thread)
280 {
281 size_t totalSize = 0;
282 uint32 totalCount = 0;
283 for (uint32 classIndex = 0; classIndex < HEAP_CLASS_COUNT; classIndex++) {
284 heap_allocator *heap = sHeaps[classIndex];
285
286 // go through all the pages in all the areas
287 heap_area *area = heap->all_areas;
288 while (area) {
289 heap_leak_check_info *info = NULL;
290 for (uint32 i = 0; i < area->page_count; i++) {
291 heap_page *page = &area->page_table[i];
292 if (!page->in_use)
293 continue;
294
295 addr_t base = area->base + i * heap->page_size;
296 if (page->bin_index < heap->bin_count) {
297 // page is used by a small allocation bin
298 uint32 elementCount = page->empty_index;
299 size_t elementSize
300 = heap->bins[page->bin_index].element_size;
301 for (uint32 j = 0; j < elementCount;
302 j++, base += elementSize) {
303 // walk the free list to see if this element is in use
304 bool elementInUse = true;
305 for (addr_t *temp = page->free_list; temp != NULL;
306 temp = (addr_t *)*temp) {
307 if ((addr_t)temp == base) {
308 elementInUse = false;
309 break;
310 }
311 }
312
313 if (!elementInUse)
314 continue;
315
316 info = (heap_leak_check_info *)(base + elementSize
317 - sizeof(heap_leak_check_info));
318
319 if (thread == -1 || info->thread == thread) {
320 // interesting...
321 if (!statsOnly) {
322 printf("thread: % 6" B_PRId32 "; address: "
323 "0x%08lx; size: %" B_PRIuSIZE " bytes\n", info->thread,
324 base, info->size);
325 }
326
327 totalSize += info->size;
328 totalCount++;
329 }
330 }
331 } else {
332 // page is used by a big allocation, find the page count
333 uint32 pageCount = 1;
334 while (i + pageCount < area->page_count
335 && area->page_table[i + pageCount].in_use
336 && area->page_table[i + pageCount].bin_index
337 == heap->bin_count
338 && area->page_table[i + pageCount].allocation_id
339 == page->allocation_id)
340 pageCount++;
341
342 info = (heap_leak_check_info *)(base + pageCount
343 * heap->page_size - sizeof(heap_leak_check_info));
344
345 if (thread == -1 || info->thread == thread) {
346 // interesting...
347 if (!statsOnly) {
348 printf("thread: % 6" B_PRId32 "; address: 0x%08lx;"
349 " size: %" B_PRIuSIZE " bytes\n", info->thread,
350 base, info->size);
351 }
352
353 totalSize += info->size;
354 totalCount++;
355 }
356
357 // skip the allocated pages
358 i += pageCount - 1;
359 }
360 }
361
362 area = area->all_next;
363 }
364 }
365
366 printf("total allocations: %" B_PRIu32 "; total bytes: %" B_PRIuSIZE "\n", totalCount,
367 totalSize);
368 }
369
370
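// Checks the wall word of every live allocation in all heaps. Holding the
// area read lock plus all bin and page locks, every in-use element and page
// run is visited; the addr_t stored right behind the user data must still
// equal its own address, otherwise someone wrote past the allocation.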
371 static void
372 heap_validate_walls()
373 {
374 for (uint32 classIndex = 0; classIndex < HEAP_CLASS_COUNT; classIndex++) {
375 heap_allocator *heap = sHeaps[classIndex];
376 ReadLocker areaReadLocker(heap->area_lock);
377 for (uint32 i = 0; i < heap->bin_count; i++)
378 mutex_lock(&heap->bins[i].lock);
379 MutexLocker pageLocker(heap->page_lock);
380
381 // go through all the pages in all the areas
382 heap_area *area = heap->all_areas;
383 while (area) {
384 heap_leak_check_info *info = NULL;
385 for (uint32 i = 0; i < area->page_count; i++) {
386 heap_page *page = &area->page_table[i];
387 if (!page->in_use)
388 continue;
389
390 addr_t base = area->base + i * heap->page_size;
391 if (page->bin_index < heap->bin_count) {
392 // page is used by a small allocation bin
393 uint32 elementCount = page->empty_index;
394 size_t elementSize
395 = heap->bins[page->bin_index].element_size;
396 for (uint32 j = 0; j < elementCount;
397 j++, base += elementSize) {
398 // walk the free list to see if this element is in use
399 bool elementInUse = true;
400 for (addr_t *temp = page->free_list; temp != NULL;
401 temp = (addr_t *)*temp) {
402 if ((addr_t)temp == base) {
403 elementInUse = false;
404 break;
405 }
406 }
407
408 if (!elementInUse)
409 continue;
410
411 info = (heap_leak_check_info *)(base + elementSize
412 - sizeof(heap_leak_check_info));
413 if (info->size > elementSize - sizeof(addr_t)
414 - sizeof(heap_leak_check_info)) {
415 panic("leak check info has invalid size %" B_PRIuSIZE " for"
416 " element size %" B_PRIuSIZE "\n", info->size, elementSize);
417 continue;
418 }
419
420 addr_t wallValue;
421 addr_t wallAddress = base + info->size;
422 memcpy(&wallValue, (void *)wallAddress, sizeof(addr_t));
423 if (wallValue != wallAddress) {
424 panic("someone wrote beyond small allocation at"
425 " 0x%08lx; size: %" B_PRIuSIZE " bytes;"
426 " allocated by %" B_PRId32 ";"
427 " value: 0x%08lx\n", base, info->size,
428 info->thread, wallValue);
429 }
430 }
431 } else {
432 // page is used by a big allocation, find the page count
433 uint32 pageCount = 1;
434 while (i + pageCount < area->page_count
435 && area->page_table[i + pageCount].in_use
436 && area->page_table[i + pageCount].bin_index
437 == heap->bin_count
438 && area->page_table[i + pageCount].allocation_id
439 == page->allocation_id)
440 pageCount++;
441
442 info = (heap_leak_check_info *)(base + pageCount
443 * heap->page_size - sizeof(heap_leak_check_info));
444
445 if (info->size > pageCount * heap->page_size
446 - sizeof(addr_t) - sizeof(heap_leak_check_info)) {
447 panic("leak check info has invalid size %" B_PRIuSIZE " for"
448 " page count %" B_PRIu32 " (%" B_PRIu32 " bytes)\n", info->size,
449 pageCount, pageCount * heap->page_size);
450 continue;
451 }
452
453 addr_t wallValue;
454 addr_t wallAddress = base + info->size;
455 memcpy(&wallValue, (void *)wallAddress, sizeof(addr_t));
456 if (wallValue != wallAddress) {
457 panic("someone wrote beyond big allocation at 0x%08lx;"
458 " size: %" B_PRIuSIZE " bytes; allocated by %" B_PRId32 ";"
459 " value: 0x%08lx\n", base, info->size,
460 info->thread, wallValue);
461 }
462
463 // skip the allocated pages
464 i += pageCount - 1;
465 }
466 }
467
468 area = area->all_next;
469 }
470
471 pageLocker.Unlock();
472 for (uint32 i = 0; i < heap->bin_count; i++)
473 mutex_unlock(&heap->bins[i].lock);
474 }
475 }
476
477
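// Consistency check of a single heap: validates the per-area free page
// lists, the in-use counts in the page tables, the ordering of both area
// lists and, for every bin, the page list ordering and the per-page free
// lists.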
478 static void
479 heap_validate_heap(heap_allocator *heap)
480 {
481 ReadLocker areaReadLocker(heap->area_lock);
482 for (uint32 i = 0; i < heap->bin_count; i++)
483 mutex_lock(&heap->bins[i].lock);
484 MutexLocker pageLocker(heap->page_lock);
485
486 uint32 totalPageCount = 0;
487 uint32 totalFreePageCount = 0;
488 heap_area *area = heap->all_areas;
489 while (area != NULL) {
490 // validate the free pages list
491 uint32 freePageCount = 0;
492 heap_page *lastPage = NULL;
493 heap_page *page = area->free_pages;
494 while (page) {
495 if ((addr_t)page < (addr_t)&area->page_table[0]
496 || (addr_t)page >= (addr_t)&area->page_table[area->page_count])
497 panic("free page is not part of the page table\n");
498
499 if (page->index >= area->page_count)
500 panic("free page has invalid index\n");
501
502 if ((addr_t)&area->page_table[page->index] != (addr_t)page)
503 panic("free page index does not lead to target page\n");
504
505 if (page->prev != lastPage)
506 panic("free page entry has invalid prev link\n");
507
508 if (page->in_use)
509 panic("free page marked as in use\n");
510
511 lastPage = page;
512 page = page->next;
513 freePageCount++;
514 }
515
516 totalPageCount += freePageCount;
517 totalFreePageCount += freePageCount;
518 if (area->free_page_count != freePageCount) {
519 panic("free page count %" B_PRIu32 " doesn't match free page list %" B_PRIu32 "\n",
520 area->free_page_count, freePageCount);
521 }
522
523 // validate the page table
524 uint32 usedPageCount = 0;
525 for (uint32 i = 0; i < area->page_count; i++) {
526 if (area->page_table[i].in_use)
527 usedPageCount++;
528 }
529
530 totalPageCount += usedPageCount;
531 if (freePageCount + usedPageCount != area->page_count) {
532 panic("free pages and used pages do not add up "
533 "(%" B_PRIu32 " + %" B_PRIu32 " != %" B_PRIu32 ")\n",
534 freePageCount, usedPageCount, area->page_count);
535 }
536
537 area = area->all_next;
538 }
539
540 // validate the areas
541 area = heap->areas;
542 heap_area *lastArea = NULL;
543 uint32 lastFreeCount = 0;
544 while (area != NULL) {
545 if (area->free_page_count < lastFreeCount)
546 panic("size ordering of area list broken\n");
547
548 if (area->prev != lastArea)
549 panic("area list entry has invalid prev link\n");
550
551 lastArea = area;
552 lastFreeCount = area->free_page_count;
553 area = area->next;
554 }
555
556 lastArea = NULL;
557 area = heap->all_areas;
558 while (area != NULL) {
559 if (lastArea != NULL && lastArea->base < area->base)
560 panic("base ordering of all_areas list broken\n");
561
562 lastArea = area;
563 area = area->all_next;
564 }
565
566 // validate the bins
567 for (uint32 i = 0; i < heap->bin_count; i++) {
568 heap_bin *bin = &heap->bins[i];
569 heap_page *lastPage = NULL;
570 heap_page *page = bin->page_list;
571 lastFreeCount = 0;
572 while (page) {
573 area = heap->all_areas;
574 while (area) {
575 if (area == page->area)
576 break;
577 area = area->all_next;
578 }
579
580 if (area == NULL) {
581 panic("page area not present in area list\n");
582 page = page->next;
583 continue;
584 }
585
586 if ((addr_t)page < (addr_t)&area->page_table[0]
587 || (addr_t)page >= (addr_t)&area->page_table[area->page_count])
588 panic("used page is not part of the page table\n");
589
590 if (page->index >= area->page_count)
591 panic("used page has invalid index %" B_PRIu16 "\n", page->index);
592
593 if ((addr_t)&area->page_table[page->index] != (addr_t)page) {
594 panic("used page index does not lead to target page"
595 " (%p vs. %p)\n", &area->page_table[page->index],
596 page);
597 }
598
599 if (page->prev != lastPage) {
600 panic("used page entry has invalid prev link (%p vs %p bin "
601 "%" B_PRIu32 ")\n", page->prev, lastPage, i);
602 }
603
604 if (!page->in_use)
605 panic("used page %p marked as not in use\n", page);
606
607 if (page->bin_index != i) {
608 panic("used page with bin index %" B_PRIu16 " in page list of bin %" B_PRIu32 "\n",
609 page->bin_index, i);
610 }
611
612 if (page->free_count < lastFreeCount)
613 panic("ordering of bin page list broken\n");
614
615 // validate the free list
616 uint32 freeSlotsCount = 0;
617 addr_t *element = page->free_list;
618 addr_t pageBase = area->base + page->index * heap->page_size;
619 while (element) {
620 if ((addr_t)element < pageBase
621 || (addr_t)element >= pageBase + heap->page_size)
622 panic("free list entry out of page range %p\n", element);
623
624 if (((addr_t)element - pageBase) % bin->element_size != 0)
625 panic("free list entry not on a element boundary\n");
626
627 element = (addr_t *)*element;
628 freeSlotsCount++;
629 }
630
631 uint32 slotCount = bin->max_free_count;
632 if (page->empty_index > slotCount) {
633 panic("empty index beyond slot count (%" B_PRIu16 " with %" B_PRIu32 " slots)\n",
634 page->empty_index, slotCount);
635 }
636
637 freeSlotsCount += (slotCount - page->empty_index);
638 if (freeSlotsCount > slotCount)
639 panic("more free slots than fit into the page\n");
640
641 lastPage = page;
642 lastFreeCount = page->free_count;
643 page = page->next;
644 }
645 }
646
647 pageLocker.Unlock();
648 for (uint32 i = 0; i < heap->bin_count; i++)
649 mutex_unlock(&heap->bins[i].lock);
650 }
651
652
653 // #pragma mark - Heap functions
654
655
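// Carves a new area into the heap: the heap_area header and the page table
// are placed at the start, the remainder (rounded to B_PAGE_SIZE) becomes
// the usable pages, which are all linked into the area's free list. The area
// is then appended to the "areas" list (kept in ascending free page count
// order) and inserted into "all_areas" (kept in descending base order).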
656 static void
657 heap_add_area(heap_allocator *heap, area_id areaID, addr_t base, size_t size)
658 {
659 heap_area *area = (heap_area *)base;
660 area->area = areaID;
661
662 base += sizeof(heap_area);
663 size -= sizeof(heap_area);
664
665 uint32 pageCount = size / heap->page_size;
666 size_t pageTableSize = pageCount * sizeof(heap_page);
667 area->page_table = (heap_page *)base;
668 base += pageTableSize;
669 size -= pageTableSize;
670
671 // the rest is now actually usable memory (rounded to the next page)
672 area->base = ROUNDUP(base, B_PAGE_SIZE);
673 area->size = size & ~(B_PAGE_SIZE - 1);
674
675 // now we know the real page count
676 pageCount = area->size / heap->page_size;
677 area->page_count = pageCount;
678
679 // zero out the page table and fill in page indexes
680 memset((void *)area->page_table, 0, pageTableSize);
681 for (uint32 i = 0; i < pageCount; i++) {
682 area->page_table[i].area = area;
683 area->page_table[i].index = i;
684 }
685
686 // link all pages into the free pages list
687 for (uint32 i = 1; i < pageCount; i++) {
688 area->page_table[i - 1].next = &area->page_table[i];
689 area->page_table[i].prev = &area->page_table[i - 1];
690 }
691 area->free_pages = &area->page_table[0];
692 area->free_page_count = pageCount;
693 area->page_table[0].prev = NULL;
694 area->next = NULL;
695
696 WriteLocker areaWriteLocker(heap->area_lock);
697 MutexLocker pageLocker(heap->page_lock);
698 if (heap->areas == NULL) {
699 // it's the only (empty) area in that heap
700 area->prev = NULL;
701 heap->areas = area;
702 } else {
703 // link in this area as the last one as it is completely empty
704 heap_area *lastArea = heap->areas;
705 while (lastArea->next != NULL)
706 lastArea = lastArea->next;
707
708 lastArea->next = area;
709 area->prev = lastArea;
710 }
711
712 // insert this area in the all_areas list so it stays ordered by base
713 if (heap->all_areas == NULL || heap->all_areas->base < area->base) {
714 area->all_next = heap->all_areas;
715 heap->all_areas = area;
716 } else {
717 heap_area *insert = heap->all_areas;
718 while (insert->all_next && insert->all_next->base > area->base)
719 insert = insert->all_next;
720
721 area->all_next = insert->all_next;
722 insert->all_next = area;
723 }
724
725 heap->total_pages += area->page_count;
726 heap->total_free_pages += area->free_page_count;
727
728 if (areaID >= 0) {
729 // this area can be deleted later and is still empty - the empty count
730 // will be decremented as soon as this area is used for the first time
731 heap->empty_areas++;
732 }
733
734 pageLocker.Unlock();
735 areaWriteLocker.Unlock();
736 }
737
738
739 static status_t
740 heap_remove_area(heap_allocator *heap, heap_area *area)
741 {
742 if (area->free_page_count != area->page_count) {
743 panic("tried removing heap area that has still pages in use");
744 return B_ERROR;
745 }
746
747 if (area->prev == NULL && area->next == NULL) {
748 panic("tried removing the last non-full heap area");
749 return B_ERROR;
750 }
751
752 if (heap->areas == area)
753 heap->areas = area->next;
754 if (area->prev != NULL)
755 area->prev->next = area->next;
756 if (area->next != NULL)
757 area->next->prev = area->prev;
758
759 if (heap->all_areas == area)
760 heap->all_areas = area->all_next;
761 else {
762 heap_area *previous = heap->all_areas;
763 while (previous) {
764 if (previous->all_next == area) {
765 previous->all_next = area->all_next;
766 break;
767 }
768
769 previous = previous->all_next;
770 }
771
772 if (previous == NULL)
773 panic("removing heap area that is not in all list");
774 }
775
776 heap->total_pages -= area->page_count;
777 heap->total_free_pages -= area->free_page_count;
778 return B_OK;
779 }
780
781
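// Sets up a heap_allocator in place at "base": the allocator structure and
// its bin array are carved from the start of the memory. Bin element sizes
// are derived from page_size / count, aligned down to bin_alignment,
// skipping duplicate sizes and bins that would waste more than
// max_waste_per_page; the remaining memory becomes the first heap area.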
782 static heap_allocator *
783 heap_create_allocator(const char *name, addr_t base, size_t size,
784 const heap_class *heapClass)
785 {
786 heap_allocator *heap = (heap_allocator *)base;
787 base += sizeof(heap_allocator);
788 size -= sizeof(heap_allocator);
789
790 heap->name = name;
791 heap->page_size = heapClass->page_size;
792 heap->total_pages = heap->total_free_pages = heap->empty_areas = 0;
793 heap->areas = heap->all_areas = NULL;
794 heap->bins = (heap_bin *)base;
795
796 heap->bin_count = 0;
797 size_t binSize = 0, lastSize = 0;
798 uint32 count = heap->page_size / heapClass->min_bin_size;
799 for (; count >= heapClass->min_count_per_page; count--, lastSize = binSize) {
800 if (heap->bin_count >= 32)
801 panic("heap configuration invalid - max bin count reached\n");
802
803 binSize = (heap->page_size / count) & ~(heapClass->bin_alignment - 1);
804 if (binSize == lastSize)
805 continue;
806 if (heap->page_size - count * binSize > heapClass->max_waste_per_page)
807 continue;
808
809 heap_bin *bin = &heap->bins[heap->bin_count];
810 mutex_init(&bin->lock, "heap bin lock");
811 bin->element_size = binSize;
812 bin->max_free_count = heap->page_size / binSize;
813 bin->page_list = NULL;
814 heap->bin_count++;
815 }
816
817 base += heap->bin_count * sizeof(heap_bin);
818 size -= heap->bin_count * sizeof(heap_bin);
819
820 rw_lock_init(&heap->area_lock, "heap area lock");
821 mutex_init(&heap->page_lock, "heap page lock");
822
823 heap_add_area(heap, -1, base, size);
824 return heap;
825 }
826
827
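// heap_free_pages_added()/heap_free_pages_removed() keep the per-area free
// page accounting, the ordering of the heap's area list (ascending free page
// count) and the empty area counter consistent whenever pages are returned
// to or taken from an area.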
828 static inline void
829 heap_free_pages_added(heap_allocator *heap, heap_area *area, uint32 pageCount)
830 {
831 area->free_page_count += pageCount;
832 heap->total_free_pages += pageCount;
833
834 if (area->free_page_count == pageCount) {
835 // we need to add ourselves to the area list of the heap
836 area->prev = NULL;
837 area->next = heap->areas;
838 if (area->next)
839 area->next->prev = area;
840 heap->areas = area;
841 } else {
842 // we might need to move back in the area list
843 if (area->next && area->next->free_page_count < area->free_page_count) {
844 // move ourselves so the list stays ordered
845 heap_area *insert = area->next;
846 while (insert->next
847 && insert->next->free_page_count < area->free_page_count)
848 insert = insert->next;
849
850 if (area->prev)
851 area->prev->next = area->next;
852 if (area->next)
853 area->next->prev = area->prev;
854 if (heap->areas == area)
855 heap->areas = area->next;
856
857 area->prev = insert;
858 area->next = insert->next;
859 if (area->next)
860 area->next->prev = area;
861 insert->next = area;
862 }
863 }
864
865 if (area->free_page_count == area->page_count && area->area >= 0)
866 heap->empty_areas++;
867 }
868
869
870 static inline void
871 heap_free_pages_removed(heap_allocator *heap, heap_area *area, uint32 pageCount)
872 {
873 if (area->free_page_count == area->page_count && area->area >= 0) {
874 // this area was completely empty
875 heap->empty_areas--;
876 }
877
878 area->free_page_count -= pageCount;
879 heap->total_free_pages -= pageCount;
880
881 if (area->free_page_count == 0) {
882 // the area is now full so we remove it from the area list
883 if (area->prev)
884 area->prev->next = area->next;
885 if (area->next)
886 area->next->prev = area->prev;
887 if (heap->areas == area)
888 heap->areas = area->next;
889 area->next = area->prev = NULL;
890 } else {
891 // we might need to move forward in the area list
892 if (area->prev && area->prev->free_page_count > area->free_page_count) {
893 // move ourselves so the list stays ordered
894 heap_area *insert = area->prev;
895 while (insert->prev
896 && insert->prev->free_page_count > area->free_page_count)
897 insert = insert->prev;
898
899 if (area->prev)
900 area->prev->next = area->next;
901 if (area->next)
902 area->next->prev = area->prev;
903
904 area->prev = insert->prev;
905 area->next = insert;
906 if (area->prev)
907 area->prev->next = area;
908 if (heap->areas == insert)
909 heap->areas = area;
910 insert->prev = area;
911 }
912 }
913 }
914
915
916 static inline void
917 heap_link_page(heap_page *page, heap_page **list)
918 {
919 page->prev = NULL;
920 page->next = *list;
921 if (page->next)
922 page->next->prev = page;
923 *list = page;
924 }
925
926
927 static inline void
928 heap_unlink_page(heap_page *page, heap_page **list)
929 {
930 if (page->prev)
931 page->prev->next = page->next;
932 if (page->next)
933 page->next->prev = page->prev;
934 if (list && *list == page) {
935 *list = page->next;
936 if (page->next)
937 page->next->prev = NULL;
938 }
939 }
940
941
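// First-fit search for "pageCount" contiguous free pages. Areas are visited
// in list order (fullest first); if the requested alignment exceeds the
// heap's page size, candidate start indexes are stepped in units of
// alignment / page_size so that the resulting address is properly aligned.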
942 static heap_page *
943 heap_allocate_contiguous_pages(heap_allocator *heap, uint32 pageCount,
944 size_t alignment)
945 {
946 heap_area *area = heap->areas;
947 while (area) {
948 if (area->free_page_count < pageCount) {
949 area = area->next;
950 continue;
951 }
952
953 uint32 step = 1;
954 uint32 firstValid = 0;
955 const uint32 lastValid = area->page_count - pageCount + 1;
956
957 if (alignment > heap->page_size) {
958 firstValid = (ROUNDUP(area->base, alignment) - area->base)
959 / heap->page_size;
960 step = alignment / heap->page_size;
961 }
962
963 int32 first = -1;
964 for (uint32 i = firstValid; i < lastValid; i += step) {
965 if (area->page_table[i].in_use)
966 continue;
967
968 first = i;
969
970 for (uint32 j = 1; j < pageCount; j++) {
971 if (area->page_table[i + j].in_use) {
972 first = -1;
973 i += j / step * step;
974 break;
975 }
976 }
977
978 if (first >= 0)
979 break;
980 }
981
982 if (first < 0) {
983 area = area->next;
984 continue;
985 }
986
987 for (uint32 i = first; i < first + pageCount; i++) {
988 heap_page *page = &area->page_table[i];
989 page->in_use = 1;
990 page->bin_index = heap->bin_count;
991
992 heap_unlink_page(page, &area->free_pages);
993
994 page->next = page->prev = NULL;
995 page->free_list = NULL;
996 page->allocation_id = (uint16)first;
997 }
998
999 heap_free_pages_removed(heap, area, pageCount);
1000 return &area->page_table[first];
1001 }
1002
1003 return NULL;
1004 }
1005
1006
1007 static void
1008 heap_add_leak_check_info(addr_t address, size_t allocated, size_t size)
1009 {
1010 size -= sizeof(addr_t) + sizeof(heap_leak_check_info);
1011 heap_leak_check_info *info = (heap_leak_check_info *)(address + allocated
1012 - sizeof(heap_leak_check_info));
1013 info->thread = find_thread(NULL);
1014 info->size = size;
1015
1016 addr_t wallAddress = address + size;
1017 memcpy((void *)wallAddress, &wallAddress, sizeof(addr_t));
1018 }
1019
1020
1021 static void *
1022 heap_raw_alloc(heap_allocator *heap, size_t size, size_t alignment)
1023 {
1024 INFO(("heap %p: allocate %" B_PRIuSIZE " bytes from raw pages with"
1025 " alignment %" B_PRIuSIZE "\n", heap, size, alignment));
1026
1027 uint32 pageCount = (size + heap->page_size - 1) / heap->page_size;
1028
1029 MutexLocker pageLocker(heap->page_lock);
1030 heap_page *firstPage = heap_allocate_contiguous_pages(heap, pageCount,
1031 alignment);
1032 if (firstPage == NULL) {
1033 INFO(("heap %p: found no contiguous pages to allocate %" B_PRIuSIZE " bytes\n",
1034 heap, size));
1035 return NULL;
1036 }
1037
1038 addr_t address = firstPage->area->base + firstPage->index * heap->page_size;
1039 heap_add_leak_check_info(address, pageCount * heap->page_size, size);
1040 return (void *)address;
1041 }
1042
1043
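// Allocates one element from the given bin. If the bin has no partially used
// page, a fresh page is taken from the first area's free page list. Within a
// page, previously freed slots on page->free_list are reused first;
// otherwise the next never-used slot at empty_index is handed out.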
1044 static void *
1045 heap_allocate_from_bin(heap_allocator *heap, uint32 binIndex, size_t size)
1046 {
1047 heap_bin *bin = &heap->bins[binIndex];
1048 INFO(("heap %p: allocate %" B_PRIuSIZE " bytes from bin %" B_PRIu32
1049 " with element_size %" B_PRIu32 "\n",
1050 heap, size, binIndex, bin->element_size));
1051
1052 MutexLocker binLocker(bin->lock);
1053 heap_page *page = bin->page_list;
1054 if (page == NULL) {
1055 MutexLocker pageLocker(heap->page_lock);
1056 heap_area *area = heap->areas;
1057 if (area == NULL) {
1058 INFO(("heap %p: no free pages to allocate %" B_PRIuSIZE " bytes\n", heap,
1059 size));
1060 return NULL;
1061 }
1062
1063 // by design there are only areas in the list that still have
1064 // free pages available
1065 page = area->free_pages;
1066 area->free_pages = page->next;
1067 if (page->next)
1068 page->next->prev = NULL;
1069
1070 heap_free_pages_removed(heap, area, 1);
1071
1072 if (page->in_use)
1073 panic("got an in use page from the free pages list\n");
1074 page->in_use = 1;
1075
1076 pageLocker.Unlock();
1077
1078 page->bin_index = binIndex;
1079 page->free_count = bin->max_free_count;
1080 page->empty_index = 0;
1081 page->free_list = NULL;
1082 page->next = page->prev = NULL;
1083 bin->page_list = page;
1084 }
1085
1086 // we have a page where we have a free slot
1087 void *address = NULL;
1088 if (page->free_list) {
1089 // there's a previously freed entry we can use
1090 address = page->free_list;
1091 page->free_list = (addr_t *)*page->free_list;
1092 } else {
1093 // the page hasn't been fully allocated so use the next empty_index
1094 address = (void *)(page->area->base + page->index * heap->page_size
1095 + page->empty_index * bin->element_size);
1096 page->empty_index++;
1097 }
1098
1099 page->free_count--;
1100 if (page->free_count == 0) {
1101 // the page is now full so we remove it from the page_list
1102 bin->page_list = page->next;
1103 if (page->next)
1104 page->next->prev = NULL;
1105 page->next = page->prev = NULL;
1106 }
1107
1108 heap_add_leak_check_info((addr_t)address, bin->element_size, size);
1109 return address;
1110 }
1111
1112
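// Central allocation path of a heap: the request is padded with room for the
// wall word and the leak check info, then served from a suitable bin (for
// alignments below B_PAGE_SIZE) or from raw contiguous pages. Fresh memory
// is filled with 0xcc so that use of uninitialized data is recognizable.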
1113 static void *
1114 heap_memalign(heap_allocator *heap, size_t alignment, size_t size)
1115 {
1116 INFO(("memalign(alignment = %" B_PRIuSIZE ", size = %" B_PRIuSIZE ")\n",
1117 alignment, size));
1118
1119 if (!is_valid_alignment(alignment)) {
1120 panic("memalign() with an alignment which is not a power of 2 (%" B_PRIuSIZE ")\n",
1121 alignment);
1122 }
1123
1124 size += sizeof(addr_t) + sizeof(heap_leak_check_info);
1125
1126 void *address = NULL;
1127 if (alignment < B_PAGE_SIZE) {
1128 if (alignment != 0) {
1129 // TODO: The alignment is done by ensuring that the element size
1130 // of the target bin is aligned with the requested alignment. This
1131 // has the problem that it wastes space because a better (smaller)
1132 // bin could possibly be selected. We should pick the best bin and
1133 // check if there is an aligned block in the free list or if a new
1134 // (page aligned) page has to be allocated anyway.
1135 size = ROUNDUP(size, alignment);
1136 for (uint32 i = 0; i < heap->bin_count; i++) {
1137 if (size <= heap->bins[i].element_size
1138 && is_valid_alignment(heap->bins[i].element_size)) {
1139 address = heap_allocate_from_bin(heap, i, size);
1140 break;
1141 }
1142 }
1143 } else {
1144 for (uint32 i = 0; i < heap->bin_count; i++) {
1145 if (size <= heap->bins[i].element_size) {
1146 address = heap_allocate_from_bin(heap, i, size);
1147 break;
1148 }
1149 }
1150 }
1151 }
1152
1153 if (address == NULL)
1154 address = heap_raw_alloc(heap, size, alignment);
1155
1156 size -= sizeof(addr_t) + sizeof(heap_leak_check_info);
1157
1158 INFO(("memalign(): asked to allocate %" B_PRIuSIZE " bytes, returning pointer %p\n",
1159 size, address));
1160
1161 if (address == NULL)
1162 return address;
1163
1164 memset(address, 0xcc, size);
1165
1166 // make sure 0xdeadbeef is cleared if we do not overwrite the memory
1167 // and the user does not clear it
1168 if (((uint32 *)address)[1] == 0xdeadbeef)
1169 ((uint32 *)address)[1] = 0xcccccccc;
1170
1171 return address;
1172 }
1173
1174
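// Frees an allocation belonging to this heap: verifies the wall word behind
// the allocation, detects double frees of small allocations via the
// 0xdeadbeef pattern and the page free list, fills the freed memory with
// 0xdeadbeef and returns the slot or pages. If more than one deletable area
// is completely empty afterwards, the surplus areas are deleted.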
1175 static status_t
1176 heap_free(heap_allocator *heap, void *address)
1177 {
1178 if (address == NULL)
1179 return B_OK;
1180
1181 ReadLocker areaReadLocker(heap->area_lock);
1182 heap_area *area = heap->all_areas;
1183 while (area) {
1184 // since the all_areas list is ordered by base with the biggest
1185 // base at the top, we need only find the first area with a base
1186 // smaller than our address to become our only candidate for freeing
1187 if (area->base <= (addr_t)address) {
1188 if ((addr_t)address >= area->base + area->size) {
1189 // none of the other areas can contain the address as the list
1190 // is ordered
1191 return B_ENTRY_NOT_FOUND;
1192 }
1193
1194 // this area contains the allocation, we're done searching
1195 break;
1196 }
1197
1198 area = area->all_next;
1199 }
1200
1201 if (area == NULL) {
1202 // this address does not belong to us
1203 return B_ENTRY_NOT_FOUND;
1204 }
1205
1206 INFO(("free(): asked to free pointer %p\n", address));
1207
1208 heap_page *page = &area->page_table[((addr_t)address - area->base)
1209 / heap->page_size];
1210
1211 INFO(("free(): page %p: bin_index %d, free_count %d\n", page,
1212 page->bin_index, page->free_count));
1213
1214 if (page->bin_index > heap->bin_count) {
1215 panic("free(): page %p: invalid bin_index %d\n", page,
1216 page->bin_index);
1217 return B_ERROR;
1218 }
1219
1220 addr_t pageBase = area->base + page->index * heap->page_size;
1221 if (page->bin_index < heap->bin_count) {
1222 // small allocation
1223 heap_bin *bin = &heap->bins[page->bin_index];
1224 if (((addr_t)address - pageBase) % bin->element_size != 0) {
1225 panic("free(): address %p does not fall on allocation boundary"
1226 " for page base %p and element size %" B_PRIu32 "\n", address,
1227 (void *)pageBase, bin->element_size);
1228 return B_ERROR;
1229 }
1230
1231 MutexLocker binLocker(bin->lock);
1232
1233 if (((uint32 *)address)[1] == 0xdeadbeef) {
1234 // This block looks like it was freed already, walk the free list
1235 // on this page to make sure this address doesn't exist.
1236 for (addr_t *temp = page->free_list; temp != NULL;
1237 temp = (addr_t *)*temp) {
1238 if (temp == address) {
1239 panic("free(): address %p already exists in page free"
1240 " list (double free)\n", address);
1241 return B_ERROR;
1242 }
1243 }
1244 }
1245
1246 heap_leak_check_info *info = (heap_leak_check_info *)((addr_t)address
1247 + bin->element_size - sizeof(heap_leak_check_info));
1248 if (info->size > bin->element_size - sizeof(addr_t)
1249 - sizeof(heap_leak_check_info)) {
1250 panic("leak check info has invalid size %" B_PRIuSIZE
1251 " for element size %" B_PRIu32 ","
1252 " probably memory has been overwritten past allocation size\n",
1253 info->size, bin->element_size);
1254 }
1255
1256 addr_t wallValue;
1257 addr_t wallAddress = (addr_t)address + info->size;
1258 memcpy(&wallValue, (void *)wallAddress, sizeof(addr_t));
1259 if (wallValue != wallAddress) {
1260 panic("someone wrote beyond small allocation at %p;"
1261 " size: %" B_PRIuSIZE " bytes; allocated by %" B_PRId32 ";"
1262 " value: 0x%08lx\n",
1263 address, info->size, info->thread, wallValue);
1264 }
1265
1266 // the first 4 bytes are overwritten with the next free list pointer
1267 // later
1268 uint32 *dead = (uint32 *)address;
1269 for (uint32 i = 0; i < bin->element_size / sizeof(uint32); i++)
1270 dead[i] = 0xdeadbeef;
1271
1272 if (sReuseMemory) {
1273 // add the address to the page free list
1274 *(addr_t *)address = (addr_t)page->free_list;
1275 page->free_list = (addr_t *)address;
1276 page->free_count++;
1277
1278 if (page->free_count == bin->max_free_count) {
1279 // we are now empty, remove the page from the bin list
1280 MutexLocker pageLocker(heap->page_lock);
1281 heap_unlink_page(page, &bin->page_list);
1282 page->in_use = 0;
1283 heap_link_page(page, &area->free_pages);
1284 heap_free_pages_added(heap, area, 1);
1285 } else if (page->free_count == 1) {
1286 // we need to add ourselves to the page list of the bin
1287 heap_link_page(page, &bin->page_list);
1288 } else {
1289 // we might need to move back in the free pages list
1290 if (page->next && page->next->free_count < page->free_count) {
1291 // move ourselves so the list stays ordered
1292 heap_page *insert = page->next;
1293 while (insert->next
1294 && insert->next->free_count < page->free_count)
1295 insert = insert->next;
1296
1297 heap_unlink_page(page, &bin->page_list);
1298
1299 page->prev = insert;
1300 page->next = insert->next;
1301 if (page->next)
1302 page->next->prev = page;
1303 insert->next = page;
1304 }
1305 }
1306 }
1307 } else {
1308 if ((addr_t)address != pageBase) {
1309 panic("free(): large allocation address %p not on page base %p\n",
1310 address, (void *)pageBase);
1311 return B_ERROR;
1312 }
1313
1314 // large allocation, just return the pages to the page free list
1315 uint32 allocationID = page->allocation_id;
1316 uint32 maxPages = area->page_count - page->index;
1317 uint32 pageCount = 0;
1318
1319 MutexLocker pageLocker(heap->page_lock);
1320 for (uint32 i = 0; i < maxPages; i++) {
1321 // loop until we find the end of this allocation
1322 if (!page[i].in_use || page[i].bin_index != heap->bin_count
1323 || page[i].allocation_id != allocationID)
1324 break;
1325
1326 // this page still belongs to the same allocation
1327 if (sReuseMemory) {
1328 page[i].in_use = 0;
1329 page[i].allocation_id = 0;
1330
1331 // return it to the free list
1332 heap_link_page(&page[i], &area->free_pages);
1333 }
1334
1335 pageCount++;
1336 }
1337
1338 size_t allocationSize = pageCount * heap->page_size;
1339 heap_leak_check_info *info = (heap_leak_check_info *)((addr_t)address
1340 + allocationSize - sizeof(heap_leak_check_info));
1341 if (info->size > allocationSize - sizeof(addr_t)
1342 - sizeof(heap_leak_check_info)) {
1343 panic("leak check info has invalid size %" B_PRIuSIZE
1344 " for allocation of %" B_PRIuSIZE ","
1345 " probably memory has been overwritten past allocation size\n",
1346 info->size, allocationSize);
1347 }
1348
1349 addr_t wallValue;
1350 addr_t wallAddress = (addr_t)address + info->size;
1351 memcpy(&wallValue, (void *)wallAddress, sizeof(addr_t));
1352 if (wallValue != wallAddress) {
1353 panic("someone wrote beyond big allocation at %p;"
1354 " size: %" B_PRIuSIZE " bytes; allocated by %" B_PRId32 "; value: 0x%08lx\n",
1355 address, info->size, info->thread, wallValue);
1356 }
1357
1358 uint32 *dead = (uint32 *)address;
1359 for (uint32 i = 0; i < allocationSize / sizeof(uint32); i++)
1360 dead[i] = 0xdeadbeef;
1361
1362 if (sReuseMemory)
1363 heap_free_pages_added(heap, area, pageCount);
1364 }
1365
1366 areaReadLocker.Unlock();
1367
1368 if (heap->empty_areas > 1) {
1369 WriteLocker areaWriteLocker(heap->area_lock);
1370 MutexLocker pageLocker(heap->page_lock);
1371
1372 area = heap->areas;
1373 while (area != NULL && heap->empty_areas > 1) {
1374 heap_area *next = area->next;
1375 if (area->area >= 0
1376 && area->free_page_count == area->page_count
1377 && heap_remove_area(heap, area) == B_OK) {
1378 delete_area(area->area);
1379 heap->empty_areas--;
1380 }
1381
1382 area = next;
1383 }
1384 }
1385
1386 return B_OK;
1387 }
1388
1389
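// Resizes an allocation in place when the new size still fits the existing
// bin element or page run (only the leak check info and the wall word are
// updated); otherwise a new block is allocated, the data copied over and the
// old allocation freed.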
1390 static status_t
1391 heap_realloc(heap_allocator *heap, void *address, void **newAddress,
1392 size_t newSize)
1393 {
1394 ReadLocker areaReadLocker(heap->area_lock);
1395 heap_area *area = heap->all_areas;
1396 while (area) {
1397 // since the all_areas list is ordered by base with the biggest
1398 // base at the top, we need only find the first area with a base
1399 // smaller than our address to become our only candidate for
1400 // reallocating
1401 if (area->base <= (addr_t)address) {
1402 if ((addr_t)address >= area->base + area->size) {
1403 // none of the other areas can contain the address as the list
1404 // is ordered
1405 return B_ENTRY_NOT_FOUND;
1406 }
1407
1408 // this area contains the allocation, we're done searching
1409 break;
1410 }
1411
1412 area = area->all_next;
1413 }
1414
1415 if (area == NULL) {
1416 // this address does not belong to us
1417 return B_ENTRY_NOT_FOUND;
1418 }
1419
1420 INFO(("realloc(address = %p, newSize = %" B_PRIuSIZE ")\n", address, newSize));
1421
1422 heap_page *page = &area->page_table[((addr_t)address - area->base)
1423 / heap->page_size];
1424 if (page->bin_index > heap->bin_count) {
1425 panic("realloc(): page %p: invalid bin_index %d\n", page,
1426 page->bin_index);
1427 return B_ERROR;
1428 }
1429
1430 // find out the size of the old allocation first
1431 size_t minSize = 0;
1432 size_t maxSize = 0;
1433 if (page->bin_index < heap->bin_count) {
1434 // this was a small allocation
1435 heap_bin *bin = &heap->bins[page->bin_index];
1436 maxSize = bin->element_size;
1437 if (page->bin_index > 0)
1438 minSize = heap->bins[page->bin_index - 1].element_size + 1;
1439 } else {
1440 // this was a large allocation
1441 uint32 allocationID = page->allocation_id;
1442 uint32 maxPages = area->page_count - page->index;
1443 maxSize = heap->page_size;
1444
1445 MutexLocker pageLocker(heap->page_lock);
1446 for (uint32 i = 1; i < maxPages; i++) {
1447 if (!page[i].in_use || page[i].bin_index != heap->bin_count
1448 || page[i].allocation_id != allocationID)
1449 break;
1450
1451 minSize += heap->page_size;
1452 maxSize += heap->page_size;
1453 }
1454 }
1455
1456 newSize += sizeof(addr_t) + sizeof(heap_leak_check_info);
1457
1458 // does the new allocation simply fit in the old allocation?
1459 if (newSize > minSize && newSize <= maxSize) {
1460 // update the size info (the info is at the end so stays where it is)
1461 heap_leak_check_info *info = (heap_leak_check_info *)((addr_t)address
1462 + maxSize - sizeof(heap_leak_check_info));
1463 newSize -= sizeof(addr_t) + sizeof(heap_leak_check_info);
1464 *newAddress = address;
1465
1466 MutexLocker pageLocker(heap->page_lock);
1467 info->size = newSize;
1468 addr_t wallAddress = (addr_t)address + newSize;
1469 memcpy((void *)wallAddress, &wallAddress, sizeof(addr_t));
1470 return B_OK;
1471 }
1472
1473 areaReadLocker.Unlock();
1474
1475 // new leak check info will be created with the malloc below
1476 newSize -= sizeof(addr_t) + sizeof(heap_leak_check_info);
1477
1478 // if not, allocate a new chunk of memory
1479 *newAddress = debug_heap_memalign(sDefaultAlignment, newSize);
1480 if (*newAddress == NULL) {
1481 // allocation failed, but the operation is done; the old block stays intact
1482 return B_OK;
1483 }
1484
1485 // copy the old data and free the old allocation
1486 memcpy(*newAddress, address, min_c(maxSize, newSize));
1487 heap_free(heap, address);
1488 return B_OK;
1489 }
1490
1491
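// Maps an allocation size (plus the debug overhead) to the smallest heap
// class whose max_allocation_size can hold it; larger requests fall into the
// last ("large") class.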
1492 inline uint32
1493 heap_class_for(size_t size)
1494 {
1495 // take the extra info size into account
1496 size += sizeof(addr_t) + sizeof(heap_leak_check_info);
1497
1498 uint32 index = 0;
1499 for (; index < HEAP_CLASS_COUNT - 1; index++) {
1500 if (size <= sHeapClasses[index].max_allocation_size)
1501 return index;
1502 }
1503
1504 return index;
1505 }
1506
1507
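// Looks up the allocation containing "address" in this heap and reports its
// requested size and allocating thread from the leak check info, validating
// that info on the way.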
1508 static status_t
1509 heap_get_allocation_info(heap_allocator *heap, void *address, size_t *size,
1510 thread_id *thread)
1511 {
1512 ReadLocker areaReadLocker(heap->area_lock);
1513 heap_area *area = heap->all_areas;
1514 while (area) {
1515 // since the all_areas list is ordered by base with the biggest
1516 // base at the top, we need only find the first area with a base
1517 // smaller than our address to become our only candidate for freeing
1518 if (area->base <= (addr_t)address) {
1519 if ((addr_t)address >= area->base + area->size) {
1520 // none of the other areas can contain the address as the list
1521 // is ordered
1522 return B_ENTRY_NOT_FOUND;
1523 }
1524
1525 // this area contains the allocation, we're done searching
1526 break;
1527 }
1528
1529 area = area->all_next;
1530 }
1531
1532 if (area == NULL) {
1533 // this address does not belong to us
1534 return B_ENTRY_NOT_FOUND;
1535 }
1536
1537 heap_page *page = &area->page_table[((addr_t)address - area->base)
1538 / heap->page_size];
1539
1540 if (page->bin_index > heap->bin_count) {
1541 panic("get_allocation_info(): page %p: invalid bin_index %d\n", page,
1542 page->bin_index);
1543 return B_ERROR;
1544 }
1545
1546 heap_leak_check_info *info = NULL;
1547 addr_t pageBase = area->base + page->index * heap->page_size;
1548 if (page->bin_index < heap->bin_count) {
1549 // small allocation
1550 heap_bin *bin = &heap->bins[page->bin_index];
1551 if (((addr_t)address - pageBase) % bin->element_size != 0) {
1552 panic("get_allocation_info(): address %p does not fall on"
1553 " allocation boundary for page base %p and element size %" B_PRIu32 "\n",
1554 address, (void *)pageBase, bin->element_size);
1555 return B_ERROR;
1556 }
1557
1558 MutexLocker binLocker(bin->lock);
1559
1560 info = (heap_leak_check_info *)((addr_t)address + bin->element_size
1561 - sizeof(heap_leak_check_info));
1562 if (info->size > bin->element_size - sizeof(addr_t)
1563 - sizeof(heap_leak_check_info)) {
1564 panic("leak check info has invalid size %" B_PRIuSIZE
1565 " for element size %" B_PRIu32 ","
1566 " probably memory has been overwritten past allocation size\n",
1567 info->size, bin->element_size);
1568 return B_ERROR;
1569 }
1570 } else {
1571 if ((addr_t)address != pageBase) {
1572 panic("get_allocation_info(): large allocation address %p not on"
1573 " page base %p\n", address, (void *)pageBase);
1574 return B_ERROR;
1575 }
1576
1577 uint32 allocationID = page->allocation_id;
1578 uint32 maxPages = area->page_count - page->index;
1579 uint32 pageCount = 0;
1580
1581 MutexLocker pageLocker(heap->page_lock);
1582 for (uint32 i = 0; i < maxPages; i++) {
1583 // loop until we find the end of this allocation
1584 if (!page[i].in_use || page[i].bin_index != heap->bin_count
1585 || page[i].allocation_id != allocationID)
1586 break;
1587
1588 pageCount++;
1589 }
1590
1591 size_t allocationSize = pageCount * heap->page_size;
1592 info = (heap_leak_check_info *)((addr_t)address + allocationSize
1593 - sizeof(heap_leak_check_info));
1594 if (info->size > allocationSize - sizeof(addr_t)
1595 - sizeof(heap_leak_check_info)) {
1596 panic("leak check info has invalid size %" B_PRIuSIZE
1597 " for allocation of %" B_PRIuSIZE ","
1598 " probably memory has been overwritten past allocation size\n",
1599 info->size, allocationSize);
1600 return B_ERROR;
1601 }
1602 }
1603
1604 if (size != NULL)
1605 *size = info->size;
1606 if (thread != NULL)
1607 *thread = info->thread;
1608
1609 return B_OK;
1610 }
1611
1612
1613 // #pragma mark -
1614
1615
1616 static status_t
1617 heap_create_new_heap_area(heap_allocator *heap, const char *name, size_t size)
1618 {
1619 void *address = NULL;
1620 area_id heapArea = create_area(name, &address, B_ANY_ADDRESS, size,
1621 B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
1622 if (heapArea < B_OK) {
1623 INFO(("heap: couldn't allocate heap area \"%s\"\n", name));
1624 return heapArea;
1625 }
1626
1627 heap_add_area(heap, heapArea, (addr_t)address, size);
1628 if (sParanoidValidation)
1629 heap_validate_heap(heap);
1630
1631 return B_OK;
1632 }
1633
1634
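// Thread function for periodic wall checking: validates all allocation walls
// and then sleeps for the configured interval (in milliseconds).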
1635 static int32
1636 heap_wall_checker(void *data)
1637 {
1638 int msInterval = (addr_t)data;
1639 while (!sStopWallChecking) {
1640 heap_validate_walls();
1641 snooze(msInterval * 1000);
1642 }
1643
1644 return 0;
1645 }
1646
1647
1648 // #pragma mark - Heap Debug API
1649
1650
1651 static status_t
1652 debug_heap_start_wall_checking(int msInterval)
1653 {
1654 if (sWallCheckThread < 0) {
1655 sWallCheckThread = spawn_thread(heap_wall_checker, "heap wall checker",
1656 B_LOW_PRIORITY, (void *)(addr_t)msInterval);
1657 }
1658
1659 if (sWallCheckThread < 0)
1660 return sWallCheckThread;
1661
1662 sStopWallChecking = false;
1663 return resume_thread(sWallCheckThread);
1664 }
1665
1666
1667 static status_t
1668 debug_heap_stop_wall_checking()
1669 {
1670 int32 result;
1671 sStopWallChecking = true;
1672 return wait_for_thread(sWallCheckThread, &result);
1673 }
1674
1675
1676 static void
1677 debug_heap_set_paranoid_validation(bool enabled)
1678 {
1679 sParanoidValidation = enabled;
1680 }
1681
1682
1683 static void
1684 debug_heap_set_memory_reuse(bool enabled)
1685 {
1686 sReuseMemory = enabled;
1687 }
1688
1689
1690 static void
1691 debug_heap_set_debugger_calls(bool enabled)
1692 {
1693 sDebuggerCalls = enabled;
1694 }
1695
1696
1697 static void
1698 debug_heap_set_default_alignment(size_t defaultAlignment)
1699 {
1700 sDefaultAlignment = defaultAlignment;
1701 }
1702
1703
1704 static void
1705 debug_heap_validate_heaps()
1706 {
1707 for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++)
1708 heap_validate_heap(sHeaps[i]);
1709 }
1710
1711
1712 static void
1713 debug_heap_dump_heaps(bool dumpAreas, bool dumpBins)
1714 {
1715 for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++)
1716 dump_allocator(sHeaps[i], dumpAreas, dumpBins);
1717 }
1718
1719
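// Serves an allocation from its own area with an inaccessible guard page
// (mprotect() with PROT_NONE) at the end. The allocation is placed so that
// it ends directly in front of the guard page, turning a buffer overrun into
// an immediate crash instead of silent corruption.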
1720 static void *
1721 debug_heap_malloc_with_guard_page(size_t size)
1722 {
1723 size_t areaSize = ROUNDUP(size + sizeof(area_allocation_info) + B_PAGE_SIZE,
1724 B_PAGE_SIZE);
1725 if (areaSize < size) {
1726 // the size overflowed
1727 return NULL;
1728 }
1729
1730 void *address = NULL;
1731 area_id allocationArea = create_area("guarded area", &address,
1732 B_ANY_ADDRESS, areaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
1733 if (allocationArea < B_OK) {
1734 panic("heap: failed to create area for guarded allocation of %" B_PRIuSIZE
1735 " bytes\n", size);
1736 return NULL;
1737 }
1738
1739 if (mprotect((void *)((addr_t)address + areaSize - B_PAGE_SIZE),
1740 B_PAGE_SIZE, PROT_NONE) != 0) {
1741 panic("heap: failed to protect guard page: %s\n", strerror(errno));
1742 delete_area(allocationArea);
1743 return NULL;
1744 }
1745
1746 area_allocation_info *info = (area_allocation_info *)address;
1747 info->magic = kAreaAllocationMagic;
1748 info->area = allocationArea;
1749 info->base = address;
1750 info->size = areaSize;
1751 info->thread = find_thread(NULL);
1752 info->allocation_size = size;
1753 info->allocation_alignment = 0;
1754
1755 // the address is calculated so that the end of the allocation
1756 // is at the end of the usable space of the requested area
1757 address = (void *)((addr_t)address + areaSize - B_PAGE_SIZE - size);
1758
1759 INFO(("heap: allocated area %" B_PRId32 " for guarded allocation of %" B_PRIuSIZE " bytes\n",
1760 allocationArea, size));
1761
1762 info->allocation_base = address;
1763
1764 memset(address, 0xcc, size);
1765 return address;
1766 }
1767
1768
1769 static status_t
1770 debug_heap_get_allocation_info(void *address, size_t *size,
1771 thread_id *thread)
1772 {
1773 for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
1774 heap_allocator *heap = sHeaps[i];
1775 if (heap_get_allocation_info(heap, address, size, thread) == B_OK)
1776 return B_OK;
1777 }
1778
1779 // or maybe it was a huge allocation using an area
1780 area_info areaInfo;
1781 area_id area = area_for(address);
1782 if (area >= B_OK && get_area_info(area, &areaInfo) == B_OK) {
1783 area_allocation_info *info = (area_allocation_info *)areaInfo.address;
1784
1785 // just make extra sure it was allocated by us
1786 if (info->magic == kAreaAllocationMagic && info->area == area
1787 && info->size == areaInfo.size && info->base == areaInfo.address
1788 && info->allocation_size < areaInfo.size) {
1789 if (size)
1790 *size = info->allocation_size;
1791 if (thread)
1792 *thread = info->thread;
1793 return B_OK;
1794 }
1795 }
1796
1797 return B_ENTRY_NOT_FOUND;
1798 }
1799
1800
1801 // #pragma mark - Init
1802
1803
1804 static status_t
1805 debug_heap_init(void)
1806 {
1807 // This will locate the heap base at 384 MB and reserve the next 1152 MB
1808 // for it. The reserved range may still get reclaimed by other areas, but
1809 // the maximum size of the heap is guaranteed until the space is really needed.
1810 void *heapBase = (void *)0x18000000;
1811 status_t status = _kern_reserve_address_range((addr_t *)&heapBase,
1812 B_EXACT_ADDRESS, 0x48000000);
1813
1814 area_id heapArea = create_area("heap", (void **)&heapBase,
1815 status == B_OK ? B_EXACT_ADDRESS : B_BASE_ADDRESS,
1816 HEAP_INITIAL_SIZE, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
1817 if (heapArea < B_OK)
1818 return heapArea;
1819
1820 addr_t base = (addr_t)heapBase;
1821 for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
1822 size_t partSize = HEAP_INITIAL_SIZE
1823 * sHeapClasses[i].initial_percentage / 100;
1824 sHeaps[i] = heap_create_allocator(sHeapClasses[i].name, base, partSize,
1825 &sHeapClasses[i]);
1826 base += partSize;
1827 }
1828
1829 return B_OK;
1830 }
1831
1832
1833 // #pragma mark - Public API
1834
1835
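// Public memalign entry point: requests at or above HEAP_AREA_USE_THRESHOLD
// bypass the heaps and get their own area with an area_allocation_info
// header; everything else is dispatched to the heap class matching the
// padded size, growing the heap with an additional area if needed.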
1836 static void *
1837 debug_heap_memalign(size_t alignment, size_t size)
1838 {
1839 size_t alignedSize = size + sizeof(addr_t) + sizeof(heap_leak_check_info);
1840 if (alignment != 0 && alignment < B_PAGE_SIZE)
1841 alignedSize = ROUNDUP(alignedSize, alignment);
1842
1843 if (alignedSize >= HEAP_AREA_USE_THRESHOLD) {
1844 // don't even attempt such a huge allocation - use areas instead
1845 size_t areaSize = ROUNDUP(size + sizeof(area_allocation_info)
1846 + alignment, B_PAGE_SIZE);
1847 if (areaSize < size) {
1848 // the size overflowed
1849 return NULL;
1850 }
1851
1852 void *address = NULL;
1853 area_id allocationArea = create_area("memalign area", &address,
1854 B_ANY_ADDRESS, areaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
1855 if (allocationArea < B_OK) {
1856 panic("heap: failed to create area for huge allocation of %" B_PRIuSIZE
1857 " bytes\n", size);
1858 return NULL;
1859 }
1860
1861 area_allocation_info *info = (area_allocation_info *)address;
1862 info->magic = kAreaAllocationMagic;
1863 info->area = allocationArea;
1864 info->base = address;
1865 info->size = areaSize;
1866 info->thread = find_thread(NULL);
1867 info->allocation_size = size;
1868 info->allocation_alignment = alignment;
1869
1870 address = (void *)((addr_t)address + sizeof(area_allocation_info));
1871 if (alignment != 0) {
1872 address = (void *)ROUNDUP((addr_t)address, alignment);
1873 ASSERT((addr_t)address % alignment == 0);
1874 ASSERT((addr_t)address + size - 1 < (addr_t)info + areaSize - 1);
1875 }
1876
1877 INFO(("heap: allocated area %" B_PRId32 " for huge allocation of %" B_PRIuSIZE " bytes\n",
1878 allocationArea, size));
1879
1880 info->allocation_base = address;
1881
1882 memset(address, 0xcc, size);
1883 return address;
1884 }
1885
1886 uint32 heapClass = alignment < B_PAGE_SIZE
1887 ? heap_class_for(alignedSize) : 0;
1888
1889 heap_allocator *heap = sHeaps[heapClass];
1890 void *result = heap_memalign(heap, alignment, size);
1891 if (result == NULL) {
1892 // add new area and try again
1893 heap_create_new_heap_area(heap, "additional heap area", HEAP_GROW_SIZE);
1894 result = heap_memalign(heap, alignment, size);
1895 }
1896
1897 if (sParanoidValidation)
1898 heap_validate_heap(heap);
1899
1900 if (result == NULL) {
1901 panic("heap: heap has run out of memory trying to allocate %" B_PRIuSIZE " bytes\n",
1902 size);
1903 }
1904
1905 if (alignment != 0)
1906 ASSERT((addr_t)result % alignment == 0);
1907
1908 return result;
1909 }
1910
1911
1912 static void *
1913 debug_heap_malloc(size_t size)
1914 {
1915 if (sUseGuardPage)
1916 return debug_heap_malloc_with_guard_page(size);
1917
1918 return debug_heap_memalign(sDefaultAlignment, size);
1919 }
1920
1921
1922 static void
1923 debug_heap_free(void *address)
1924 {
1925 for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
1926 heap_allocator *heap = sHeaps[i];
1927 if (heap_free(heap, address) == B_OK) {
1928 if (sParanoidValidation)
1929 heap_validate_heap(heap);
1930 return;
1931 }
1932 }
1933
1934 // or maybe it was a huge allocation using an area
1935 area_info areaInfo;
1936 area_id area = area_for(address);
1937 if (area >= B_OK && get_area_info(area, &areaInfo) == B_OK) {
1938 area_allocation_info *info = (area_allocation_info *)areaInfo.address;
1939
1940 // just make extra sure it was allocated by us
1941 if (info->magic == kAreaAllocationMagic && info->area == area
1942 && info->size == areaInfo.size && info->base == areaInfo.address
1943 && info->allocation_size < areaInfo.size) {
1944 delete_area(area);
1945 INFO(("free(): freed huge allocation by deleting area %" B_PRId32 "\n",
1946 area));
1947 return;
1948 }
1949 }
1950
1951 panic("free(): free failed for address %p\n", address);
1952 }
1953
1954
1955 static void *
1956 debug_heap_realloc(void *address, size_t newSize)
1957 {
1958 if (address == NULL)
1959 return debug_heap_memalign(sDefaultAlignment, newSize);
1960
1961 if (newSize == 0) {
1962 free(address);
1963 return NULL;
1964 }
1965
1966 void *newAddress = NULL;
1967 for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
1968 heap_allocator *heap = sHeaps[i];
1969 if (heap_realloc(heap, address, &newAddress, newSize) == B_OK) {
1970 if (sParanoidValidation)
1971 heap_validate_heap(heap);
1972 return newAddress;
1973 }
1974 }
1975
1976 // or maybe it was a huge allocation using an area
1977 area_info areaInfo;
1978 area_id area = area_for(address);
1979 if (area >= B_OK && get_area_info(area, &areaInfo) == B_OK) {
1980 area_allocation_info *info = (area_allocation_info *)areaInfo.address;
1981
1982 // just make extra sure it was allocated by us
1983 if (info->magic == kAreaAllocationMagic && info->area == area
1984 && info->size == areaInfo.size && info->base == areaInfo.address
1985 && info->allocation_size < areaInfo.size) {
1986 if (sUseGuardPage) {
1987 size_t available = info->size - B_PAGE_SIZE
1988 - sizeof(area_allocation_info);
1989
1990 if (available >= newSize) {
1991 // there is enough room available for the newSize
1992 newAddress = (void*)((addr_t)info->allocation_base
1993 + info->allocation_size - newSize);
1994 INFO(("realloc(): new size %" B_PRIuSIZE
1995 " fits in old area %" B_PRId32 " with "
1996 "%" B_PRIuSIZE " available -> new address: %p\n",
1997 newSize, area, available, newAddress));
1998 memmove(newAddress, info->allocation_base,
1999 min_c(newSize, info->allocation_size));
2000 info->allocation_base = newAddress;
2001 info->allocation_size = newSize;
2002 return newAddress;
2003 }
2004 } else {
2005 size_t available = info->size - ((addr_t)info->allocation_base
2006 - (addr_t)info->base);
2007
2008 if (available >= newSize) {
2009 // there is enough room available for the newSize
2010 INFO(("realloc(): new size %" B_PRIuSIZE
2011 " fits in old area %" B_PRId32 " with "
2012 "%" B_PRIuSIZE " available\n",
2013 newSize, area, available));
2014 info->allocation_size = newSize;
2015 return address;
2016 }
2017 }
2018
2019 // have to allocate/copy/free - TODO maybe resize the area instead?
2020 newAddress = debug_heap_memalign(sDefaultAlignment, newSize);
2021 if (newAddress == NULL) {
2022 panic("realloc(): failed to allocate new block of %" B_PRIuSIZE
2023 " bytes\n", newSize);
2024 return NULL;
2025 }
2026
2027 memcpy(newAddress, address, min_c(newSize, info->allocation_size));
2028 delete_area(area);
2029 INFO(("realloc(): allocated new block %p for size %" B_PRIuSIZE
2030 " and deleted old area %" B_PRId32 "\n",
2031 newAddress, newSize, area));
2032 return newAddress;
2033 }
2034 }
2035
2036 panic("realloc(): failed to realloc address %p to size %" B_PRIuSIZE "\n", address,
2037 newSize);
2038 return NULL;
2039 }
2040
2041
2042 heap_implementation __mallocDebugHeap = {
2043 debug_heap_init,
2044 NULL, // terminate_after
2045
2046 debug_heap_memalign,
2047 debug_heap_malloc,
2048 debug_heap_free,
2049 debug_heap_realloc,
2050
2051 NULL, // calloc
2052 NULL, // valloc
2053 NULL, // posix_memalign
2054
2055 debug_heap_start_wall_checking,
2056 debug_heap_stop_wall_checking,
2057 debug_heap_set_paranoid_validation,
2058 debug_heap_set_memory_reuse,
2059 debug_heap_set_debugger_calls,
2060 debug_heap_set_default_alignment,
2061 debug_heap_validate_heaps,
2062 heap_validate_walls,
2063 dump_allocations,
2064 debug_heap_dump_heaps,
2065 debug_heap_malloc_with_guard_page,
2066 debug_heap_get_allocation_info,
2067
2068 NULL, // set_dump_allocations_on_exit
2069 NULL // set_stack_trace_depth
2070 };
2071