/*
 * Copyright 2011, Michael Lotz <mmlr@mlotz.ch>.
 * Distributed under the terms of the MIT License.
 */

#include "malloc_debug_api.h"


#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <signal.h>
#include <sys/mman.h>

#include <locks.h>

#include <libroot_private.h>
#include <runtime_loader.h>
#include <vm_defs.h>

#include <TLS.h>


// #pragma mark - Debug Helpers

static const size_t kMaxStackTraceDepth = 50;


static bool sDebuggerCalls = true;
static bool sDumpAllocationsOnExit = false;
static size_t sStackTraceDepth = 0;
static int32 sStackBaseTLSIndex = -1;
static int32 sStackEndTLSIndex = -1;

#if __cplusplus >= 201103L
#include <cstddef>
using namespace std;
static size_t sDefaultAlignment = alignof(max_align_t);
#else
static size_t sDefaultAlignment = 8;
#endif


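// Reports a heap error. The message is formatted into a stack buffer so that
// reporting itself does not allocate; depending on sDebuggerCalls it either
// drops into the debugger or just prints the message.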
static void
panic(const char* format, ...)
{
	char buffer[1024];

	va_list args;
	va_start(args, format);
	vsnprintf(buffer, sizeof(buffer), format, args);
	va_end(args);

	if (sDebuggerCalls)
		debugger(buffer);
	else
		debug_printf("%s", buffer);
}


static void
print_stdout(const char* format, ...)
{
	// To avoid any allocations due to the dynamic memory needed by printf()
	// we use a stack buffer and vsnprintf(). Otherwise this does the same as
	// printf().
	char buffer[1024];

	va_list args;
	va_start(args, format);
	vsnprintf(buffer, sizeof(buffer), format, args);
	va_end(args);

	write(STDOUT_FILENO, buffer, strlen(buffer));
}


// #pragma mark - Linked List


#define GET_ITEM(list, item) ((void *)((uint8 *)item - list->offset))
#define GET_LINK(list, item) ((list_link *)((uint8 *)item + list->offset))


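// Minimal intrusive, circular doubly linked list used for the per-area free
// list. The list_link lives inside the element at the configured offset; an
// empty list has its sentinel link pointing at itself.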
struct list_link {
	list_link* next;
	list_link* prev;
};

struct list {
	list_link link;
	int32 offset;
};


static inline void
list_remove_item(struct list* list, void* item)
{
	list_link* link = GET_LINK(list, item);

	link->next->prev = link->prev;
	link->prev->next = link->next;
}


static inline void
list_add_item(struct list* list, void* item)
{
	list_link* link = GET_LINK(list, item);

	link->next = &list->link;
	link->prev = list->link.prev;

	list->link.prev->next = link;
	list->link.prev = link;
}


static inline void*
list_get_next_item(struct list* list, void* item)
{
	if (item == NULL) {
		if (list->link.next == (list_link *)list)
			return NULL;

		return GET_ITEM(list, list->link.next);
	}

	list_link* link = GET_LINK(list, item);
	if (link->next == &list->link)
		return NULL;

	return GET_ITEM(list, link->next);
}


static inline void
list_init_etc(struct list* list, int32 offset)
{
	list->link.next = list->link.prev = &list->link;
	list->offset = offset;
}


// #pragma mark - Guarded Heap


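// Flags tracked per heap page:
//  USED  - the page belongs to an allocation
//  FIRST - first page of an allocation; holds the metadata and stack traces
//  GUARD - inaccessible guard page terminating an allocation
//  DEAD  - freed while memory reuse was disabled; never handed out again
//  AREA  - the allocation is backed by its own dedicated area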
#define GUARDED_HEAP_PAGE_FLAG_USED 0x01
#define GUARDED_HEAP_PAGE_FLAG_FIRST 0x02
#define GUARDED_HEAP_PAGE_FLAG_GUARD 0x04
#define GUARDED_HEAP_PAGE_FLAG_DEAD 0x08
#define GUARDED_HEAP_PAGE_FLAG_AREA 0x10

#define GUARDED_HEAP_INITIAL_SIZE (1 * 1024 * 1024)
#define GUARDED_HEAP_GROW_SIZE (2 * 1024 * 1024)
#define GUARDED_HEAP_AREA_USE_THRESHOLD (1 * 1024 * 1024)


struct guarded_heap;

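// Per-page bookkeeping. For multi-page allocations only the first page's
// trace fields are meaningful: the allocation trace is stored at the start of
// stack_trace[] and the free trace is appended right behind it.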
struct guarded_heap_page {
	uint8 flags;
	size_t allocation_size;
	void* allocation_base;
	size_t alignment;
	thread_id allocating_thread;
	thread_id freeing_thread;
	list_link free_list_link;
	size_t alloc_stack_trace_depth;
	size_t free_stack_trace_depth;
	addr_t stack_trace[kMaxStackTraceDepth];
};

struct guarded_heap_area {
	guarded_heap* heap;
	guarded_heap_area* next;
	area_id area;
	addr_t base;
	size_t size;
	size_t page_count;
	size_t used_pages;
	mutex lock;
	struct list free_list;
	guarded_heap_page pages[0];
};

struct guarded_heap {
	rw_lock lock;
	size_t page_count;
	size_t used_pages;
	uint32 area_creation_counter;
	bool reuse_memory;
	guarded_heap_area* areas;
};


static guarded_heap sGuardedHeap = {
	RW_LOCK_INITIALIZER("guarded heap lock"),
	0, 0, 0, true, NULL
};


static void dump_guarded_heap_page(void* address, bool doPanic = false);


static void
guarded_heap_segfault_handler(int signal, siginfo_t* signalInfo, void* vregs)
{
	if (signal != SIGSEGV)
		return;

	if (signalInfo->si_code != SEGV_ACCERR) {
		// Not ours.
		panic("generic segfault");
		return;
	}

	dump_guarded_heap_page(signalInfo->si_addr, true);

	exit(-1);
}


static void
guarded_heap_page_protect_raw(addr_t address, size_t size, uint32 protection)
{
	mprotect((void*)address, size, protection);
	if (protection == 0)
		madvise((void*)address, size, MADV_FREE);
	else
		memset((void*)address, 0xcc, size);
}


static void
guarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
	uint32 protection)
{
	guarded_heap_page_protect_raw(area.base + pageIndex * B_PAGE_SIZE,
		B_PAGE_SIZE, protection);
}


static void
guarded_heap_print_stack_trace(addr_t stackTrace[], size_t depth)
{
	char* imageName;
	char* symbolName;
	void* location;
	bool exactMatch;

	for (size_t i = 0; i < depth; i++) {
		addr_t address = stackTrace[i];

		status_t status = __gRuntimeLoader->get_nearest_symbol_at_address(
			(void*)address, NULL, NULL, &imageName, &symbolName, NULL,
			&location, &exactMatch);
		if (status != B_OK) {
			print_stdout("\t%#" B_PRIxADDR " (lookup failed: %s)\n", address,
				strerror(status));
			continue;
		}

		print_stdout("\t<%s> %s + %#" B_PRIxADDR "%s\n", imageName, symbolName,
			address - (addr_t)location, exactMatch ? "" : " (nearest)");
	}
}


static void
guarded_heap_print_stack_traces(guarded_heap_page& page)
{
	if (page.alloc_stack_trace_depth > 0) {
		print_stdout("alloc stack trace (%" B_PRIuSIZE "):\n",
			page.alloc_stack_trace_depth);
		guarded_heap_print_stack_trace(page.stack_trace,
			page.alloc_stack_trace_depth);
	}

	if (page.free_stack_trace_depth > 0) {
		print_stdout("free stack trace (%" B_PRIuSIZE "):\n",
			page.free_stack_trace_depth);
		guarded_heap_print_stack_trace(
			&page.stack_trace[page.alloc_stack_trace_depth],
			page.free_stack_trace_depth);
	}
}


static size_t
guarded_heap_fill_stack_trace(addr_t stackTrace[], size_t maxDepth,
	size_t skipFrames)
{
	if (maxDepth == 0)
		return 0;

	void** stackBase = tls_address(sStackBaseTLSIndex);
	void** stackEnd = tls_address(sStackEndTLSIndex);
	if (*stackBase == NULL || *stackEnd == NULL) {
		thread_info threadInfo;
		status_t result = get_thread_info(find_thread(NULL), &threadInfo);
		if (result != B_OK)
			return 0;

		*stackBase = (void*)threadInfo.stack_base;
		*stackEnd = (void*)threadInfo.stack_end;
	}

	int32 traceDepth = __arch_get_stack_trace(stackTrace, maxDepth, skipFrames,
		(addr_t)*stackBase, (addr_t)*stackEnd);

	return traceDepth < 0 ? 0 : traceDepth;
}


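// Marks pagesNeeded consecutive pages starting at startPageIndex as used. The
// last page becomes the guard page and is protected against all access; the
// other pages are mapped read/write and filled with 0xcc.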
static void
guarded_heap_page_allocate(guarded_heap_area& area, size_t startPageIndex,
	size_t pagesNeeded, size_t allocationSize, size_t alignment,
	void* allocationBase)
{
	if (pagesNeeded < 2) {
		panic("need to allocate at least 2 pages, one for guard\n");
		return;
	}

	guarded_heap_page* firstPage = NULL;
	for (size_t i = 0; i < pagesNeeded; i++) {
		guarded_heap_page& page = area.pages[startPageIndex + i];
		page.flags = GUARDED_HEAP_PAGE_FLAG_USED;
		if (i == 0) {
			page.allocating_thread = find_thread(NULL);
			page.flags |= GUARDED_HEAP_PAGE_FLAG_FIRST;
			page.alloc_stack_trace_depth = guarded_heap_fill_stack_trace(
				page.stack_trace, sStackTraceDepth, 2);
			firstPage = &page;
		} else {
			page.allocating_thread = firstPage->allocating_thread;
			page.alloc_stack_trace_depth = 0;
		}

		page.freeing_thread = -1;
		page.allocation_size = allocationSize;
		page.allocation_base = allocationBase;
		page.alignment = alignment;
		page.free_stack_trace_depth = 0;

		list_remove_item(&area.free_list, &page);

		if (i == pagesNeeded - 1) {
			page.flags |= GUARDED_HEAP_PAGE_FLAG_GUARD;
			guarded_heap_page_protect(area, startPageIndex + i, 0);
		} else {
			guarded_heap_page_protect(area, startPageIndex + i,
				B_READ_AREA | B_WRITE_AREA);
		}
	}
}


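// Returns a page to the area's free list and revokes its protection so stale
// accesses fault. With memory reuse disabled (and force == false) the page
// keeps its used flag and is marked dead instead, so it is never handed out
// again.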
static void
guarded_heap_free_page(guarded_heap_area& area, size_t pageIndex,
	bool force = false)
{
	guarded_heap_page& page = area.pages[pageIndex];

	if (area.heap->reuse_memory || force)
		page.flags = 0;
	else
		page.flags |= GUARDED_HEAP_PAGE_FLAG_DEAD;

	page.freeing_thread = find_thread(NULL);

	list_add_item(&area.free_list, &page);

	guarded_heap_page_protect(area, pageIndex, 0);
}


static void
guarded_heap_pages_allocated(guarded_heap& heap, size_t pagesAllocated)
{
	atomic_add((int32*)&heap.used_pages, pagesAllocated);
}


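// Tries to carve pagesNeeded consecutive free pages out of this area and
// returns the resulting user pointer, or NULL if no suitable run was found.
// The allocation is placed so that its end lines up with the trailing guard
// page while still honoring the requested alignment.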
static void*
guarded_heap_area_allocate(guarded_heap_area& area, size_t pagesNeeded,
	size_t size, size_t alignment)
{
	if (pagesNeeded > area.page_count - area.used_pages)
		return NULL;

	// We use the free list this way so that the page that has been free for
	// the longest time is allocated. This keeps immediate re-use (that may
	// hide bugs) to a minimum.
	guarded_heap_page* page
		= (guarded_heap_page*)list_get_next_item(&area.free_list, NULL);

	for (; page != NULL;
		page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {

		if ((page->flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
			continue;

		size_t pageIndex = page - area.pages;
		if (pageIndex > area.page_count - pagesNeeded)
			continue;

		// Candidate, check if we have enough pages going forward
		// (including the guard page).
		bool candidate = true;
		for (size_t j = 1; j < pagesNeeded; j++) {
			if ((area.pages[pageIndex + j].flags & GUARDED_HEAP_PAGE_FLAG_USED)
					!= 0) {
				candidate = false;
				break;
			}
		}

		if (!candidate)
			continue;

		size_t offset = size & (B_PAGE_SIZE - 1);
		void* result = (void*)((area.base + pageIndex * B_PAGE_SIZE
			+ (offset > 0 ? B_PAGE_SIZE - offset : 0)) & ~(alignment - 1));

		guarded_heap_page_allocate(area, pageIndex, pagesNeeded, size,
			alignment, result);

		area.used_pages += pagesNeeded;
		guarded_heap_pages_allocated(*area.heap, pagesNeeded);
		return result;
	}

	return NULL;
}


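// Initializes a freshly created area: the guarded_heap_area header and its
// page table occupy the first pages of the area; the remaining pages form the
// allocatable range and are pushed onto the free list before the area is
// linked into the heap.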
static bool
guarded_heap_area_init(guarded_heap& heap, area_id id, void* baseAddress,
	size_t size)
{
	guarded_heap_area* area = (guarded_heap_area*)baseAddress;
	area->heap = &heap;
	area->area = id;
	area->size = size;
	area->page_count = area->size / B_PAGE_SIZE;
	area->used_pages = 0;

	size_t pagesNeeded = (sizeof(guarded_heap_area)
		+ area->page_count * sizeof(guarded_heap_page)
		+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	area->page_count -= pagesNeeded;
	area->size = area->page_count * B_PAGE_SIZE;
	area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;

	mutex_init(&area->lock, "guarded_heap_area_lock");

	list_init_etc(&area->free_list,
		offsetof(guarded_heap_page, free_list_link));

	for (size_t i = 0; i < area->page_count; i++)
		guarded_heap_free_page(*area, i, true);

	area->next = heap.areas;
	heap.areas = area;
	heap.page_count += area->page_count;

	return true;
}


static bool
guarded_heap_area_create(guarded_heap& heap, size_t size)
{
	for (size_t trySize = size; trySize >= 1 * 1024 * 1024;
		trySize /= 2) {

		void* baseAddress = NULL;
		area_id id = create_area("guarded_heap_area", &baseAddress,
			B_ANY_ADDRESS, trySize, B_NO_LOCK,
			B_READ_AREA | B_WRITE_AREA | B_OVERCOMMITTING_AREA);

		if (id < 0)
			continue;

		if (guarded_heap_area_init(heap, id, baseAddress, trySize))
			return true;

		delete_area(id);
	}

	panic("failed to allocate a new heap area");
	return false;
}


static bool
guarded_heap_add_area(guarded_heap& heap, uint32 counter)
{
	WriteLocker areaListWriteLocker(heap.lock);
	if (heap.area_creation_counter != counter)
		return false;

	return guarded_heap_area_create(heap, GUARDED_HEAP_GROW_SIZE);
}


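// Fallback for allocations that are too large or too strongly aligned for the
// regular heap areas: the allocation gets its own dedicated area with a
// guarded_heap_page record at its start and a protected guard page at its
// end. Deleting the area on free makes any later access fault, just like in
// the regular case.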
static void*
guarded_heap_allocate_with_area(size_t size, size_t alignment)
{
	size_t infoSpace = alignment >= B_PAGE_SIZE ? B_PAGE_SIZE
		: (sizeof(guarded_heap_page) + alignment - 1) & ~(alignment - 1);

	size_t pagesNeeded = (size + infoSpace + B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	if (alignment > B_PAGE_SIZE)
		pagesNeeded += alignment / B_PAGE_SIZE - 1;

	void* address = NULL;
	area_id area = create_area("guarded_heap_huge_allocation", &address,
		B_ANY_ADDRESS, (pagesNeeded + 1) * B_PAGE_SIZE, B_NO_LOCK,
		B_READ_AREA | B_WRITE_AREA);
	if (area < 0) {
		panic("failed to create area for allocation of %" B_PRIuSIZE " pages",
			pagesNeeded);
		return NULL;
	}

	// We just use a page object
	guarded_heap_page* page = (guarded_heap_page*)address;
	page->flags = GUARDED_HEAP_PAGE_FLAG_USED | GUARDED_HEAP_PAGE_FLAG_FIRST
		| GUARDED_HEAP_PAGE_FLAG_AREA;
	page->allocation_size = size;
	page->allocation_base = (void*)(((addr_t)address
		+ pagesNeeded * B_PAGE_SIZE - size) & ~(alignment - 1));
	page->alignment = alignment;
	page->allocating_thread = find_thread(NULL);
	page->freeing_thread = -1;
	page->alloc_stack_trace_depth = guarded_heap_fill_stack_trace(
		page->stack_trace, sStackTraceDepth, 2);
	page->free_stack_trace_depth = 0;

	if (alignment <= B_PAGE_SIZE) {
		// Protect just the guard page.
		guarded_heap_page_protect_raw(
			(addr_t)address + pagesNeeded * B_PAGE_SIZE, B_PAGE_SIZE, 0);
	} else {
		// Protect empty pages before the allocation start...
		addr_t protectedStart = (addr_t)address + B_PAGE_SIZE;
		size_t protectedSize = (addr_t)page->allocation_base - protectedStart;
		if (protectedSize > 0)
			guarded_heap_page_protect_raw(protectedStart, protectedSize, 0);

		// ... and after allocation end.
		size_t allocatedPages = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		protectedStart = (addr_t)page->allocation_base
			+ allocatedPages * B_PAGE_SIZE;
		protectedSize = (addr_t)address + (pagesNeeded + 1) * B_PAGE_SIZE
			- protectedStart;

		// There is at least the guard page.
		guarded_heap_page_protect_raw(protectedStart, protectedSize, 0);
	}

	return page->allocation_base;
}


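// Main allocation path: computes the number of pages needed (including the
// trailing guard page), tries each existing area in turn, and grows the heap
// by another area before retrying if none of them has room.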
static void*
guarded_heap_allocate(guarded_heap& heap, size_t size, size_t alignment)
{
	if (alignment == 0)
		alignment = 1;

	size_t pagesNeeded = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE + 1;
	if (alignment > B_PAGE_SIZE
		|| pagesNeeded * B_PAGE_SIZE >= GUARDED_HEAP_AREA_USE_THRESHOLD) {
		// Don't bother, use an area directly. Since it will also fault once
		// it is deleted, that fits our model quite nicely.
		return guarded_heap_allocate_with_area(size, alignment);
	}

	void* result = NULL;

	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {

		MutexLocker locker(area->lock);
		result = guarded_heap_area_allocate(*area, pagesNeeded, size,
			alignment);
		if (result != NULL)
			break;
	}

	uint32 counter = heap.area_creation_counter;
	areaListReadLocker.Unlock();

	if (result == NULL) {
		guarded_heap_add_area(heap, counter);
		return guarded_heap_allocate(heap, size, alignment);
	}

	if (result == NULL)
		panic("ran out of memory");

	return result;
}


static guarded_heap_area*
guarded_heap_get_locked_area_for(guarded_heap& heap, void* address)
{
	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {
		if ((addr_t)address < area->base)
			continue;

		if ((addr_t)address >= area->base + area->size)
			continue;

		mutex_lock(&area->lock);
		return area;
	}

	return NULL;
}


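// Translates an address into its page index within the given area and
// verifies that it points at the first page of a live allocation. Panics and
// returns area.page_count (an out-of-range index) if it does not.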
static size_t
guarded_heap_area_page_index_for(guarded_heap_area& area, void* address)
{
	size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
	guarded_heap_page& page = area.pages[pageIndex];
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not marked in use", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a guard page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not an allocation first page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a dead page", address, pageIndex);
		return area.page_count;
	}

	return pageIndex;
}


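// Frees an allocation within an area: walks its run of pages up to and
// including the guard page, records a free stack trace on the first page, and
// adjusts the usage counters when memory reuse is enabled.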
static bool
guarded_heap_area_free(guarded_heap_area& area, void* address)
{
	size_t pageIndex = guarded_heap_area_page_index_for(area, address);
	if (pageIndex >= area.page_count)
		return false;

	size_t pagesFreed = 0;
	guarded_heap_page* page = &area.pages[pageIndex];
	while ((page->flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
		// Mark the allocation page as free.
		guarded_heap_free_page(area, pageIndex);
		if (pagesFreed == 0 && sStackTraceDepth > 0) {
			size_t freeEntries
				= kMaxStackTraceDepth - page->alloc_stack_trace_depth;

			page->free_stack_trace_depth = guarded_heap_fill_stack_trace(
				&page->stack_trace[page->alloc_stack_trace_depth],
				min_c(freeEntries, sStackTraceDepth), 2);
		}

		pagesFreed++;
		pageIndex++;
		page = &area.pages[pageIndex];
	}

	// Mark the guard page as free as well.
	guarded_heap_free_page(area, pageIndex);
	pagesFreed++;

	if (area.heap->reuse_memory) {
		area.used_pages -= pagesFreed;
		atomic_add((int32*)&area.heap->used_pages, -pagesFreed);
	}

	return true;
}


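// Looks up the dedicated area backing a large allocation and returns its
// bookkeeping page, or NULL if the address was not handed out by
// guarded_heap_allocate_with_area().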
static guarded_heap_page*
guarded_heap_area_allocation_for(void* address, area_id& allocationArea)
{
	allocationArea = area_for(address);
	if (allocationArea < 0)
		return NULL;

	area_info areaInfo;
	if (get_area_info(allocationArea, &areaInfo) != B_OK)
		return NULL;

	guarded_heap_page* page = (guarded_heap_page*)areaInfo.address;
	if (page->flags != (GUARDED_HEAP_PAGE_FLAG_USED
			| GUARDED_HEAP_PAGE_FLAG_FIRST | GUARDED_HEAP_PAGE_FLAG_AREA)) {
		return NULL;
	}

	if (page->allocation_base != address)
		return NULL;
	if (page->allocation_size >= areaInfo.size)
		return NULL;

	return page;
}


static bool
guarded_heap_free_area_allocation(void* address)
{
	area_id allocationArea;
	if (guarded_heap_area_allocation_for(address, allocationArea) == NULL)
		return false;

	delete_area(allocationArea);
	return true;
}


static bool
guarded_heap_free(void* address)
{
	if (address == NULL)
		return true;

	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);
	if (area == NULL)
		return guarded_heap_free_area_allocation(address);

	MutexLocker locker(area->lock, true);
	return guarded_heap_area_free(*area, address);
}


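// Reallocation is implemented as allocate + copy + free, so the returned
// block never aliases the old one and any further use of the old pointer
// faults.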
static void*
guarded_heap_realloc(void* address, size_t newSize)
{
	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);

	size_t oldSize;
	area_id allocationArea = -1;
	if (area != NULL) {
		MutexLocker locker(area->lock, true);
		size_t pageIndex = guarded_heap_area_page_index_for(*area, address);
		if (pageIndex >= area->page_count)
			return NULL;

		guarded_heap_page& page = area->pages[pageIndex];
		oldSize = page.allocation_size;
		locker.Unlock();
	} else {
		guarded_heap_page* page = guarded_heap_area_allocation_for(address,
			allocationArea);
		if (page == NULL)
			return NULL;

		oldSize = page->allocation_size;
	}

	if (oldSize == newSize)
		return address;

	void* newBlock = guarded_heap_allocate(sGuardedHeap, newSize,
		sDefaultAlignment);
	if (newBlock == NULL)
		return NULL;

	memcpy(newBlock, address, min_c(oldSize, newSize));

	if (allocationArea >= 0)
		delete_area(allocationArea);
	else {
		MutexLocker locker(area->lock);
		guarded_heap_area_free(*area, address);
	}

	return newBlock;
}


// #pragma mark - Debugger commands


static void
dump_guarded_heap_page(guarded_heap_page& page)
{
	printf("flags:");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
		printf(" used");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
		printf(" first");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
		printf(" guard");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
		printf(" dead");
	printf("\n");

	printf("allocation size: %" B_PRIuSIZE "\n", page.allocation_size);
	printf("allocation base: %p\n", page.allocation_base);
	printf("alignment: %" B_PRIuSIZE "\n", page.alignment);
	printf("allocating thread: %" B_PRId32 "\n", page.allocating_thread);
	printf("freeing thread: %" B_PRId32 "\n", page.freeing_thread);
}


static void
dump_guarded_heap_page(void* address, bool doPanic)
{
	// Find the area that contains this page.
	guarded_heap_area* area = NULL;
	for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
			candidate = candidate->next) {

		if ((addr_t)address < candidate->base)
			continue;
		if ((addr_t)address >= candidate->base + candidate->size)
			continue;

		area = candidate;
		break;
	}

	if (area == NULL) {
		panic("didn't find area for address %p\n", address);
		return;
	}

	size_t pageIndex = ((addr_t)address - area->base) / B_PAGE_SIZE;
	guarded_heap_page& page = area->pages[pageIndex];
	dump_guarded_heap_page(page);

	// Find the first page and dump the stack traces.
	for (ssize_t candidateIndex = (ssize_t)pageIndex;
			sStackTraceDepth > 0 && candidateIndex >= 0; candidateIndex--) {
		guarded_heap_page& candidate = area->pages[candidateIndex];
		if ((candidate.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0)
			continue;

		guarded_heap_print_stack_traces(candidate);
		break;
	}

	if (doPanic) {
		// Note: we do this the complicated way because we absolutely don't
		// want any character conversion to happen that might provoke other
		// segfaults in the locale backend. Therefore we avoid using any string
		// formats, resulting in the mess below.

#define DO_PANIC(state) \
	panic("thread %" B_PRId32 " tried accessing address %p which is " \
		state " (base: 0x%" B_PRIxADDR ", size: %" B_PRIuSIZE \
		", alignment: %" B_PRIuSIZE ", allocated by thread: %" \
		B_PRId32 ", freed by thread: %" B_PRId32 ")", \
		find_thread(NULL), address, page.allocation_base, \
		page.allocation_size, page.alignment, page.allocating_thread, \
		page.freeing_thread)

		if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0)
			DO_PANIC("not allocated");
		else if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
			DO_PANIC("a guard page");
		else if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
			DO_PANIC("a dead page");
		else
			DO_PANIC("in an unknown state");

#undef DO_PANIC
	}
}


static void
dump_guarded_heap_area(guarded_heap_area& area)
{
	printf("guarded heap area: %p\n", &area);
	printf("next heap area: %p\n", area.next);
	printf("guarded heap: %p\n", area.heap);
	printf("area id: %" B_PRId32 "\n", area.area);
	printf("base: 0x%" B_PRIxADDR "\n", area.base);
	printf("size: %" B_PRIuSIZE "\n", area.size);
	printf("page count: %" B_PRIuSIZE "\n", area.page_count);
	printf("used pages: %" B_PRIuSIZE "\n", area.used_pages);
	printf("lock: %p\n", &area.lock);

	size_t freeCount = 0;
	void* item = list_get_next_item(&area.free_list, NULL);
	while (item != NULL) {
		freeCount++;

		if ((((guarded_heap_page*)item)->flags & GUARDED_HEAP_PAGE_FLAG_USED)
				!= 0) {
			printf("free list broken, page %p not actually free\n", item);
		}

		item = list_get_next_item(&area.free_list, item);
	}

	printf("free_list: %p (%" B_PRIuSIZE " free)\n", &area.free_list,
		freeCount);

	freeCount = 0;
	size_t runLength = 0;
	size_t longestRun = 0;
	for (size_t i = 0; i <= area.page_count; i++) {
		guarded_heap_page& page = area.pages[i];
		if (i == area.page_count
			|| (page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0) {
			freeCount += runLength;
			if (runLength > longestRun)
				longestRun = runLength;
			runLength = 0;
			continue;
		}

		runLength = 1;
		for (size_t j = 1; j < area.page_count - i; j++) {
			if ((area.pages[i + j].flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
				break;

			runLength++;
		}

		i += runLength - 1;
	}

	printf("longest free run: %" B_PRIuSIZE " (%" B_PRIuSIZE " free)\n",
		longestRun, freeCount);

	printf("pages: %p\n", area.pages);
}


static void
dump_guarded_heap(guarded_heap& heap)
{
	printf("guarded heap: %p\n", &heap);
	printf("rw lock: %p\n", &heap.lock);
	printf("page count: %" B_PRIuSIZE "\n", heap.page_count);
	printf("used pages: %" B_PRIuSIZE "\n", heap.used_pages);
	printf("area creation counter: %" B_PRIu32 "\n",
		heap.area_creation_counter);

	size_t areaCount = 0;
	guarded_heap_area* area = heap.areas;
	while (area != NULL) {
		areaCount++;
		area = area->next;
	}

	printf("areas: %p (%" B_PRIuSIZE ")\n", heap.areas, areaCount);
}


static void
dump_allocations(guarded_heap& heap, bool statsOnly, thread_id thread)
{
	WriteLocker heapLocker(heap.lock);

	size_t allocationCount = 0;
	size_t allocationSize = 0;
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {

		MutexLocker areaLocker(area->lock);
		for (size_t i = 0; i < area->page_count; i++) {
			guarded_heap_page& page = area->pages[i];
			if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0
				|| (page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
				continue;
			}

			if (thread >= 0 && thread != page.allocating_thread)
				continue;

			allocationCount++;
			allocationSize += page.allocation_size;

			if (statsOnly)
				continue;

			print_stdout("allocation: base: %p; size: %" B_PRIuSIZE
				"; thread: %" B_PRId32 "; alignment: %" B_PRIuSIZE "\n",
				page.allocation_base, page.allocation_size,
				page.allocating_thread, page.alignment);

			guarded_heap_print_stack_trace(page.stack_trace,
				page.alloc_stack_trace_depth);
		}
	}

	print_stdout("total allocations: %" B_PRIuSIZE ", %" B_PRIuSIZE " bytes\n",
		allocationCount, allocationSize);
}


static void
dump_allocations_full()
{
	dump_allocations(sGuardedHeap, false, -1);
}


// #pragma mark - Heap Debug API


static void
guarded_heap_set_memory_reuse(bool enabled)
{
	sGuardedHeap.reuse_memory = enabled;
}


static void
guarded_heap_set_debugger_calls(bool enabled)
{
	sDebuggerCalls = enabled;
}


static void
guarded_heap_set_default_alignment(size_t defaultAlignment)
{
	sDefaultAlignment = defaultAlignment;
}


static void
guarded_heap_dump_allocations(bool statsOnly, thread_id thread)
{
	dump_allocations(sGuardedHeap, statsOnly, thread);
}


static void
guarded_heap_dump_heaps(bool dumpAreas, bool dumpBins)
{
	WriteLocker heapLocker(sGuardedHeap.lock);
	dump_guarded_heap(sGuardedHeap);
	if (!dumpAreas)
		return;

	for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
			area = area->next) {
		MutexLocker areaLocker(area->lock);
		dump_guarded_heap_area(*area);

		if (!dumpBins)
			continue;

		for (size_t i = 0; i < area->page_count; i++) {
			dump_guarded_heap_page(area->pages[i]);
			if ((area->pages[i].flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
				guarded_heap_print_stack_traces(area->pages[i]);
		}
	}
}


static status_t
guarded_heap_set_dump_allocations_on_exit(bool enabled)
{
	sDumpAllocationsOnExit = enabled;
	return B_OK;
}


static status_t
guarded_heap_set_stack_trace_depth(size_t stackTraceDepth)
{
	if (stackTraceDepth == 0) {
		sStackTraceDepth = 0;
		return B_OK;
	}

	// This is rather wasteful, but these will be filled lazily by each thread
	// on alloc/free. Therefore we cannot use a dynamic allocation and just
	// store a pointer to it. Since we only need to store two addresses, we
	// use two TLS slots and set them to point at the stack base/end.
	if (sStackBaseTLSIndex < 0) {
		sStackBaseTLSIndex = tls_allocate();
		if (sStackBaseTLSIndex < 0)
			return sStackBaseTLSIndex;
	}

	if (sStackEndTLSIndex < 0) {
		sStackEndTLSIndex = tls_allocate();
		if (sStackEndTLSIndex < 0)
			return sStackEndTLSIndex;
	}

	sStackTraceDepth = min_c(stackTraceDepth, kMaxStackTraceDepth);
	return B_OK;
}


// #pragma mark - Init


static void
init_after_fork()
{
	// The memory has actually been copied (or is in a copy-on-write state),
	// but the area ids have changed.
	for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
			area = area->next) {
		area->area = area_for(area);
		if (area->area < 0)
			panic("failed to find area for heap area %p after fork", area);
	}
}


static status_t
guarded_heap_init(void)
{
	if (!guarded_heap_area_create(sGuardedHeap, GUARDED_HEAP_INITIAL_SIZE))
		return B_ERROR;

	// Install a segfault handler so we can print some info before going down.
	struct sigaction action;
	action.sa_handler = (__sighandler_t)guarded_heap_segfault_handler;
	action.sa_flags = SA_SIGINFO;
	action.sa_userdata = NULL;
	sigemptyset(&action.sa_mask);
	sigaction(SIGSEGV, &action, NULL);

	atfork(&init_after_fork);
		// Note: Needs malloc(). Hence we need to be fully initialized.
		// TODO: We should actually also install a hook that is called before
		// fork() is being executed. In a multithreaded app it would need to
		// acquire *all* allocator locks, so that we don't fork() an
		// inconsistent state.

	return B_OK;
}


static void
guarded_heap_terminate_after()
{
	if (sDumpAllocationsOnExit)
		dump_allocations_full();
}


// #pragma mark - Public API


static void*
heap_memalign(size_t alignment, size_t size)
{
	if (size == 0)
		size = 1;

	return guarded_heap_allocate(sGuardedHeap, size, alignment);
}


static void*
heap_malloc(size_t size)
{
	return heap_memalign(sDefaultAlignment, size);
}


static void
heap_free(void* address)
{
	if (!guarded_heap_free(address))
		panic("free failed for address %p", address);
}


static void*
heap_realloc(void* address, size_t newSize)
{
	if (newSize == 0) {
		free(address);
		return NULL;
	}

	if (address == NULL)
		return heap_memalign(sDefaultAlignment, newSize);

	return guarded_heap_realloc(address, newSize);
}


heap_implementation __mallocGuardedHeap = {
	guarded_heap_init,
	guarded_heap_terminate_after,

	heap_memalign,
	heap_malloc,
	heap_free,
	heap_realloc,

	NULL,	// calloc
	NULL,	// valloc
	NULL,	// posix_memalign

	NULL,	// start_wall_checking
	NULL,	// stop_wall_checking
	NULL,	// set_paranoid_validation

	guarded_heap_set_memory_reuse,
	guarded_heap_set_debugger_calls,
	guarded_heap_set_default_alignment,

	NULL,	// validate_heaps
	NULL,	// validate_walls

	guarded_heap_dump_allocations,
	guarded_heap_dump_heaps,
	heap_malloc,

	NULL,	// get_allocation_info

	guarded_heap_set_dump_allocations_on_exit,
	guarded_heap_set_stack_trace_depth
};