xref: /haiku/src/system/kernel/guarded_heap.cpp (revision 1e60bdeab63fa7a57bc9a55b032052e95a18bd2c)
1 /*
2  * Copyright 2011, Michael Lotz <mmlr@mlotz.ch>.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include <stdio.h>
8 #include <string.h>
9 
10 #include <arch/debug.h>
11 #include <elf.h>
12 #include <debug.h>
13 #include <heap.h>
14 #include <malloc.h>
15 #include <slab/Slab.h>
16 #include <team.h>
17 #include <tracing.h>
18 #include <util/list.h>
19 #include <util/AutoLock.h>
20 #include <vm/vm.h>
21 
22 
23 #if USE_GUARDED_HEAP_FOR_MALLOC
24 
25 
26 #define GUARDED_HEAP_PAGE_FLAG_USED		0x01
27 #define GUARDED_HEAP_PAGE_FLAG_FIRST	0x02
28 #define GUARDED_HEAP_PAGE_FLAG_GUARD	0x04
29 #define GUARDED_HEAP_PAGE_FLAG_DEAD		0x08
30 
31 #define GUARDED_HEAP_STACK_TRACE_DEPTH	0
32 
33 
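// Each allocation is rounded up to whole pages and followed by an
// inaccessible guard page; freed pages are unmapped as well, so buffer
// overruns and use-after-free accesses fault immediately. The structures
// below hold the per-page bookkeeping.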
34 struct guarded_heap;
35 
36 struct guarded_heap_page {
37 	uint8				flags;
38 	size_t				allocation_size;
39 	void*				allocation_base;
40 	size_t				alignment;
41 	team_id				team;
42 	thread_id			thread;
43 #if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
44 	size_t				stack_trace_depth;
45 	addr_t				stack_trace[GUARDED_HEAP_STACK_TRACE_DEPTH];
46 #endif
47 	list_link			free_list_link;
48 };
49 
50 struct guarded_heap_area {
51 	guarded_heap*		heap;
52 	guarded_heap_area*	next;
53 	area_id				area;
54 	addr_t				base;
55 	size_t				size;
56 	size_t				page_count;
57 	size_t				used_pages;
58 	void*				protection_cookie;
59 	mutex				lock;
60 	struct list			free_list;
61 	guarded_heap_page	pages[0];
62 };
63 
64 struct guarded_heap {
65 	rw_lock				lock;
66 	size_t				page_count;
67 	size_t				used_pages;
68 	int32				area_creation_counter;
69 	guarded_heap_area*	areas;
70 };
71 
72 
73 static guarded_heap sGuardedHeap = {
74 	RW_LOCK_INITIALIZER("guarded heap lock"),
75 	0, 0, 0, NULL
76 };
77 
78 
79 #if GUARDED_HEAP_TRACING
80 
81 namespace GuardedHeapTracing {
82 
83 
84 class GuardedHeapTraceEntry
85 	: public TRACE_ENTRY_SELECTOR(GUARDED_HEAP_TRACING_STACK_TRACE) {
86 	public:
87 		GuardedHeapTraceEntry(guarded_heap* heap)
88 			:
89 			TraceEntryBase(GUARDED_HEAP_TRACING_STACK_TRACE, 0, true),
90 			fHeap(heap)
91 		{
92 		}
93 
94 	protected:
95 		guarded_heap*	fHeap;
96 };
97 
98 
99 class Allocate : public GuardedHeapTraceEntry {
100 	public:
101 		Allocate(guarded_heap* heap, void* pageBase, uint32 flags)
102 			:
103 			GuardedHeapTraceEntry(heap),
104 			fPageBase(pageBase),
105 			fFlags(flags)
106 		{
107 			Initialized();
108 		}
109 
110 		virtual void AddDump(TraceOutput& out)
111 		{
112 			out.Print("guarded heap allocate: heap: %p; page: %p; "
113 				"flags:%s%s%s%s", fHeap, fPageBase,
114 				(fFlags & GUARDED_HEAP_PAGE_FLAG_USED) != 0 ? " used" : "",
115 				(fFlags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0 ? " first" : "",
116 				(fFlags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0 ? " guard" : "",
117 				(fFlags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0 ? " dead" : "");
118 		}
119 
120 	private:
121 		void*		fPageBase;
122 		uint32		fFlags;
123 };
124 
125 
126 class Free : public GuardedHeapTraceEntry {
127 	public:
128 		Free(guarded_heap* heap, void* pageBase)
129 			:
130 			GuardedHeapTraceEntry(heap),
131 			fPageBase(pageBase)
132 		{
133 			Initialized();
134 		}
135 
136 		virtual void AddDump(TraceOutput& out)
137 		{
138 			out.Print("guarded heap free: heap: %p; page: %p", fHeap,
139 				fPageBase);
140 		}
141 
142 	private:
143 		void*		fPageBase;
144 };
145 
146 
147 }	// namespace GuardedHeapTracing
148 
149 #	define T(x)	new(std::nothrow) GuardedHeapTracing::x
150 #else
151 #	define T(x)
152 #endif	// GUARDED_HEAP_TRACING
153 
154 
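// Apply the given protection to a single page of the area. Areas that do
// not have a real area id yet (the bootstrap area during early boot) are
// skipped; their protection is applied later in heap_init_post_sem().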
155 static void
156 guarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
157 	uint32 protection)
158 {
159 	if (area.area < 0)
160 		return;
161 
162 	addr_t address = area.base + pageIndex * B_PAGE_SIZE;
163 	vm_set_kernel_area_debug_protection(area.protection_cookie, (void*)address,
164 		B_PAGE_SIZE, protection);
165 }
166 
167 
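// Mark pagesNeeded consecutive pages as used. The first page records the
// allocating team/thread, allocation size, base and alignment (and a stack
// trace when enabled); the last page becomes the inaccessible guard page.
// All pages are removed from the free list.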
168 static void
169 guarded_heap_page_allocate(guarded_heap_area& area, size_t startPageIndex,
170 	size_t pagesNeeded, size_t allocationSize, size_t alignment,
171 	void* allocationBase)
172 {
173 	if (pagesNeeded < 2) {
174 		panic("need to allocate at least 2 pages, one for guard\n");
175 		return;
176 	}
177 
178 	guarded_heap_page* firstPage = NULL;
179 	for (size_t i = 0; i < pagesNeeded; i++) {
180 		guarded_heap_page& page = area.pages[startPageIndex + i];
181 		page.flags = GUARDED_HEAP_PAGE_FLAG_USED;
182 		if (i == 0) {
183 			page.team = (gKernelStartup ? 0 : team_get_current_team_id());
184 			page.thread = find_thread(NULL);
185 #if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
186 			page.stack_trace_depth = arch_debug_get_stack_trace(
187 				page.stack_trace, GUARDED_HEAP_STACK_TRACE_DEPTH, 0, 4,
188 				STACK_TRACE_KERNEL);
189 #endif
190 			page.allocation_size = allocationSize;
191 			page.allocation_base = allocationBase;
192 			page.alignment = alignment;
193 			page.flags |= GUARDED_HEAP_PAGE_FLAG_FIRST;
194 			firstPage = &page;
195 		} else {
196 			page.team = firstPage->team;
197 			page.thread = firstPage->thread;
198 #if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
199 			page.stack_trace_depth = 0;
200 #endif
201 			page.allocation_size = allocationSize;
202 			page.allocation_base = allocationBase;
203 			page.alignment = alignment;
204 		}
205 
206 		list_remove_item(&area.free_list, &page);
207 
208 		if (i == pagesNeeded - 1) {
209 			page.flags |= GUARDED_HEAP_PAGE_FLAG_GUARD;
210 			guarded_heap_page_protect(area, startPageIndex + i, 0);
211 		} else {
212 			guarded_heap_page_protect(area, startPageIndex + i,
213 				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
214 		}
215 
216 		T(Allocate(area.heap,
217 			(void*)(area.base + (startPageIndex + i) * B_PAGE_SIZE),
218 			page.flags));
219 	}
220 }
221 
222 
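// Return a single page to the area's free list, recording who freed it and
// revoking all access so that later use faults. With
// DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE the page is instead marked dead
// (unless forced or in the bootstrap area) and never handed out again.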
223 static void
224 guarded_heap_free_page(guarded_heap_area& area, size_t pageIndex,
225 	bool force = false)
226 {
227 	guarded_heap_page& page = area.pages[pageIndex];
228 
229 #if DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE
230 	if (force || area.area < 0)
231 		page.flags = 0;
232 	else
233 		page.flags |= GUARDED_HEAP_PAGE_FLAG_DEAD;
234 #else
235 	page.flags = 0;
236 #endif
237 
238 	page.allocation_size = 0;
239 	page.team = (gKernelStartup ? 0 : team_get_current_team_id());
240 	page.thread = find_thread(NULL);
241 
242 #if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
243 	page.stack_trace_depth = arch_debug_get_stack_trace(page.stack_trace,
244 		GUARDED_HEAP_STACK_TRACE_DEPTH, 0, 3, STACK_TRACE_KERNEL);
245 #endif
246 
247 	list_add_item(&area.free_list, &page);
248 
249 	guarded_heap_page_protect(area, pageIndex, 0);
250 
251 	T(Free(area.heap, (void*)(area.base + pageIndex * B_PAGE_SIZE)));
252 }
253 
254 
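// Account the newly allocated pages in the heap-wide usage count and return
// whether usage is within half a HEAP_GROW_SIZE (in pages) of the heap's
// capacity, i.e. whether a new area should be added.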
255 static bool
256 guarded_heap_pages_allocated(guarded_heap& heap, size_t pagesAllocated)
257 {
258 	return (atomic_add((int32*)&heap.used_pages, pagesAllocated)
259 			+ pagesAllocated)
260 		>= heap.page_count - HEAP_GROW_SIZE / B_PAGE_SIZE / 2;
261 }
262 
263 
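// Allocate from a single area. The request needs its size rounded up to
// whole pages plus one trailing guard page; the returned address is placed
// so the allocation ends as close to the guard page as the requested
// alignment allows.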
264 static void*
265 guarded_heap_area_allocate(guarded_heap_area& area, size_t size,
266 	size_t alignment, uint32 flags, bool& grow)
267 {
268 	if (alignment > B_PAGE_SIZE) {
269 		panic("alignment of %" B_PRIuSIZE " not supported", alignment);
270 		return NULL;
271 	}
272 
273 	size_t pagesNeeded = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE + 1;
274 	if (pagesNeeded > area.page_count - area.used_pages)
275 		return NULL;
276 
277 	if (pagesNeeded > area.page_count)
278 		return NULL;
279 
280 	// We use the free list this way so that the page that has been free for
281 	// the longest time is allocated. This keeps immediate re-use (that may
282 	// hide bugs) to a minimum.
283 	guarded_heap_page* page
284 		= (guarded_heap_page*)list_get_first_item(&area.free_list);
285 
286 	for (; page != NULL;
287 		page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {
288 
289 		if ((page->flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
290 			continue;
291 
292 		size_t pageIndex = page - area.pages;
293 		if (pageIndex > area.page_count - pagesNeeded)
294 			continue;
295 
296 		// Candidate, check if we have enough pages going forward
297 		// (including the guard page).
298 		bool candidate = true;
299 		for (size_t j = 1; j < pagesNeeded; j++) {
300 			if ((area.pages[pageIndex + j].flags & GUARDED_HEAP_PAGE_FLAG_USED)
301 					!= 0) {
302 				candidate = false;
303 				break;
304 			}
305 		}
306 
307 		if (!candidate)
308 			continue;
309 
310 		if (alignment == 0)
311 			alignment = 1;
312 
313 		size_t offset = size & (B_PAGE_SIZE - 1);
314 		void* result = (void*)((area.base + pageIndex * B_PAGE_SIZE
315 			+ (offset > 0 ? B_PAGE_SIZE - offset : 0)) & ~(alignment - 1));
316 
317 		guarded_heap_page_allocate(area, pageIndex, pagesNeeded, size,
318 			alignment, result);
319 
320 		area.used_pages += pagesNeeded;
321 		grow = guarded_heap_pages_allocated(*area.heap, pagesNeeded);
322 		return result;
323 	}
324 
325 	return NULL;
326 }
327 
328 
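// Set up a heap area within the given memory block. The guarded_heap_area
// header and its page bookkeeping array occupy the first pages; the rest
// becomes usable heap space with every page initially on the free list.
// The area is then linked into the heap under the heap write lock.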
329 static bool
330 guarded_heap_area_init(guarded_heap& heap, area_id id, void* baseAddress,
331 	size_t size, uint32 flags)
332 {
333 	guarded_heap_area* area = (guarded_heap_area*)baseAddress;
334 	area->heap = &heap;
335 	area->area = id;
336 	area->size = size;
337 	area->page_count = area->size / B_PAGE_SIZE;
338 	area->used_pages = 0;
339 
340 	size_t pagesNeeded = (sizeof(guarded_heap_area)
341 		+ area->page_count * sizeof(guarded_heap_page)
342 		+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;
343 
344 	area->page_count -= pagesNeeded;
345 	area->size = area->page_count * B_PAGE_SIZE;
346 	area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;
347 
348 	if (area->area >= 0 && vm_prepare_kernel_area_debug_protection(area->area,
349 			&area->protection_cookie) != B_OK) {
350 		return false;
351 	}
352 
353 	mutex_init(&area->lock, "guarded_heap_area_lock");
354 
355 	list_init_etc(&area->free_list,
356 		offsetof(guarded_heap_page, free_list_link));
357 
358 	for (size_t i = 0; i < area->page_count; i++)
359 		guarded_heap_free_page(*area, i, true);
360 
361 	WriteLocker areaListWriteLocker(heap.lock);
362 	area->next = heap.areas;
363 	heap.areas = area;
364 	heap.page_count += area->page_count;
365 
366 	return true;
367 }
368 
369 
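// Create a new heap area, halving the size from HEAP_GROW_SIZE down to 1 MB
// until create_area() succeeds. Panics if no area can be created at all.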
370 static bool
371 guarded_heap_area_create(guarded_heap& heap, uint32 flags)
372 {
373 	for (size_t trySize = HEAP_GROW_SIZE; trySize >= 1 * 1024 * 1024;
374 		trySize /= 2) {
375 
376 		void* baseAddress = NULL;
377 		area_id id = create_area("guarded_heap_area", &baseAddress,
378 			B_ANY_KERNEL_ADDRESS, trySize, B_FULL_LOCK,
379 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
380 
381 		if (id < 0)
382 			continue;
383 
384 		if (guarded_heap_area_init(heap, id, baseAddress, trySize, flags))
385 			return true;
386 
387 		delete_area(id);
388 	}
389 
390 	panic("failed to allocate a new heap area");
391 	return false;
392 }
393 
394 
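// Grow the heap by one area. The area_creation_counter acts as a ticket:
// only the caller that observed the passed-in counter value creates the
// area, so concurrent allocators do not add several areas at once. Callers
// that must not lock kernel space or wait for memory cannot grow the heap.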
395 static bool
396 guarded_heap_add_area(guarded_heap& heap, int32 counter, uint32 flags)
397 {
398 	if ((flags & (HEAP_DONT_LOCK_KERNEL_SPACE | HEAP_DONT_WAIT_FOR_MEMORY))
399 			!= 0) {
400 		return false;
401 	}
402 
403 	if (atomic_test_and_set(&heap.area_creation_counter,
404 			counter + 1, counter) == counter) {
405 		return guarded_heap_area_create(heap, flags);
406 	}
407 
408 	return false;
409 }
410 
411 
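// Top level allocation: try each existing area under its mutex while
// holding the heap read lock. If nothing fit, or the grow threshold was
// crossed, try to add another area; a failed allocation is retried once a
// new area has been added. Panics when memory is exhausted entirely.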
412 static void*
413 guarded_heap_allocate(guarded_heap& heap, size_t size, size_t alignment,
414 	uint32 flags)
415 {
416 	bool grow = false;
417 	void* result = NULL;
418 	ReadLocker areaListReadLocker(heap.lock);
419 	for (guarded_heap_area* area = heap.areas; area != NULL;
420 			area = area->next) {
421 
422 		MutexLocker locker(area->lock);
423 		result = guarded_heap_area_allocate(*area, size, alignment, flags,
424 			grow);
425 		if (result != NULL)
426 			break;
427 	}
428 
429 	int32 counter = atomic_get(&heap.area_creation_counter);
430 	areaListReadLocker.Unlock();
431 
432 	if (result == NULL || grow) {
433 		bool added = guarded_heap_add_area(heap, counter, flags);
434 		if (result == NULL && added)
435 			return guarded_heap_allocate(heap, size, alignment, flags);
436 	}
437 
438 	if (result == NULL)
439 		panic("ran out of memory");
440 
441 	return result;
442 }
443 
444 
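// Find the area containing the given address and return it with its mutex
// already locked; callers adopt the lock (MutexLocker with
// alreadyLocked == true). Panics if the address is not part of the heap.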
445 static guarded_heap_area*
446 guarded_heap_get_locked_area_for(guarded_heap& heap, void* address)
447 {
448 	ReadLocker areaListReadLocker(heap.lock);
449 	for (guarded_heap_area* area = heap.areas; area != NULL;
450 			area = area->next) {
451 		if ((addr_t)address < area->base)
452 			continue;
453 
454 		if ((addr_t)address >= area->base + area->size)
455 			continue;
456 
457 		mutex_lock(&area->lock);
458 		return area;
459 	}
460 
461 	panic("guarded heap area for address %p not found", address);
462 	return NULL;
463 }
464 
465 
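// Translate an address into its page index. Panics (and returns the invalid
// index area.page_count) unless the address points at the first page of a
// live allocation, i.e. not a guard, dead or unused page.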
466 static size_t
467 guarded_heap_area_page_index_for(guarded_heap_area& area, void* address)
468 {
469 	size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
470 	guarded_heap_page& page = area.pages[pageIndex];
471 	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0) {
472 		panic("tried to free %p which points at page %" B_PRIuSIZE
473 			" which is not marked in use", address, pageIndex);
474 		return area.page_count;
475 	}
476 
477 	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0) {
478 		panic("tried to free %p which points at page %" B_PRIuSIZE
479 			" which is a guard page", address, pageIndex);
480 		return area.page_count;
481 	}
482 
483 	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0) {
484 		panic("tried to free %p which points at page %" B_PRIuSIZE
485 			" which is not an allocation first page", address, pageIndex);
486 		return area.page_count;
487 	}
488 
489 	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
490 		panic("tried to free %p which points at page %" B_PRIuSIZE
491 			" which is a dead page", address, pageIndex);
492 		return area.page_count;
493 	}
494 
495 	return pageIndex;
496 }
497 
498 
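// Free a complete allocation: every page up to and including the trailing
// guard page is released. With DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE the
// usage counters are left untouched, since the pages stay marked dead and
// are not reused.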
499 static void
500 guarded_heap_area_free(guarded_heap_area& area, void* address, uint32 flags)
501 {
502 	size_t pageIndex = guarded_heap_area_page_index_for(area, address);
503 	if (pageIndex >= area.page_count)
504 		return;
505 
506 	size_t pagesFreed = 0;
507 	guarded_heap_page* page = &area.pages[pageIndex];
508 	while ((page->flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
509 		// Mark the allocation page as free.
510 		guarded_heap_free_page(area, pageIndex);
511 
512 		pagesFreed++;
513 		pageIndex++;
514 		page = &area.pages[pageIndex];
515 	}
516 
517 	// Mark the guard page as free as well.
518 	guarded_heap_free_page(area, pageIndex);
519 	pagesFreed++;
520 
521 #if !DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE
522 	area.used_pages -= pagesFreed;
523 	atomic_add((int32*)&area.heap->used_pages, -pagesFreed);
524 #endif
525 }
526 
527 
528 static void
529 guarded_heap_free(void* address, uint32 flags)
530 {
531 	if (address == NULL)
532 		return;
533 
534 	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
535 		address);
536 	if (area == NULL)
537 		return;
538 
539 	MutexLocker locker(area->lock, true);
540 	guarded_heap_area_free(*area, address, flags);
541 }
542 
543 
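// realloc() backend: look up the old allocation size under the area lock;
// if the size actually changed, allocate a new block, copy the smaller of
// the two sizes and free the old block.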
544 static void*
545 guarded_heap_realloc(void* address, size_t newSize)
546 {
547 	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
548 		address);
549 	if (area == NULL)
550 		return NULL;
551 
552 	MutexLocker locker(area->lock, true);
553 
554 	size_t pageIndex = guarded_heap_area_page_index_for(*area, address);
555 	if (pageIndex >= area->page_count)
556 		return NULL;
557 
558 	guarded_heap_page& page = area->pages[pageIndex];
559 	size_t oldSize = page.allocation_size;
560 	locker.Unlock();
561 
562 	if (oldSize == newSize)
563 		return address;
564 
565 	void* newBlock = memalign(0, newSize);
566 	if (newBlock == NULL)
567 		return NULL;
568 
569 	memcpy(newBlock, address, min_c(oldSize, newSize));
570 
571 	free(address);
572 
573 	return newBlock;
574 }
575 
576 
577 // #pragma mark - Debugger commands
578 
579 
580 static int
581 dump_guarded_heap_page(int argc, char** argv)
582 {
583 	if (argc != 2) {
584 		print_debugger_command_usage(argv[0]);
585 		return 0;
586 	}
587 
588 	addr_t address = parse_expression(argv[1]);
589 
590 	// Find the area that contains this page.
591 	guarded_heap_area* area = NULL;
592 	for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
593 			candidate = candidate->next) {
594 
595 		if (address < candidate->base)
596 			continue;
597 		if (address >= candidate->base + candidate->size)
598 			continue;
599 
600 		area = candidate;
601 		break;
602 	}
603 
604 	if (area == NULL) {
605 		kprintf("didn't find area for address\n");
606 		return 1;
607 	}
608 
609 	size_t pageIndex = ((addr_t)address - area->base) / B_PAGE_SIZE;
610 	guarded_heap_page& page = area->pages[pageIndex];
611 
612 	kprintf("page index: %" B_PRIuSIZE "\n", pageIndex);
613 	kprintf("flags:");
614 	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
615 		kprintf(" used");
616 	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
617 		kprintf(" first");
618 	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
619 		kprintf(" guard");
620 	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
621 		kprintf(" dead");
622 	kprintf("\n");
623 
624 	kprintf("allocation size: %" B_PRIuSIZE "\n", page.allocation_size);
625 	kprintf("allocation base: %p\n", page.allocation_base);
626 	kprintf("alignment: %" B_PRIuSIZE "\n", page.alignment);
627 	kprintf("allocating team: %" B_PRId32 "\n", page.team);
628 	kprintf("allocating thread: %" B_PRId32 "\n", page.thread);
629 
630 #if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
631 	kprintf("stack trace:\n");
632 	for (size_t i = 0; i < page.stack_trace_depth; i++) {
633 		addr_t address = page.stack_trace[i];
634 
635 		const char* symbol;
636 		const char* imageName;
637 		bool exactMatch;
638 		addr_t baseAddress;
639 
640 		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
641 				&imageName, &exactMatch) == B_OK) {
642 			kprintf("  %p  %s + 0x%lx (%s)%s\n", (void*)address, symbol,
643 				address - baseAddress, imageName,
644 				exactMatch ? "" : " (nearest)");
645 		} else
646 			kprintf("  %p\n", (void*)address);
647 	}
648 #endif
649 
650 	return 0;
651 }
652 
653 
654 static int
655 dump_guarded_heap_area(int argc, char** argv)
656 {
657 	if (argc != 2) {
658 		print_debugger_command_usage(argv[0]);
659 		return 0;
660 	}
661 
662 	addr_t address = parse_expression(argv[1]);
663 
664 	// Find the area at or containing the given address.
665 	guarded_heap_area* area = NULL;
666 	for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
667 			candidate = candidate->next) {
668 
669 		if ((addr_t)candidate != address) {
670 			if (address < candidate->base)
671 				continue;
672 			if (address >= candidate->base + candidate->size)
673 				continue;
674 		}
675 
676 		area = candidate;
677 		break;
678 	}
679 
680 	if (area == NULL) {
681 		kprintf("didn't find area for address\n");
682 		return 1;
683 	}
684 
685 	kprintf("guarded heap area: %p\n", area);
686 	kprintf("next heap area: %p\n", area->next);
687 	kprintf("guarded heap: %p\n", area->heap);
688 	kprintf("area id: %" B_PRId32 "\n", area->area);
689 	kprintf("base: 0x%" B_PRIxADDR "\n", area->base);
690 	kprintf("size: %" B_PRIuSIZE "\n", area->size);
691 	kprintf("page count: %" B_PRIuSIZE "\n", area->page_count);
692 	kprintf("used pages: %" B_PRIuSIZE "\n", area->used_pages);
693 	kprintf("protection cookie: %p\n", area->protection_cookie);
694 	kprintf("lock: %p\n", &area->lock);
695 
696 	size_t freeCount = 0;
697 	void* item = list_get_first_item(&area->free_list);
698 	while (item != NULL) {
699 		freeCount++;
700 
701 		if ((((guarded_heap_page*)item)->flags & GUARDED_HEAP_PAGE_FLAG_USED)
702 				!= 0) {
703 			kprintf("free list broken, page %p not actually free\n", item);
704 		}
705 
706 		item = list_get_next_item(&area->free_list, item);
707 	}
708 
709 	kprintf("free_list: %p (%" B_PRIuSIZE " free)\n", &area->free_list,
710 		freeCount);
711 
712 	freeCount = 0;
713 	size_t runLength = 0;
714 	size_t longestRun = 0;
715 	for (size_t i = 0; i <= area->page_count; i++) {
716 		guarded_heap_page& page = area->pages[i];
717 		if (i == area->page_count
718 			|| (page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0) {
719 			freeCount += runLength;
720 			if (runLength > longestRun)
721 				longestRun = runLength;
722 			runLength = 0;
723 			continue;
724 		}
725 
726 		runLength = 1;
727 		for (size_t j = 1; j < area->page_count - i; j++) {
728 			if ((area->pages[i + j].flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
729 				break;
730 
731 			runLength++;
732 		}
733 
734 		i += runLength - 1;
735 	}
736 
737 	kprintf("longest free run: %" B_PRIuSIZE " (%" B_PRIuSIZE " free)\n",
738 		longestRun, freeCount);
739 
740 	kprintf("pages: %p\n", area->pages);
741 
742 	return 0;
743 }
744 
745 
746 static int
747 dump_guarded_heap(int argc, char** argv)
748 {
749 	guarded_heap* heap = &sGuardedHeap;
750 	if (argc != 1) {
751 		if (argc == 2)
752 			heap = (guarded_heap*)parse_expression(argv[1]);
753 		else {
754 			print_debugger_command_usage(argv[0]);
755 			return 0;
756 		}
757 	}
758 
759 	kprintf("guarded heap: %p\n", heap);
760 	kprintf("rw lock: %p\n", &heap->lock);
761 	kprintf("page count: %" B_PRIuSIZE "\n", heap->page_count);
762 	kprintf("used pages: %" B_PRIuSIZE "\n", heap->used_pages);
763 	kprintf("area creation counter: %" B_PRId32 "\n",
764 		heap->area_creation_counter);
765 
766 	size_t areaCount = 0;
767 	guarded_heap_area* area = heap->areas;
768 	while (area != NULL) {
769 		areaCount++;
770 		area = area->next;
771 	}
772 
773 	kprintf("areas: %p (%" B_PRIuSIZE ")\n", heap->areas, areaCount);
774 
775 	return 0;
776 }
777 
778 
779 static int
780 dump_guarded_heap_allocations(int argc, char** argv)
781 {
782 	team_id team = -1;
783 	thread_id thread = -1;
784 	addr_t address = 0;
785 	bool statsOnly = false;
786 
787 	for (int32 i = 1; i < argc; i++) {
788 		if (strcmp(argv[i], "team") == 0)
789 			team = parse_expression(argv[++i]);
790 		else if (strcmp(argv[i], "thread") == 0)
791 			thread = parse_expression(argv[++i]);
792 		else if (strcmp(argv[i], "address") == 0)
793 			address = parse_expression(argv[++i]);
794 		else if (strcmp(argv[i], "stats") == 0)
795 			statsOnly = true;
796 		else {
797 			print_debugger_command_usage(argv[0]);
798 			return 0;
799 		}
800 	}
801 
802 	size_t totalSize = 0;
803 	uint32 totalCount = 0;
804 
805 	guarded_heap_area* area = sGuardedHeap.areas;
806 	while (area != NULL) {
807 		for (size_t i = 0; i < area->page_count; i++) {
808 			guarded_heap_page& page = area->pages[i];
809 			if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0)
810 				continue;
811 
812 			if ((team < 0 || page.team == team)
813 				&& (thread < 0 || page.thread == thread)
814 				&& (address == 0 || (addr_t)page.allocation_base == address)) {
815 
816 				if (!statsOnly) {
817 					kprintf("team: % 6" B_PRId32 "; thread: % 6" B_PRId32 "; "
818 						"address: 0x%08" B_PRIxADDR "; size: %" B_PRIuSIZE
819 						" bytes\n", page.team, page.thread,
820 						(addr_t)page.allocation_base, page.allocation_size);
821 				}
822 
823 				totalSize += page.allocation_size;
824 				totalCount++;
825 			}
826 		}
827 
828 		area = area->next;
829 	}
830 
831 	kprintf("total allocations: %" B_PRIu32 "; total bytes: %" B_PRIuSIZE
832 		"\n", totalCount, totalSize);
833 	return 0;
834 }
835 
836 
837 // #pragma mark - Malloc API
838 
839 
840 status_t
841 heap_init(addr_t address, size_t size)
842 {
843 	return guarded_heap_area_init(sGuardedHeap, -1, (void*)address, size, 0)
844 		? B_OK : B_ERROR;
845 }
846 
847 
848 status_t
849 heap_init_post_area()
850 {
851 	return B_OK;
852 }
853 
854 
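// The bootstrap heap area set up by heap_init() has no area id yet: look it
// up, prepare debug protection for it and reapply the per-page protection,
// then register the debugger commands.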
855 status_t
856 heap_init_post_sem()
857 {
858 	for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
859 			area = area->next) {
860 		if (area->area >= 0)
861 			continue;
862 
863 		area_id id = area_for((void*)area->base);
864 		if (id < 0 || vm_prepare_kernel_area_debug_protection(id,
865 				&area->protection_cookie) != B_OK) {
866 			panic("failed to prepare initial guarded heap for protection");
867 			continue;
868 		}
869 
870 		area->area = id;
871 		for (size_t i = 0; i < area->page_count; i++) {
872 			guarded_heap_page& page = area->pages[i];
873 			if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0
874 				&& (page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0
875 				&& (page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) == 0) {
876 				guarded_heap_page_protect(*area, i,
877 					B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
878 			} else
879 				guarded_heap_page_protect(*area, i, 0);
880 		}
881 	}
882 
883 	add_debugger_command("guarded_heap", &dump_guarded_heap,
884 		"Dump info about the guarded heap");
885 	add_debugger_command_etc("guarded_heap_area", &dump_guarded_heap_area,
886 		"Dump info about a guarded heap area",
887 		"<address>\nDump info about guarded heap area containing address.\n",
888 		0);
889 	add_debugger_command_etc("guarded_heap_page", &dump_guarded_heap_page,
890 		"Dump info about a guarded heap page",
891 		"<address>\nDump info about guarded heap page containing address.\n",
892 		0);
893 	add_debugger_command_etc("allocations", &dump_guarded_heap_allocations,
894 		"Dump current heap allocations",
895 		"[\"stats\"] [team] [thread] [address]\n"
896 		"If no parameters are given, all current allocations are dumped.\n"
897 		"If the optional argument \"stats\" is specified, only the allocation\n"
898 		"counts and no individual allocations are printed.\n"
899 		"If a specific allocation address is given, only this allocation is\n"
900 		"dumped.\n"
901 		"If a team and/or thread is specified, only allocations of this\n"
902 		"team/thread are dumped.\n", 0);
903 
904 	return B_OK;
905 }
906 
907 
908 void*
909 memalign(size_t alignment, size_t size)
910 {
911 	return memalign_etc(alignment, size, 0);
912 }
913 
914 
915 void *
916 memalign_etc(size_t alignment, size_t size, uint32 flags)
917 {
918 	if (size == 0)
919 		size = 1;
920 
921 	return guarded_heap_allocate(sGuardedHeap, size, alignment, flags);
922 }
923 
924 
925 void
926 free_etc(void *address, uint32 flags)
927 {
928 	guarded_heap_free(address, flags);
929 }
930 
931 
932 void*
933 malloc(size_t size)
934 {
935 	return memalign_etc(0, size, 0);
936 }
937 
938 
939 void
940 free(void* address)
941 {
942 	free_etc(address, 0);
943 }
944 
945 
946 void*
947 realloc(void* address, size_t newSize)
948 {
949 	if (newSize == 0) {
950 		free(address);
951 		return NULL;
952 	}
953 
954 	if (address == NULL)
955 		return memalign(0, newSize);
956 
957 	return guarded_heap_realloc(address, newSize);
958 }
959 
960 
961 #if USE_GUARDED_HEAP_FOR_OBJECT_CACHE
962 
963 
964 // #pragma mark - Slab API
965 
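// When the guarded heap also replaces the object cache, the slab API is
// reduced to stubs: the returned object_cache pointer merely encodes the
// object size, and object_cache_alloc()/object_cache_free() forward to the
// guarded heap's malloc functions.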
966 
967 void
968 request_memory_manager_maintenance()
969 {
970 }
971 
972 
973 object_cache*
974 create_object_cache(const char*, size_t objectSize, size_t, void*,
975 	object_cache_constructor, object_cache_destructor)
976 {
977 	return (object_cache*)objectSize;
978 }
979 
980 
981 object_cache*
982 create_object_cache_etc(const char*, size_t objectSize, size_t, size_t, size_t,
983 	size_t, uint32, void*, object_cache_constructor, object_cache_destructor,
984 	object_cache_reclaimer)
985 {
986 	return (object_cache*)objectSize;
987 }
988 
989 
990 void
991 delete_object_cache(object_cache* cache)
992 {
993 }
994 
995 
996 status_t
997 object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
998 {
999 	return B_OK;
1000 }
1001 
1002 
1003 void*
1004 object_cache_alloc(object_cache* cache, uint32 flags)
1005 {
1006 	return memalign_etc(0, (size_t)cache, flags);
1007 }
1008 
1009 
1010 void
1011 object_cache_free(object_cache* cache, void* object, uint32 flags)
1012 {
1013 	return free_etc(object, flags);
1014 }
1015 
1016 
1017 status_t
1018 object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
1019 {
1020 	return B_OK;
1021 }
1022 
1023 
1024 void
1025 object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
1026 {
1027 	*_allocatedMemory = 0;
1028 }
1029 
1030 
1031 void
1032 slab_init(kernel_args* args)
1033 {
1034 }
1035 
1036 
1037 void
1038 slab_init_post_area()
1039 {
1040 }
1041 
1042 
1043 void
1044 slab_init_post_sem()
1045 {
1046 }
1047 
1048 
1049 void
1050 slab_init_post_thread()
1051 {
1052 }
1053 
1054 
1055 #endif	// USE_GUARDED_HEAP_FOR_OBJECT_CACHE
1056 
1057 
1058 #endif	// USE_GUARDED_HEAP_FOR_MALLOC
1059