/*
 * Copyright 2011, Michael Lotz <mmlr@mlotz.ch>.
 * Copyright 2011, Ingo Weinhold <ingo_weinhold@gmx.de>.
 *
 * Distributed under the terms of the MIT License.
 */
#ifndef SLAB_DEBUG_H
#define SLAB_DEBUG_H


#include <AllocationTracking.h>
#include <debug.h>
#include <slab/Slab.h>
#include <tracing.h>

#include "kernel_debug_config.h"


//#define TRACE_SLAB
#ifdef TRACE_SLAB
#define TRACE_CACHE(cache, format, args...) \
	dprintf("Cache[%p, %s] " format "\n", cache, cache->name, ##args)
#else
#define TRACE_CACHE(cache, format, args...) do { } while (0)
#endif


#define COMPONENT_PARANOIA_LEVEL	OBJECT_CACHE_PARANOIA
#include <debug_paranoia.h>


// Macros determining whether allocation tracking is actually available.
#define SLAB_OBJECT_CACHE_ALLOCATION_TRACKING (SLAB_ALLOCATION_TRACKING != 0 \
	&& SLAB_OBJECT_CACHE_TRACING != 0 \
	&& SLAB_OBJECT_CACHE_TRACING_STACK_TRACE > 0)
	// The object cache code needs to do allocation tracking.
#define SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING (SLAB_ALLOCATION_TRACKING != 0 \
	&& SLAB_MEMORY_MANAGER_TRACING != 0 \
	&& SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE > 0)
	// The memory manager code needs to do allocation tracking.
#define SLAB_ALLOCATION_TRACKING_AVAILABLE \
	(SLAB_OBJECT_CACHE_ALLOCATION_TRACKING \
		|| SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING)
	// Guards code that is needed for either object cache or memory manager
	// allocation tracking.


struct object_depot;


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

namespace BKernel {

class AllocationTrackingCallback {
public:
	virtual						~AllocationTrackingCallback();

	virtual	bool				ProcessTrackingInfo(
									AllocationTrackingInfo* info,
									void* allocation,
									size_t allocationSize) = 0;
};

}

using BKernel::AllocationTrackingCallback;

#endif // SLAB_ALLOCATION_TRACKING_AVAILABLE


void		dump_object_depot(object_depot* depot);
int			dump_object_depot(int argCount, char** args);
int			dump_depot_magazine(int argCount, char** args);


#if PARANOID_KERNEL_MALLOC || PARANOID_KERNEL_FREE
// Fills the buffer with the given 32-bit pattern; the size is rounded down
// to a multiple of sizeof(pattern).
static inline void*
fill_block(void* buffer, size_t size, uint32 pattern)
{
	if (buffer == NULL)
		return NULL;

	size &= ~(sizeof(pattern) - 1);
	for (size_t i = 0; i < size / sizeof(pattern); i++)
		((uint32*)buffer)[i] = pattern;

	return buffer;
}
#endif


static inline void*
fill_allocated_block(void* buffer, size_t size)
{
#if PARANOID_KERNEL_MALLOC
	return fill_block(buffer, size, 0xcccccccc);
#else
	return buffer;
#endif
}


static inline void*
fill_freed_block(void* buffer, size_t size)
{
#if PARANOID_KERNEL_FREE
	return fill_block(buffer, size, 0xdeadbeef);
#else
	return buffer;
#endif
}


#endif	// SLAB_DEBUG_H
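
/*
 * Usage sketch (illustrative only, not part of the interface above): how an
 * allocator might call fill_allocated_block() and fill_freed_block() to
 * poison memory. The names sketch_alloc()/sketch_free() and the use of
 * malloc()/free() as the backing allocator are assumptions made for this
 * example.
 *
 *	static void*
 *	sketch_alloc(size_t size)
 *	{
 *		void* block = malloc(size);
 *		// With PARANOID_KERNEL_MALLOC set, fresh memory is filled with
 *		// 0xcccccccc so use of uninitialized data stands out.
 *		return fill_allocated_block(block, size);
 *	}
 *
 *	static void
 *	sketch_free(void* block, size_t size)
 *	{
 *		// With PARANOID_KERNEL_FREE set, freed memory is overwritten with
 *		// 0xdeadbeef so use-after-free reads are easy to spot.
 *		fill_freed_block(block, size);
 *		free(block);
 *	}
 */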