xref: /haiku/src/system/kernel/debug/debug_heap.cpp (revision 7749d0bb0c358a3279b1b9cc76d8376e900130a5)
/*
 * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de
 * Distributed under the terms of the MIT License.
 */


#include <debug_heap.h>

#include <new>

#include <util/AutoLock.h>
#include <vm/vm.h>


#define INITIAL_DEBUG_HEAP_SIZE	B_PAGE_SIZE

static char sInitialHeap[INITIAL_DEBUG_HEAP_SIZE] __attribute__ ((aligned (8)));
static void* sHeapBase = sInitialHeap;
static size_t sHeapSize = INITIAL_DEBUG_HEAP_SIZE;

const kdebug_alloc_t kdebug_alloc = {};


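// The heap is managed in 8-byte allocation_header units. Every block starts
// with a header: "size" counts the units of the block's payload (excluding
// the header itself) and "previous" is the index of the preceding block's
// header, or 0 if there is none.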
struct allocation_header {
	uint32	size : 31;	// size in allocation_header units
	bool	free : 1;
	uint32	previous;
};

struct free_entry : allocation_header {
	uint32	previous_free;
	uint32	next_free;
};


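// A simple allocator for a fixed heap region. The region is treated as an
// array of 8-byte allocation_header slots; fBase points one slot before the
// region, so slot indices start at 1 and 0 can serve as a "none" value. Free
// blocks are kept in a doubly linked list sorted by ascending size, which
// Allocate() scans for the smallest sufficiently large entry. Pools can be
// nested: CreateChildPool() carves a child pool out of this pool's largest
// free block.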
struct DebugAllocPool {
	void Init(void* heap, size_t heapSize)
	{
		fParent = NULL;
		fChild = NULL;

		uint32 size = heapSize / 8;
		fBase = (allocation_header*)heap - 1;
		fEnd = size + 1;
		fFirstFree = 0;
		fLastFree = 0;

		// add free entry spanning the whole area
		fBase[1].size = size - 1;
		fBase[1].previous = 0;
		_InsertFreeEntry(1);
	}

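	// Creates a pool nested in this one: the pool object itself is allocated
	// from this pool and its heap is this pool's largest free block. Only one
	// child pool can exist at a time.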
	DebugAllocPool* CreateChildPool()
	{
		// do we already have a child pool?
		if (fChild != NULL)
			return NULL;

		// create the pool object
		DebugAllocPool* pool
			= (DebugAllocPool*)Allocate(sizeof(DebugAllocPool));
		if (pool == NULL)
			return NULL;

		// do we have enough free space?
		if (fLastFree == 0 || fBase[fLastFree].size < 2) {
			Free(pool);
			return NULL;
		}

		allocation_header* header = &fBase[fLastFree];
		_RemoveFreeEntry(fLastFree);

		pool->Init(header + 1, header->size * 8);
		pool->fParent = this;

		return fChild = pool;
	}

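	// Detaches this pool from its parent and returns its memory to it. For
	// the root pool, which has no parent, this is a no-op.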
	void Destroy()
	{
		if (fParent != NULL) {
			fParent->fChild = NULL;
			fParent->Free(fBase + 1);
		}
	}

	DebugAllocPool* Parent() const
	{
		return fParent;
	}

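	// Rounds the requested size up to 8-byte units and takes the smallest
	// free block that fits (the free list is sorted by size). If the chosen
	// block is at least two units larger than needed, the tail is split off
	// into a new free block.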
	void* Allocate(size_t size)
	{
		size = (size + 7) / 8;
		uint32 index = fFirstFree;
		while (index != 0 && fBase[index].size < size)
			index = ((free_entry*)&fBase[index])->next_free;

		if (index == 0)
			return NULL;

		_RemoveFreeEntry(index);

		// if the entry is big enough, we split it
		if (fBase[index].size - size >= 2) {
			uint32 next = index + 1 + size;
			uint32 nextNext = index + 1 + fBase[index].size;
			fBase[next].size = fBase[index].size - size - 1;
			fBase[next].previous = index;
			fBase[index].size = size;
			_InsertFreeEntry(next);

			if (nextNext < fEnd)
				fBase[nextNext].previous = next;
		}

		return &fBase[index + 1];
	}

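	// Returns a block to the pool: validates the address, merges the block
	// with adjacent free neighbors, and reinserts it into the free list. Bad
	// addresses and double frees are reported via kprintf().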
	void Free(void* address)
	{
		// check address
		if (((addr_t)address & 7) != 0 || address <= fBase + 1
			|| address >= fBase + fEnd) {
			kprintf("DebugAllocPool::Free(%p): bad address\n", address);
			return;
		}

		// get header
		allocation_header* header = (allocation_header*)address - 1;
		uint32 index = header - fBase;
		if (header->free) {
			kprintf("DebugAllocPool::Free(%p): double free\n", address);
			return;
		}

		uint32 next = index + 1 + header->size;

		// join with previous, if possible
		if (index > 1 && fBase[header->previous].free) {
			uint32 previous = header->previous;
			_RemoveFreeEntry(previous);

			fBase[previous].size += 1 + header->size;
			fBase[next].previous = previous;

			index = previous;
			header = fBase + index;
		}

		// join with next, if possible
		if (next < fEnd && fBase[next].free) {
			_RemoveFreeEntry(next);

			header->size += 1 + fBase[next].size;

			uint32 nextNext = index + 1 + header->size;
			if (nextNext < fEnd)
				fBase[nextNext].previous = index;
		}

		_InsertFreeEntry(index);
	}

private:
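	// Inserts the block at "index" into the free list, keeping the list
	// sorted by ascending size, and marks the block free.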
	void _InsertFreeEntry(uint32 index)
	{
		// find the insertion point -- list is sorted by ascending size
		uint32 size = fBase[index].size;
		uint32 next = fFirstFree;
		while (next != 0 && size > fBase[next].size)
			next = ((free_entry*)&fBase[next])->next_free;

		// insert
		uint32 previous;
		if (next != 0) {
			previous = ((free_entry*)&fBase[next])->previous_free;
			((free_entry*)&fBase[next])->previous_free = index;
		} else {
			previous = fLastFree;
			fLastFree = index;
		}

		if (previous != 0)
			((free_entry*)&fBase[previous])->next_free = index;
		else
			fFirstFree = index;

		((free_entry*)&fBase[index])->previous_free = previous;
		((free_entry*)&fBase[index])->next_free = next;

		fBase[index].free = true;
	}

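	// Unlinks the block at "index" from the free list and marks it used.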
	void _RemoveFreeEntry(uint32 index)
	{
		uint32 previous = ((free_entry*)&fBase[index])->previous_free;
		uint32 next = ((free_entry*)&fBase[index])->next_free;

		if (previous != 0)
			((free_entry*)&fBase[previous])->next_free = next;
		else
			fFirstFree = next;

		if (next != 0)
			((free_entry*)&fBase[next])->previous_free = previous;
		else
			fLastFree = previous;

		fBase[index].free = false;
	}

private:
	DebugAllocPool*		fParent;
	DebugAllocPool*		fChild;
	allocation_header*	fBase;		// actually base - 1, so that index 0 is
									// invalid
	uint32				fEnd;
	uint32				fFirstFree;
	uint32				fLastFree;
};


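// sCurrentPool is the innermost pool; all allocations and frees go through
// it. sInitialPool is set up lazily on the first create_debug_alloc_pool()
// call, using whatever sHeapBase/sHeapSize refer to at that point.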
static DebugAllocPool* sCurrentPool;
static DebugAllocPool sInitialPool;


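// Creates a new pool nested in the current one and makes it current. The
// first call initializes and returns the initial pool instead.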
debug_alloc_pool*
create_debug_alloc_pool()
{
	if (sCurrentPool == NULL) {
		sInitialPool.Init(sHeapBase, sHeapSize);
		sCurrentPool = &sInitialPool;
		return sCurrentPool;
	}

	DebugAllocPool* pool = sCurrentPool->CreateChildPool();
	if (pool == NULL)
		return NULL;

	sCurrentPool = pool;
	return sCurrentPool;
}


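// Deletes the given pool, provided it is the current pool or one of its
// ancestors, and makes its parent the current pool again. Pools nested
// inside it are discarded along with it, since their memory was carved out
// of it.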
void
delete_debug_alloc_pool(debug_alloc_pool* pool)
{
	if (pool == NULL || sCurrentPool == NULL)
		return;

	// find the pool in the hierarchy
	DebugAllocPool* otherPool = sCurrentPool;
	while (otherPool != NULL && otherPool != pool)
		otherPool = otherPool->Parent();

	if (otherPool == NULL)
		return;

	// destroy the pool
	sCurrentPool = pool->Parent();
	pool->Destroy();

	if (pool != &sInitialPool)
		debug_free(pool);
}


void*
debug_malloc(size_t size)
{
	if (sCurrentPool == NULL)
		return NULL;

	return sCurrentPool->Allocate(size);
}


void
debug_free(void* address)
{
	if (address != NULL && sCurrentPool != NULL)
		sCurrentPool->Free(address);
}


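// Called during kernel initialization: replaces the small static fallback
// buffer with a dedicated, fully locked kernel area of KDEBUG_HEAP bytes.
// Since pools are only set up on the first create_debug_alloc_pool() call,
// they automatically use whichever heap is current at that time.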
void
debug_heap_init()
{
	// create the heap area
	void* base;
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area_id area = create_area_etc(B_SYSTEM_TEAM, "kdebug heap", KDEBUG_HEAP,
		B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		CREATE_AREA_DONT_WAIT, &virtualRestrictions, &physicalRestrictions,
		(void**)&base);
	if (area < 0)
		return;

	// switch from the small static buffer to the area
	InterruptsLocker locker;
	sHeapBase = base;
	sHeapSize = KDEBUG_HEAP;
}
309