xref: /haiku/src/system/boot/loader/heap.cpp (revision 04a0e9c7b68cbe3a43d38e2bca8e860fd80936fb)
1 /*
2  * Copyright 2003-2013, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include <boot/heap.h>
8 
9 #ifdef HEAP_TEST
10 #	include <stdio.h>
11 #endif
12 #include <stdlib.h>
13 #include <string.h>
14 
15 #include <algorithm>
16 
17 #include <boot/platform.h>
18 #include <util/OpenHashTable.h>
19 #include <util/SplayTree.h>
20 
21 
22 //#define TRACE_HEAP
23 #ifdef TRACE_HEAP
24 #	define TRACE(format...)	dprintf(format)
25 #else
26 #	define TRACE(format...)	do { } while (false)
27 #endif
28 
29 
/*!	This is a very simple malloc()/free() implementation - it only
	manages a free list.
	After heap_init() is called, all free memory is contained in one
	big chunk, the only entry in the free list (which is a singly
	linked list).
	When memory is allocated, the smallest free chunk that contains
	the requested size is split (or taken as a whole if it can't be
	split any further), and its lower half will be removed from the
	free list.
	The free list is ordered by size, starting with the smallest
	free chunk available. When a chunk is freed, it will be joined
	with its predecessor or successor, if possible.
	NOTE(review): the code below actually keeps free chunks in a splay
	tree (sFreeChunkTree) with an attached iteration list; the "list
	anchor" chunk described in earlier versions no longer appears here.
*/
45 
#define DEBUG_ALLOCATIONS
	// if defined, freed memory is filled with 0xcc
#define DEBUG_MAX_HEAP_USAGE
	// if defined, the maximum heap usage is determined and printed before
	// entering the kernel


const static size_t kAlignment = 8;
	// all memory chunks will be a multiple of this
	// (must be a power of two -- align() relies on that)
const static size_t kLargeAllocationThreshold = 16 * 1024;
	// allocations of this size or larger are allocated via
	// platform_allocate_region()
58 
59 
// Header that precedes every chunk, free or allocated. The union pads the
// header to (at least) kAlignment bytes, so the user memory following it
// stays properly aligned.
class Chunk {
public:
	// Total size of the chunk in bytes, including this header.
	size_t CompleteSize() const
	{
		return fSize;
	}

protected:
	union {
		size_t	fSize;
			// total chunk size, header included
		char	fAlignment[kAlignment];
			// never accessed; only enforces the header size/alignment
	};
};
73 
74 
75 class FreeChunk;
76 
77 
// The part of a free chunk that overlays the user data area: the splay tree
// link (inherited) plus a singly linked list pointer the tree uses for
// in-order iteration. While a chunk is allocated this area holds user data.
struct FreeChunkData : SplayTreeLink<FreeChunk> {

	// Next free chunk in the tree's iteration list (NULL at the end).
	FreeChunk* Next() const
	{
		return fNext;
	}

	// Address of the list link; used by the tree to maintain the list.
	FreeChunk** NextLink()
	{
		return &fNext;
	}

protected:
	FreeChunk*	fNext;
};
93 
94 
// A chunk on the free list/tree. The FreeChunkData part (tree link and list
// pointer) overlays the memory handed out to the user while the chunk is
// allocated, so free chunk bookkeeping needs no extra space.
class FreeChunk : public Chunk, public FreeChunkData {
public:
			// Initializes size and clears the list link.
			void				SetTo(size_t size);

			// Usable (allocatable) bytes, i.e. size minus the header.
			size_t				Size() const;

			// Splits off and returns the upper remainder chunk.
			FreeChunk*			Split(size_t splitSize);
			// Whether this chunk is directly adjacent to \a link.
			bool				IsTouching(FreeChunk* link);
			// Merges this chunk with the adjacent \a link.
			FreeChunk*			Join(FreeChunk* link);

			// Address handed out to the user (right after the header).
			void*				AllocatedAddress() const;
			// Maps a user address back to its FreeChunk.
	static	FreeChunk*			SetToAllocated(void* allocated);
};
108 
109 
110 struct FreeChunkKey {
111 	FreeChunkKey(size_t size)
112 		:
113 		fSize(size),
114 		fChunk(NULL)
115 	{
116 	}
117 
118 	FreeChunkKey(const FreeChunk* chunk)
119 		:
120 		fSize(chunk->Size()),
121 		fChunk(chunk)
122 	{
123 	}
124 
125 	int Compare(const FreeChunk* chunk) const
126 	{
127 		size_t chunkSize = chunk->Size();
128 		if (chunkSize != fSize)
129 			return fSize < chunkSize ? -1 : 1;
130 
131 		if (fChunk == chunk)
132 			return 0;
133 		return fChunk < chunk ? -1 : 1;
134 	}
135 
136 private:
137 	size_t				fSize;
138 	const FreeChunk*	fChunk;
139 };
140 
141 
// Glue between FreeChunk nodes and IteratableSplayTree: key extraction,
// link access, ordering, and the link for the iteration list.
struct FreeChunkTreeDefinition {
	typedef FreeChunkKey	KeyType;
	typedef FreeChunk		NodeType;

	static FreeChunkKey GetKey(const FreeChunk* node)
	{
		return FreeChunkKey(node);
	}

	static SplayTreeLink<FreeChunk>* GetLink(FreeChunk* node)
	{
		// FreeChunk inherits SplayTreeLink via FreeChunkData
		return node;
	}

	static int Compare(const FreeChunkKey& key, const FreeChunk* node)
	{
		return key.Compare(node);
	}

	// link used for the size-ordered iteration list
	static FreeChunk** GetListLink(FreeChunk* node)
	{
		return node->NextLink();
	}
};


// Free chunks, ordered by (size, address); supports in-order iteration.
typedef IteratableSplayTree<FreeChunkTreeDefinition> FreeChunkTree;
169 
170 
// Bookkeeping record for an allocation satisfied directly via
// platform_allocate_region() rather than from the heap area. Records are
// kept in sLargeAllocations, hashed by start address.
struct LargeAllocation {
	LargeAllocation()
	{
	}

	// Adopts an already existing region (no allocation is performed).
	void SetTo(void* address, size_t size)
	{
		fAddress = address;
		fSize = size;
	}

	// Allocates a readable/writable region of \a size bytes from the
	// platform; returns B_OK on success.
	status_t Allocate(size_t size)
	{
		fSize = size;
		return platform_allocate_region(&fAddress, fSize,
			B_READ_AREA | B_WRITE_AREA, false);
	}

	// Returns the region to the platform.
	void Free()
	{
		platform_free_region(fAddress, fSize);
	}

	void* Address() const
	{
		return fAddress;
	}

	size_t Size() const
	{
		return fSize;
	}

	// Chain link for BOpenHashTable.
	LargeAllocation*& HashNext()
	{
		return fHashNext;
	}

private:
	void*				fAddress;
	size_t				fSize;
	LargeAllocation*	fHashNext;
};
214 
215 
// BOpenHashTable glue: large allocations are keyed by their start address.
struct LargeAllocationHashDefinition {
	typedef void*			KeyType;
	typedef	LargeAllocation	ValueType;

	size_t HashKey(void* key) const
	{
		// dividing by kAlignment discards the low address bits, which
		// presumably carry no information for aligned regions -- TODO
		// confirm the platform's region alignment
		return size_t(key) / kAlignment;
	}

	size_t Hash(LargeAllocation* value) const
	{
		return HashKey(value->Address());
	}

	bool Compare(void* key, LargeAllocation* value) const
	{
		return value->Address() == key;
	}

	LargeAllocation*& GetLink(LargeAllocation* value) const
	{
		return value->HashNext();
	}
};


// Hash table mapping start addresses to their LargeAllocation records.
typedef BOpenHashTable<LargeAllocationHashDefinition> LargeAllocationHashTable;
243 
244 
static void* sHeapBase;
	// start of the heap area managed via the free chunk tree
static void* sHeapEnd;
	// first address past the heap area
static size_t sMaxHeapSize, sAvailable, sMaxHeapUsage;
	// total heap size, bytes currently free, and peak usage seen
	// (the latter only maintained with DEBUG_MAX_HEAP_USAGE)
static FreeChunkTree sFreeChunkTree;
	// free chunks, ordered by (size, address)

static LargeAllocationHashTable sLargeAllocations;
	// allocations >= kLargeAllocationThreshold, keyed by start address
251 
252 
253 static inline size_t
254 align(size_t size)
255 {
256 	return (size + kAlignment - 1) & ~(kAlignment - 1);
257 }
258 
259 
260 static void*
261 malloc_large(size_t size)
262 {
263 	LargeAllocation* allocation = new(std::nothrow) LargeAllocation;
264 	if (allocation == NULL) {
265 		dprintf("malloc_large(): Out of memory!\n");
266 		return NULL;
267 	}
268 
269 	if (allocation->Allocate(size) != B_OK) {
270 		dprintf("malloc_large(): Out of memory!\n");
271 		delete allocation;
272 		return NULL;
273 	}
274 
275 	sLargeAllocations.InsertUnchecked(allocation);
276 	TRACE("malloc_large(%lu) -> %p\n", size, allocation->Address());
277 	return allocation->Address();
278 }
279 
280 
281 static void
282 free_large(void* address)
283 {
284 	LargeAllocation* allocation = sLargeAllocations.Lookup(address);
285 	if (allocation == NULL)
286 		panic("free_large(%p): unknown allocation!\n", address);
287 
288 	sLargeAllocations.RemoveUnchecked(allocation);
289 	allocation->Free();
290 	delete allocation;
291 }
292 
293 
/*!	Initializes the chunk to cover \a size bytes (header included) and
	clears its free list link.
*/
void
FreeChunk::SetTo(size_t size)
{
	fSize = size;
	fNext = NULL;
}
300 
301 
/*!	Returns the amount of bytes that can be allocated
	in this chunk, i.e. the complete chunk size minus the space occupied
	by the Chunk header itself.
*/
size_t
FreeChunk::Size() const
{
	return (addr_t)this + fSize - (addr_t)AllocatedAddress();
}
310 
311 
312 /*!	Splits the upper half at the requested location and returns it. This chunk
313 	will no longer be a valid FreeChunk object; only its fSize will be valid.
314  */
315 FreeChunk*
316 FreeChunk::Split(size_t splitSize)
317 {
318 	splitSize = align(splitSize);
319 
320 	FreeChunk* chunk = (FreeChunk*)((addr_t)AllocatedAddress() + splitSize);
321 	size_t newSize = (addr_t)chunk - (addr_t)this;
322 	chunk->fSize = fSize - newSize;
323 	chunk->fNext = NULL;
324 
325 	fSize = newSize;
326 
327 	return chunk;
328 }
329 
330 
331 /*!	Checks if the specified chunk touches this chunk, so
332 	that they could be joined.
333 */
334 bool
335 FreeChunk::IsTouching(FreeChunk* chunk)
336 {
337 	return chunk
338 		&& (((uint8*)this + fSize == (uint8*)chunk)
339 			|| (uint8*)chunk + chunk->fSize == (uint8*)this);
340 }
341 
342 
343 /*!	Joins the chunk to this chunk and returns the pointer
344 	to the new chunk - which will either be one of the
345 	two chunks.
346 	Note, the chunks must be joinable, or else this method
347 	doesn't work correctly. Use FreeChunk::IsTouching()
348 	to check if this method can be applied.
349 */
350 FreeChunk*
351 FreeChunk::Join(FreeChunk* chunk)
352 {
353 	if (chunk < this) {
354 		chunk->fSize += fSize;
355 		chunk->fNext = fNext;
356 
357 		return chunk;
358 	}
359 
360 	fSize += chunk->fSize;
361 	fNext = chunk->fNext;
362 
363 	return this;
364 }
365 
366 
/*!	Returns the address handed out to the user for this chunk: the memory
	right after the Chunk header, which is where the FreeChunkData part of
	this object begins (the upcast performs that address adjustment).
*/
void*
FreeChunk::AllocatedAddress() const
{
	return (void*)static_cast<const FreeChunkData*>(this);
}
372 
373 
/*!	Maps a user address (as returned by AllocatedAddress()) back to the
	FreeChunk containing it, by treating the address as the FreeChunkData
	part and downcasting (which adjusts back past the Chunk header).
*/
FreeChunk*
FreeChunk::SetToAllocated(void* allocated)
{
	return static_cast<FreeChunk*>((FreeChunkData*)allocated);
}
379 
380 
381 //	#pragma mark -
382 
383 
/*!	Hands the heap area back to the platform code. The heap must not be
	used afterwards.
*/
void
heap_release(stage2_args* args)
{
	platform_release_heap(args, sHeapBase);
}
389 
390 
/*!	Prints peak and current heap usage. A no-op unless
	DEBUG_MAX_HEAP_USAGE is defined.
*/
void
heap_print_statistics()
{
#ifdef DEBUG_MAX_HEAP_USAGE
	dprintf("maximum boot loader heap usage: %zu, currently used: %zu\n",
		sMaxHeapUsage, sMaxHeapSize - sAvailable);
#endif
}
399 
400 
401 status_t
402 heap_init(stage2_args* args)
403 {
404 	void* base;
405 	void* top;
406 	if (platform_init_heap(args, &base, &top) < B_OK)
407 		return B_ERROR;
408 
409 	sHeapBase = base;
410 	sHeapEnd = top;
411 	sMaxHeapSize = (uint8*)top - (uint8*)base;
412 
413 	// declare the whole heap as one chunk, and add it
414 	// to the free list
415 	FreeChunk* chunk = (FreeChunk*)base;
416 	chunk->SetTo(sMaxHeapSize);
417 	sFreeChunkTree.Insert(chunk);
418 
419 	sAvailable = chunk->Size();
420 #ifdef DEBUG_MAX_HEAP_USAGE
421 	sMaxHeapUsage = sMaxHeapSize - sAvailable;
422 #endif
423 
424 	if (sLargeAllocations.Init(64) != B_OK)
425 		return B_NO_MEMORY;
426 
427 	return B_OK;
428 }
429 
430 
#ifdef HEAP_TEST
// Prints every chunk in the free chunk tree, smallest first
// (available in test builds only).
void
dump_chunks(void)
{
	FreeChunk* chunk = sFreeChunkTree.FindMin();
	while (chunk != NULL) {
		printf("\t%p: chunk size = %ld, end = %p, next = %p\n", chunk,
			chunk->Size(), (uint8*)chunk + chunk->CompleteSize(),
			chunk->Next());
		chunk = chunk->Next();
	}
}
#endif
444 
/*!	Returns the number of bytes currently free in the heap area.
	Large allocations are not accounted for here.
*/
uint32
heap_available(void)
{
	return (uint32)sAvailable;
}
450 
451 
452 void*
453 malloc(size_t size)
454 {
455 	if (sHeapBase == NULL || size == 0)
456 		return NULL;
457 
458 	// align the size requirement to a kAlignment bytes boundary
459 	if (size < sizeof(FreeChunkData))
460 		size = sizeof(FreeChunkData);
461 	size = align(size);
462 
463 	if (size >= kLargeAllocationThreshold)
464 		return malloc_large(size);
465 
466 	if (size > sAvailable) {
467 		dprintf("malloc(): Out of memory!\n");
468 		return NULL;
469 	}
470 
471 	FreeChunk* chunk = sFreeChunkTree.FindClosest(FreeChunkKey(size), true,
472 		true);
473 
474 	if (chunk == NULL) {
475 		// could not find a free chunk as large as needed
476 		dprintf("malloc(): Out of memory!\n");
477 		return NULL;
478 	}
479 
480 	sFreeChunkTree.Remove(chunk);
481 	sAvailable -= chunk->Size();
482 
483 	void* allocatedAddress = chunk->AllocatedAddress();
484 
485 	// If this chunk is bigger than the requested size and there's enough space
486 	// left over for a new chunk, we split it.
487 	if (chunk->Size() >= size + align(sizeof(FreeChunk))) {
488 		FreeChunk* freeChunk = chunk->Split(size);
489 		sFreeChunkTree.Insert(freeChunk);
490 		sAvailable += freeChunk->Size();
491 	}
492 
493 #ifdef DEBUG_MAX_HEAP_USAGE
494 	sMaxHeapUsage = std::max(sMaxHeapUsage, sMaxHeapSize - sAvailable);
495 #endif
496 
497 	TRACE("malloc(%lu) -> %p\n", size, allocatedAddress);
498 	return allocatedAddress;
499 }
500 
501 
502 void*
503 realloc(void* oldBuffer, size_t newSize)
504 {
505 	if (newSize == 0) {
506 		TRACE("realloc(%p, %lu) -> NULL\n", oldBuffer, newSize);
507 		free(oldBuffer);
508 		return NULL;
509 	}
510 
511 	size_t oldSize = 0;
512 	if (oldBuffer != NULL) {
513 		if (oldBuffer >= sHeapBase && oldBuffer < sHeapEnd) {
514 			FreeChunk* oldChunk = FreeChunk::SetToAllocated(oldBuffer);
515 			oldSize = oldChunk->Size();
516 		} else {
517 			LargeAllocation* allocation = sLargeAllocations.Lookup(oldBuffer);
518 			if (allocation == NULL) {
519 				panic("realloc(%p, %zu): unknown allocation!\n", oldBuffer,
520 					newSize);
521 			}
522 
523 			oldSize = allocation->Size();
524 		}
525 
526 		// Check if the old buffer still fits, and if it makes sense to keep it.
527 		if (oldSize >= newSize
528 			&& (oldSize < 128 || newSize > oldSize / 3)) {
529 			TRACE("realloc(%p, %lu) old buffer is large enough\n",
530 				oldBuffer, newSize);
531 			return oldBuffer;
532 		}
533 	}
534 
535 	void* newBuffer = malloc(newSize);
536 	if (newBuffer == NULL)
537 		return NULL;
538 
539 	if (oldBuffer != NULL) {
540 		memcpy(newBuffer, oldBuffer, std::min(oldSize, newSize));
541 		free(oldBuffer);
542 	}
543 
544 	TRACE("realloc(%p, %lu) -> %p\n", oldBuffer, newSize, newBuffer);
545 	return newBuffer;
546 }
547 
548 
549 void
550 free(void* allocated)
551 {
552 	if (allocated == NULL)
553 		return;
554 
555 	TRACE("free(%p)\n", allocated);
556 
557 	if (allocated < sHeapBase || allocated >= sHeapEnd) {
558 		free_large(allocated);
559 		return;
560 	}
561 
562 	FreeChunk* freedChunk = FreeChunk::SetToAllocated(allocated);
563 
564 #ifdef DEBUG_ALLOCATIONS
565 	if (freedChunk->Size() > sMaxHeapSize - sAvailable) {
566 		panic("freed chunk %p clobbered (%#zx)!\n", freedChunk,
567 			freedChunk->Size());
568 	}
569 	{
570 		FreeChunk* chunk = sFreeChunkTree.FindMin();
571 		while (chunk) {
572 			if (chunk->Size() > sAvailable || freedChunk == chunk)
573 				panic("invalid chunk in free list (%p (%zu)), or double free\n",
574 					chunk, chunk->Size());
575 			chunk = chunk->Next();
576 		}
577 	}
578 #endif
579 
580 	// try to join the new free chunk with an existing one
581 	// it may be joined with up to two chunks
582 
583 	FreeChunk* chunk = sFreeChunkTree.FindMin();
584 	int32 joinCount = 0;
585 
586 	while (chunk) {
587 		FreeChunk* nextChunk = chunk->Next();
588 
589 		if (chunk->IsTouching(freedChunk)) {
590 			sFreeChunkTree.Remove(chunk);
591 			sAvailable -= chunk->Size();
592 
593 			freedChunk = chunk->Join(freedChunk);
594 
595 			if (++joinCount == 2)
596 				break;
597 		}
598 
599 		chunk = nextChunk;
600 	}
601 
602 	sFreeChunkTree.Insert(freedChunk);
603 	sAvailable += freedChunk->Size();
604 #ifdef DEBUG_MAX_HEAP_USAGE
605 	sMaxHeapUsage = std::max(sMaxHeapUsage, sMaxHeapSize - sAvailable);
606 #endif
607 }
608 
609