xref: /haiku/src/system/boot/loader/heap.cpp (revision 4c8e85b316c35a9161f5a1c50ad70bc91c83a76f)
1 /*
2  * Copyright 2003-2013, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
#include <boot/heap.h>

#ifdef HEAP_TEST
#	include <stdio.h>
#endif
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include <boot/platform.h>
#include <util/OpenHashTable.h>
#include <util/SplayTree.h>
20 
21 
// Define TRACE_HEAP to log every malloc()/free()/realloc() via dprintf();
// when disabled, TRACE() compiles to nothing.
//#define TRACE_HEAP
#ifdef TRACE_HEAP
#	define TRACE(format...)	dprintf(format)
#else
#	define TRACE(format...)	do { } while (false)
#endif
28 
29 
/*!	This is a very simple malloc()/free() implementation - it only
	manages a free list.
	After heap_init() is called, all free memory is contained in one
	big chunk, the only entry in the free list (which is a singly
	linked list).
	When memory is allocated, the smallest free chunk that contains
	the requested size is split (or taken as a whole if it can't be
	split any further), and its lower half will be removed from the
	free list.
	The free list is ordered by size, starting with the smallest
	free chunk available. When a chunk is freed, it will be joined
	with its predecessor or successor, if possible.
	To ease list handling, the list anchor itself is a free chunk with
	size 0 that can't be allocated.
*/
45 
#define DEBUG_ALLOCATIONS
	// if defined, free() sanity-checks the freed chunk and the free list
	// (double free / clobber detection)
	// NOTE(review): the historic description said freed memory is filled
	// with 0xcc, but the visible free() performs no such fill — confirm
#define DEBUG_MAX_HEAP_USAGE
	// if defined, the maximum heap usage is determined and printed before
	// entering the kernel


const static size_t kAlignment = 8;
	// all memory chunks will be a multiple of this
const static size_t kLargeAllocationThreshold = 16 * 1024;
	// allocations of this size or larger are allocated via
	// platform_allocate_region()
58 
59 
// Header that precedes every chunk (allocated or free). The union pads the
// header to kAlignment bytes, so the payload following it (the address
// handed out to callers) stays kAlignment-aligned.
class Chunk {
public:
	// Total size of the chunk in bytes, including this header.
	size_t CompleteSize() const
	{
		return fSize;
	}

protected:
	union {
		size_t	fSize;
			// total chunk size, header included
		char	fAlignment[kAlignment];
			// unused; forces the header to occupy kAlignment bytes
	};
};
73 
74 
class FreeChunk;


// Link data a chunk carries only while it is free: the splay tree link for
// the size-ordered free chunk tree, plus the singly linked list link the
// tree's iteration support uses. This data lives in the chunk's payload
// area, so it is overwritten once the chunk is allocated.
struct FreeChunkData : SplayTreeLink<FreeChunk> {

	// Next free chunk in the tree's iteration list.
	FreeChunk* Next() const
	{
		return fNext;
	}

	// Address of the list link; used by the tree to maintain the list.
	FreeChunk** NextLink()
	{
		return &fNext;
	}

protected:
	FreeChunk*	fNext;
};
93 
94 
// A chunk on the free list: the common Chunk header (fSize) plus the
// tree/list links from FreeChunkData. The FreeChunkData part occupies the
// payload area, so a FreeChunk is only a valid object while the chunk is
// actually free.
class FreeChunk : public Chunk, public FreeChunkData {
public:
			void				SetTo(size_t size);

			size_t				Size() const;

			FreeChunk*			Split(size_t splitSize);
			bool				IsTouching(FreeChunk* link);
			FreeChunk*			Join(FreeChunk* link);

			void*				AllocatedAddress() const;
	static	FreeChunk*			SetToAllocated(void* allocated);
};
108 
109 
110 struct FreeChunkKey {
111 	FreeChunkKey(size_t size)
112 		:
113 		fSize(size),
114 		fChunk(NULL)
115 	{
116 	}
117 
118 	FreeChunkKey(const FreeChunk* chunk)
119 		:
120 		fSize(chunk->Size()),
121 		fChunk(chunk)
122 	{
123 	}
124 
125 	int Compare(const FreeChunk* chunk) const
126 	{
127 		size_t chunkSize = chunk->Size();
128 		if (chunkSize != fSize)
129 			return fSize < chunkSize ? -1 : 1;
130 
131 		if (fChunk == chunk)
132 			return 0;
133 		return fChunk < chunk ? -1 : 1;
134 	}
135 
136 private:
137 	size_t				fSize;
138 	const FreeChunk*	fChunk;
139 };
140 
141 
// Glue for IteratableSplayTree: keys are (size, address) pairs via
// FreeChunkKey, so the tree keeps free chunks ordered by size with the
// address as tie-breaker.
struct FreeChunkTreeDefinition {
	typedef FreeChunkKey	KeyType;
	typedef FreeChunk		NodeType;

	static FreeChunkKey GetKey(const FreeChunk* node)
	{
		return FreeChunkKey(node);
	}

	static SplayTreeLink<FreeChunk>* GetLink(FreeChunk* node)
	{
		// FreeChunk derives from SplayTreeLink<FreeChunk> via FreeChunkData
		return node;
	}

	static int Compare(const FreeChunkKey& key, const FreeChunk* node)
	{
		return key.Compare(node);
	}

	static FreeChunk** GetListLink(FreeChunk* node)
	{
		return node->NextLink();
	}
};


typedef IteratableSplayTree<FreeChunkTreeDefinition> FreeChunkTree;
169 
170 
// Bookkeeping record for an allocation that bypassed the heap and got its
// own platform region (>= kLargeAllocationThreshold bytes). Stored in
// sLargeAllocations, keyed by the region's start address.
struct LargeAllocation {
	LargeAllocation()
	{
	}

	// Adopts an already existing region.
	void SetTo(void* address, size_t size)
	{
		fAddress = address;
		fSize = size;
	}

	// Allocates a new platform region of \a size bytes; returns the
	// platform's status code (fAddress is only valid on B_OK).
	status_t Allocate(size_t size)
	{
		fSize = size;
		return platform_allocate_region(&fAddress, fSize,
			B_READ_AREA | B_WRITE_AREA, false);
	}

	// Returns the region to the platform.
	void Free()
	{
		platform_free_region(fAddress, fSize);
	}

	void* Address() const
	{
		return fAddress;
	}

	size_t Size() const
	{
		return fSize;
	}

	// Intrusive hash table link (used by BOpenHashTable).
	LargeAllocation*& HashNext()
	{
		return fHashNext;
	}

private:
	void*				fAddress;
	size_t				fSize;
	LargeAllocation*	fHashNext;
};
214 
215 
// Glue for BOpenHashTable: large allocations are hashed by their start
// address, so free() can find the record for a pointer it is given.
struct LargeAllocationHashDefinition {
	typedef void*			KeyType;
	typedef	LargeAllocation	ValueType;

	size_t HashKey(void* key) const
	{
		// Addresses are at least kAlignment-aligned; dividing strips the
		// always-zero low bits for a better bucket distribution.
		return size_t(key) / kAlignment;
	}

	size_t Hash(LargeAllocation* value) const
	{
		return HashKey(value->Address());
	}

	bool Compare(void* key, LargeAllocation* value) const
	{
		return value->Address() == key;
	}

	LargeAllocation*& GetLink(LargeAllocation* value) const
	{
		return value->HashNext();
	}
};


typedef BOpenHashTable<LargeAllocationHashDefinition> LargeAllocationHashTable;
243 
244 
static void* sHeapBase;
	// lowest address of the heap arena (NULL until heap_init())
static void* sHeapEnd;
	// first address past the heap arena
static size_t sMaxHeapSize, sAvailable, sMaxHeapUsage;
	// arena size, bytes currently free, peak usage (DEBUG_MAX_HEAP_USAGE)
static FreeChunkTree sFreeChunkTree;
	// all free chunks, ordered by (size, address)

static LargeAllocationHashTable sLargeAllocations;
	// allocations >= kLargeAllocationThreshold, keyed by start address
251 
252 
253 static inline size_t
254 align(size_t size)
255 {
256 	return (size + kAlignment - 1) & ~(kAlignment - 1);
257 }
258 
259 
/*!	Serves an allocation of kLargeAllocationThreshold bytes or more by
	allocating a dedicated platform region and registering it in
	sLargeAllocations, so free() can later identify it by address.
	Returns the region's address, or NULL on failure.
*/
static void*
malloc_large(size_t size)
{
	LargeAllocation* allocation = new(std::nothrow) LargeAllocation;
	if (allocation == NULL) {
		dprintf("malloc_large(): Out of memory!\n");
		return NULL;
	}

	if (allocation->Allocate(size) != B_OK) {
		dprintf("malloc_large(): Out of memory!\n");
		delete allocation;
		return NULL;
	}

	// InsertUnchecked: the address cannot already be in the table
	sLargeAllocations.InsertUnchecked(allocation);
	TRACE("malloc_large(%lu) -> %p\n", size, allocation->Address());
	return allocation->Address();
}
279 
280 
/*!	Releases an allocation made by malloc_large(). Panics if \a address is
	not a registered large allocation.
*/
static void
free_large(void* address)
{
	LargeAllocation* allocation = sLargeAllocations.Lookup(address);
	if (allocation == NULL)
		panic("free_large(%p): unknown allocation!\n", address);

	sLargeAllocations.RemoveUnchecked(allocation);
	allocation->Free();
	delete allocation;
}
292 
293 
/*!	Initializes the chunk to span \a size bytes (header included) and
	clears its free-list link.
*/
void
FreeChunk::SetTo(size_t size)
{
	fSize = size;
	fNext = NULL;
}
300 
301 
/*!	Returns the amount of bytes that can be allocated
	in this chunk.
*/
size_t
FreeChunk::Size() const
{
	// fSize counts the whole chunk; subtract the header by measuring from
	// the payload start (AllocatedAddress()).
	return (addr_t)this + fSize - (addr_t)AllocatedAddress();
}
310 
311 
/*!	Splits the upper half at the requested location and returns it. This chunk
	will no longer be a valid FreeChunk object; only its fSize will be valid.
 */
FreeChunk*
FreeChunk::Split(size_t splitSize)
{
	splitSize = align(splitSize);

	// The remainder chunk starts right after splitSize payload bytes.
	FreeChunk* chunk = (FreeChunk*)((addr_t)AllocatedAddress() + splitSize);
	size_t newSize = (addr_t)chunk - (addr_t)this;
	chunk->fSize = fSize - newSize;
	chunk->fNext = NULL;

	// Shrink this chunk to header + splitSize payload bytes.
	fSize = newSize;

	return chunk;
}
329 
330 
331 /*!	Checks if the specified chunk touches this chunk, so
332 	that they could be joined.
333 */
334 bool
335 FreeChunk::IsTouching(FreeChunk* chunk)
336 {
337 	return chunk
338 		&& (((uint8*)this + fSize == (uint8*)chunk)
339 			|| (uint8*)chunk + chunk->fSize == (uint8*)this);
340 }
341 
342 
/*!	Joins the chunk to this chunk and returns the pointer
	to the new chunk - which will either be one of the
	two chunks.
	Note, the chunks must be joinable, or else this method
	doesn't work correctly. Use FreeChunk::IsTouching()
	to check if this method can be applied.
*/
FreeChunk*
FreeChunk::Join(FreeChunk* chunk)
{
	// The lower chunk absorbs the upper one: its size grows by the other's
	// size, and it inherits the upper chunk's list link.
	if (chunk < this) {
		chunk->fSize += fSize;
		chunk->fNext = fNext;

		return chunk;
	}

	fSize += chunk->fSize;
	fNext = chunk->fNext;

	return this;
}
365 
366 
/*!	Returns the address handed out to malloc() callers: the chunk's
	payload, which begins at its FreeChunkData subobject (right after the
	Chunk header).
*/
void*
FreeChunk::AllocatedAddress() const
{
	return (void*)static_cast<const FreeChunkData*>(this);
}
372 
373 
/*!	Maps an address returned by AllocatedAddress() back to its chunk, i.e.
	steps back over the Chunk header. Inverse of AllocatedAddress().
*/
FreeChunk*
FreeChunk::SetToAllocated(void* allocated)
{
	return static_cast<FreeChunk*>((FreeChunkData*)allocated);
}
379 
380 
381 //	#pragma mark -
382 
383 
/*!	Hands the heap arena back to the platform. No heap allocations may be
	made afterwards.
*/
void
heap_release(stage2_args* args)
{
	platform_release_heap(args, sHeapBase);
}
389 
390 
/*!	Prints peak and current heap usage; only effective with
	DEBUG_MAX_HEAP_USAGE defined (large allocations are not included).
*/
void
heap_print_statistics()
{
#ifdef DEBUG_MAX_HEAP_USAGE
	dprintf("maximum boot loader heap usage: %zu, currently used: %zu\n",
		sMaxHeapUsage, sMaxHeapSize - sAvailable);
#endif
}
399 
400 
/*!	Sets up the boot loader heap: obtains the arena from the platform,
	turns the whole of it into a single free chunk, and initializes the
	hash table for large allocations.
	Returns B_ERROR if the platform cannot provide a heap, B_NO_MEMORY if
	the large-allocation table cannot be initialized.
*/
status_t
heap_init(stage2_args* args)
{
	void* base;
	void* top;
	if (platform_init_heap(args, &base, &top) < B_OK)
		return B_ERROR;

	sHeapBase = base;
	sHeapEnd = top;
	sMaxHeapSize = (uint8*)top - (uint8*)base;

	// declare the whole heap as one chunk, and add it
	// to the free list
	FreeChunk* chunk = (FreeChunk*)base;
	chunk->SetTo(sMaxHeapSize);
	sFreeChunkTree.Insert(chunk);

	// sAvailable tracks usable (payload) bytes, not raw arena size
	sAvailable = chunk->Size();
#ifdef DEBUG_MAX_HEAP_USAGE
	sMaxHeapUsage = sMaxHeapSize - sAvailable;
#endif

	if (sLargeAllocations.Init(64) != B_OK)
		return B_NO_MEMORY;

	return B_OK;
}
429 
430 
#ifdef HEAP_TEST
// Debug helper: prints every chunk in the free chunk tree in ascending
// (size, address) order.
void
dump_chunks(void)
{
	FreeChunk* chunk = sFreeChunkTree.FindMin();
	while (chunk != NULL) {
		printf("\t%p: chunk size = %ld, end = %p, next = %p\n", chunk,
			chunk->Size(), (uint8*)chunk + chunk->CompleteSize(),
			chunk->Next());
		chunk = chunk->Next();
	}
}
#endif
444 
445 uint32
446 heap_available(void)
447 {
448 	return (uint32)sAvailable;
449 }
450 
451 
452 void*
453 malloc(size_t size)
454 {
455 	if (sHeapBase == NULL || size == 0)
456 		return NULL;
457 
458 	// align the size requirement to a kAlignment bytes boundary
459 	if (size < sizeof(FreeChunkData))
460 		size = sizeof(FreeChunkData);
461 	size = align(size);
462 
463 	if (size >= kLargeAllocationThreshold)
464 		return malloc_large(size);
465 
466 	if (size > sAvailable) {
467 		dprintf("malloc(): Out of memory allocating a block of %ld bytes, "
468 			"only %ld left!\n", size, sAvailable);
469 		return NULL;
470 	}
471 
472 	FreeChunk* chunk = sFreeChunkTree.FindClosest(FreeChunkKey(size), true,
473 		true);
474 
475 	if (chunk == NULL) {
476 		// could not find a free chunk as large as needed
477 		dprintf("malloc(): Out of memory allocating a block of %ld bytes, "
478 			"no free chunks!\n", size);
479 		return NULL;
480 	}
481 
482 	sFreeChunkTree.Remove(chunk);
483 	sAvailable -= chunk->Size();
484 
485 	void* allocatedAddress = chunk->AllocatedAddress();
486 
487 	// If this chunk is bigger than the requested size and there's enough space
488 	// left over for a new chunk, we split it.
489 	if (chunk->Size() >= size + align(sizeof(FreeChunk))) {
490 		FreeChunk* freeChunk = chunk->Split(size);
491 		sFreeChunkTree.Insert(freeChunk);
492 		sAvailable += freeChunk->Size();
493 	}
494 
495 #ifdef DEBUG_MAX_HEAP_USAGE
496 	sMaxHeapUsage = std::max(sMaxHeapUsage, sMaxHeapSize - sAvailable);
497 #endif
498 
499 	TRACE("malloc(%lu) -> %p\n", size, allocatedAddress);
500 	return allocatedAddress;
501 }
502 
503 
/*!	Resizes the allocation at \a oldBuffer to \a newSize bytes.
	realloc(NULL, size) behaves like malloc(size); realloc(buffer, 0)
	frees the buffer and returns NULL. On allocation failure NULL is
	returned and the old buffer is left untouched.
*/
void*
realloc(void* oldBuffer, size_t newSize)
{
	if (newSize == 0) {
		TRACE("realloc(%p, %lu) -> NULL\n", oldBuffer, newSize);
		free(oldBuffer);
		return NULL;
	}

	// determine the usable size of the existing allocation
	size_t oldSize = 0;
	if (oldBuffer != NULL) {
		if (oldBuffer >= sHeapBase && oldBuffer < sHeapEnd) {
			// small allocation -- the chunk header precedes the buffer
			FreeChunk* oldChunk = FreeChunk::SetToAllocated(oldBuffer);
			oldSize = oldChunk->Size();
		} else {
			// outside the arena it must be a registered large allocation
			LargeAllocation* allocation = sLargeAllocations.Lookup(oldBuffer);
			if (allocation == NULL) {
				panic("realloc(%p, %zu): unknown allocation!\n", oldBuffer,
					newSize);
			}

			oldSize = allocation->Size();
		}

		// Check if the old buffer still fits, and if it makes sense to keep it.
		// Shrinking only pays off for buffers >= 128 bytes that would waste
		// more than two thirds of their space.
		if (oldSize >= newSize
			&& (oldSize < 128 || newSize > oldSize / 3)) {
			TRACE("realloc(%p, %lu) old buffer is large enough\n",
				oldBuffer, newSize);
			return oldBuffer;
		}
	}

	// allocate a new buffer and move the data over
	void* newBuffer = malloc(newSize);
	if (newBuffer == NULL)
		return NULL;

	if (oldBuffer != NULL) {
		memcpy(newBuffer, oldBuffer, std::min(oldSize, newSize));
		free(oldBuffer);
	}

	TRACE("realloc(%p, %lu) -> %p\n", oldBuffer, newSize, newBuffer);
	return newBuffer;
}
549 
550 
/*!	Allocates zero-initialized memory for \a numElements elements of
	\a size bytes each.
	Returns NULL if the multiplication would overflow size_t -- previously
	the product silently wrapped, which could hand out a block far smaller
	than requested.
*/
void*
calloc(size_t numElements, size_t size)
{
	// reject element count/size combinations whose product overflows
	if (size != 0 && numElements > SIZE_MAX / size)
		return NULL;

	const size_t totalSize = numElements * size;
	void* address = malloc(totalSize);
	if (address != NULL)
		memset(address, 0, totalSize);

	return address;
}
560 
561 
/*!	Returns \a allocated to the heap. Large allocations are handed back to
	the platform; heap chunks are re-inserted into the free chunk tree,
	after being merged with up to two adjacent free chunks (their direct
	predecessor and successor in memory).
	free(NULL) is a no-op.
*/
void
free(void* allocated)
{
	if (allocated == NULL)
		return;

	TRACE("free(%p)\n", allocated);

	// addresses outside the heap arena belong to malloc_large()
	if (allocated < sHeapBase || allocated >= sHeapEnd) {
		free_large(allocated);
		return;
	}

	FreeChunk* freedChunk = FreeChunk::SetToAllocated(allocated);

#ifdef DEBUG_ALLOCATIONS
	// sanity checks: the freed chunk's size must be plausible (it cannot
	// exceed the number of bytes currently allocated), and it must not
	// already be in the free list (double free)
	if (freedChunk->Size() > sMaxHeapSize - sAvailable) {
		panic("freed chunk %p clobbered (%#zx)!\n", freedChunk,
			freedChunk->Size());
	}
	{
		FreeChunk* chunk = sFreeChunkTree.FindMin();
		while (chunk) {
			if (chunk->Size() > sAvailable || freedChunk == chunk)
				panic("invalid chunk in free list (%p (%zu)), or double free\n",
					chunk, chunk->Size());
			chunk = chunk->Next();
		}
	}
#endif

	// try to join the new free chunk with an existing one
	// it may be joined with up to two chunks

	FreeChunk* chunk = sFreeChunkTree.FindMin();
	int32 joinCount = 0;

	while (chunk) {
		// remember the successor: Remove() below invalidates chunk's links
		FreeChunk* nextChunk = chunk->Next();

		if (chunk->IsTouching(freedChunk)) {
			// take the neighbor out of the tree before merging; the joined
			// chunk is re-inserted (with its new size) after the loop
			sFreeChunkTree.Remove(chunk);
			sAvailable -= chunk->Size();

			freedChunk = chunk->Join(freedChunk);

			if (++joinCount == 2)
				break;
		}

		chunk = nextChunk;
	}

	sFreeChunkTree.Insert(freedChunk);
	sAvailable += freedChunk->Size();
#ifdef DEBUG_MAX_HEAP_USAGE
	sMaxHeapUsage = std::max(sMaxHeapUsage, sMaxHeapSize - sAvailable);
#endif
}
621 
622