xref: /haiku/src/kits/media/RealtimeAlloc.cpp (revision b55a57da7173b9af0432bd3e148d03f06161d036)
/*
 * Copyright 2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


/*!	A simple allocator that works directly on an area, based on the boot
	loader's heap. See there for more information about its inner workings.
*/
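
/*	A rough sketch of how a pool is laid out (added for orientation; derived
	from the implementation below): each pool occupies one area, which
	initially forms a single free chunk spanning the whole heap. Every chunk
	carries a uint32 size header directly in front of the memory handed out
	to callers, all allocations are rounded up to multiples of kAlignment
	(256 bytes), and free chunks are kept in a singly linked list sorted by
	ascending size.
*/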


#include <RealtimeAlloc.h>

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include <OS.h>

#include <locks.h>
#include <kernel/util/DoublyLinkedList.h>


//#define TRACE_RTM
#ifdef TRACE_RTM
#	define TRACE(x...) printf(x);
#else
#	define TRACE(x...) ;
#endif


class FreeChunk {
public:
			void				SetTo(size_t size, FreeChunk* next);

			uint32				Size() const;
			uint32				CompleteSize() const { return fSize; }

			FreeChunk*			Next() const { return fNext; }
			void				SetNext(FreeChunk* next) { fNext = next; }

			FreeChunk*			Split(uint32 splitSize);
			bool				IsTouching(FreeChunk* link);
			FreeChunk*			Join(FreeChunk* link);
			void				Remove(rtm_pool* pool,
									FreeChunk* previous = NULL);
			void				Enqueue(rtm_pool* pool);

			void*				AllocatedAddress() const;
	static	FreeChunk*			SetToAllocated(void* allocated);
	static	addr_t				NextOffset() { return sizeof(uint32); }

private:
			uint32				fSize;
			FreeChunk*			fNext;
};
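
/*	Chunk memory layout, as implied by the class above (sketch only):
	fSize holds the complete chunk size including the four header bytes
	(NextOffset()). While a chunk is free, fNext lives in the first payload
	bytes; AllocatedAddress() returns exactly &fNext, so an allocation
	simply overwrites the link:

		this        this + NextOffset()
		v           v
		+-----------+---------------------------------+
		| fSize     | fNext (free) / payload (in use)  |
		+-----------+---------------------------------+

	SetToAllocated() is the inverse mapping: it steps NextOffset() bytes
	back from an allocated address to recover the chunk header.
*/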


struct rtm_pool : DoublyLinkedListLinkImpl<rtm_pool> {
	area_id		area;
	void*		heap_base;
	size_t		max_size;
	size_t		available;
	FreeChunk	free_anchor;
	mutex		lock;

	bool Contains(void* buffer) const;
	void Free(void* buffer);
};

typedef DoublyLinkedList<rtm_pool> PoolList;


const static uint32 kAlignment = 256;
	// all memory chunks will be a multiple of this
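	// The rounding used throughout is
	//	(size - 1 + kAlignment) & ~(kAlignment - 1);
	// for example (illustrative values): 100 -> 256, 256 -> 256, 257 -> 512.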

static mutex sPoolsLock = {-1, -1};
static PoolList sPools;


void
FreeChunk::SetTo(size_t size, FreeChunk* next)
{
	fSize = size;
	fNext = next;
}


/*!	Returns the number of bytes that can be allocated
	in this chunk.
*/
uint32
FreeChunk::Size() const
{
	return fSize - FreeChunk::NextOffset();
}


/*!	Splits this chunk at \a splitSize allocatable bytes and
	returns the resulting upper chunk.
*/
FreeChunk*
FreeChunk::Split(uint32 splitSize)
{
	splitSize = (splitSize - 1 + kAlignment) & ~(kAlignment - 1);

	FreeChunk* chunk
		= (FreeChunk*)((uint8*)this + FreeChunk::NextOffset() + splitSize);
	chunk->fSize = fSize - splitSize - FreeChunk::NextOffset();
	chunk->fNext = fNext;

	fSize = splitSize + FreeChunk::NextOffset();

	return chunk;
}
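
// Worked example for Split() (illustrative numbers): splitting a chunk with
// fSize == 1024 at splitSize == 256 leaves this chunk with fSize == 260
// (256 payload bytes plus the 4 byte header) and returns the upper remainder
// with fSize == 1024 - 256 - 4 == 764.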


/*!	Checks whether the specified chunk touches this chunk, so
	that the two could be joined.
*/
bool
FreeChunk::IsTouching(FreeChunk* chunk)
{
	return chunk
		&& (((uint8*)this + fSize == (uint8*)chunk)
			|| (uint8*)chunk + chunk->fSize == (uint8*)this);
}


/*!	Joins the specified chunk with this chunk and returns a pointer
	to the combined chunk - which will be one of the two original
	chunks.
	Note that the chunks must actually be touching, or else this
	method doesn't work correctly. Use FreeChunk::IsTouching() to
	check whether it can be applied.
*/
FreeChunk*
FreeChunk::Join(FreeChunk* chunk)
{
	if (chunk < this) {
		chunk->fSize += fSize;
		chunk->fNext = fNext;

		return chunk;
	}

	fSize += chunk->fSize;
	fNext = chunk->fNext;

	return this;
}


void
FreeChunk::Remove(rtm_pool* pool, FreeChunk* previous)
{
	if (previous == NULL) {
		// find the previous chunk in the list
		FreeChunk* chunk = pool->free_anchor.fNext;

		while (chunk != NULL && chunk != this) {
			previous = chunk;
			chunk = chunk->fNext;
		}

		if (chunk == NULL)
			return;
	}

	previous->fNext = fNext;
	fNext = NULL;
}


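/*!	Inserts this chunk into the pool's free list, which is kept sorted by
	ascending chunk size; the first-fit walk in rtm_alloc() over this list
	therefore behaves like a best-fit allocator.
*/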
void
FreeChunk::Enqueue(rtm_pool* pool)
{
	FreeChunk* chunk = pool->free_anchor.fNext;
	FreeChunk* last = &pool->free_anchor;
	while (chunk && chunk->Size() < fSize) {
		last = chunk;
		chunk = chunk->fNext;
	}

	fNext = chunk;
	last->fNext = this;
}


void*
FreeChunk::AllocatedAddress() const
{
	return (void*)&fNext;
}


FreeChunk*
FreeChunk::SetToAllocated(void* allocated)
{
	return (FreeChunk*)((uint8*)allocated - FreeChunk::NextOffset());
}


// #pragma mark - rtm_pool


bool
rtm_pool::Contains(void* buffer) const
{
	return (addr_t)heap_base <= (addr_t)buffer
		&& (addr_t)heap_base - 1 + max_size >= (addr_t)buffer;
}


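/*!	Returns \a allocated to the free list. The freed chunk is first merged
	with up to two touching free chunks (one on either side in memory) and
	then re-enqueued at its size-ordered position.
*/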
void
rtm_pool::Free(void* allocated)
{
	FreeChunk* freedChunk = FreeChunk::SetToAllocated(allocated);
	available += freedChunk->CompleteSize();

	// try to join the new free chunk with an existing one
	// it may be joined with up to two chunks

	FreeChunk* chunk = free_anchor.Next();
	FreeChunk* last = &free_anchor;
	int32 joinCount = 0;

	while (chunk) {
		if (chunk->IsTouching(freedChunk)) {
			// almost "insert" it into the list before joining
			// because the next pointer is inherited by the chunk
			freedChunk->SetNext(chunk->Next());
			freedChunk = chunk->Join(freedChunk);

			// remove the joined chunk from the list
			last->SetNext(freedChunk->Next());
			chunk = last;

			if (++joinCount == 2)
				break;
		}

		last = chunk;
		chunk = chunk->Next();
	}

	// enqueue the link at the right position; the
	// free link queue is ordered by size

	freedChunk->Enqueue(this);
}


// #pragma mark -


static rtm_pool*
pool_for(void* buffer)
{
	MutexLocker _(&sPoolsLock);

	PoolList::Iterator iterator = sPools.GetIterator();
	while (rtm_pool* pool = iterator.Next()) {
		if (pool->Contains(buffer))
			return pool;
	}

	return NULL;
}


static void
pool_init(void)
{
	mutex_init(&sPoolsLock, "rtm pools");
}


// #pragma mark - public API


status_t
rtm_create_pool(rtm_pool** _pool, size_t totalSize, const char* name)
{
	rtm_pool* pool = (rtm_pool*)malloc(sizeof(rtm_pool));
	if (pool == NULL)
		return B_NO_MEMORY;

	if (name == NULL)
		name = "realtime pool";

	status_t status = mutex_init(&pool->lock, name);
	if (status != B_OK) {
		free(pool);
		return status;
	}

	// Allocate enough space for at least one allocation over \a totalSize
	pool->max_size = (totalSize + sizeof(FreeChunk) - 1 + B_PAGE_SIZE)
		& ~(B_PAGE_SIZE - 1);
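		// E.g. (illustrative numbers): on a 32-bit build with
		// sizeof(FreeChunk) == 8 and B_PAGE_SIZE == 4096, a totalSize of
		// 65536 is rounded up to a max_size of 69632 bytes (17 pages).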

	area_id area = create_area(name, &pool->heap_base, B_ANY_ADDRESS,
		pool->max_size, B_LAZY_LOCK, B_READ_AREA | B_WRITE_AREA);
	if (area < 0) {
		mutex_destroy(&pool->lock);
		free(pool);
		return area;
	}

	pool->area = area;
	pool->available = pool->max_size - FreeChunk::NextOffset();

	// declare the whole heap as one chunk, and add it
	// to the free list

	FreeChunk* chunk = (FreeChunk*)pool->heap_base;
	chunk->SetTo(pool->max_size, NULL);

	pool->free_anchor.SetTo(0, chunk);

	*_pool = pool;

	static pthread_once_t sOnce = PTHREAD_ONCE_INIT;
	pthread_once(&sOnce, &pool_init);

	MutexLocker _(&sPoolsLock);
	sPools.Add(pool);
	return B_OK;
}
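

#if 0
// Minimal usage sketch of the pool API (not part of the original file; the
// pool name and sizes below are made up for illustration).
static void
example_pool_usage()
{
	rtm_pool* pool;
	if (rtm_create_pool(&pool, 64 * 1024, "example pool") != B_OK)
		return;

	// requests are rounded up to kAlignment (256) byte multiples
	void* buffer = rtm_alloc(pool, 1000);
	if (buffer != NULL) {
		memset(buffer, 0, 1000);
		rtm_free(buffer);
	}

	// a NULL pool falls back to malloc()/free()
	void* fallback = rtm_alloc(NULL, 16);
	rtm_free(fallback);

	rtm_delete_pool(pool);
}
#endif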


status_t
rtm_delete_pool(rtm_pool* pool)
{
	if (pool == NULL)
		return B_BAD_VALUE;

	mutex_lock(&pool->lock);

	{
		MutexLocker _(&sPoolsLock);
		sPools.Remove(pool);
	}

	delete_area(pool->area);
	mutex_destroy(&pool->lock);
	free(pool);

	return B_OK;
}


void*
rtm_alloc(rtm_pool* pool, size_t size)
{
	if (pool == NULL)
		return malloc(size);

	if (pool->heap_base == NULL || size == 0)
		return NULL;

	MutexLocker _(&pool->lock);

	// align the size requirement to a kAlignment bytes boundary
	size = (size - 1 + kAlignment) & ~(size_t)(kAlignment - 1);

	if (size > pool->available) {
		TRACE("malloc(): Out of memory!\n");
		return NULL;
	}

	FreeChunk* chunk = pool->free_anchor.Next();
	FreeChunk* last = &pool->free_anchor;
	while (chunk && chunk->Size() < size) {
		last = chunk;
		chunk = chunk->Next();
	}

	if (chunk == NULL) {
		// could not find a free chunk as large as needed
		TRACE("malloc(): Out of memory!\n");
		return NULL;
	}

	if (chunk->Size() > size + sizeof(FreeChunk) + kAlignment) {
		// if this chunk is bigger than the requested size,
		// we split it to form two chunks (with a minimal
		// size of kAlignment allocatable bytes).

		FreeChunk* freeChunk = chunk->Split(size);
		last->SetNext(freeChunk);

		// re-enqueue the free chunk at the correct position
		freeChunk->Remove(pool, last);
		freeChunk->Enqueue(pool);
	} else {
		// remove the chunk from the free list

		last->SetNext(chunk->Next());
	}

	// Account for the complete chunk size; in the unsplit case the chunk
	// may be larger than the aligned request, and Free() adds back
	// CompleteSize() as well.
	pool->available -= chunk->CompleteSize();

	TRACE("malloc(%lu) -> %p\n", size, chunk->AllocatedAddress());
	return chunk->AllocatedAddress();
}


status_t
rtm_free(void* allocated)
{
	if (allocated == NULL)
		return B_OK;

	TRACE("rtm_free(%p)\n", allocated);

	// find pool
	rtm_pool* pool = pool_for(allocated);
	if (pool == NULL) {
		free(allocated);
		return B_OK;
	}

	MutexLocker _(&pool->lock);
	pool->Free(allocated);
	return B_OK;
}


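/*!	Resizes \a *_buffer to \a newSize, allocating from the buffer's pool, or
	falling back to realloc() for foreign pointers. The old buffer is reused
	when the new size still fits and occupies more than a third of its chunk;
	e.g. (illustrative numbers) a chunk with 512 payload bytes is kept for
	newSize == 300, while newSize == 128 gets a fresh, smaller allocation
	and a copy.
*/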
status_t
rtm_realloc(void** _buffer, size_t newSize)
{
	if (_buffer == NULL)
		return B_BAD_VALUE;

	TRACE("rtm_realloc(%p, %lu)\n", *_buffer, newSize);

	void* oldBuffer = *_buffer;

	// find pool
	rtm_pool* pool = pool_for(oldBuffer);
	if (pool == NULL) {
		void* buffer = realloc(oldBuffer, newSize);
		if (buffer != NULL) {
			*_buffer = buffer;
			return B_OK;
		}
		return B_NO_MEMORY;
	}

	MutexLocker locker(&pool->lock);

	if (newSize == 0) {
		TRACE("realloc(%p, %lu) -> NULL\n", oldBuffer, newSize);
		pool->Free(oldBuffer);
		*_buffer = NULL;
		return B_OK;
	}

	size_t copySize = newSize;
	if (oldBuffer != NULL) {
		FreeChunk* oldChunk = FreeChunk::SetToAllocated(oldBuffer);

		// Check if the old buffer still fits, and if it makes sense to keep it
		if (oldChunk->Size() >= newSize && newSize > oldChunk->Size() / 3) {
			TRACE("realloc(%p, %lu) old buffer is large enough\n",
				oldBuffer, newSize);
			return B_OK;
		}

		if (copySize > oldChunk->Size())
			copySize = oldChunk->Size();
	}

	// rtm_alloc() acquires the pool lock itself, so it must not be held here
	locker.Unlock();

	void* newBuffer = rtm_alloc(pool, newSize);
	if (newBuffer == NULL)
		return B_NO_MEMORY;

	if (oldBuffer != NULL) {
		memcpy(newBuffer, oldBuffer, copySize);

		locker.Lock();
		pool->Free(oldBuffer);
	}

	TRACE("realloc(%p, %lu) -> %p\n", oldBuffer, newSize, newBuffer);
	*_buffer = newBuffer;
	return B_OK;
}


status_t
rtm_size_for(void* buffer)
{
	if (buffer == NULL)
		return 0;

	FreeChunk* chunk = FreeChunk::SetToAllocated(buffer);
	// TODO: we currently always return the actual chunk size, not the
	// allocated one
	return chunk->Size();
}


status_t
rtm_phys_size_for(void* buffer)
{
	if (buffer == NULL)
		return 0;

	FreeChunk* chunk = FreeChunk::SetToAllocated(buffer);
	return chunk->Size();
}


size_t
rtm_available(rtm_pool* pool)
{
	if (pool == NULL) {
		// whatever - might want to use system_info instead
		return 1024 * 1024;
	}

	return pool->available;
}


rtm_pool*
rtm_default_pool()
{
	// We always return NULL - the default pool will just use malloc()/free()
	return NULL;
}


#if 0
extern "C" {

// undocumented symbols that BeOS exports
status_t rtm_create_pool_etc(rtm_pool** out_pool, size_t total_size,
	const char* name, int32 param4, int32 param5, ...);
void rtm_get_pool(rtm_pool* pool, void* data, int32 param3, int32 param4,
	...);

}
#endif