xref: /haiku/src/system/libroot/posix/malloc/hoard2/arch-specific.cpp (revision 9a6a20d4689307142a7ed26a1437ba47e244e73f)
///-*-C++-*-//////////////////////////////////////////////////////////////////
//
// Hoard: A Fast, Scalable, and Memory-Efficient Allocator
//        for Shared-Memory Multiprocessors
// Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
//
// Copyright (c) 1998-2000, The University of Texas at Austin.
//
// This library is free software; you can redistribute it and/or modify
// it under the terms of the GNU Library General Public License as
// published by the Free Software Foundation, http://www.fsf.org.
//
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Library General Public License for more details.
//
//////////////////////////////////////////////////////////////////////////////

#include "arch-specific.h"
#include "heap.h"

#include <OS.h>
#include <Debug.h>
#include <syscalls.h>

#include <libroot_private.h>

#include <stdlib.h>
#include <unistd.h>

//#define TRACE_CHUNKS
#ifdef TRACE_CHUNKS
#	define CTRACE(x) debug_printf x
#else
#	define CTRACE(x) ;
#endif

using namespace BPrivate;

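// Regions handed back via hoardUnsbrk() start with a free_chunk header and
// form a singly linked free list, kept sorted by ascending chunk size (see
// insert_chunk()).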
struct free_chunk {
	free_chunk	*next;
	size_t		size;
};


static const size_t kInitialHeapSize = 64 * B_PAGE_SIZE;
	// that's about what hoard allocates anyway (should be kHeapIncrement
	// aligned)

static const size_t kHeapIncrement = 16 * B_PAGE_SIZE;
	// the steps in which to increase the heap size (must be a power of 2)

#if B_HAIKU_64_BIT
static const addr_t kHeapReservationBase = 0x100100000000;
static const addr_t kHeapReservationSize = 0x1000000000;
#else
static const addr_t kHeapReservationBase = 0x18000000;
static const addr_t kHeapReservationSize = 0x48000000;
#endif

static area_id sHeapArea;
static hoardLockType sHeapLock;
static void *sHeapBase;
static addr_t sFreeHeapBase;
static size_t sFreeHeapSize, sHeapAreaSize;
static free_chunk *sFreeChunks;

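// Called in the child team after a fork(). The heap area has been copied
// into the child under a new area ID, so the cached ID must be looked up
// again.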
void
__init_after_fork(void)
{
	// find the heap area
	sHeapArea = area_for((void*)sFreeHeapBase);
	if (sHeapArea < 0) {
		// Where did it go?
		debug_printf("hoard: init_after_fork(): thread %" B_PRId32 ", heap "
			"area not found! Base address: %p\n", find_thread(NULL),
			(void *)sFreeHeapBase);
		exit(1);
	}
}


extern "C" status_t
__init_heap(void)
{
	hoardHeap::initNumProcs();

	// Reserve kHeapReservationSize bytes of address space for the heap,
	// starting at kHeapReservationBase (384 MB and 1152 MB respectively on
	// 32-bit). Parts of the reservation may be reclaimed by other areas, but
	// the maximum size of the heap is guaranteed until the space is really
	// needed.
	sHeapBase = (void *)kHeapReservationBase;
	status_t status = _kern_reserve_address_range((addr_t *)&sHeapBase,
		B_RANDOMIZED_BASE_ADDRESS, kHeapReservationSize);
	if (status != B_OK)
		sHeapBase = NULL;

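	// Executables built against the old (gcc2) ABI get an executable heap;
	// BeOS-era code may rely on being able to run code from malloc()ed
	// memory.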
	uint32 protection = B_READ_AREA | B_WRITE_AREA;
	if (__gABIVersion < B_HAIKU_ABI_GCC_2_HAIKU)
		protection |= B_EXECUTE_AREA;
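	// If the address range reservation succeeded, place the area exactly at
	// the reserved base; otherwise let the kernel pick a randomized base.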
	sHeapArea = create_area("heap", (void **)&sHeapBase,
		status == B_OK ? B_EXACT_ADDRESS : B_RANDOMIZED_BASE_ADDRESS,
		kInitialHeapSize, B_NO_LOCK, protection);
	if (sHeapArea < B_OK)
		return sHeapArea;

	sFreeHeapBase = (addr_t)sHeapBase;
	sHeapAreaSize = kInitialHeapSize;

	hoardLockInit(sHeapLock, "heap");

	return B_OK;
}


extern "C" void
__heap_terminate_after()
{
	// nothing to do
}


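// Inserts newChunk after the last free chunk that is smaller than it, so a
// size-sorted free list stays sorted.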
static void
insert_chunk(free_chunk *newChunk)
{
	free_chunk *chunk = sFreeChunks, *smaller = NULL;
	for (; chunk != NULL; chunk = chunk->next) {
		if (chunk->size < newChunk->size)
			smaller = chunk;
		else
			break;
	}

	if (smaller) {
		newChunk->next = smaller->next;
		smaller->next = newChunk;
	} else {
		newChunk->next = sFreeChunks;
		sFreeChunks = newChunk;
	}
}


namespace BPrivate {

void *
hoardSbrk(long size)
{
	assert(size > 0);
	CTRACE(("sbrk: size = %ld\n", size));

	// align size request
	size = (size + hoardHeap::ALIGNMENT - 1) & ~(hoardHeap::ALIGNMENT - 1);

	// choose correct protection flags
	uint32 protection = B_READ_AREA | B_WRITE_AREA;
	if (__gABIVersion < B_HAIKU_ABI_GCC_2_HAIKU)
		protection |= B_EXECUTE_AREA;

	hoardLock(sHeapLock);

	// find chunk in free list
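	// (it is sorted by ascending size, so the first chunk that is large
	// enough is also the smallest one that fits)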
	free_chunk *chunk = sFreeChunks, *last = NULL;
	for (; chunk != NULL; chunk = chunk->next) {
		CTRACE(("  chunk %p (%ld)\n", chunk, chunk->size));

		if (chunk->size < (size_t)size) {
			last = chunk;
			continue;
		}

		// this chunk is large enough to satisfy the request

		SERIAL_PRINT(("HEAP-%" B_PRId32 ": "
			"found free chunk to hold %ld bytes\n", find_thread(NULL), size));

		void *address = (void *)chunk;

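		// Split off the remainder only if it can hold a free_chunk header of
		// its own; otherwise hand out the whole chunk.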
		if (chunk->size > (size_t)size + sizeof(free_chunk)) {
			// divide this chunk into smaller bits
			size_t newSize = chunk->size - size;
			free_chunk *next = chunk->next;

			chunk = (free_chunk *)((addr_t)chunk + size);
			chunk->next = next;
			chunk->size = newSize;

			if (last != NULL) {
				last->next = next;
				insert_chunk(chunk);
			} else
				sFreeChunks = chunk;
		} else {
			chunk = chunk->next;

			if (last != NULL)
				last->next = chunk;
			else
				sFreeChunks = chunk;
		}

		hoardUnlock(sHeapLock);
		return address;
	}

	// There was no chunk, let's see if the area is large enough

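	// sFreeHeapSize is the bump allocation offset within the heap area:
	// advance it, and grow the area in kHeapIncrement steps once it no
	// longer fits.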
	size_t oldHeapSize = sFreeHeapSize;
	sFreeHeapSize += size;

	// round to next heap increment aligned size
	size_t incrementAlignedSize = (sFreeHeapSize + kHeapIncrement - 1)
		& ~(kHeapIncrement - 1);

	if (incrementAlignedSize <= sHeapAreaSize) {
		SERIAL_PRINT(("HEAP-%" B_PRId32 ": heap area large enough for %ld\n",
			find_thread(NULL), size));
		// the area is large enough already
		hoardUnlock(sHeapLock);
		return (void *)(sFreeHeapBase + oldHeapSize);
	}

	// We need to grow the area

	SERIAL_PRINT(("HEAP-%" B_PRId32 ": need to resize heap area to %ld "
		"(%ld requested)\n", find_thread(NULL), incrementAlignedSize, size));

	status_t status = resize_area(sHeapArea, incrementAlignedSize);
	if (status != B_OK) {
		// Either the system is out of memory or another area is in the way and
		// prevents ours from being resized. As a special case of the latter
		// the user might have mmap()ed something over malloc()ed memory. This
		// splits the heap area in two, the first one retaining the original
		// area ID. In either case, if there's still memory, it is a good idea
		// to try and allocate a new area.
		sFreeHeapSize = oldHeapSize;

		if (status == B_NO_MEMORY) {
			hoardUnlock(sHeapLock);
			return NULL;
		}

		size_t newHeapSize = (size + kHeapIncrement - 1) / kHeapIncrement
			* kHeapIncrement;

		// First try the location directly after the current heap area, if
		// that is still within the reserved memory region.
		void* base = (void*)(sFreeHeapBase + sHeapAreaSize);
		area_id area = -1;
		if (sHeapBase != NULL
			&& base >= sHeapBase
			&& (addr_t)base + newHeapSize
				<= (addr_t)sHeapBase + kHeapReservationSize) {
			area = create_area("heap", &base, B_EXACT_ADDRESS, newHeapSize,
				B_NO_LOCK, protection);

			if (area == B_NO_MEMORY) {
				hoardUnlock(sHeapLock);
				return NULL;
			}
		}

		// If we don't have an area yet, try again and let the kernel pick a
		// free (randomized) location.
		if (area < 0) {
			base = (void*)(sFreeHeapBase + sHeapAreaSize);
			area = create_area("heap", &base, B_RANDOMIZED_BASE_ADDRESS,
				newHeapSize, B_NO_LOCK, protection);
		}

		if (area < 0) {
			hoardUnlock(sHeapLock);
			return NULL;
		}

		// We have a new area, so make it the new heap area.
		sHeapArea = area;
		sFreeHeapBase = (addr_t)base;
		sHeapAreaSize = newHeapSize;
		sFreeHeapSize = size;
		oldHeapSize = 0;
	} else
		sHeapAreaSize = incrementAlignedSize;

	hoardUnlock(sHeapLock);
	return (void *)(sFreeHeapBase + oldHeapSize);
}


void
hoardUnsbrk(void *ptr, long size)
{
	CTRACE(("unsbrk: %p, %ld!\n", ptr, size));

	hoardLock(sHeapLock);

	// TODO: hoard always allocates and frees in typical sizes, so we could
	//	save a lot of effort if we just had a similar mechanism

	// We add this chunk to our free list - first, try to find an adjacent
	// chunk, so that we can merge them together
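	// (only one neighbor is merged; the chunk is not coalesced with free
	// chunks on both sides)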

	free_chunk *chunk = sFreeChunks, *last = NULL, *smaller = NULL;
	for (; chunk != NULL; chunk = chunk->next) {
		if ((addr_t)chunk + chunk->size == (addr_t)ptr
			|| (addr_t)ptr + size == (addr_t)chunk) {
			// chunks are adjacent - merge them

			CTRACE(("  found adjacent chunks: %p, %ld\n", chunk, chunk->size));
			if (last)
				last->next = chunk->next;
			else
				sFreeChunks = chunk->next;

			if ((addr_t)chunk < (addr_t)ptr)
				chunk->size += size;
			else {
				free_chunk *newChunk = (free_chunk *)ptr;
				newChunk->next = chunk->next;
				newChunk->size = size + chunk->size;
				chunk = newChunk;
			}

			insert_chunk(chunk);
			hoardUnlock(sHeapLock);
			return;
		}

		last = chunk;

		if (chunk->size < (size_t)size)
			smaller = chunk;
	}

	// we didn't find an adjacent chunk, so insert the new chunk into the list

	free_chunk *newChunk = (free_chunk *)ptr;
	newChunk->size = size;
	if (smaller) {
		newChunk->next = smaller->next;
		smaller->next = newChunk;
	} else {
		newChunk->next = sFreeChunks;
		sFreeChunks = newChunk;
	}

	hoardUnlock(sHeapLock);
}


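// The hoard locking hooks map straight onto libroot's adaptive mutexes and
// the kernel's thread yield syscall.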
void
hoardLockInit(hoardLockType &lock, const char *name)
{
	mutex_init_etc(&lock, name, MUTEX_FLAG_ADAPTIVE);
}


void
hoardLock(hoardLockType &lock)
{
	mutex_lock(&lock);
}


void
hoardUnlock(hoardLockType &lock)
{
	mutex_unlock(&lock);
}


void
hoardYield(void)
{
	_kern_thread_yield();
}

}	// namespace BPrivate