/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */


#include "slab_private.h"

#include <stdio.h>
#include <string.h>

#include <algorithm>

#include <debug.h>
#include <heap.h>
#include <kernel.h> // for ROUNDUP
#include <malloc.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "ObjectCache.h"
#include "MemoryManager.h"


#if USE_SLAB_ALLOCATOR_FOR_MALLOC


//#define TEST_ALL_CACHES_DURING_BOOT

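// The allocation size classes served by dedicated object caches. The step
// between consecutive sizes doubles at every power of two (8 up to 32,
// 16 up to 128, ..., 512 up to 8192), mirroring size_to_index() below.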
static const size_t kBlockSizes[] = {
	16, 24, 32, 48, 64, 80, 96, 112,
	128, 160, 192, 224, 256, 320, 384, 448,
	512, 640, 768, 896, 1024, 1280, 1536, 1792,
	2048, 2560, 3072, 3584, 4096, 4608, 5120, 5632,
	6144, 6656, 7168, 7680, 8192,
};

static const size_t kNumBlockSizes = B_COUNT_OF(kBlockSizes);

static object_cache* sBlockCaches[kNumBlockSizes];

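// Bootstrap memory for small allocations made before the block caches exist.
// It is carved out of raw SLAB_CHUNK_SIZE_SMALL chunks; allocations from it
// are permanent and must never be freed.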
static addr_t sBootStrapMemory = 0;
static size_t sBootStrapMemorySize = 0;
static size_t sUsedBootStrapMemory = 0;


RANGE_MARKER_FUNCTION_BEGIN(slab_allocator)


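/*!	Maps an allocation size to the index of the smallest fitting size class
	in kBlockSizes, or returns -1 for sizes beyond the largest class (8192).
	For example, size_to_index(100) == 3 + (100 - 32 - 1) / 16 == 7, and
	kBlockSizes[7] == 112 is the smallest block size >= 100.
*/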
static int
size_to_index(size_t size)
{
	if (size <= 16)
		return 0;
	if (size <= 32)
		return 1 + (size - 16 - 1) / 8;
	if (size <= 128)
		return 3 + (size - 32 - 1) / 16;
	if (size <= 256)
		return 9 + (size - 128 - 1) / 32;
	if (size <= 512)
		return 13 + (size - 256 - 1) / 64;
	if (size <= 1024)
		return 17 + (size - 512 - 1) / 128;
	if (size <= 2048)
		return 21 + (size - 1024 - 1) / 256;
	if (size <= 8192)
		return 25 + (size - 2048 - 1) / 512;

	return -1;
}


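/*!	Allocates \a size bytes, honoring \a alignment when it exceeds
	kMinObjectAlignment. Sizes up to 8192 bytes are served from the per-size
	object caches; anything larger is passed on to the MemoryManager.
*/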
static void*
block_alloc(size_t size, size_t alignment, uint32 flags)
{
	if (alignment > kMinObjectAlignment) {
		// Make size >= alignment and a power of two. This is sufficient, since
		// all of our object caches with power of two sizes are aligned. We may
		// waste quite a bit of memory, but memalign() is very rarely used
		// in the kernel and always with power of two size == alignment anyway.
		ASSERT((alignment & (alignment - 1)) == 0);
		while (alignment < size)
			alignment <<= 1;
		size = alignment;

		// If we're not using an object cache, make sure that the memory
		// manager knows it has to align the allocation.
		if (size > kBlockSizes[kNumBlockSizes - 1])
			flags |= CACHE_ALIGN_ON_SIZE;
	}

	// allocate from the respective object cache, if any
	int index = size_to_index(size);
	if (index >= 0)
		return object_cache_alloc(sBlockCaches[index], flags);

	// the allocation is too large for our object caches -- ask the memory
	// manager
	void* block;
	if (MemoryManager::AllocateRaw(size, flags, block) != B_OK)
		return NULL;

	return block;
}


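/*!	Early-boot allocator, used before the block caches have been created.
	Large requests are satisfied by the MemoryManager directly; small ones
	come from a bump-pointer bootstrap region. Bootstrap allocations are
	permanent, and when a fresh bootstrap chunk is allocated, the unused
	remainder of the previous one is abandoned.
*/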
void*
block_alloc_early(size_t size)
{
	int index = size_to_index(size);
	if (index >= 0 && sBlockCaches[index] != NULL)
		return object_cache_alloc(sBlockCaches[index], CACHE_DURING_BOOT);

	if (size > SLAB_CHUNK_SIZE_SMALL) {
		// This is a sufficiently large allocation -- just ask the memory
		// manager directly.
		void* block;
		if (MemoryManager::AllocateRaw(size, 0, block) != B_OK)
			return NULL;

		return block;
	}

	// A small allocation, but no object cache yet. Use the bootstrap memory.
	// This allocation must never be freed!
	if (sBootStrapMemorySize - sUsedBootStrapMemory < size) {
		// We need more memory.
		void* block;
		if (MemoryManager::AllocateRaw(SLAB_CHUNK_SIZE_SMALL, 0, block) != B_OK)
			return NULL;
		sBootStrapMemory = (addr_t)block;
		sBootStrapMemorySize = SLAB_CHUNK_SIZE_SMALL;
		sUsedBootStrapMemory = 0;
	}

	size_t neededSize = ROUNDUP(size, sizeof(double));
	if (sUsedBootStrapMemory + neededSize > sBootStrapMemorySize)
		return NULL;
	void* block = (void*)(sBootStrapMemory + sUsedBootStrapMemory);
	sUsedBootStrapMemory += neededSize;

	return block;
}


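/*!	Frees a block allocated by block_alloc().
	MemoryManager::FreeRawOrReturnCache() either frees a raw (large)
	allocation itself or returns the object cache the block was
	allocated from.
*/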
static void
block_free(void* block, uint32 flags)
{
	if (block == NULL)
		return;

	ObjectCache* cache = MemoryManager::FreeRawOrReturnCache(block, flags);
	if (cache != NULL) {
		// a regular small allocation
		ASSERT(cache->object_size >= kBlockSizes[0]);
		ASSERT(cache->object_size <= kBlockSizes[kNumBlockSizes - 1]);
		ASSERT(cache == sBlockCaches[size_to_index(cache->object_size)]);
		object_cache_free(cache, block, flags);
	}
}


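/*!	Creates one object cache per entry in kBlockSizes. Power of two sizes
	are aligned to themselves, so that memalign() requests can be served
	from the caches as well.
*/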
void
block_allocator_init_boot()
{
	for (size_t index = 0; index < kNumBlockSizes; index++) {
		char name[32];
		snprintf(name, sizeof(name), "block allocator: %" B_PRIuSIZE,
			kBlockSizes[index]);

		uint32 flags = CACHE_DURING_BOOT;
		size_t size = kBlockSizes[index];

		// align the power of two objects to their size
		size_t alignment = (size & (size - 1)) == 0 ? size : 0;

		// For the larger allocation sizes disable the object depot, so we
		// don't keep lots of unused objects around.
		if (size > 2048)
			flags |= CACHE_NO_DEPOT;

		sBlockCaches[index] = create_object_cache_etc(name, size, alignment, 0,
			0, 0, flags, NULL, NULL, NULL, NULL);
		if (sBlockCaches[index] == NULL)
			panic("allocator: failed to init block cache");
	}
}


void
block_allocator_init_rest()
{
#ifdef TEST_ALL_CACHES_DURING_BOOT
	// allocate and free one object of each block size
	for (size_t index = 0; index < kNumBlockSizes; index++)
		block_free(block_alloc(kBlockSizes[index], 0, 0), 0);
#endif
}


// #pragma mark - public API


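/*!	Returns memory aligned to \a alignment. block_alloc() asserts that any
	alignment beyond kMinObjectAlignment is a power of two.
*/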
void*
memalign(size_t alignment, size_t size)
{
	return block_alloc(size, alignment, 0);
}


void*
memalign_etc(size_t alignment, size_t size, uint32 flags)
{
	return block_alloc(size, alignment, flags & CACHE_ALLOC_FLAGS);
}


int
posix_memalign(void** _pointer, size_t alignment, size_t size)
{
	if ((alignment & (sizeof(void*) - 1)) != 0 || _pointer == NULL)
		return B_BAD_VALUE;
	*_pointer = block_alloc(size, alignment, 0);
	return 0;
}


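/*!	Frees an allocation, deferring the operation via deferred_free() when the
	caller cannot lock the kernel address space
	(CACHE_DONT_LOCK_KERNEL_SPACE).
*/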
void
free_etc(void* address, uint32 flags)
{
	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
		deferred_free(address);
		return;
	}

	block_free(address, flags & CACHE_ALLOC_FLAGS);
}


void*
malloc(size_t size)
{
	return block_alloc(size, 0, 0);
}


void
free(void* address)
{
	block_free(address, 0);
}


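/*!	Standard realloc() semantics: a NULL \a address allocates, a zero
	\a newSize frees. Otherwise, unless the old allocation's size already
	matches, the data is copied into a newly allocated block and the old
	block is freed.
*/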
void*
realloc_etc(void* address, size_t newSize, uint32 flags)
{
	if (newSize == 0) {
		block_free(address, flags);
		return NULL;
	}

	if (address == NULL)
		return block_alloc(newSize, 0, flags);

	size_t oldSize;
	ObjectCache* cache = MemoryManager::GetAllocationInfo(address, oldSize);
	if (cache == NULL && oldSize == 0) {
		panic("block_realloc(): allocation %p not known", address);
		return NULL;
	}

	if (oldSize == newSize)
		return address;

	void* newBlock = block_alloc(newSize, 0, flags);
	if (newBlock == NULL)
		return NULL;

	memcpy(newBlock, address, std::min(oldSize, newSize));

	block_free(address, flags);

	return newBlock;
}


void*
realloc(void* address, size_t newSize)
{
	return realloc_etc(address, newSize, 0);
}


#else


void*
block_alloc_early(size_t size)
{
	panic("block allocator not enabled!");
	return NULL;
}


void
block_allocator_init_boot()
{
}


void
block_allocator_init_rest()
{
}


#endif	// USE_SLAB_ALLOCATOR_FOR_MALLOC


RANGE_MARKER_FUNCTION_END(slab_allocator)