xref: /haiku/src/system/kernel/slab/allocator.cpp (revision 984f843b917a1c4e077915c5961a6ef1cf8dabc7)
/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */


#include "slab_private.h"

#include <stdio.h>
#include <string.h>

#include <algorithm>

#include <debug.h>
#include <heap.h>
#include <kernel.h> // for ROUNDUP
#include <malloc.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "ObjectCache.h"
#include "MemoryManager.h"


#if USE_SLAB_ALLOCATOR_FOR_MALLOC


//#define TEST_ALL_CACHES_DURING_BOOT

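// The size classes below are spaced so that the gap between neighbors grows
// with the class: 8 bytes apart up to 32, 16 up to 128, 32 up to 256, and so
// on up to 512 byte steps below 8192, which bounds the per-allocation waste
// to less than one step. The trailing 0 is a sentinel; kNumBlockSizes
// deliberately excludes it.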
static const size_t kBlockSizes[] = {
	16, 24, 32, 48, 64, 80, 96, 112,
	128, 160, 192, 224, 256, 320, 384, 448,
	512, 640, 768, 896, 1024, 1280, 1536, 1792,
	2048, 2560, 3072, 3584, 4096, 4608, 5120, 5632,
	6144, 6656, 7168, 7680, 8192,
	0
};

static const size_t kNumBlockSizes = sizeof(kBlockSizes) / sizeof(size_t) - 1;

static object_cache* sBlockCaches[kNumBlockSizes];

static addr_t sBootStrapMemory = 0;
static size_t sBootStrapMemorySize = 0;
static size_t sUsedBootStrapMemory = 0;


RANGE_MARKER_FUNCTION_BEGIN(slab_allocator)

static int
size_to_index(size_t size)
{
	if (size <= 16)
		return 0;
	if (size <= 32)
		return 1 + (size - 16 - 1) / 8;
	if (size <= 128)
		return 3 + (size - 32 - 1) / 16;
	if (size <= 256)
		return 9 + (size - 128 - 1) / 32;
	if (size <= 512)
		return 13 + (size - 256 - 1) / 64;
	if (size <= 1024)
		return 17 + (size - 512 - 1) / 128;
	if (size <= 2048)
		return 21 + (size - 1024 - 1) / 256;
	if (size <= 8192)
		return 25 + (size - 2048 - 1) / 512;

	return -1;
}
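
// Illustrative example: size_to_index(100) takes the "size <= 128" branch:
// 3 + (100 - 32 - 1) / 16 == 7, and kBlockSizes[7] == 112 is the smallest
// block size that fits 100 bytes. Sizes above 8192 yield -1 and fall through
// to the memory manager instead.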


static void*
block_alloc(size_t size, size_t alignment, uint32 flags)
{
	if (alignment > kMinObjectAlignment) {
		// Make size >= alignment and a power of two. This is sufficient, since
		// all of our object caches with power of two sizes are aligned. We may
		// waste quite a bit of memory, but memalign() is very rarely used
		// in the kernel and always with power of two size == alignment anyway.
		ASSERT((alignment & (alignment - 1)) == 0);
		while (alignment < size)
			alignment <<= 1;
		size = alignment;

		// If we're not using an object cache, make sure that the memory
		// manager knows it has to align the allocation.
		if (size > kBlockSizes[kNumBlockSizes - 1])
			flags |= CACHE_ALIGN_ON_SIZE;
	}

	// allocate from the respective object cache, if any
	int index = size_to_index(size);
	if (index >= 0)
		return object_cache_alloc(sBlockCaches[index], flags);

	// the allocation is too large for our object caches -- ask the memory
	// manager
	void* block;
	if (MemoryManager::AllocateRaw(size, flags, block) != B_OK)
		return NULL;

	return block;
}
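
// Illustrative example: block_alloc(3000, 1024, 0) grows the alignment to
// 4096 and sets size = 4096, so the request is served from the 4096 byte
// object cache (size_to_index(4096) == 28), which is aligned to its object
// size because 4096 is a power of two.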


void*
block_alloc_early(size_t size)
{
	int index = size_to_index(size);
	if (index >= 0 && sBlockCaches[index] != NULL)
		return object_cache_alloc(sBlockCaches[index], CACHE_DURING_BOOT);

	if (size > SLAB_CHUNK_SIZE_SMALL) {
		// This is a sufficiently large allocation -- just ask the memory
		// manager directly.
		void* block;
		if (MemoryManager::AllocateRaw(size, 0, block) != B_OK)
			return NULL;

		return block;
	}

	// A small allocation, but no object cache yet. Use the bootstrap memory.
	// This allocation must never be freed!
	if (sBootStrapMemorySize - sUsedBootStrapMemory < size) {
		// We need more memory.
		void* block;
		if (MemoryManager::AllocateRaw(SLAB_CHUNK_SIZE_SMALL, 0, block) != B_OK)
			return NULL;
		sBootStrapMemory = (addr_t)block;
		sBootStrapMemorySize = SLAB_CHUNK_SIZE_SMALL;
		sUsedBootStrapMemory = 0;
	}

	size_t neededSize = ROUNDUP(size, sizeof(double));
	if (sUsedBootStrapMemory + neededSize > sBootStrapMemorySize)
		return NULL;
	void* block = (void*)(sBootStrapMemory + sUsedBootStrapMemory);
	sUsedBootStrapMemory += neededSize;

	return block;
}
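
// Illustrative example: an early 20 byte request with no block cache in place
// yet is rounded up to ROUNDUP(20, sizeof(double)) == 24 bytes (assuming an
// 8 byte double) and carved from the current bootstrap chunk, which keeps the
// bump pointer, and thus every returned block, 8 byte aligned.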


static void
block_free(void* block, uint32 flags)
{
	if (block == NULL)
		return;

	ObjectCache* cache = MemoryManager::FreeRawOrReturnCache(block, flags);
	if (cache != NULL) {
		// a regular small allocation
		ASSERT(cache->object_size >= kBlockSizes[0]);
		ASSERT(cache->object_size <= kBlockSizes[kNumBlockSizes - 1]);
		ASSERT(cache == sBlockCaches[size_to_index(cache->object_size)]);
		object_cache_free(cache, block, flags);
	}
}


void
block_allocator_init_boot()
{
	for (int index = 0; kBlockSizes[index] != 0; index++) {
		char name[32];
		snprintf(name, sizeof(name), "block allocator: %lu",
			kBlockSizes[index]);

		uint32 flags = CACHE_DURING_BOOT;
		size_t size = kBlockSizes[index];

		// align the power of two objects to their size
		size_t alignment = (size & (size - 1)) == 0 ? size : 0;

		// For the larger allocation sizes disable the object depot, so we
		// don't keep lots of unused objects around.
		if (size > 2048)
			flags |= CACHE_NO_DEPOT;

		sBlockCaches[index] = create_object_cache_etc(name, size, alignment, 0,
			0, 0, flags, NULL, NULL, NULL, NULL);
		if (sBlockCaches[index] == NULL)
			panic("allocator: failed to init block cache");
	}
}
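
// Illustrative example: size == 96 fails the power of two test
// ((96 & 95) == 64 != 0), so that cache keeps the default minimal alignment,
// while the 4096 byte cache is created with alignment == 4096 -- the property
// block_alloc() relies on to satisfy memalign() requests.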


void
block_allocator_init_rest()
{
#ifdef TEST_ALL_CACHES_DURING_BOOT
	for (int index = 0; kBlockSizes[index] != 0; index++) {
		block_free(block_alloc(kBlockSizes[index] - sizeof(boundary_tag), 0,
			0), 0);
	}
#endif
}


// #pragma mark - public API


void*
memalign(size_t alignment, size_t size)
{
	return block_alloc(size, alignment, 0);
}


void *
memalign_etc(size_t alignment, size_t size, uint32 flags)
{
	return block_alloc(size, alignment, flags & CACHE_ALLOC_FLAGS);
}


int
posix_memalign(void** _pointer, size_t alignment, size_t size)
{
	if ((alignment & (sizeof(void*) - 1)) != 0 || _pointer == NULL)
		return B_BAD_VALUE;
	*_pointer = block_alloc(size, alignment, 0);
	return 0;
}


void
free_etc(void *address, uint32 flags)
{
	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
		deferred_free(address);
		return;
	}

	block_free(address, flags & CACHE_ALLOC_FLAGS);
}
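
// Illustrative example: a caller that must not lock the kernel address space
// (for instance because it already holds that lock) frees with
//	free_etc(block, CACHE_DONT_LOCK_KERNEL_SPACE);
// which hands the block to deferred_free() to be released later from a safer
// context.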


void*
malloc(size_t size)
{
	return block_alloc(size, 0, 0);
}


void
free(void* address)
{
	block_free(address, 0);
}


void*
realloc_etc(void* address, size_t newSize, uint32 flags)
{
	if (newSize == 0) {
		block_free(address, flags);
		return NULL;
	}

	if (address == NULL)
		return block_alloc(newSize, 0, flags);

	size_t oldSize;
	ObjectCache* cache = MemoryManager::GetAllocationInfo(address, oldSize);
	if (cache == NULL && oldSize == 0) {
		panic("block_realloc(): allocation %p not known", address);
		return NULL;
	}

	if (oldSize == newSize)
		return address;

	void* newBlock = block_alloc(newSize, 0, flags);
	if (newBlock == NULL)
		return NULL;

	memcpy(newBlock, address, std::min(oldSize, newSize));

	block_free(address, flags);

	return newBlock;
}
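
// Note: oldSize is the size of the underlying block, not the size originally
// requested. Assuming MemoryManager::GetAllocationInfo() reports the cache's
// object_size, reallocating a 100 byte allocation (served from the 112 byte
// cache) returns the same address only for newSize == 112; any other size
// allocates a new block and copies std::min(oldSize, newSize) bytes.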


void*
realloc(void* address, size_t newSize)
{
	return realloc_etc(address, newSize, 0);
}


#else


void*
block_alloc_early(size_t size)
{
	panic("block allocator not enabled!");
	return NULL;
}


void
block_allocator_init_boot()
{
}


void
block_allocator_init_rest()
{
}


#endif	// USE_SLAB_ALLOCATOR_FOR_MALLOC


RANGE_MARKER_FUNCTION_END(slab_allocator)