xref: /haiku/src/system/kernel/slab/allocator.cpp (revision dfc8a217db488098641462dfc334dcc0f7d62456)
/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */


#include "slab_private.h"

#include <stdio.h>
#include <string.h>

#include <algorithm>

#include <debug.h>
#include <heap.h>
#include <kernel.h> // for ROUNDUP
#include <malloc.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "ObjectCache.h"
#include "MemoryManager.h"


#define DEBUG_ALLOCATOR
//#define TEST_ALL_CACHES_DURING_BOOT

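// The block sizes our object caches serve, terminated by a 0 sentinel.
// kNumBlockSizes counts only the real sizes, i.e. it excludes the
// terminating 0 entry.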
static const size_t kBlockSizes[] = {
	16, 24, 32, 48, 64, 80, 96, 112,
	128, 160, 192, 224, 256, 320, 384, 448,
	512, 640, 768, 896, 1024, 1280, 1536, 1792,
	2048, 2560, 3072, 3584, 4096, 4608, 5120, 5632,
	6144, 6656, 7168, 7680, 8192,
	0
};

static const size_t kNumBlockSizes = sizeof(kBlockSizes) / sizeof(size_t) - 1;

static object_cache* sBlockCaches[kNumBlockSizes];

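// Bootstrap memory used by block_alloc_early() for small allocations made
// before the respective object caches exist. It is carved up linearly, and
// such allocations can never be freed.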
static addr_t sBootStrapMemory = 0;
static size_t sBootStrapMemorySize = 0;
static size_t sUsedBootStrapMemory = 0;


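// Maps an allocation size to the index of the smallest fitting entry in
// kBlockSizes, or -1 if the size exceeds the largest block size. The table
// spacing doubles with each power-of-two range (steps of 8 up to 32, 16 up
// to 128, ..., 512 up to 8192), which is what the offsets and divisors
// below encode. For example, size 100 takes the "<= 128" branch:
// 3 + (100 - 32 - 1) / 16 = 3 + 4 = 7, and kBlockSizes[7] == 112 is the
// smallest class that fits.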
static int
size_to_index(size_t size)
{
	if (size <= 16)
		return 0;
	if (size <= 32)
		return 1 + (size - 16 - 1) / 8;
	if (size <= 128)
		return 3 + (size - 32 - 1) / 16;
	if (size <= 256)
		return 9 + (size - 128 - 1) / 32;
	if (size <= 512)
		return 13 + (size - 256 - 1) / 64;
	if (size <= 1024)
		return 17 + (size - 512 - 1) / 128;
	if (size <= 2048)
		return 21 + (size - 1024 - 1) / 256;
	if (size <= 8192)
		return 25 + (size - 2048 - 1) / 512;

	return -1;
}


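// Allocates a block of at least the given size. Sizes that fit one of the
// kBlockSizes classes are served from the matching object cache; larger
// requests go to the MemoryManager directly. Alignments beyond
// kMinObjectAlignment are handled by rounding the size up to a power of
// two, since the power-of-two caches are aligned to their object size.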
void*
block_alloc(size_t size, size_t alignment, uint32 flags)
{
	if (alignment > kMinObjectAlignment) {
		// Make size >= alignment and a power of two. This is sufficient, since
		// all of our object caches with power of two sizes are aligned. We may
		// waste quite a bit of memory, but memalign() is very rarely used
		// in the kernel and always with power of two size == alignment anyway.
		ASSERT((alignment & (alignment - 1)) == 0);
		while (alignment < size)
			alignment <<= 1;
		size = alignment;

		// If we're not using an object cache, make sure that the memory
		// manager knows it has to align the allocation. Note that kBlockSizes
		// ends with a 0 terminator, so the largest real block size is at
		// index kNumBlockSizes - 1.
		if (size > kBlockSizes[kNumBlockSizes - 1])
			flags |= CACHE_ALIGN_ON_SIZE;
	}

	// allocate from the respective object cache, if any
	int index = size_to_index(size);
	if (index >= 0)
		return object_cache_alloc(sBlockCaches[index], flags);

	// the allocation is too large for our object caches -- ask the memory
	// manager
	void* block;
	if (MemoryManager::AllocateRaw(size, flags, block) != B_OK)
		return NULL;

	return block;
}


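// Early-boot counterpart of block_alloc(). It tries, in order: the matching
// object cache, if it has been created already; the MemoryManager, for
// anything larger than SLAB_CHUNK_SIZE_SMALL; and finally the bootstrap
// memory, from which allocations can never be freed.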
void*
block_alloc_early(size_t size)
{
	int index = size_to_index(size);
	if (index >= 0 && sBlockCaches[index] != NULL)
		return object_cache_alloc(sBlockCaches[index], CACHE_DURING_BOOT);

	if (size > SLAB_CHUNK_SIZE_SMALL) {
		// This is a sufficiently large allocation -- just ask the memory
		// manager directly.
		void* block;
		if (MemoryManager::AllocateRaw(size, 0, block) != B_OK)
			return NULL;

		return block;
	}

	// A small allocation, but no object cache yet. Use the bootstrap memory.
	// This allocation must never be freed!
	if (sBootStrapMemorySize - sUsedBootStrapMemory < size) {
		// We need more memory.
		void* block;
		if (MemoryManager::AllocateRaw(SLAB_CHUNK_SIZE_SMALL, 0, block) != B_OK)
			return NULL;
		sBootStrapMemory = (addr_t)block;
		sBootStrapMemorySize = SLAB_CHUNK_SIZE_SMALL;
		sUsedBootStrapMemory = 0;
	}

	size_t neededSize = ROUNDUP(size, sizeof(double));
	if (sUsedBootStrapMemory + neededSize > sBootStrapMemorySize)
		return NULL;
	void* block = (void*)(sBootStrapMemory + sUsedBootStrapMemory);
	sUsedBootStrapMemory += neededSize;

	return block;
}


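// Frees a block returned by block_alloc(). FreeRawOrReturnCache() frees a
// raw (large) allocation itself and returns NULL; for a cache-backed block
// it returns the owning cache instead, and we hand the block back to that
// cache.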
void
block_free(void* block, uint32 flags)
{
	if (block == NULL)
		return;

	ObjectCache* cache = MemoryManager::FreeRawOrReturnCache(block, flags);
	if (cache != NULL) {
		// a regular small allocation
		ASSERT(cache->object_size >= kBlockSizes[0]);
		ASSERT(cache->object_size <= kBlockSizes[kNumBlockSizes - 1]);
		ASSERT(cache == sBlockCaches[size_to_index(cache->object_size)]);
		object_cache_free(cache, block, flags);
	}
}


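// Creates one object cache per kBlockSizes entry. Power-of-two sizes get
// alignment == size, which is what allows block_alloc() to serve memalign()
// requests from these caches.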
void
block_allocator_init_boot()
{
	for (int index = 0; kBlockSizes[index] != 0; index++) {
		char name[32];
		snprintf(name, sizeof(name), "block allocator: %lu",
			kBlockSizes[index]);

		uint32 flags = CACHE_DURING_BOOT;
		size_t size = kBlockSizes[index];

		// align power-of-two sized objects to their size
		size_t alignment = (size & (size - 1)) == 0 ? size : 0;

		// For the larger allocation sizes, disable the object depot, so we
		// don't keep lots of unused objects around.
		if (size > 2048)
			flags |= CACHE_NO_DEPOT;

		sBlockCaches[index] = create_object_cache_etc(name, size, alignment, 0,
			0, 0, flags, NULL, NULL, NULL, NULL);
		if (sBlockCaches[index] == NULL)
			panic("allocator: failed to init block cache");
	}
}


void
block_allocator_init_rest()
{
#ifdef TEST_ALL_CACHES_DURING_BOOT
	for (int index = 0; kBlockSizes[index] != 0; index++) {
		block_free(block_alloc(kBlockSizes[index] - sizeof(boundary_tag), 0,
			0), 0);
	}
#endif
}


// #pragma mark - public API


#if USE_SLAB_ALLOCATOR_FOR_MALLOC


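// The kernel's malloc() family is layered on block_alloc()/block_free()
// when USE_SLAB_ALLOCATOR_FOR_MALLOC is set.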
void*
memalign(size_t alignment, size_t size)
{
	return block_alloc(size, alignment, 0);
}


void*
memalign_etc(size_t alignment, size_t size, uint32 flags)
{
	return block_alloc(size, alignment, flags & CACHE_ALLOC_FLAGS);
}


void
free_etc(void* address, uint32 flags)
{
	block_free(address, flags & CACHE_ALLOC_FLAGS);
}


void*
malloc(size_t size)
{
	return block_alloc(size, 0, 0);
}


void
free(void* address)
{
	block_free(address, 0);
}


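// Note: GetAllocationInfo() reports the size of the underlying allocation
// (for cache-backed blocks the cache's object_size), not the originally
// requested size, so only an exact size match short-circuits below.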
void*
realloc(void* address, size_t newSize)
{
	if (newSize == 0) {
		block_free(address, 0);
		return NULL;
	}

	if (address == NULL)
		return block_alloc(newSize, 0, 0);

	size_t oldSize;
	ObjectCache* cache = MemoryManager::GetAllocationInfo(address, oldSize);
	if (cache == NULL && oldSize == 0) {
		panic("realloc(): allocation %p not known", address);
		return NULL;
	}

	if (oldSize == newSize)
		return address;

	void* newBlock = block_alloc(newSize, 0, 0);
	if (newBlock == NULL)
		return NULL;

	memcpy(newBlock, address, std::min(oldSize, newSize));

	block_free(address, 0);

	return newBlock;
}


#endif	// USE_SLAB_ALLOCATOR_FOR_MALLOC