/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Distributed under the terms of the MIT License.
 */
#ifndef MEMORY_MANAGER_H
#define MEMORY_MANAGER_H


#include <KernelExport.h>

#include <condition_variable.h>
#include <kernel.h>
#include <lock.h>
#include <util/DoublyLinkedList.h>
#include <util/OpenHashTable.h>

#include "slab_debug.h"
#include "slab_private.h"


class AbstractTraceEntryWithStackTrace;
struct kernel_args;
struct ObjectCache;
struct VMArea;


#define SLAB_CHUNK_SIZE_SMALL	B_PAGE_SIZE
#define SLAB_CHUNK_SIZE_MEDIUM	(16 * B_PAGE_SIZE)
#define SLAB_CHUNK_SIZE_LARGE	(128 * B_PAGE_SIZE)
#define SLAB_AREA_SIZE			(2048 * B_PAGE_SIZE)
	// TODO: These sizes have been chosen with 4 KB pages in mind.
#define SLAB_AREA_STRUCT_OFFSET	B_PAGE_SIZE
	// The offset from the start of the area to the Area structure. This space
	// is left unmapped, so code writing beyond the previous area's bounds
	// faults instead of corrupting the Area structure.

#define SLAB_META_CHUNKS_PER_AREA	(SLAB_AREA_SIZE / SLAB_CHUNK_SIZE_LARGE)
#define SLAB_SMALL_CHUNKS_PER_META_CHUNK	\
	(SLAB_CHUNK_SIZE_LARGE / SLAB_CHUNK_SIZE_SMALL)
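	// With 4 KB pages these evaluate to 4 KB small chunks, 64 KB medium
	// chunks, 512 KB large chunks, and 8 MB areas, i.e. 16 meta chunks per
	// area and 128 small chunks per meta chunk.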


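// MemoryManager is the slab allocator's backend: it carves large, aligned
// virtual memory areas into chunks that back the slabs of ObjectCaches
// (Allocate()/Free()) as well as large raw allocations (AllocateRaw()/
// FreeRawOrReturnCache()).
//
// A minimal, illustrative sketch of the contract as seen from the
// ObjectCache side (the flag name comes from slab_private.h):
//
//	void* pages;
//	status_t error = MemoryManager::Allocate(cache,
//		CACHE_DONT_WAIT_FOR_MEMORY, pages);
//	if (error == B_OK) {
//		// ... use [pages, pages + cache->slab_size) for a new slab ...
//		MemoryManager::Free(pages, 0);
//	}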
class MemoryManager {
public:
	static	void				Init(kernel_args* args);
	static	void				InitPostArea();

	static	status_t			Allocate(ObjectCache* cache, uint32 flags,
									void*& _pages);
	static	void				Free(void* pages, uint32 flags);

	static	status_t			AllocateRaw(size_t size, uint32 flags,
									void*& _pages);
	static	ObjectCache*		FreeRawOrReturnCache(void* pages,
									uint32 flags);

	static	size_t				AcceptableChunkSize(size_t size);
	static	ObjectCache*		GetAllocationInfo(void* address,
									size_t& _size);
	static	ObjectCache*		CacheForAddress(void* address);

	static	bool				MaintenanceNeeded();
	static	void				PerformMaintenance();

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
	static	bool				AnalyzeAllocationCallers(
									AllocationTrackingCallback& callback);
#endif

	static	ObjectCache*		DebugObjectCacheForAddress(void* address);

private:
			struct Tracing;

			struct Area;

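			// Either a link in a meta chunk's free list ("next", possibly
			// NULL) or, while the chunk is allocated, a tag identifying the
			// allocation ("reference"). Only one of the two is meaningful at
			// a time, hence the union; _IsChunkFree() below relies on
			// free-list pointers staying within the meta chunk's chunk
			// array.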
			struct Chunk {
				union {
					Chunk*		next;
					addr_t		reference;
				};
			};

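			// A MetaChunk manages one SLAB_CHUNK_SIZE_LARGE sized slice of
			// an Area, divided into chunkCount equal chunks of chunkSize
			// bytes starting at chunkBase. totalSize can be smaller than the
			// full slice, e.g. where the area's administrative space cuts
			// into it.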
			struct MetaChunk : DoublyLinkedListLinkImpl<MetaChunk> {
				size_t			chunkSize;
				addr_t			chunkBase;
				size_t			totalSize;
				uint16			chunkCount;
				uint16			usedChunkCount;
				uint16			firstFreeChunk;	// *some* free range
				uint16			lastFreeChunk;	// inclusive
				Chunk			chunks[SLAB_SMALL_CHUNKS_PER_META_CHUNK];
				Chunk*			freeChunks;

				Area*			GetArea() const;
			};

			friend struct MetaChunk;
			typedef DoublyLinkedList<MetaChunk> MetaChunkList;

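			// The per-area bookkeeping structure. It lives inside the area
			// itself, SLAB_AREA_STRUCT_OFFSET bytes from its base (see
			// BaseAddress()). "next" doubles as the link for both the area
			// hash table (AreaHashDefinition::GetLink()) and the free-area
			// stack (_PushFreeArea()/_PopFreeArea()).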
			struct Area : DoublyLinkedListLinkImpl<Area> {
				Area*			next;
				VMArea*			vmArea;
				size_t			reserved_memory_for_mapping;
				uint16			usedMetaChunkCount;
				bool			fullyMapped;
				MetaChunk		metaChunks[SLAB_META_CHUNKS_PER_AREA];

				addr_t BaseAddress() const
				{
					return (addr_t)this - SLAB_AREA_STRUCT_OFFSET;
				}
			};

			typedef DoublyLinkedList<Area> AreaList;

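			// Since every area is SLAB_AREA_SIZE aligned (see
			// _AreaBaseAddressForAddress()), dividing its base address by
			// SLAB_AREA_SIZE yields a hash value that is unique per area.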
			struct AreaHashDefinition {
				typedef addr_t		KeyType;
				typedef Area		ValueType;

				size_t HashKey(addr_t key) const
				{
					return key / SLAB_AREA_SIZE;
				}

				size_t Hash(const Area* value) const
				{
					return HashKey(value->BaseAddress());
				}

				bool Compare(addr_t key, const Area* value) const
				{
					return key == value->BaseAddress();
				}

				Area*& GetLink(Area* value) const
				{
					return value->next;
				}
			};

			typedef BOpenHashTable<AreaHashDefinition> AreaTable;

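			// Identifies a thread currently performing an allocation that
			// others may have to wait for; the condition variable lets them
			// block until it completes (cf. sAllocationEntryCanWait and
			// sAllocationEntryDontWait below).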
			struct AllocationEntry {
				ConditionVariable	condition;
				thread_id			thread;
			};

private:
	static	status_t			_AllocateChunks(size_t chunkSize,
									uint32 chunkCount, uint32 flags,
									MetaChunk*& _metaChunk, Chunk*& _chunk);
	static	bool				_GetChunks(MetaChunkList* metaChunkList,
									size_t chunkSize, uint32 chunkCount,
									MetaChunk*& _metaChunk, Chunk*& _chunk);
	static	bool				_GetChunk(MetaChunkList* metaChunkList,
									size_t chunkSize, MetaChunk*& _metaChunk,
									Chunk*& _chunk);
	static	void				_FreeChunk(Area* area, MetaChunk* metaChunk,
									Chunk* chunk, addr_t chunkAddress,
									bool alreadyUnmapped, uint32 flags);

	static	void				_PrepareMetaChunk(MetaChunk* metaChunk,
									size_t chunkSize);

	static	void				_PushFreeArea(Area* area);
	static	Area*				_PopFreeArea();

	static	void				_AddArea(Area* area);
	static	status_t			_AllocateArea(uint32 flags, Area*& _area);
	static	void				_FreeArea(Area* area, bool areaRemoved,
									uint32 flags);

	static	status_t			_MapChunk(VMArea* vmArea, addr_t address,
									size_t size, size_t reserveAdditionalMemory,
									uint32 flags);
	static	status_t			_UnmapChunk(VMArea* vmArea, addr_t address,
									size_t size, uint32 flags);

	static	void				_UnmapFreeChunksEarly(Area* area);
	static	void				_ConvertEarlyArea(Area* area);

	static	void				_RequestMaintenance();

	static	addr_t				_AreaBaseAddressForAddress(addr_t address);
	static	Area*				_AreaForAddress(addr_t address);
	static	uint32				_ChunkIndexForAddress(
									const MetaChunk* metaChunk, addr_t address);
	static	addr_t				_ChunkAddress(const MetaChunk* metaChunk,
									const Chunk* chunk);
	static	bool				_IsChunkFree(const MetaChunk* metaChunk,
									const Chunk* chunk);
	static	bool				_IsChunkInFreeList(const MetaChunk* metaChunk,
									const Chunk* chunk);
	static	void				_CheckMetaChunk(MetaChunk* metaChunk);

	static	int					_DumpRawAllocations(int argc, char** argv);
	static	void				_PrintMetaChunkTableHeader(bool printChunks);
	static	void				_DumpMetaChunk(MetaChunk* metaChunk,
									bool printChunks, bool printHeader);
	static	int					_DumpMetaChunk(int argc, char** argv);
	static	void				_DumpMetaChunks(const char* name,
									MetaChunkList& metaChunkList,
									bool printChunks);
	static	int					_DumpMetaChunks(int argc, char** argv);
	static	int					_DumpArea(int argc, char** argv);
	static	int					_DumpAreas(int argc, char** argv);

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
	static	void				_AddTrackingInfo(void* allocation, size_t size,
									AbstractTraceEntryWithStackTrace* entry);
	static	AllocationTrackingInfo* _TrackingInfoFor(void* allocation,
									size_t size);
#endif

private:
	static	const size_t		kAreaAdminSize
									= ROUNDUP(sizeof(Area), B_PAGE_SIZE);

	static	mutex				sLock;
	static	rw_lock				sAreaTableLock;
	static	kernel_args*		sKernelArgs;
	static	AreaTable			sAreaTable;
	static	Area*				sFreeAreas;
	static	int					sFreeAreaCount;
	static	MetaChunkList		sFreeCompleteMetaChunks;
	static	MetaChunkList		sFreeShortMetaChunks;
	static	MetaChunkList		sPartialMetaChunksSmall;
	static	MetaChunkList		sPartialMetaChunksMedium;
	static	AllocationEntry*	sAllocationEntryCanWait;
	static	AllocationEntry*	sAllocationEntryDontWait;
	static	bool				sMaintenanceNeeded;
};


/*static*/ inline bool
MemoryManager::MaintenanceNeeded()
{
	return sMaintenanceNeeded;
}


/*static*/ inline void
MemoryManager::_PushFreeArea(Area* area)
{
	_push(sFreeAreas, area);
	sFreeAreaCount++;
}


/*static*/ inline MemoryManager::Area*
MemoryManager::_PopFreeArea()
{
	if (sFreeAreaCount == 0)
		return NULL;

	sFreeAreaCount--;
	return _pop(sFreeAreas);
}

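// Areas are aligned to their size, so rounding an address down yields the
// containing area's base; the Area structure itself sits
// SLAB_AREA_STRUCT_OFFSET bytes in, after the unmapped guard space. E.g. with
// 4 KB pages, _AreaForAddress(address) is ROUNDDOWN(address, 8 MB) + 4 KB.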
/*static*/ inline addr_t
MemoryManager::_AreaBaseAddressForAddress(addr_t address)
{
	return ROUNDDOWN(address, SLAB_AREA_SIZE);
}


/*static*/ inline MemoryManager::Area*
MemoryManager::_AreaForAddress(addr_t address)
{
	return (Area*)(_AreaBaseAddressForAddress(address)
		+ SLAB_AREA_STRUCT_OFFSET);
}


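// _ChunkIndexForAddress() and _ChunkAddress() translate between a chunk's
// slot in the meta chunk's chunks[] array and the address of the memory it
// covers; the two are inverses of each other.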
/*static*/ inline uint32
MemoryManager::_ChunkIndexForAddress(const MetaChunk* metaChunk, addr_t address)
{
	return (address - metaChunk->chunkBase) / metaChunk->chunkSize;
}


/*static*/ inline addr_t
MemoryManager::_ChunkAddress(const MetaChunk* metaChunk, const Chunk* chunk)
{
	return metaChunk->chunkBase
		+ (chunk - metaChunk->chunks) * metaChunk->chunkSize;
}


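// A free chunk's "next" is either NULL (last in the free list) or points to
// another Chunk within the same meta chunk; an allocated chunk's "reference"
// points elsewhere, which is what this test exploits.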
/*static*/ inline bool
MemoryManager::_IsChunkFree(const MetaChunk* metaChunk, const Chunk* chunk)
{
	return chunk->next == NULL
		|| (chunk->next >= metaChunk->chunks
			&& chunk->next < metaChunk->chunks + metaChunk->chunkCount);
}


inline MemoryManager::Area*
MemoryManager::MetaChunk::GetArea() const
{
	return _AreaForAddress((addr_t)this);
}


#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING

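// The tracking info is stored at the very end of an allocation, which is why
// its address is computed from the allocation's size.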
/*static*/ inline AllocationTrackingInfo*
MemoryManager::_TrackingInfoFor(void* allocation, size_t size)
{
	return (AllocationTrackingInfo*)((uint8*)allocation + size
		- sizeof(AllocationTrackingInfo));
}

#endif	// SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING


#endif	// MEMORY_MANAGER_H