/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Distributed under the terms of the MIT License.
 */
#ifndef MEMORY_MANAGER_H
#define MEMORY_MANAGER_H


#include <KernelExport.h>

#include <condition_variable.h>
#include <kernel.h>
#include <lock.h>
#include <util/DoublyLinkedList.h>
#include <util/OpenHashTable.h>

#include "slab_debug.h"


class AbstractTraceEntryWithStackTrace;
struct kernel_args;
struct ObjectCache;
struct VMArea;


#define SLAB_CHUNK_SIZE_SMALL	B_PAGE_SIZE
#define SLAB_CHUNK_SIZE_MEDIUM	(16 * B_PAGE_SIZE)
#define SLAB_CHUNK_SIZE_LARGE	(128 * B_PAGE_SIZE)
#define SLAB_AREA_SIZE			(2048 * B_PAGE_SIZE)
	// TODO: These sizes have been chosen with 4 KB pages in mind.
#define SLAB_AREA_STRUCT_OFFSET	B_PAGE_SIZE
	// The offset from the start of the area to the Area structure. This space
	// is not mapped and will trip code writing beyond the previous area's
	// bounds.

#define SLAB_META_CHUNKS_PER_AREA	(SLAB_AREA_SIZE / SLAB_CHUNK_SIZE_LARGE)
#define SLAB_SMALL_CHUNKS_PER_META_CHUNK \
	(SLAB_CHUNK_SIZE_LARGE / SLAB_CHUNK_SIZE_SMALL)
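
// With the 4 KB pages the TODO above assumes, these constants work out to
// 4 KB small, 64 KB medium, and 512 KB large chunks and an 8 MB area, i.e.
// 16 meta chunks per area and 128 small chunks per meta chunk.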


class MemoryManager {
public:
	static	void				Init(kernel_args* args);
	static	void				InitPostArea();

	static	status_t			Allocate(ObjectCache* cache, uint32 flags,
									void*& _pages);
	static	void				Free(void* pages, uint32 flags);

	static	status_t			AllocateRaw(size_t size, uint32 flags,
									void*& _pages);
	static	ObjectCache*		FreeRawOrReturnCache(void* pages,
									uint32 flags);

	static	size_t				AcceptableChunkSize(size_t size);
	static	ObjectCache*		GetAllocationInfo(void* address,
									size_t& _size);
	static	ObjectCache*		CacheForAddress(void* address);

	static	bool				MaintenanceNeeded();
	static	void				PerformMaintenance();

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
	static	bool				AnalyzeAllocationCallers(
									AllocationTrackingCallback& callback);
#endif

	static	ObjectCache*		DebugObjectCacheForAddress(void* address);

private:
			struct Tracing;

			struct Area;

			struct Chunk {
				union {
					Chunk*		next;
					addr_t		reference;
				};
			};

			struct MetaChunk : DoublyLinkedListLinkImpl<MetaChunk> {
				size_t			chunkSize;
				addr_t			chunkBase;
				size_t			totalSize;
				uint16			chunkCount;
				uint16			usedChunkCount;
				uint16			firstFreeChunk;	// *some* free range
				uint16			lastFreeChunk;	// inclusive
				Chunk			chunks[SLAB_SMALL_CHUNKS_PER_META_CHUNK];
				Chunk*			freeChunks;

				Area*			GetArea() const;
			};

			friend class MetaChunk;
			typedef DoublyLinkedList<MetaChunk> MetaChunkList;

			struct Area : DoublyLinkedListLinkImpl<Area> {
				Area*			next;
				VMArea*			vmArea;
				size_t			reserved_memory_for_mapping;
				uint16			usedMetaChunkCount;
				bool			fullyMapped;
				MetaChunk		metaChunks[SLAB_META_CHUNKS_PER_AREA];

				addr_t BaseAddress() const
				{
					return (addr_t)this - SLAB_AREA_STRUCT_OFFSET;
				}
			};

			typedef DoublyLinkedList<Area> AreaList;

			struct AreaHashDefinition {
				typedef addr_t	KeyType;
				typedef Area	ValueType;

				size_t HashKey(addr_t key) const
				{
					return key / SLAB_AREA_SIZE;
				}

				size_t Hash(const Area* value) const
				{
					return HashKey(value->BaseAddress());
				}

				bool Compare(addr_t key, const Area* value) const
				{
					return key == value->BaseAddress();
				}

				Area*& GetLink(Area* value) const
				{
					return value->next;
				}
			};

			typedef BOpenHashTable<AreaHashDefinition> AreaTable;

			struct AllocationEntry {
				ConditionVariable	condition;
				thread_id			thread;
			};

private:
	static	status_t			_AllocateChunks(size_t chunkSize,
									uint32 chunkCount, uint32 flags,
									MetaChunk*& _metaChunk, Chunk*& _chunk);
	static	bool				_GetChunks(MetaChunkList* metaChunkList,
									size_t chunkSize, uint32 chunkCount,
									MetaChunk*& _metaChunk, Chunk*& _chunk);
	static	bool				_GetChunk(MetaChunkList* metaChunkList,
									size_t chunkSize, MetaChunk*& _metaChunk,
									Chunk*& _chunk);
	static	void				_FreeChunk(Area* area, MetaChunk* metaChunk,
									Chunk* chunk, addr_t chunkAddress,
									bool alreadyUnmapped, uint32 flags);

	static	void				_PrepareMetaChunk(MetaChunk* metaChunk,
									size_t chunkSize);

	static	void				_AddArea(Area* area);
	static	status_t			_AllocateArea(uint32 flags, Area*& _area);
	static	void				_FreeArea(Area* area, bool areaRemoved,
									uint32 flags);

	static	status_t			_MapChunk(VMArea* vmArea, addr_t address,
									size_t size, size_t reserveAdditionalMemory,
									uint32 flags);
	static	status_t			_UnmapChunk(VMArea* vmArea, addr_t address,
									size_t size, uint32 flags);

	static	void				_UnmapFreeChunksEarly(Area* area);
	static	void				_ConvertEarlyArea(Area* area);

	static	void				_RequestMaintenance();

	static	addr_t				_AreaBaseAddressForAddress(addr_t address);
	static	Area*				_AreaForAddress(addr_t address);
	static	uint32				_ChunkIndexForAddress(
									const MetaChunk* metaChunk, addr_t address);
	static	addr_t				_ChunkAddress(const MetaChunk* metaChunk,
									const Chunk* chunk);
	static	bool				_IsChunkFree(const MetaChunk* metaChunk,
									const Chunk* chunk);
	static	bool				_IsChunkInFreeList(const MetaChunk* metaChunk,
									const Chunk* chunk);
	static	void				_CheckMetaChunk(MetaChunk* metaChunk);

	static	int					_DumpRawAllocations(int argc, char** argv);
	static	void				_PrintMetaChunkTableHeader(bool printChunks);
	static	void				_DumpMetaChunk(MetaChunk* metaChunk,
									bool printChunks, bool printHeader);
	static	int					_DumpMetaChunk(int argc, char** argv);
	static	void				_DumpMetaChunks(const char* name,
									MetaChunkList& metaChunkList,
									bool printChunks);
	static	int					_DumpMetaChunks(int argc, char** argv);
	static	int					_DumpArea(int argc, char** argv);
	static	int					_DumpAreas(int argc, char** argv);

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
	static	void				_AddTrackingInfo(void* allocation, size_t size,
									AbstractTraceEntryWithStackTrace* entry);
	static	AllocationTrackingInfo* _TrackingInfoFor(void* allocation,
									size_t size);
#endif

private:
	static	const size_t		kAreaAdminSize
									= ROUNDUP(sizeof(Area), B_PAGE_SIZE);

	static	mutex				sLock;
	static	rw_lock				sAreaTableLock;
	static	kernel_args*		sKernelArgs;
	static	AreaTable			sAreaTable;
	static	Area*				sFreeAreas;
	static	int					sFreeAreaCount;
	static	MetaChunkList		sFreeCompleteMetaChunks;
	static	MetaChunkList		sFreeShortMetaChunks;
	static	MetaChunkList		sPartialMetaChunksSmall;
	static	MetaChunkList		sPartialMetaChunksMedium;
	static	AllocationEntry*	sAllocationEntryCanWait;
	static	AllocationEntry*	sAllocationEntryDontWait;
	static	bool				sMaintenanceNeeded;
};


/*static*/ inline bool
MemoryManager::MaintenanceNeeded()
{
	return sMaintenanceNeeded;
}


/*static*/ inline addr_t
MemoryManager::_AreaBaseAddressForAddress(addr_t address)
{
	return ROUNDDOWN((addr_t)address, SLAB_AREA_SIZE);
}
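

// The Area structure sits SLAB_AREA_STRUCT_OFFSET bytes past its area's
// SLAB_AREA_SIZE aligned base address (cf. Area::BaseAddress()), so any
// address within an area maps back to its Area by rounding down to the area
// boundary and adding the offset again.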
/*static*/ inline MemoryManager::Area*
MemoryManager::_AreaForAddress(addr_t address)
{
	return (Area*)(_AreaBaseAddressForAddress(address)
		+ SLAB_AREA_STRUCT_OFFSET);
}


/*static*/ inline uint32
MemoryManager::_ChunkIndexForAddress(const MetaChunk* metaChunk, addr_t address)
{
	return (address - metaChunk->chunkBase) / metaChunk->chunkSize;
}


/*static*/ inline addr_t
MemoryManager::_ChunkAddress(const MetaChunk* metaChunk, const Chunk* chunk)
{
	return metaChunk->chunkBase
		+ (chunk - metaChunk->chunks) * metaChunk->chunkSize;
}


/*static*/ inline bool
MemoryManager::_IsChunkFree(const MetaChunk* metaChunk, const Chunk* chunk)
{
	return chunk->next == NULL
		|| (chunk->next >= metaChunk->chunks
			&& chunk->next < metaChunk->chunks + metaChunk->chunkCount);
}


inline MemoryManager::Area*
MemoryManager::MetaChunk::GetArea() const
{
	return _AreaForAddress((addr_t)this);
}


#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING

/*static*/ inline AllocationTrackingInfo*
MemoryManager::_TrackingInfoFor(void* allocation, size_t size)
{
	return (AllocationTrackingInfo*)((uint8*)allocation + size
		- sizeof(AllocationTrackingInfo));
}

#endif	// SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING


#endif	// MEMORY_MANAGER_H