// xref: /haiku/src/system/kernel/slab/MemoryManager.cpp (revision 38c7ed7c476e4a985fee9129c8150a85237d362e)
/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Distributed under the terms of the MIT License.
 */


#include "MemoryManager.h"

#include <algorithm>

#include <debug.h>
#include <kernel.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>
#include <vm/VMTranslationMap.h>

#include "ObjectCache.h"
#include "slab_private.h"


//#define TRACE_MEMORY_MANAGER
#ifdef TRACE_MEMORY_MANAGER
#	define TRACE(x...)	dprintf(x)
#else
#	define TRACE(x...)	do {} while (false)
#endif


static const char* const kSlabAreaName = "slab area";

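// Initial storage for the area hash table: Init() hands this buffer to
// sAreaTable, so the table needs no dynamic allocation during early boot.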
static void* sAreaTableBuffer[1024];

mutex MemoryManager::sLock;
rw_lock MemoryManager::sAreaTableLock;
kernel_args* MemoryManager::sKernelArgs;
MemoryManager::AreaTable MemoryManager::sAreaTable;
MemoryManager::Area* MemoryManager::sFreeAreas;
int MemoryManager::sFreeAreaCount;
MemoryManager::MetaChunkList MemoryManager::sFreeCompleteMetaChunks;
MemoryManager::MetaChunkList MemoryManager::sFreeShortMetaChunks;
MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksSmall;
MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksMedium;
MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryCanWait;
MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryDontWait;
bool MemoryManager::sMaintenanceNeeded;



// #pragma mark - kernel tracing


#if SLAB_MEMORY_MANAGER_TRACING


//namespace SlabMemoryManagerCacheTracing {
struct MemoryManager::Tracing {
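// The trace entry classes are members of a struct rather than of the
// namespace commented out above -- presumably so that they can use
// MemoryManager's private types (Area, MetaChunk, Chunk) directly.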

class MemoryManagerTraceEntry : public AbstractTraceEntry {
public:
	MemoryManagerTraceEntry()
	{
	}
};


class Allocate : public MemoryManagerTraceEntry {
public:
	Allocate(ObjectCache* cache, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fCache(cache),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc: cache: %p, flags: %#" B_PRIx32,
			fCache, fFlags);
	}

private:
	ObjectCache*	fCache;
	uint32			fFlags;
};


class Free : public MemoryManagerTraceEntry {
public:
	Free(void* address, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free: address: %p, flags: %#" B_PRIx32,
			fAddress, fFlags);
	}

private:
	void*	fAddress;
	uint32	fFlags;
};


class AllocateRaw : public MemoryManagerTraceEntry {
public:
	AllocateRaw(size_t size, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fSize(size),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc raw: size: %" B_PRIuSIZE
			", flags: %#" B_PRIx32, fSize, fFlags);
	}

private:
	size_t	fSize;
	uint32	fFlags;
};


class FreeRawOrReturnCache : public MemoryManagerTraceEntry {
public:
	FreeRawOrReturnCache(void* address, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free raw/return: address: %p, flags: %#"
			B_PRIx32, fAddress, fFlags);
	}

private:
	void*	fAddress;
	uint32	fFlags;
};


class AllocateArea : public MemoryManagerTraceEntry {
public:
	AllocateArea(Area* area, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fArea(area),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc area: flags: %#" B_PRIx32
			" -> %p", fFlags, fArea);
	}

private:
	Area*	fArea;
	uint32	fFlags;
};


class AddArea : public MemoryManagerTraceEntry {
public:
	AddArea(Area* area)
		:
		MemoryManagerTraceEntry(),
		fArea(area)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager add area: %p", fArea);
	}

private:
	Area*	fArea;
};


class FreeArea : public MemoryManagerTraceEntry {
public:
	FreeArea(Area* area, bool areaRemoved, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fArea(area),
		fFlags(flags),
		fRemoved(areaRemoved)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free area: %p%s, flags: %#" B_PRIx32,
			fArea, fRemoved ? " (removed)" : "", fFlags);
	}

private:
	Area*	fArea;
	uint32	fFlags;
	bool	fRemoved;
};


class AllocateMetaChunk : public MemoryManagerTraceEntry {
public:
	AllocateMetaChunk(MetaChunk* metaChunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc meta chunk: %#" B_PRIxADDR,
			fMetaChunk);
	}

private:
	addr_t	fMetaChunk;
};


class FreeMetaChunk : public MemoryManagerTraceEntry {
public:
	FreeMetaChunk(MetaChunk* metaChunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free meta chunk: %#" B_PRIxADDR,
			fMetaChunk);
	}

private:
	addr_t	fMetaChunk;
};


class AllocateChunk : public MemoryManagerTraceEntry {
public:
	AllocateChunk(size_t chunkSize, MetaChunk* metaChunk, Chunk* chunk)
		:
		MemoryManagerTraceEntry(),
		fChunkSize(chunkSize),
		fMetaChunk(metaChunk->chunkBase),
		fChunk(chunk - metaChunk->chunks)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc chunk: size: %" B_PRIuSIZE
			" -> meta chunk: %#" B_PRIxADDR ", chunk: %" B_PRIu32, fChunkSize,
			fMetaChunk, fChunk);
	}

private:
	size_t	fChunkSize;
	addr_t	fMetaChunk;
	uint32	fChunk;
};


class AllocateChunks : public MemoryManagerTraceEntry {
public:
	AllocateChunks(size_t chunkSize, uint32 chunkCount, MetaChunk* metaChunk,
		Chunk* chunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase),
		fChunkSize(chunkSize),
		fChunkCount(chunkCount),
		fChunk(chunk - metaChunk->chunks)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc chunks: size: %" B_PRIuSIZE
			", count: %" B_PRIu32 " -> meta chunk: %#" B_PRIxADDR ", chunk: %"
			B_PRIu32, fChunkSize, fChunkCount, fMetaChunk, fChunk);
	}

private:
	addr_t	fMetaChunk;
	size_t	fChunkSize;
	uint32	fChunkCount;
	uint32	fChunk;
};


class FreeChunk : public MemoryManagerTraceEntry {
public:
	FreeChunk(MetaChunk* metaChunk, Chunk* chunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase),
		fChunk(chunk - metaChunk->chunks)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free chunk: meta chunk: %#" B_PRIxADDR
			", chunk: %" B_PRIu32, fMetaChunk, fChunk);
	}

private:
	addr_t	fMetaChunk;
	uint32	fChunk;
};


class Map : public MemoryManagerTraceEntry {
public:
	Map(addr_t address, size_t size, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fSize(size),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager map: %#" B_PRIxADDR ", size: %"
			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
	}

private:
	addr_t	fAddress;
	size_t	fSize;
	uint32	fFlags;
};


class Unmap : public MemoryManagerTraceEntry {
public:
	Unmap(addr_t address, size_t size, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fSize(size),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager unmap: %#" B_PRIxADDR ", size: %"
			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
	}

private:
	addr_t	fAddress;
	size_t	fSize;
	uint32	fFlags;
};


//}	// namespace SlabMemoryManagerCacheTracing
};	// struct MemoryManager::Tracing


//#	define T(x)	new(std::nothrow) SlabMemoryManagerCacheTracing::x
#	define T(x)	new(std::nothrow) MemoryManager::Tracing::x

#else
#	define T(x)
#endif	// SLAB_MEMORY_MANAGER_TRACING
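// T(x) records a trace entry when SLAB_MEMORY_MANAGER_TRACING is enabled and
// expands to nothing otherwise. Entries are allocated with std::nothrow, so
// on allocation failure the entry is simply not recorded.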


// #pragma mark - MemoryManager


/*static*/ void
MemoryManager::Init(kernel_args* args)
{
	mutex_init(&sLock, "slab memory manager");
	rw_lock_init(&sAreaTableLock, "slab memory manager area table");
	sKernelArgs = args;

	new(&sFreeCompleteMetaChunks) MetaChunkList;
	new(&sFreeShortMetaChunks) MetaChunkList;
	new(&sPartialMetaChunksSmall) MetaChunkList;
	new(&sPartialMetaChunksMedium) MetaChunkList;

	new(&sAreaTable) AreaTable;
	sAreaTable.Resize(sAreaTableBuffer, sizeof(sAreaTableBuffer), true);
		// A bit hacky: The table now owns the memory. Since we never resize or
		// free it, that's not a problem, though.

	sFreeAreas = NULL;
	sFreeAreaCount = 0;
	sMaintenanceNeeded = false;
}


/*static*/ void
MemoryManager::InitPostArea()
{
	sKernelArgs = NULL;

	// Convert all areas to actual areas. This loop might look a bit weird,
	// but it is necessary, since creating the actual area involves memory
	// allocations, which in turn can change the situation.
	bool done;
	do {
		done = true;

		for (AreaTable::Iterator it = sAreaTable.GetIterator();
				Area* area = it.Next();) {
			if (area->vmArea == NULL) {
				_ConvertEarlyArea(area);
				done = false;
				break;
			}
		}
	} while (!done);

	// unmap and free unused pages
	if (sFreeAreas != NULL) {
		// Just "leak" all but the first of the free areas -- the VM will
		// automatically free all unclaimed memory.
		sFreeAreas->next = NULL;
		sFreeAreaCount = 1;

		Area* area = sFreeAreas;
		_ConvertEarlyArea(area);
		_UnmapFreeChunksEarly(area);
	}

	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		_UnmapFreeChunksEarly(area);
	}

	sMaintenanceNeeded = true;
		// might not be necessary, but it doesn't hurt

	add_debugger_command_etc("slab_area", &_DumpArea,
		"Dump information on a given slab area",
		"[ -c ] <area>\n"
		"Dump information on a given slab area specified by its base "
			"address.\n"
		"If \"-c\" is given, the chunks of all meta chunks are printed as "
			"well.\n", 0);
	add_debugger_command_etc("slab_areas", &_DumpAreas,
		"List all slab areas",
		"\n"
		"Lists all slab areas.\n", 0);
	add_debugger_command_etc("slab_meta_chunk", &_DumpMetaChunk,
		"Dump information on a given slab meta chunk",
		"<meta chunk>\n"
		"Dump information on a given slab meta chunk specified by its base "
			"or object address.\n", 0);
	add_debugger_command_etc("slab_meta_chunks", &_DumpMetaChunks,
		"List all non-full slab meta chunks",
		"[ -c ]\n"
		"Lists all non-full slab meta chunks.\n"
		"If \"-c\" is given, the chunks of all meta chunks are printed as "
			"well.\n", 0);
	add_debugger_command_etc("slab_raw_allocations", &_DumpRawAllocations,
		"List all raw allocations in slab areas",
		"\n"
		"Lists all raw allocations in slab areas.\n", 0);
}


/*static*/ status_t
MemoryManager::Allocate(ObjectCache* cache, uint32 flags, void*& _pages)
{
	// TODO: Support CACHE_UNLOCKED_PAGES!

	T(Allocate(cache, flags));

	size_t chunkSize = cache->slab_size;

	TRACE("MemoryManager::Allocate(%p, %#" B_PRIx32 "): chunkSize: %"
		B_PRIuSIZE "\n", cache, flags, chunkSize);

	MutexLocker locker(sLock);

	// allocate a chunk
	MetaChunk* metaChunk;
	Chunk* chunk;
	status_t error = _AllocateChunks(chunkSize, 1, flags, metaChunk, chunk);
	if (error != B_OK)
		return error;

	// map the chunk
	Area* area = metaChunk->GetArea();
	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);

	locker.Unlock();
	error = _MapChunk(area->vmArea, chunkAddress, chunkSize, 0, flags);
	locker.Lock();
	if (error != B_OK) {
		// something failed -- free the chunk
		_FreeChunk(area, metaChunk, chunk, chunkAddress, true, flags);
		return error;
	}

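	// A chunk's "reference" field encodes its owner: chunks allocated for an
	// object cache store the ObjectCache pointer, whose low bit is 0 due to
	// pointer alignment, while raw allocations store the address of their
	// last byte, which always has the low bit set (see AllocateRaw()). A
	// value of 1 marks a chunk as allocated but not yet assigned.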
	chunk->reference = (addr_t)cache;
	_pages = (void*)chunkAddress;

	TRACE("MemoryManager::Allocate() done: %p (meta chunk: %d, chunk %d)\n",
		_pages, int(metaChunk - area->metaChunks),
		int(chunk - metaChunk->chunks));
	return B_OK;
}


/*static*/ void
MemoryManager::Free(void* pages, uint32 flags)
{
	TRACE("MemoryManager::Free(%p, %#" B_PRIx32 ")\n", pages, flags);

	T(Free(pages, flags));

	// get the area and the meta chunk
	Area* area = (Area*)ROUNDDOWN((addr_t)pages, SLAB_AREA_SIZE);
	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	ASSERT((addr_t)pages >= metaChunk->chunkBase);
	ASSERT(((addr_t)pages % metaChunk->chunkSize) == 0);

	// get the chunk
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
	Chunk* chunk = &metaChunk->chunks[chunkIndex];

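	// chunk->next and chunk->reference share storage: a free chunk links to
	// the next free chunk (possibly NULL, otherwise always within this meta
	// chunk's chunk array), while an allocated chunk holds a cache pointer or
	// an end address. The ASSERTs below thus catch frees of unallocated
	// chunks.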
	ASSERT(chunk->next != NULL);
	ASSERT(chunk->next < metaChunk->chunks
		|| chunk->next
			>= metaChunk->chunks + SLAB_SMALL_CHUNKS_PER_META_CHUNK);

	// and free it
	MutexLocker locker(sLock);
	_FreeChunk(area, metaChunk, chunk, (addr_t)pages, false, flags);
}


/*static*/ status_t
MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
{
	T(AllocateRaw(size, flags));

	size = ROUNDUP(size, SLAB_CHUNK_SIZE_SMALL);

	TRACE("MemoryManager::AllocateRaw(%" B_PRIuSIZE ", %#" B_PRIx32 ")\n", size,
		  flags);

	if (size > SLAB_CHUNK_SIZE_LARGE || (flags & CACHE_ALIGN_ON_SIZE) != 0) {
		// Requested size greater than a large chunk or an aligned allocation.
		// Allocate as an area.
		if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0)
			return B_WOULD_BLOCK;

		area_id area = create_area_etc(VMAddressSpace::KernelID(),
			"slab large raw allocation", &_pages,
			(flags & CACHE_ALIGN_ON_SIZE) != 0
				? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS,
			size, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
			((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
					? CREATE_AREA_DONT_WAIT : 0)
				| CREATE_AREA_DONT_CLEAR);
		return area >= 0 ? B_OK : area;
	}

	// determine chunk size (small or medium)
	size_t chunkSize = SLAB_CHUNK_SIZE_SMALL;
	uint32 chunkCount = size / SLAB_CHUNK_SIZE_SMALL;

	if (size % SLAB_CHUNK_SIZE_MEDIUM == 0) {
		chunkSize = SLAB_CHUNK_SIZE_MEDIUM;
		chunkCount = size / SLAB_CHUNK_SIZE_MEDIUM;
	}

	MutexLocker locker(sLock);

	// allocate the chunks
	MetaChunk* metaChunk;
	Chunk* chunk;
	status_t error = _AllocateChunks(chunkSize, chunkCount, flags, metaChunk,
		chunk);
	if (error != B_OK)
		return error;

	// map the chunks
	Area* area = metaChunk->GetArea();
	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);

	locker.Unlock();
	error = _MapChunk(area->vmArea, chunkAddress, size, 0, flags);
	locker.Lock();
	if (error != B_OK) {
		// something failed -- free the chunks
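		// Note: _FreeChunk() uses the passed address only when it still has
		// to unmap the chunk; with alreadyUnmapped == true, reusing the first
		// chunk's address for every chunk is harmless.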
		for (uint32 i = 0; i < chunkCount; i++)
			_FreeChunk(area, metaChunk, chunk + i, chunkAddress, true, flags);
		return error;
	}

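	// chunkAddress is SLAB_CHUNK_SIZE_SMALL-aligned and size is a non-zero
	// multiple of SLAB_CHUNK_SIZE_SMALL, so this end address is odd. That is
	// what distinguishes a raw allocation's reference from an ObjectCache
	// pointer (low bit 0) in FreeRawOrReturnCache().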
	chunk->reference = (addr_t)chunkAddress + size - 1;
	_pages = (void*)chunkAddress;

	TRACE("MemoryManager::AllocateRaw() done: %p (meta chunk: %d, chunk %d)\n",
		_pages, int(metaChunk - area->metaChunks),
		int(chunk - metaChunk->chunks));
	return B_OK;
}


/*static*/ ObjectCache*
MemoryManager::FreeRawOrReturnCache(void* pages, uint32 flags)
{
	TRACE("MemoryManager::FreeRawOrReturnCache(%p, %#" B_PRIx32 ")\n", pages,
		flags);

	T(FreeRawOrReturnCache(pages, flags));

	// get the area
	addr_t areaBase = ROUNDDOWN((addr_t)pages, SLAB_AREA_SIZE);

	ReadLocker readLocker(sAreaTableLock);
	Area* area = sAreaTable.Lookup(areaBase);
	readLocker.Unlock();

	if (area == NULL) {
		// Probably a large allocation. Look up the VM area.
		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
		addressSpace->ReadLock();
		VMArea* area = addressSpace->LookupArea((addr_t)pages);
		addressSpace->ReadUnlock();

		if (area != NULL && (addr_t)pages == area->Base())
			delete_area(area->id);
		else
			panic("freeing unknown block %p from area %p", pages, area);

		return NULL;
	}

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	ASSERT((addr_t)pages >= metaChunk->chunkBase);
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
	Chunk* chunk = &metaChunk->chunks[chunkIndex];

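	// An even reference is an ObjectCache pointer: the chunk belongs to that
	// cache, which is returned to the caller for regular freeing.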
	addr_t reference = chunk->reference;
	if ((reference & 1) == 0)
		return (ObjectCache*)reference;

	// Seems we have a raw chunk allocation.
	ASSERT((addr_t)pages == _ChunkAddress(metaChunk, chunk));
	ASSERT(reference > (addr_t)pages);
	ASSERT(reference <= areaBase + SLAB_AREA_SIZE - 1);
	size_t size = reference - (addr_t)pages + 1;
	ASSERT((size % SLAB_CHUNK_SIZE_SMALL) == 0);

	// unmap the chunks
	_UnmapChunk(area->vmArea, (addr_t)pages, size, flags);

	// and free them
	MutexLocker locker(sLock);
	uint32 chunkCount = size / metaChunk->chunkSize;
	for (uint32 i = 0; i < chunkCount; i++)
		_FreeChunk(area, metaChunk, chunk + i, (addr_t)pages, true, flags);

	return NULL;
}


/*static*/ size_t
MemoryManager::AcceptableChunkSize(size_t size)
{
	if (size <= SLAB_CHUNK_SIZE_SMALL)
		return SLAB_CHUNK_SIZE_SMALL;
	if (size <= SLAB_CHUNK_SIZE_MEDIUM)
		return SLAB_CHUNK_SIZE_MEDIUM;
	return SLAB_CHUNK_SIZE_LARGE;
}


/*static*/ ObjectCache*
MemoryManager::GetAllocationInfo(void* address, size_t& _size)
{
	// get the area
	addr_t areaBase = ROUNDDOWN((addr_t)address, SLAB_AREA_SIZE);

	ReadLocker readLocker(sAreaTableLock);
	Area* area = sAreaTable.Lookup(areaBase);
	readLocker.Unlock();

	if (area == NULL) {
		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
		addressSpace->ReadLock();
		VMArea* area = addressSpace->LookupArea((addr_t)address);
		if (area != NULL && (addr_t)address == area->Base())
			_size = area->Size();
		else
			_size = 0;
		addressSpace->ReadUnlock();

		return NULL;
	}

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	ASSERT((addr_t)address >= metaChunk->chunkBase);
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);

	addr_t reference = metaChunk->chunks[chunkIndex].reference;
	if ((reference & 1) == 0) {
		ObjectCache* cache = (ObjectCache*)reference;
		_size = cache->object_size;
		return cache;
	}

	_size = reference - (addr_t)address + 1;
	return NULL;
}


/*static*/ ObjectCache*
MemoryManager::CacheForAddress(void* address)
{
	// get the area
	addr_t areaBase = ROUNDDOWN((addr_t)address, SLAB_AREA_SIZE);

	ReadLocker readLocker(sAreaTableLock);
	Area* area = sAreaTable.Lookup(areaBase);
	readLocker.Unlock();

	if (area == NULL)
		return NULL;

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	ASSERT((addr_t)address >= metaChunk->chunkBase);
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);

	addr_t reference = metaChunk->chunks[chunkIndex].reference;
	return (reference & 1) == 0 ? (ObjectCache*)reference : NULL;
}


/*static*/ void
MemoryManager::PerformMaintenance()
{
	MutexLocker locker(sLock);

	while (sMaintenanceNeeded) {
		sMaintenanceNeeded = false;
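		// _AllocateArea() and _FreeArea() temporarily drop sLock, so the
		// situation can change while we work; where that matters, the code
		// below sets sMaintenanceNeeded again and the loop re-checks.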

		// We want to keep one or two areas as a reserve. This way we have at
		// least one area to use in situations where we aren't allowed to
		// allocate one, and we also avoid ping-pong effects.
		if (sFreeAreaCount > 0 && sFreeAreaCount <= 2)
			return;

		if (sFreeAreaCount == 0) {
			// try to allocate one
			Area* area;
			if (_AllocateArea(0, area) != B_OK)
				return;

			_push(sFreeAreas, area);
			if (++sFreeAreaCount > 2)
				sMaintenanceNeeded = true;
		} else {
			// free until we only have two free ones
			while (sFreeAreaCount > 2) {
				Area* area = _pop(sFreeAreas);
				_FreeArea(area, true, 0);
			}

			if (sFreeAreaCount == 0)
				sMaintenanceNeeded = true;
		}
	}
}


/*static*/ status_t
MemoryManager::_AllocateChunks(size_t chunkSize, uint32 chunkCount,
	uint32 flags, MetaChunk*& _metaChunk, Chunk*& _chunk)
{
	MetaChunkList* metaChunkList = NULL;
	if (chunkSize == SLAB_CHUNK_SIZE_SMALL) {
		metaChunkList = &sPartialMetaChunksSmall;
	} else if (chunkSize == SLAB_CHUNK_SIZE_MEDIUM) {
		metaChunkList = &sPartialMetaChunksMedium;
	} else if (chunkSize != SLAB_CHUNK_SIZE_LARGE) {
		panic("MemoryManager::_AllocateChunks(): Unsupported chunk size: %"
			B_PRIuSIZE, chunkSize);
		return B_BAD_VALUE;
	}

	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk))
		return B_OK;

	if (sFreeAreas != NULL) {
		_AddArea(_pop(sFreeAreas));
		sFreeAreaCount--;
		_RequestMaintenance();

		_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
		return B_OK;
	}

	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
		// We can't create an area under this restriction, and we must not
		// wait for someone else to do it either.
		return B_WOULD_BLOCK;
	}

	// We need to allocate a new area. Wait if someone else is already trying
	// to do the same.
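	// There are two allocation entries: sAllocationEntryDontWait for an area
	// allocation that must not wait for memory, sAllocationEntryCanWait for
	// one that may. A requester that itself must not wait for memory only
	// waits on the former, since only that allocation is guaranteed not to
	// block on memory.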
	while (true) {
		AllocationEntry* allocationEntry = NULL;
		if (sAllocationEntryDontWait != NULL) {
			allocationEntry = sAllocationEntryDontWait;
		} else if (sAllocationEntryCanWait != NULL
				&& (flags & CACHE_DONT_WAIT_FOR_MEMORY) == 0) {
			allocationEntry = sAllocationEntryCanWait;
		} else
			break;

		ConditionVariableEntry entry;
		allocationEntry->condition.Add(&entry);

		mutex_unlock(&sLock);
		entry.Wait();
		mutex_lock(&sLock);

		if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
				_chunk)) {
			return B_OK;
		}
	}

	// prepare the allocation entry others can wait on
	AllocationEntry*& allocationEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? sAllocationEntryDontWait : sAllocationEntryCanWait;

	AllocationEntry myResizeEntry;
	allocationEntry = &myResizeEntry;
	allocationEntry->condition.Init(metaChunkList, "wait for slab area");
	allocationEntry->thread = find_thread(NULL);

	Area* area;
	status_t error = _AllocateArea(flags, area);

	allocationEntry->condition.NotifyAll();
	allocationEntry = NULL;

	if (error != B_OK)
		return error;

	// Try again to get a meta chunk. Something might have been freed in the
	// meantime; then we can free the new area again.
	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk)) {
		_FreeArea(area, true, flags);
		return B_OK;
	}

	_AddArea(area);
	_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
	return B_OK;
}


/*static*/ bool
MemoryManager::_GetChunks(MetaChunkList* metaChunkList, size_t chunkSize,
	uint32 chunkCount, MetaChunk*& _metaChunk, Chunk*& _chunk)
{
	// the common and less complicated special case
	if (chunkCount == 1)
		return _GetChunk(metaChunkList, chunkSize, _metaChunk, _chunk);

	ASSERT(metaChunkList != NULL);

	// Iterate through the partial meta chunk list and try to find a free
	// range that is large enough.
	MetaChunk* metaChunk = NULL;
	for (MetaChunkList::Iterator it = metaChunkList->GetIterator();
			(metaChunk = it.Next()) != NULL;) {
		if (metaChunk->firstFreeChunk + chunkCount - 1
				<= metaChunk->lastFreeChunk) {
			break;
		}
	}

	if (metaChunk == NULL) {
		// try to get a free meta chunk
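		// A "short" meta chunk is an area's first one: it loses
		// kAreaAdminSize bytes to the area's administrative structures (see
		// _PrepareMetaChunk()). Only take one if the remaining space can hold
		// the requested number of chunks.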
		if ((SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize) / chunkSize >= chunkCount)
			metaChunk = sFreeShortMetaChunks.RemoveHead();
		if (metaChunk == NULL)
			metaChunk = sFreeCompleteMetaChunks.RemoveHead();

		if (metaChunk == NULL)
			return false;

		metaChunkList->Add(metaChunk);
		metaChunk->GetArea()->usedMetaChunkCount++;
		_PrepareMetaChunk(metaChunk, chunkSize);

		T(AllocateMetaChunk(metaChunk));
	}

	// pull the chunks out of the free list
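	// The free list is singly linked and not sorted, so we walk it and unlink
	// every chunk that falls into [firstChunk, lastChunk]. Setting reference
	// to 1 marks a chunk as allocated but not yet assigned to an owner.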
	Chunk* firstChunk = metaChunk->chunks + metaChunk->firstFreeChunk;
	Chunk* lastChunk = firstChunk + (chunkCount - 1);
	Chunk** chunkPointer = &metaChunk->freeChunks;
	uint32 remainingChunks = chunkCount;
	while (remainingChunks > 0) {
		ASSERT_PRINT(chunkPointer, "remaining: %" B_PRIu32 "/%" B_PRIu32
			", area: %p, meta chunk: %" B_PRIdSSIZE "\n", remainingChunks,
			chunkCount, metaChunk->GetArea(),
			metaChunk - metaChunk->GetArea()->metaChunks);
		Chunk* chunk = *chunkPointer;
		if (chunk >= firstChunk && chunk <= lastChunk) {
			*chunkPointer = chunk->next;
			chunk->reference = 1;
			remainingChunks--;
		} else
			chunkPointer = &chunk->next;
	}

	// allocate the chunks
	metaChunk->usedChunkCount += chunkCount;
	if (metaChunk->usedChunkCount == metaChunk->chunkCount) {
		// meta chunk is full now -- remove it from its list
		if (metaChunkList != NULL)
			metaChunkList->Remove(metaChunk);
	}

	// update the free range
	metaChunk->firstFreeChunk += chunkCount;

	_chunk = firstChunk;
	_metaChunk = metaChunk;

	T(AllocateChunks(chunkSize, chunkCount, metaChunk, firstChunk));

	return true;
}


/*static*/ bool
MemoryManager::_GetChunk(MetaChunkList* metaChunkList, size_t chunkSize,
	MetaChunk*& _metaChunk, Chunk*& _chunk)
{
	MetaChunk* metaChunk = metaChunkList != NULL
		? metaChunkList->Head() : NULL;
	if (metaChunk == NULL) {
		// no partial meta chunk -- maybe there's a free one
		if (chunkSize == SLAB_CHUNK_SIZE_LARGE) {
			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
		} else {
			metaChunk = sFreeShortMetaChunks.RemoveHead();
			if (metaChunk == NULL)
				metaChunk = sFreeCompleteMetaChunks.RemoveHead();
			if (metaChunk != NULL)
				metaChunkList->Add(metaChunk);
		}

		if (metaChunk == NULL)
			return false;

		metaChunk->GetArea()->usedMetaChunkCount++;
		_PrepareMetaChunk(metaChunk, chunkSize);

		T(AllocateMetaChunk(metaChunk));
	}

	// allocate the chunk
	if (++metaChunk->usedChunkCount == metaChunk->chunkCount) {
		// meta chunk is full now -- remove it from its list
		if (metaChunkList != NULL)
			metaChunkList->Remove(metaChunk);
	}

	_chunk = _pop(metaChunk->freeChunks);
	_metaChunk = metaChunk;

	// update the free range
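	// The popped chunk can be any chunk of the meta chunk. If it lies within
	// the contiguous free range we track, shrink the range at whichever end
	// is closer, so the larger part survives. Free chunks outside the range
	// remain reachable through the free list.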
	uint32 chunkIndex = _chunk - metaChunk->chunks;
	if (chunkIndex >= metaChunk->firstFreeChunk
			&& chunkIndex <= metaChunk->lastFreeChunk) {
		if (chunkIndex - metaChunk->firstFreeChunk
				<= metaChunk->lastFreeChunk - chunkIndex) {
			metaChunk->firstFreeChunk = chunkIndex + 1;
		} else
			metaChunk->lastFreeChunk = chunkIndex - 1;
	}

	T(AllocateChunk(chunkSize, metaChunk, _chunk));

	return true;
}


/*static*/ void
MemoryManager::_FreeChunk(Area* area, MetaChunk* metaChunk, Chunk* chunk,
	addr_t chunkAddress, bool alreadyUnmapped, uint32 flags)
{
	// unmap the chunk
	if (!alreadyUnmapped) {
		mutex_unlock(&sLock);
		_UnmapChunk(area->vmArea, chunkAddress, metaChunk->chunkSize, flags);
		mutex_lock(&sLock);
	}

	T(FreeChunk(metaChunk, chunk));

	_push(metaChunk->freeChunks, chunk);

	uint32 chunkIndex = chunk - metaChunk->chunks;

	// free the meta chunk, if it is unused now
	ASSERT(metaChunk->usedChunkCount > 0);
	if (--metaChunk->usedChunkCount == 0) {
		T(FreeMetaChunk(metaChunk));

		// remove from partial meta chunk list
		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
			sPartialMetaChunksSmall.Remove(metaChunk);
		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
			sPartialMetaChunksMedium.Remove(metaChunk);

		// mark empty
		metaChunk->chunkSize = 0;

		// add to free list
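		// (an area's first meta chunk is the short one, see _AddArea())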
		if (metaChunk == area->metaChunks)
			sFreeShortMetaChunks.Add(metaChunk, false);
		else
			sFreeCompleteMetaChunks.Add(metaChunk, false);

		// free the area, if it is unused now
		ASSERT(area->usedMetaChunkCount > 0);
		if (--area->usedMetaChunkCount == 0)
			_FreeArea(area, false, flags);
	} else if (metaChunk->usedChunkCount == metaChunk->chunkCount - 1) {
		// the meta chunk was full before -- add it back to its partial chunk
		// list
		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
			sPartialMetaChunksSmall.Add(metaChunk, false);
		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
			sPartialMetaChunksMedium.Add(metaChunk, false);

		metaChunk->firstFreeChunk = chunkIndex;
		metaChunk->lastFreeChunk = chunkIndex;
	} else {
		// extend the free range, if the chunk adjoins it
		if (chunkIndex + 1 == metaChunk->firstFreeChunk) {
			uint32 firstFree = chunkIndex;
			for (; firstFree > 0; firstFree--) {
				Chunk* previousChunk = &metaChunk->chunks[firstFree - 1];
				if (!_IsChunkFree(metaChunk, previousChunk))
					break;
			}
			metaChunk->firstFreeChunk = firstFree;
		} else if (chunkIndex == (uint32)metaChunk->lastFreeChunk + 1) {
			uint32 lastFree = chunkIndex;
			for (; lastFree + 1 < metaChunk->chunkCount; lastFree++) {
				Chunk* nextChunk = &metaChunk->chunks[lastFree + 1];
				if (!_IsChunkFree(metaChunk, nextChunk))
					break;
			}
			metaChunk->lastFreeChunk = lastFree;
		}
	}
}


/*static*/ void
MemoryManager::_PrepareMetaChunk(MetaChunk* metaChunk, size_t chunkSize)
{
	Area* area = metaChunk->GetArea();

	if (metaChunk == area->metaChunks) {
		// The first meta chunk of an area is shorter -- it loses space to the
		// area's administrative structures.
		size_t unusableSize = ROUNDUP(kAreaAdminSize, chunkSize);
		metaChunk->chunkBase = (addr_t)area + unusableSize;
		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE - unusableSize;
	}

	metaChunk->chunkSize = chunkSize;
	metaChunk->chunkCount = metaChunk->totalSize / chunkSize;
	metaChunk->usedChunkCount = 0;

	metaChunk->freeChunks = NULL;
	for (int32 i = metaChunk->chunkCount - 1; i >= 0; i--)
		_push(metaChunk->freeChunks, metaChunk->chunks + i);

	metaChunk->firstFreeChunk = 0;
	metaChunk->lastFreeChunk = metaChunk->chunkCount - 1;
}


/*static*/ void
MemoryManager::_AddArea(Area* area)
{
	T(AddArea(area));

	// add the area to the hash table
	WriteLocker writeLocker(sAreaTableLock);
	sAreaTable.InsertUnchecked(area);
	writeLocker.Unlock();

	// add the area's meta chunks to the free lists
	sFreeShortMetaChunks.Add(&area->metaChunks[0]);
	for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++)
		sFreeCompleteMetaChunks.Add(&area->metaChunks[i]);
}


/*static*/ status_t
MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
{
	TRACE("MemoryManager::_AllocateArea(%#" B_PRIx32 ")\n", flags);

	ASSERT((flags & CACHE_DONT_LOCK_KERNEL_SPACE) == 0);

	mutex_unlock(&sLock);
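	// From here on we work without sLock: creating and mapping the area can
	// itself allocate memory, which must not happen while we hold the memory
	// manager lock.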

	size_t pagesNeededToMap = 0;
	Area* area;
	VMArea* vmArea = NULL;

	if (sKernelArgs == NULL) {
		// create an area
		uint32 areaCreationFlags = (flags & CACHE_PRIORITY_VIP) != 0
			? CREATE_AREA_PRIORITY_VIP : 0;
		area_id areaID = vm_create_null_area(B_SYSTEM_TEAM, kSlabAreaName,
			(void**)&area, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
			areaCreationFlags);
		if (areaID < 0) {
			mutex_lock(&sLock);
			return areaID;
		}

		// map the memory for the administrative structure
		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
		VMTranslationMap* translationMap = addressSpace->TranslationMap();

		pagesNeededToMap = translationMap->MaxPagesNeededToMap((addr_t)area,
			(addr_t)area + SLAB_AREA_SIZE - 1);

		vmArea = VMAreaHash::Lookup(areaID);
		status_t error = _MapChunk(vmArea, (addr_t)area, kAreaAdminSize,
			pagesNeededToMap, flags);
		if (error != B_OK) {
			delete_area(areaID);
			mutex_lock(&sLock);
			return error;
		}

		dprintf("slab memory manager: created area %p (%" B_PRId32 ")\n", area,
			areaID);
	} else {
		// no areas yet -- allocate raw memory
		area = (Area*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
			SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, true);
		if (area == NULL) {
			mutex_lock(&sLock);
			return B_NO_MEMORY;
		}

		TRACE("MemoryManager::_AllocateArea(): allocated early area %p\n",
			area);
	}

	// init the area structure
	area->vmArea = vmArea;
	area->reserved_memory_for_mapping = pagesNeededToMap * B_PAGE_SIZE;
	area->usedMetaChunkCount = 0;
	area->fullyMapped = vmArea == NULL;

	// init the meta chunks
	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
		MetaChunk* metaChunk = area->metaChunks + i;
		metaChunk->chunkSize = 0;
		metaChunk->chunkBase = (addr_t)area + i * SLAB_CHUNK_SIZE_LARGE;
		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE;
			// Note: chunkBase and totalSize aren't correct for the first
			// meta chunk. They will be set in _PrepareMetaChunk().
		metaChunk->chunkCount = 0;
		metaChunk->usedChunkCount = 0;
		metaChunk->freeChunks = NULL;
	}

	mutex_lock(&sLock);
	_area = area;

	T(AllocateArea(area, flags));

	return B_OK;
}


/*static*/ void
MemoryManager::_FreeArea(Area* area, bool areaRemoved, uint32 flags)
{
	TRACE("MemoryManager::_FreeArea(%p, %#" B_PRIx32 ")\n", area, flags);

	T(FreeArea(area, areaRemoved, flags));

	ASSERT(area->usedMetaChunkCount == 0);

	if (!areaRemoved) {
		// remove the area's meta chunks from the free lists
		ASSERT(area->metaChunks[0].usedChunkCount == 0);
		sFreeShortMetaChunks.Remove(&area->metaChunks[0]);

		for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			ASSERT(area->metaChunks[i].usedChunkCount == 0);
			sFreeCompleteMetaChunks.Remove(&area->metaChunks[i]);
		}

		// remove the area from the hash table
		WriteLocker writeLocker(sAreaTableLock);
		sAreaTable.RemoveUnchecked(area);
		writeLocker.Unlock();
	}

	// We want to keep one or two free areas as a reserve.
	if (sFreeAreaCount <= 1) {
		_push(sFreeAreas, area);
		sFreeAreaCount++;
		return;
	}

	if (area->vmArea == NULL || (flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
		// This is either early in the boot process or we aren't allowed to
		// delete the area now.
		_push(sFreeAreas, area);
		sFreeAreaCount++;
		_RequestMaintenance();
		return;
	}

	mutex_unlock(&sLock);

	dprintf("slab memory manager: deleting area %p (%" B_PRId32 ")\n", area,
		area->vmArea->id);

	size_t memoryToUnreserve = area->reserved_memory_for_mapping;
	delete_area(area->vmArea->id);
	vm_unreserve_memory(memoryToUnreserve);

	mutex_lock(&sLock);
}


/*static*/ status_t
MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
	size_t reserveAdditionalMemory, uint32 flags)
{
	TRACE("MemoryManager::_MapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
		")\n", vmArea, address, size);

	T(Map(address, size, flags));

	if (vmArea == NULL) {
		// everything is mapped anyway
		return B_OK;
	}

	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
	VMTranslationMap* translationMap = addressSpace->TranslationMap();

	// reserve memory for the chunk
	int priority = (flags & CACHE_PRIORITY_VIP) != 0
		? VM_PRIORITY_VIP : VM_PRIORITY_SYSTEM;
	size_t reservedMemory = size + reserveAdditionalMemory;
	status_t error = vm_try_reserve_memory(size, priority,
		(flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? 0 : 1000000);
	if (error != B_OK)
		return error;

	// reserve the pages we need now
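	// Besides the chunk's own pages we reserve the pages the translation map
	// may need for page tables while mapping the range.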
	size_t reservedPages = size / B_PAGE_SIZE
		+ translationMap->MaxPagesNeededToMap(address, address + size - 1);
	vm_page_reservation reservation;
	if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0) {
		if (!vm_page_try_reserve_pages(&reservation, reservedPages, priority)) {
			vm_unreserve_memory(reservedMemory);
			return B_WOULD_BLOCK;
		}
	} else
		vm_page_reserve_pages(&reservation, reservedPages, priority);

	VMCache* cache = vm_area_get_locked_cache(vmArea);

	// map the pages
	translationMap->Lock();

	addr_t areaOffset = address - vmArea->Base();
	addr_t endAreaOffset = areaOffset + size;
	for (size_t offset = areaOffset; offset < endAreaOffset;
			offset += B_PAGE_SIZE) {
		vm_page* page = vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
		cache->InsertPage(page, offset);

		page->wired_count++;
		atomic_add(&gMappedPagesCount, 1);
		DEBUG_PAGE_ACCESS_END(page);

		translationMap->Map(vmArea->Base() + offset,
			page->physical_page_number * B_PAGE_SIZE,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, &reservation);
	}

	translationMap->Unlock();

	cache->ReleaseRefAndUnlock();

	vm_page_unreserve_pages(&reservation);

	return B_OK;
}


/*static*/ status_t
MemoryManager::_UnmapChunk(VMArea* vmArea, addr_t address, size_t size,
	uint32 flags)
{
	T(Unmap(address, size, flags));

	if (vmArea == NULL)
		return B_ERROR;

	TRACE("MemoryManager::_UnmapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
		")\n", vmArea, address, size);

	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
	VMTranslationMap* translationMap = addressSpace->TranslationMap();
	VMCache* cache = vm_area_get_locked_cache(vmArea);

	// unmap the pages
	translationMap->Lock();
	translationMap->Unmap(address, address + size - 1);
	atomic_add(&gMappedPagesCount, -(size / B_PAGE_SIZE));
	translationMap->Unlock();

	// free the pages
	addr_t areaPageOffset = (address - vmArea->Base()) / B_PAGE_SIZE;
	addr_t areaPageEndOffset = areaPageOffset + size / B_PAGE_SIZE;
	VMCachePagesTree::Iterator it = cache->pages.GetIterator(
		areaPageOffset, true, true);
	while (vm_page* page = it.Next()) {
		if (page->cache_offset >= areaPageEndOffset)
			break;

		DEBUG_PAGE_ACCESS_START(page);

		page->wired_count--;

		cache->RemovePage(page);
			// the iterator is remove-safe
		vm_page_free(cache, page);
	}

	cache->ReleaseRefAndUnlock();

	vm_unreserve_memory(size);

	return B_OK;
}


/*static*/ void
MemoryManager::_UnmapFreeChunksEarly(Area* area)
{
	if (!area->fullyMapped)
		return;

	TRACE("MemoryManager::_UnmapFreeChunksEarly(%p)\n", area);

	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
		MetaChunk* metaChunk = area->metaChunks + i;
		if (metaChunk->chunkSize == 0) {
			// meta chunk is free -- unmap it completely
			if (i == 0) {
				_UnmapChunk(area->vmArea, (addr_t)area + kAreaAdminSize,
					SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize, 0);
			} else {
				_UnmapChunk(area->vmArea,
					(addr_t)area + i * SLAB_CHUNK_SIZE_LARGE,
					SLAB_CHUNK_SIZE_LARGE, 0);
			}
		} else {
			// unmap free chunks
			for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
					chunk = chunk->next) {
				_UnmapChunk(area->vmArea, _ChunkAddress(metaChunk, chunk),
					metaChunk->chunkSize, 0);
			}

			// The first meta chunk might have space before its first chunk.
			if (i == 0) {
				addr_t unusedStart = (addr_t)area + kAreaAdminSize;
				if (unusedStart < metaChunk->chunkBase) {
					_UnmapChunk(area->vmArea, unusedStart,
						metaChunk->chunkBase - unusedStart, 0);
				}
			}
		}
	}

	area->fullyMapped = false;
}


/*static*/ void
MemoryManager::_ConvertEarlyArea(Area* area)
{
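	// The memory was already allocated and mapped by vm_allocate_early(); we
	// only need to create a VMArea for it. B_ALREADY_WIRED tells the VM that
	// the pages are present and wired.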
	void* address = area;
	area_id areaID = create_area(kSlabAreaName, &address, B_EXACT_ADDRESS,
		SLAB_AREA_SIZE, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (areaID < 0)
		panic("out of memory");

	area->vmArea = VMAreaHash::Lookup(areaID);
}


/*static*/ void
MemoryManager::_RequestMaintenance()
{
	if ((sFreeAreaCount > 0 && sFreeAreaCount <= 2) || sMaintenanceNeeded)
		return;

	sMaintenanceNeeded = true;
	request_memory_manager_maintenance();
}


/*static*/ int
MemoryManager::_DumpRawAllocations(int argc, char** argv)
{
	kprintf("area        meta chunk  chunk  base        size (KB)\n");

	size_t totalSize = 0;

	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			MetaChunk* metaChunk = area->metaChunks + i;
			if (metaChunk->chunkSize == 0)
				continue;
			for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
				Chunk* chunk = metaChunk->chunks + k;

				// skip free chunks
				if (_IsChunkFree(metaChunk, chunk))
					continue;

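				// Only chunks whose reference is an odd end address are raw
				// allocations; skip cache-owned chunks (even reference) and
				// chunks not yet assigned an owner (reference == 1).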
				addr_t reference = chunk->reference;
				if ((reference & 1) == 0 || reference == 1)
					continue;

				addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
				size_t size = reference - chunkAddress + 1;
				totalSize += size;

				kprintf("%p  %10" B_PRId32 "  %5" B_PRIu32 "  %p  %9"
					B_PRIuSIZE "\n", area, i, k, (void*)chunkAddress,
					size / 1024);
			}
		}
	}

	kprintf("total:                                     %9" B_PRIuSIZE "\n",
		totalSize / 1024);

	return 0;
}


/*static*/ void
MemoryManager::_PrintMetaChunkTableHeader(bool printChunks)
{
	if (printChunks)
		kprintf("chunk        base       cache  object size  cache name\n");
	else
		kprintf("chunk        base\n");
}


/*static*/ void
MemoryManager::_DumpMetaChunk(MetaChunk* metaChunk, bool printChunks,
	bool printHeader)
{
	if (printHeader)
		_PrintMetaChunkTableHeader(printChunks);

	const char* type = "empty";
	if (metaChunk->chunkSize != 0) {
		switch (metaChunk->chunkSize) {
			case SLAB_CHUNK_SIZE_SMALL:
				type = "small";
				break;
			case SLAB_CHUNK_SIZE_MEDIUM:
				type = "medium";
				break;
			case SLAB_CHUNK_SIZE_LARGE:
				type = "large";
				break;
		}
	}

	int metaChunkIndex = metaChunk - metaChunk->GetArea()->metaChunks;
	kprintf("%5d  %p  --- %6s meta chunk", metaChunkIndex,
		(void*)metaChunk->chunkBase, type);
	if (metaChunk->chunkSize != 0) {
		kprintf(": %4u/%4u used, %-4u-%4u free ------------\n",
			metaChunk->usedChunkCount, metaChunk->chunkCount,
			metaChunk->firstFreeChunk, metaChunk->lastFreeChunk);
	} else
		kprintf(" --------------------------------------------\n");

	if (metaChunk->chunkSize == 0 || !printChunks)
		return;

	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
		Chunk* chunk = metaChunk->chunks + i;

		// skip free chunks
		if (_IsChunkFree(metaChunk, chunk))
			continue;

		addr_t reference = chunk->reference;
		if ((reference & 1) == 0) {
			ObjectCache* cache = (ObjectCache*)reference;
			kprintf("%5" B_PRIu32 "  %p  %p  %11" B_PRIuSIZE "  %s\n", i,
				(void*)_ChunkAddress(metaChunk, chunk), cache,
				cache != NULL ? cache->object_size : 0,
				cache != NULL ? cache->name : "");
		} else if (reference != 1) {
			kprintf("%5" B_PRIu32 "  %p  raw allocation up to %p\n", i,
				(void*)_ChunkAddress(metaChunk, chunk), (void*)reference);
		}
	}
}


/*static*/ int
MemoryManager::_DumpMetaChunk(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	uint64 address;
	if (!evaluate_debug_expression(argv[1], &address, false))
		return 0;

	Area* area = (Area*)(addr_t)ROUNDDOWN(address, SLAB_AREA_SIZE);

	MetaChunk* metaChunk;
	if ((addr_t)address >= (addr_t)area->metaChunks
		&& (addr_t)address
			< (addr_t)(area->metaChunks + SLAB_META_CHUNKS_PER_AREA)) {
		metaChunk = (MetaChunk*)(addr_t)address;
	} else {
		metaChunk = area->metaChunks
			+ (address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE;
	}

	_DumpMetaChunk(metaChunk, true, true);

	return 0;
}


/*static*/ void
MemoryManager::_DumpMetaChunks(const char* name, MetaChunkList& metaChunkList,
	bool printChunks)
{
	kprintf("%s:\n", name);

	for (MetaChunkList::Iterator it = metaChunkList.GetIterator();
			MetaChunk* metaChunk = it.Next();) {
		_DumpMetaChunk(metaChunk, printChunks, false);
	}
}


/*static*/ int
MemoryManager::_DumpMetaChunks(int argc, char** argv)
{
	bool printChunks = argc > 1 && strcmp(argv[1], "-c") == 0;

	_PrintMetaChunkTableHeader(printChunks);
	_DumpMetaChunks("free complete", sFreeCompleteMetaChunks, printChunks);
	_DumpMetaChunks("free short", sFreeShortMetaChunks, printChunks);
	_DumpMetaChunks("partial small", sPartialMetaChunksSmall, printChunks);
	_DumpMetaChunks("partial medium", sPartialMetaChunksMedium, printChunks);

	return 0;
}


/*static*/ int
MemoryManager::_DumpArea(int argc, char** argv)
{
	bool printChunks = false;

	int argi = 1;
	while (argi < argc) {
		if (argv[argi][0] != '-')
			break;
		const char* arg = argv[argi++];
		if (strcmp(arg, "-c") == 0) {
			printChunks = true;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	if (argi + 1 != argc) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	uint64 address;
	if (!evaluate_debug_expression(argv[argi], &address, false))
		return 0;

	address = ROUNDDOWN(address, SLAB_AREA_SIZE);

	Area* area = (Area*)(addr_t)address;

	for (uint32 k = 0; k < SLAB_META_CHUNKS_PER_AREA; k++) {
		MetaChunk* metaChunk = area->metaChunks + k;
		_DumpMetaChunk(metaChunk, printChunks, k == 0);
	}

	return 0;
}


/*static*/ int
MemoryManager::_DumpAreas(int argc, char** argv)
{
	kprintf("      base        area   meta      small   medium  large\n");

	size_t totalTotalSmall = 0;
	size_t totalUsedSmall = 0;
	size_t totalTotalMedium = 0;
	size_t totalUsedMedium = 0;
	size_t totalUsedLarge = 0;
	uint32 areaCount = 0;

	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		areaCount++;

		// sum up the free/used counts for the chunk sizes
		int totalSmall = 0;
		int usedSmall = 0;
		int totalMedium = 0;
		int usedMedium = 0;
		int usedLarge = 0;

		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			MetaChunk* metaChunk = area->metaChunks + i;
			if (metaChunk->chunkSize == 0)
				continue;

			switch (metaChunk->chunkSize) {
				case SLAB_CHUNK_SIZE_SMALL:
					totalSmall += metaChunk->chunkCount;
					usedSmall += metaChunk->usedChunkCount;
					break;
				case SLAB_CHUNK_SIZE_MEDIUM:
					totalMedium += metaChunk->chunkCount;
					usedMedium += metaChunk->usedChunkCount;
					break;
				case SLAB_CHUNK_SIZE_LARGE:
					usedLarge += metaChunk->usedChunkCount;
					break;
			}
		}

		kprintf("%p  %p  %2u/%2u  %4d/%4d  %3d/%3d  %5d\n",
			area, area->vmArea, area->usedMetaChunkCount,
			SLAB_META_CHUNKS_PER_AREA, usedSmall, totalSmall, usedMedium,
			totalMedium, usedLarge);

		totalTotalSmall += totalSmall;
		totalUsedSmall += usedSmall;
		totalTotalMedium += totalMedium;
		totalUsedMedium += usedMedium;
		totalUsedLarge += usedLarge;
	}

	kprintf("%d free area%s:\n", sFreeAreaCount,
		sFreeAreaCount == 1 ? "" : "s");
	for (Area* area = sFreeAreas; area != NULL; area = area->next) {
		areaCount++;
		kprintf("%p  %p\n", area, area->vmArea);
	}

	kprintf("total usage:\n");
	kprintf("  small:    %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedSmall,
		totalTotalSmall);
	kprintf("  medium:   %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedMedium,
		totalTotalMedium);
	kprintf("  large:    %" B_PRIuSIZE "\n", totalUsedLarge);
	kprintf("  memory:   %" B_PRIuSIZE "/%" B_PRIuSIZE " KB\n",
		(totalUsedSmall * SLAB_CHUNK_SIZE_SMALL
			+ totalUsedMedium * SLAB_CHUNK_SIZE_MEDIUM
			+ totalUsedLarge * SLAB_CHUNK_SIZE_LARGE) / 1024,
		areaCount * SLAB_AREA_SIZE / 1024);
	kprintf("  overhead: %" B_PRIuSIZE " KB\n",
		areaCount * kAreaAdminSize / 1024);

	return 0;
}
1776