xref: /haiku/src/system/kernel/slab/MemoryManager.cpp (revision 922e7ba1f3228e6f28db69b0ded8f86eb32dea17)
1 /*
2  * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include "MemoryManager.h"
8 
9 #include <algorithm>
10 
11 #include <debug.h>
12 #include <tracing.h>
13 #include <util/AutoLock.h>
14 #include <vm/vm.h>
15 #include <vm/vm_page.h>
16 #include <vm/vm_priv.h>
17 #include <vm/VMAddressSpace.h>
18 #include <vm/VMArea.h>
19 #include <vm/VMCache.h>
20 #include <vm/VMTranslationMap.h>
21 
22 #include "kernel_debug_config.h"
23 
24 #include "ObjectCache.h"
25 #include "slab_private.h"
26 
27 
28 //#define TRACE_MEMORY_MANAGER
29 #ifdef TRACE_MEMORY_MANAGER
30 #	define TRACE(x...)	dprintf(x)
31 #else
32 #	define TRACE(x...)	do {} while (false)
33 #endif
34 
35 #if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
36 #	define PARANOID_CHECKS_ONLY(x)	x
37 #else
38 #	define PARANOID_CHECKS_ONLY(x)
39 #endif
40 
41 
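// Overview (a summary of the code below; the Area/MetaChunk/Chunk structures
// themselves are defined in the accompanying header): the memory manager
// hands out backing memory for the slab allocator in fixed-size slab areas of
// SLAB_AREA_SIZE bytes. Each Area is divided into SLAB_META_CHUNKS_PER_AREA
// meta chunks of SLAB_CHUNK_SIZE_LARGE bytes, and each MetaChunk is in turn
// carved into chunks of small, medium, or large size. A Chunk's storage
// doubles as its state: free chunks are linked via their "next" pointer into
// the meta chunk's free list, while used chunks carry a "reference" -- an
// even value is the owning ObjectCache, an odd value other than 1 is the
// inclusive end address of a raw allocation, and 1 marks a chunk that has
// just been allocated but not yet assigned.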
42 static const char* const kSlabAreaName = "slab area";
43 
44 static void* sAreaTableBuffer[1024];
45 
46 mutex MemoryManager::sLock;
47 rw_lock MemoryManager::sAreaTableLock;
48 kernel_args* MemoryManager::sKernelArgs;
49 MemoryManager::AreaTable MemoryManager::sAreaTable;
50 MemoryManager::Area* MemoryManager::sFreeAreas;
51 int MemoryManager::sFreeAreaCount;
52 MemoryManager::MetaChunkList MemoryManager::sFreeCompleteMetaChunks;
53 MemoryManager::MetaChunkList MemoryManager::sFreeShortMetaChunks;
54 MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksSmall;
55 MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksMedium;
56 MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryCanWait;
57 MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryDontWait;
58 bool MemoryManager::sMaintenanceNeeded;
59 
60 
61 // #pragma mark - kernel tracing
62 
63 
64 #if SLAB_MEMORY_MANAGER_TRACING
65 
66 
67 //namespace SlabMemoryManagerCacheTracing {
68 struct MemoryManager::Tracing {
69 
70 class MemoryManagerTraceEntry : public AbstractTraceEntry {
71 public:
72 	MemoryManagerTraceEntry()
73 	{
74 	}
75 };
76 
77 
78 class Allocate : public MemoryManagerTraceEntry {
79 public:
80 	Allocate(ObjectCache* cache, uint32 flags)
81 		:
82 		MemoryManagerTraceEntry(),
83 		fCache(cache),
84 		fFlags(flags)
85 	{
86 		Initialized();
87 	}
88 
89 	virtual void AddDump(TraceOutput& out)
90 	{
91 		out.Print("slab memory manager alloc: cache: %p, flags: %#" B_PRIx32,
92 			fCache, fFlags);
93 	}
94 
95 private:
96 	ObjectCache*	fCache;
97 	uint32			fFlags;
98 };
99 
100 
101 class Free : public MemoryManagerTraceEntry {
102 public:
103 	Free(void* address, uint32 flags)
104 		:
105 		MemoryManagerTraceEntry(),
106 		fAddress(address),
107 		fFlags(flags)
108 	{
109 		Initialized();
110 	}
111 
112 	virtual void AddDump(TraceOutput& out)
113 	{
114 		out.Print("slab memory manager free: address: %p, flags: %#" B_PRIx32,
115 			fAddress, fFlags);
116 	}
117 
118 private:
119 	void*	fAddress;
120 	uint32	fFlags;
121 };
122 
123 
124 class AllocateRaw : public MemoryManagerTraceEntry {
125 public:
126 	AllocateRaw(size_t size, uint32 flags)
127 		:
128 		MemoryManagerTraceEntry(),
129 		fSize(size),
130 		fFlags(flags)
131 	{
132 		Initialized();
133 	}
134 
135 	virtual void AddDump(TraceOutput& out)
136 	{
137 		out.Print("slab memory manager alloc raw: size: %" B_PRIuSIZE
138 			", flags: %#" B_PRIx32, fSize, fFlags);
139 	}
140 
141 private:
142 	size_t	fSize;
143 	uint32	fFlags;
144 };
145 
146 
147 class FreeRawOrReturnCache : public MemoryManagerTraceEntry {
148 public:
149 	FreeRawOrReturnCache(void* address, uint32 flags)
150 		:
151 		MemoryManagerTraceEntry(),
152 		fAddress(address),
153 		fFlags(flags)
154 	{
155 		Initialized();
156 	}
157 
158 	virtual void AddDump(TraceOutput& out)
159 	{
160 		out.Print("slab memory manager free raw/return: address: %p, flags: %#"
161 			B_PRIx32, fAddress, fFlags);
162 	}
163 
164 private:
165 	void*	fAddress;
166 	uint32	fFlags;
167 };
168 
169 
170 class AllocateArea : public MemoryManagerTraceEntry {
171 public:
172 	AllocateArea(Area* area, uint32 flags)
173 		:
174 		MemoryManagerTraceEntry(),
175 		fArea(area),
176 		fFlags(flags)
177 	{
178 		Initialized();
179 	}
180 
181 	virtual void AddDump(TraceOutput& out)
182 	{
183 		out.Print("slab memory manager alloc area: flags: %#" B_PRIx32
184 			" -> %p", fFlags, fArea);
185 	}
186 
187 private:
188 	Area*	fArea;
189 	uint32	fFlags;
190 };
191 
192 
193 class AddArea : public MemoryManagerTraceEntry {
194 public:
195 	AddArea(Area* area)
196 		:
197 		MemoryManagerTraceEntry(),
198 		fArea(area)
199 	{
200 		Initialized();
201 	}
202 
203 	virtual void AddDump(TraceOutput& out)
204 	{
205 		out.Print("slab memory manager add area: %p", fArea);
206 	}
207 
208 private:
209 	Area*	fArea;
210 };
211 
212 
213 class FreeArea : public MemoryManagerTraceEntry {
214 public:
215 	FreeArea(Area* area, bool areaRemoved, uint32 flags)
216 		:
217 		MemoryManagerTraceEntry(),
218 		fArea(area),
219 		fFlags(flags),
220 		fRemoved(areaRemoved)
221 	{
222 		Initialized();
223 	}
224 
225 	virtual void AddDump(TraceOutput& out)
226 	{
227 		out.Print("slab memory manager free area: %p%s, flags: %#" B_PRIx32,
228 			fArea, fRemoved ? " (removed)" : "", fFlags);
229 	}
230 
231 private:
232 	Area*	fArea;
233 	uint32	fFlags;
234 	bool	fRemoved;
235 };
236 
237 
238 class AllocateMetaChunk : public MemoryManagerTraceEntry {
239 public:
240 	AllocateMetaChunk(MetaChunk* metaChunk)
241 		:
242 		MemoryManagerTraceEntry(),
243 		fMetaChunk(metaChunk->chunkBase)
244 	{
245 		Initialized();
246 	}
247 
248 	virtual void AddDump(TraceOutput& out)
249 	{
250 		out.Print("slab memory manager alloc meta chunk: %#" B_PRIxADDR,
251 			fMetaChunk);
252 	}
253 
254 private:
255 	addr_t	fMetaChunk;
256 };
257 
258 
259 class FreeMetaChunk : public MemoryManagerTraceEntry {
260 public:
261 	FreeMetaChunk(MetaChunk* metaChunk)
262 		:
263 		MemoryManagerTraceEntry(),
264 		fMetaChunk(metaChunk->chunkBase)
265 	{
266 		Initialized();
267 	}
268 
269 	virtual void AddDump(TraceOutput& out)
270 	{
271 		out.Print("slab memory manager free meta chunk: %#" B_PRIxADDR,
272 			fMetaChunk);
273 	}
274 
275 private:
276 	addr_t	fMetaChunk;
277 };
278 
279 
280 class AllocateChunk : public MemoryManagerTraceEntry {
281 public:
282 	AllocateChunk(size_t chunkSize, MetaChunk* metaChunk, Chunk* chunk)
283 		:
284 		MemoryManagerTraceEntry(),
285 		fChunkSize(chunkSize),
286 		fMetaChunk(metaChunk->chunkBase),
287 		fChunk(chunk - metaChunk->chunks)
288 	{
289 		Initialized();
290 	}
291 
292 	virtual void AddDump(TraceOutput& out)
293 	{
294 		out.Print("slab memory manager alloc chunk: size: %" B_PRIuSIZE
295 			" -> meta chunk: %#" B_PRIxADDR ", chunk: %" B_PRIu32, fChunkSize,
296 			fMetaChunk, fChunk);
297 	}
298 
299 private:
300 	size_t	fChunkSize;
301 	addr_t	fMetaChunk;
302 	uint32	fChunk;
303 };
304 
305 
306 class AllocateChunks : public MemoryManagerTraceEntry {
307 public:
308 	AllocateChunks(size_t chunkSize, uint32 chunkCount, MetaChunk* metaChunk,
309 		Chunk* chunk)
310 		:
311 		MemoryManagerTraceEntry(),
312 		fMetaChunk(metaChunk->chunkBase),
313 		fChunkSize(chunkSize),
314 		fChunkCount(chunkCount),
315 		fChunk(chunk - metaChunk->chunks)
316 	{
317 		Initialized();
318 	}
319 
320 	virtual void AddDump(TraceOutput& out)
321 	{
322 		out.Print("slab memory manager alloc chunks: size: %" B_PRIuSIZE
323 			", count %" B_PRIu32 " -> meta chunk: %#" B_PRIxADDR ", chunk: %"
324 			B_PRIu32, fChunkSize, fChunkCount, fMetaChunk, fChunk);
325 	}
326 
327 private:
328 	addr_t	fMetaChunk;
329 	size_t	fChunkSize;
330 	uint32	fChunkCount;
331 	uint32	fChunk;
332 };
333 
334 
335 class FreeChunk : public MemoryManagerTraceEntry {
336 public:
337 	FreeChunk(MetaChunk* metaChunk, Chunk* chunk)
338 		:
339 		MemoryManagerTraceEntry(),
340 		fMetaChunk(metaChunk->chunkBase),
341 		fChunk(chunk - metaChunk->chunks)
342 	{
343 		Initialized();
344 	}
345 
346 	virtual void AddDump(TraceOutput& out)
347 	{
348 		out.Print("slab memory manager free chunk: meta chunk: %#" B_PRIxADDR
349 			", chunk: %" B_PRIu32, fMetaChunk, fChunk);
350 	}
351 
352 private:
353 	addr_t	fMetaChunk;
354 	uint32	fChunk;
355 };
356 
357 
358 class Map : public MemoryManagerTraceEntry {
359 public:
360 	Map(addr_t address, size_t size, uint32 flags)
361 		:
362 		MemoryManagerTraceEntry(),
363 		fAddress(address),
364 		fSize(size),
365 		fFlags(flags)
366 	{
367 		Initialized();
368 	}
369 
370 	virtual void AddDump(TraceOutput& out)
371 	{
372 		out.Print("slab memory manager map: %#" B_PRIxADDR ", size: %"
373 			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
374 	}
375 
376 private:
377 	addr_t	fAddress;
378 	size_t	fSize;
379 	uint32	fFlags;
380 };
381 
382 
383 class Unmap : public MemoryManagerTraceEntry {
384 public:
385 	Unmap(addr_t address, size_t size, uint32 flags)
386 		:
387 		MemoryManagerTraceEntry(),
388 		fAddress(address),
389 		fSize(size),
390 		fFlags(flags)
391 	{
392 		Initialized();
393 	}
394 
395 	virtual void AddDump(TraceOutput& out)
396 	{
397 		out.Print("slab memory manager unmap: %#" B_PRIxADDR ", size: %"
398 			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
399 	}
400 
401 private:
402 	addr_t	fAddress;
403 	size_t	fSize;
404 	uint32	fFlags;
405 };
406 
407 
408 //}	// namespace SlabMemoryManagerCacheTracing
409 };	// struct MemoryManager::Tracing
410 
411 
412 //#	define T(x)	new(std::nothrow) SlabMemoryManagerCacheTracing::x
413 #	define T(x)	new(std::nothrow) MemoryManager::Tracing::x
414 
415 #else
416 #	define T(x)
417 #endif	// SLAB_MEMORY_MANAGER_TRACING
418 
419 
420 // #pragma mark - MemoryManager
421 
422 
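/*!	Early initialization, called while the kernel args are still available,
	i.e. before real VM areas can be created. Sets up the locks, the meta
	chunk lists, and the (statically backed) area hash table; as long as
	sKernelArgs remains set, new slab areas are allocated via
	vm_allocate_early() rather than as VM areas.
*/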
423 /*static*/ void
424 MemoryManager::Init(kernel_args* args)
425 {
426 	mutex_init(&sLock, "slab memory manager");
427 	rw_lock_init(&sAreaTableLock, "slab memory manager area table");
428 	sKernelArgs = args;
429 
430 	new(&sFreeCompleteMetaChunks) MetaChunkList;
431 	new(&sFreeShortMetaChunks) MetaChunkList;
432 	new(&sPartialMetaChunksSmall) MetaChunkList;
433 	new(&sPartialMetaChunksMedium) MetaChunkList;
434 
435 	new(&sAreaTable) AreaTable;
436 	sAreaTable.Resize(sAreaTableBuffer, sizeof(sAreaTableBuffer), true);
437 		// A bit hacky: The table now owns the memory. Since we never resize or
438 		// free it, that's not a problem, though.
439 
440 	sFreeAreas = NULL;
441 	sFreeAreaCount = 0;
442 	sMaintenanceNeeded = false;
443 }
444 
445 
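/*!	Second initialization step, called once creating VM areas is possible:
	converts the early allocated areas into real VM areas, trims the free
	area reserve to a single area, unmaps all unused chunks, and registers
	the KDL debugger commands.
*/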
446 /*static*/ void
447 MemoryManager::InitPostArea()
448 {
449 	sKernelArgs = NULL;
450 
451 	// Convert all early areas to actual VM areas. This loop might look a bit
452 	// weird, but it is necessary, since creating an actual area involves memory
453 	// allocations, which in turn can change the situation.
454 	bool done;
455 	do {
456 		done = true;
457 
458 		for (AreaTable::Iterator it = sAreaTable.GetIterator();
459 				Area* area = it.Next();) {
460 			if (area->vmArea == NULL) {
461 				_ConvertEarlyArea(area);
462 				done = false;
463 				break;
464 			}
465 		}
466 	} while (!done);
467 
468 	// unmap and free unused pages
469 	if (sFreeAreas != NULL) {
470 		// Just "leak" all but the first of the free areas -- the VM will
471 		// automatically free all unclaimed memory.
472 		sFreeAreas->next = NULL;
473 		sFreeAreaCount = 1;
474 
475 		Area* area = sFreeAreas;
476 		_ConvertEarlyArea(area);
477 		_UnmapFreeChunksEarly(area);
478 	}
479 
480 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
481 			Area* area = it.Next();) {
482 		_UnmapFreeChunksEarly(area);
483 	}
484 
485 	sMaintenanceNeeded = true;
486 		// might not be necessary, but it doesn't hurt
487 
488 	add_debugger_command_etc("slab_area", &_DumpArea,
489 		"Dump information on a given slab area",
490 		"[ -c ] <area>\n"
491 		"Dump information on a given slab area specified by its base "
492 			"address.\n"
493 		"If \"-c\" is given, the chunks of all meta chunks are printed as "
494 			"well.\n", 0);
495 	add_debugger_command_etc("slab_areas", &_DumpAreas,
496 		"List all slab areas",
497 		"\n"
498 		"Lists all slab areas.\n", 0);
499 	add_debugger_command_etc("slab_meta_chunk", &_DumpMetaChunk,
500 		"Dump information on a given slab meta chunk",
501 		"<meta chunk>\n"
502 		"Dump information on a given slab meta chunk specified by its base "
503 			"or object address.\n", 0);
504 	add_debugger_command_etc("slab_meta_chunks", &_DumpMetaChunks,
505 		"List all non-full slab meta chunks",
506 		"[ -c ]\n"
507 		"Lists all non-full slab meta chunks.\n"
508 		"If \"-c\" is given, the chunks of all meta chunks are printed as "
509 			"well.\n", 0);
510 	add_debugger_command_etc("slab_raw_allocations", &_DumpRawAllocations,
511 		"List all raw allocations in slab areas",
512 		"\n"
513 		"Lists all raw allocations in slab areas.\n", 0);
514 }
515 
516 
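/*!	Allocates a single chunk of \c cache->slab_size bytes for \a cache and
	returns its address in \a _pages. The chunk's reference field is set to
	the cache, so the lookup functions below can attribute the memory later.

	Illustrative sketch only -- the real callers live in the object cache
	code, which is not part of this file:
		void* pages;
		if (MemoryManager::Allocate(cache, flags, pages) == B_OK) {
			// ... use the chunk as slab storage ...
			MemoryManager::Free(pages, flags);
		}
*/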
517 /*static*/ status_t
518 MemoryManager::Allocate(ObjectCache* cache, uint32 flags, void*& _pages)
519 {
520 	// TODO: Support CACHE_UNLOCKED_PAGES!
521 
522 	T(Allocate(cache, flags));
523 
524 	size_t chunkSize = cache->slab_size;
525 
526 	TRACE("MemoryManager::Allocate(%p, %#" B_PRIx32 "): chunkSize: %"
527 		B_PRIuSIZE "\n", cache, flags, chunkSize);
528 
529 	MutexLocker locker(sLock);
530 
531 	// allocate a chunk
532 	MetaChunk* metaChunk;
533 	Chunk* chunk;
534 	status_t error = _AllocateChunks(chunkSize, 1, flags, metaChunk, chunk);
535 	if (error != B_OK)
536 		return error;
537 
538 	// map the chunk
539 	Area* area = metaChunk->GetArea();
540 	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
541 
542 	locker.Unlock();
543 	error = _MapChunk(area->vmArea, chunkAddress, chunkSize, 0, flags);
544 	locker.Lock();
545 	if (error != B_OK) {
546 		// something failed -- free the chunk
547 		_FreeChunk(area, metaChunk, chunk, chunkAddress, true, flags);
548 		return error;
549 	}
550 
551 	chunk->reference = (addr_t)cache;
552 	_pages = (void*)chunkAddress;
553 
554 	TRACE("MemoryManager::Allocate() done: %p (meta chunk: %d, chunk %d)\n",
555 		_pages, int(metaChunk - area->metaChunks),
556 		int(chunk - metaChunk->chunks));
557 	return B_OK;
558 }
559 
560 
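/*!	Returns a chunk previously obtained via Allocate(). The area and meta
	chunk are derived from \a pages; the chunk is unmapped and put back on
	its meta chunk's free list.
*/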
561 /*static*/ void
562 MemoryManager::Free(void* pages, uint32 flags)
563 {
564 	TRACE("MemoryManager::Free(%p, %#" B_PRIx32 ")\n", pages, flags);
565 
566 	T(Free(pages, flags));
567 
568 	// get the area and the meta chunk
569 	Area* area = _AreaForAddress((addr_t)pages);
570 	MetaChunk* metaChunk = &area->metaChunks[
571 		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
572 
573 	ASSERT(metaChunk->chunkSize > 0);
574 	ASSERT((addr_t)pages >= metaChunk->chunkBase);
575 	ASSERT(((addr_t)pages % metaChunk->chunkSize) == 0);
576 
577 	// get the chunk
578 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
579 	Chunk* chunk = &metaChunk->chunks[chunkIndex];
580 
581 	ASSERT(chunk->next != NULL);
582 	ASSERT(chunk->next < metaChunk->chunks
583 		|| chunk->next
584 			>= metaChunk->chunks + SLAB_SMALL_CHUNKS_PER_META_CHUNK);
585 
586 	// and free it
587 	MutexLocker locker(sLock);
588 	_FreeChunk(area, metaChunk, chunk, (addr_t)pages, false, flags);
589 }
590 
591 
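/*!	Allocates \a size bytes (rounded up to SLAB_CHUNK_SIZE_SMALL) that aren't
	tied to an object cache. Requests larger than a large chunk, or with
	CACHE_ALIGN_ON_SIZE set, get a dedicated VM area; everything else is
	served from a run of small or medium chunks, with the allocation's
	inclusive end address stored (as an odd value) in the first chunk's
	reference field.
*/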
592 /*static*/ status_t
593 MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
594 {
595 	T(AllocateRaw(size, flags));
596 
597 	size = ROUNDUP(size, SLAB_CHUNK_SIZE_SMALL);
598 
599 	TRACE("MemoryManager::AllocateRaw(%" B_PRIuSIZE ", %#" B_PRIx32 ")\n", size,
600 		flags);
601 
602 	if (size > SLAB_CHUNK_SIZE_LARGE || (flags & CACHE_ALIGN_ON_SIZE) != 0) {
603 		// The requested size is greater than a large chunk, or an aligned
604 		// allocation has been requested -- allocate the memory as its own area.
605 		if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0)
606 			return B_WOULD_BLOCK;
607 
608 		virtual_address_restrictions virtualRestrictions = {};
609 		virtualRestrictions.address_specification
610 			= (flags & CACHE_ALIGN_ON_SIZE) != 0
611 				? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS;
612 		physical_address_restrictions physicalRestrictions = {};
613 		area_id area = create_area_etc(VMAddressSpace::KernelID(),
614 			"slab large raw allocation", size, B_FULL_LOCK,
615 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
616 			((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
617 					? CREATE_AREA_DONT_WAIT : 0)
618 				| CREATE_AREA_DONT_CLEAR,
619 			&virtualRestrictions, &physicalRestrictions, &_pages);
620 		return area >= 0 ? B_OK : area;
621 	}
622 
623 	// determine chunk size (small or medium)
624 	size_t chunkSize = SLAB_CHUNK_SIZE_SMALL;
625 	uint32 chunkCount = size / SLAB_CHUNK_SIZE_SMALL;
626 
627 	if (size % SLAB_CHUNK_SIZE_MEDIUM == 0) {
628 		chunkSize = SLAB_CHUNK_SIZE_MEDIUM;
629 		chunkCount = size / SLAB_CHUNK_SIZE_MEDIUM;
630 	}
631 
632 	MutexLocker locker(sLock);
633 
634 	// allocate the chunks
635 	MetaChunk* metaChunk;
636 	Chunk* chunk;
637 	status_t error = _AllocateChunks(chunkSize, chunkCount, flags, metaChunk,
638 		chunk);
639 	if (error != B_OK)
640 		return error;
641 
642 	// map the chunks
643 	Area* area = metaChunk->GetArea();
644 	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
645 
646 	locker.Unlock();
647 	error = _MapChunk(area->vmArea, chunkAddress, size, 0, flags);
648 	locker.Lock();
649 	if (error != B_OK) {
650 		// something failed -- free the chunks
651 		for (uint32 i = 0; i < chunkCount; i++)
652 			_FreeChunk(area, metaChunk, chunk + i, chunkAddress, true, flags);
653 		return error;
654 	}
655 
656 	chunk->reference = (addr_t)chunkAddress + size - 1;
657 	_pages = (void*)chunkAddress;
658 
659 	TRACE("MemoryManager::AllocateRaw() done: %p (meta chunk: %d, chunk %d)\n",
660 		_pages, int(metaChunk - area->metaChunks),
661 		int(chunk - metaChunk->chunks));
662 	return B_OK;
663 }
664 
665 
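/*!	Counterpart to Allocate() and AllocateRaw(): if \a pages belongs to an
	object cache, that cache is returned and nothing is freed here
	(presumably the caller hands the memory back to the cache). If it is a
	raw allocation -- chunk backed or a dedicated VM area -- the memory is
	freed and NULL is returned.

	Illustrative sketch only (hypothetical caller):
		ObjectCache* cache = MemoryManager::FreeRawOrReturnCache(pages, 0);
		if (cache != NULL) {
			// the memory belongs to this cache -- return it there instead
		}
*/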
666 /*static*/ ObjectCache*
667 MemoryManager::FreeRawOrReturnCache(void* pages, uint32 flags)
668 {
669 	TRACE("MemoryManager::FreeRawOrReturnCache(%p, %#" B_PRIx32 ")\n", pages,
670 		flags);
671 
672 	T(FreeRawOrReturnCache(pages, flags));
673 
674 	// get the area
675 	addr_t areaBase = _AreaBaseAddressForAddress((addr_t)pages);
676 
677 	ReadLocker readLocker(sAreaTableLock);
678 	Area* area = sAreaTable.Lookup(areaBase);
679 	readLocker.Unlock();
680 
681 	if (area == NULL) {
682 		// Probably a large allocation. Look up the VM area.
683 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
684 		addressSpace->ReadLock();
685 		VMArea* area = addressSpace->LookupArea((addr_t)pages);
686 		addressSpace->ReadUnlock();
687 
688 		if (area != NULL && (addr_t)pages == area->Base())
689 			delete_area(area->id);
690 		else
691 			panic("freeing unknown block %p from area %p", pages, area);
692 
693 		return NULL;
694 	}
695 
696 	MetaChunk* metaChunk = &area->metaChunks[
697 		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
698 
699 	// get the chunk
700 	ASSERT(metaChunk->chunkSize > 0);
701 	ASSERT((addr_t)pages >= metaChunk->chunkBase);
702 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
703 	Chunk* chunk = &metaChunk->chunks[chunkIndex];
704 
705 	addr_t reference = chunk->reference;
706 	if ((reference & 1) == 0)
707 		return (ObjectCache*)reference;
708 
709 	// Seems we have a raw chunk allocation.
710 	ASSERT((addr_t)pages == _ChunkAddress(metaChunk, chunk));
711 	ASSERT(reference > (addr_t)pages);
712 	ASSERT(reference <= areaBase + SLAB_AREA_SIZE - 1);
713 	size_t size = reference - (addr_t)pages + 1;
714 	ASSERT((size % SLAB_CHUNK_SIZE_SMALL) == 0);
715 
716 	// unmap the chunks
717 	_UnmapChunk(area->vmArea, (addr_t)pages, size, flags);
718 
719 	// and free them
720 	MutexLocker locker(sLock);
721 	uint32 chunkCount = size / metaChunk->chunkSize;
722 	for (uint32 i = 0; i < chunkCount; i++)
723 		_FreeChunk(area, metaChunk, chunk + i, (addr_t)pages, true, flags);
724 
725 	return NULL;
726 }
727 
728 
729 /*static*/ size_t
730 MemoryManager::AcceptableChunkSize(size_t size)
731 {
732 	if (size <= SLAB_CHUNK_SIZE_SMALL)
733 		return SLAB_CHUNK_SIZE_SMALL;
734 	if (size <= SLAB_CHUNK_SIZE_MEDIUM)
735 		return SLAB_CHUNK_SIZE_MEDIUM;
736 	return SLAB_CHUNK_SIZE_LARGE;
737 }
738 
739 
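/*!	Looks up \a address and reports what it belongs to: for object cache
	chunks the cache and its object size, for raw chunk allocations NULL and
	the allocation size, and for dedicated VM areas NULL and the area size
	(or 0, if the address is unknown).
*/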
740 /*static*/ ObjectCache*
741 MemoryManager::GetAllocationInfo(void* address, size_t& _size)
742 {
743 	// get the area
744 	ReadLocker readLocker(sAreaTableLock);
745 	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
746 	readLocker.Unlock();
747 
748 	if (area == NULL) {
749 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
750 		addressSpace->ReadLock();
751 		VMArea* area = addressSpace->LookupArea((addr_t)address);
752 		if (area != NULL && (addr_t)address == area->Base())
753 			_size = area->Size();
754 		else
755 			_size = 0;
756 		addressSpace->ReadUnlock();
757 
758 		return NULL;
759 	}
760 
761 	MetaChunk* metaChunk = &area->metaChunks[
762 		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
763 
764 	// get the chunk
765 	ASSERT(metaChunk->chunkSize > 0);
766 	ASSERT((addr_t)address >= metaChunk->chunkBase);
767 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
768 
769 	addr_t reference = metaChunk->chunks[chunkIndex].reference;
770 	if ((reference & 1) == 0) {
771 		ObjectCache* cache = (ObjectCache*)reference;
772 		_size = cache->object_size;
773 		return cache;
774 	}
775 
776 	_size = reference - (addr_t)address + 1;
777 	return NULL;
778 }
779 
780 
781 /*static*/ ObjectCache*
782 MemoryManager::CacheForAddress(void* address)
783 {
784 	// get the area
785 	ReadLocker readLocker(sAreaTableLock);
786 	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
787 	readLocker.Unlock();
788 
789 	if (area == NULL)
790 		return NULL;
791 
792 	MetaChunk* metaChunk = &area->metaChunks[
793 		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
794 
795 	// get the chunk
796 	ASSERT(metaChunk->chunkSize > 0);
797 	ASSERT((addr_t)address >= metaChunk->chunkBase);
798 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
799 
800 	addr_t reference = metaChunk->chunks[chunkIndex].reference;
801 	return (reference & 1) == 0 ? (ObjectCache*)reference : NULL;
802 }
803 
804 
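/*!	Keeps the reserve of free areas at one or two: allocates an area when the
	reserve is empty and frees surplus areas when there are more than two.
	Runs whenever sMaintenanceNeeded has been set (via _RequestMaintenance());
	presumably it is invoked from the slab maintenance code outside this file.
*/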
805 /*static*/ void
806 MemoryManager::PerformMaintenance()
807 {
808 	MutexLocker locker(sLock);
809 
810 	while (sMaintenanceNeeded) {
811 		sMaintenanceNeeded = false;
812 
813 		// We want to keep one or two areas as a reserve. This way we have at
814 		// least one area to use in situations where we aren't allowed to allocate
815 		// one, and we also avoid ping-pong effects.
816 		if (sFreeAreaCount > 0 && sFreeAreaCount <= 2)
817 			return;
818 
819 		if (sFreeAreaCount == 0) {
820 			// try to allocate one
821 			Area* area;
822 			if (_AllocateArea(0, area) != B_OK)
823 				return;
824 
825 			_push(sFreeAreas, area);
826 			if (++sFreeAreaCount > 2)
827 				sMaintenanceNeeded = true;
828 		} else {
829 			// free until we only have two free ones
830 			while (sFreeAreaCount > 2) {
831 				Area* area = _pop(sFreeAreas);
				sFreeAreaCount--;
832 				_FreeArea(area, true, 0);
833 			}
834 
835 			if (sFreeAreaCount == 0)
836 				sMaintenanceNeeded = true;
837 		}
838 	}
839 }
840 
841 
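/*!	Allocates \a chunkCount chunks of \a chunkSize bytes: first from the
	matching partial meta chunk list, then from the free area reserve, and
	finally by allocating a brand new area. Concurrent area allocations are
	serialized via sAllocationEntryCanWait/sAllocationEntryDontWait. Called
	with sLock held; the lock may be dropped while waiting or allocating.
*/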
842 /*static*/ status_t
843 MemoryManager::_AllocateChunks(size_t chunkSize, uint32 chunkCount,
844 	uint32 flags, MetaChunk*& _metaChunk, Chunk*& _chunk)
845 {
846 	MetaChunkList* metaChunkList = NULL;
847 	if (chunkSize == SLAB_CHUNK_SIZE_SMALL) {
848 		metaChunkList = &sPartialMetaChunksSmall;
849 	} else if (chunkSize == SLAB_CHUNK_SIZE_MEDIUM) {
850 		metaChunkList = &sPartialMetaChunksMedium;
851 	} else if (chunkSize != SLAB_CHUNK_SIZE_LARGE) {
852 		panic("MemoryManager::_AllocateChunks(): Unsupported chunk size: %"
853 			B_PRIuSIZE, chunkSize);
854 		return B_BAD_VALUE;
855 	}
856 
857 	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk))
858 		return B_OK;
859 
860 	if (sFreeAreas != NULL) {
861 		_AddArea(_pop(sFreeAreas));
862 		sFreeAreaCount--;
863 		_RequestMaintenance();
864 
865 		_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
866 		return B_OK;
867 	}
868 
869 	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
870 		// We can't create an area under this restriction, and we must not wait
871 		// for someone else to do it either.
872 		return B_WOULD_BLOCK;
873 	}
874 
875 	// We need to allocate a new area. If someone else is already trying to do
876 	// the same, wait for them to finish first.
877 	while (true) {
878 		AllocationEntry* allocationEntry = NULL;
879 		if (sAllocationEntryDontWait != NULL) {
880 			allocationEntry = sAllocationEntryDontWait;
881 		} else if (sAllocationEntryCanWait != NULL
882 				&& (flags & CACHE_DONT_WAIT_FOR_MEMORY) == 0) {
883 			allocationEntry = sAllocationEntryCanWait;
884 		} else
885 			break;
886 
887 		ConditionVariableEntry entry;
888 		allocationEntry->condition.Add(&entry);
889 
890 		mutex_unlock(&sLock);
891 		entry.Wait();
892 		mutex_lock(&sLock);
893 
894 		if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
895 				_chunk)) {
896 			return B_OK;
897 		}
898 	}
899 
900 	// prepare the allocation entry others can wait on
901 	AllocationEntry*& allocationEntry
902 		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
903 			? sAllocationEntryDontWait : sAllocationEntryCanWait;
904 
905 	AllocationEntry myResizeEntry;
906 	allocationEntry = &myResizeEntry;
907 	allocationEntry->condition.Init(metaChunkList, "wait for slab area");
908 	allocationEntry->thread = find_thread(NULL);
909 
910 	Area* area;
911 	status_t error = _AllocateArea(flags, area);
912 
913 	allocationEntry->condition.NotifyAll();
914 	allocationEntry = NULL;
915 
916 	if (error != B_OK)
917 		return error;
918 
919 	// Try again to get the chunks -- something might have been freed in the
920 	// meantime. In that case we can free the area we just allocated.
921 	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk)) {
922 		_FreeArea(area, true, flags);
923 		return B_OK;
924 	}
925 
926 	_AddArea(area);
927 	_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
928 	return B_OK;
929 }
930 
931 
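/*!	Tries to satisfy an allocation of \a chunkCount contiguous chunks of
	\a chunkSize bytes from an existing partial or free meta chunk. No new
	area is allocated here; returns whether suitable chunks were found.
*/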
932 /*static*/ bool
933 MemoryManager::_GetChunks(MetaChunkList* metaChunkList, size_t chunkSize,
934 	uint32 chunkCount, MetaChunk*& _metaChunk, Chunk*& _chunk)
935 {
936 	// the common and less complicated special case
937 	if (chunkCount == 1)
938 		return _GetChunk(metaChunkList, chunkSize, _metaChunk, _chunk);
939 
940 	ASSERT(metaChunkList != NULL);
941 
942 	// Iterate through the partial meta chunk list and try to find a free
943 	// range that is large enough.
944 	MetaChunk* metaChunk = NULL;
945 	for (MetaChunkList::Iterator it = metaChunkList->GetIterator();
946 			(metaChunk = it.Next()) != NULL;) {
947 		if (metaChunk->firstFreeChunk + chunkCount - 1
948 				<= metaChunk->lastFreeChunk) {
949 			break;
950 		}
951 	}
952 
953 	if (metaChunk == NULL) {
954 		// try to get a free meta chunk
955 		if ((SLAB_CHUNK_SIZE_LARGE - SLAB_AREA_STRUCT_OFFSET - kAreaAdminSize)
956 				/ chunkSize >= chunkCount) {
957 			metaChunk = sFreeShortMetaChunks.RemoveHead();
958 		}
959 		if (metaChunk == NULL)
960 			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
961 
962 		if (metaChunk == NULL)
963 			return false;
964 
965 		metaChunkList->Add(metaChunk);
966 		metaChunk->GetArea()->usedMetaChunkCount++;
967 		_PrepareMetaChunk(metaChunk, chunkSize);
968 
969 		T(AllocateMetaChunk(metaChunk));
970 	}
971 
972 	// pull the chunks out of the free list
973 	Chunk* firstChunk = metaChunk->chunks + metaChunk->firstFreeChunk;
974 	Chunk* lastChunk = firstChunk + (chunkCount - 1);
975 	Chunk** chunkPointer = &metaChunk->freeChunks;
976 	uint32 remainingChunks = chunkCount;
977 	while (remainingChunks > 0) {
978 		ASSERT_PRINT(chunkPointer, "remaining: %" B_PRIu32 "/%" B_PRIu32
979 			", area: %p, meta chunk: %" B_PRIdSSIZE "\n", remainingChunks,
980 			chunkCount, metaChunk->GetArea(),
981 			metaChunk - metaChunk->GetArea()->metaChunks);
982 		Chunk* chunk = *chunkPointer;
983 		if (chunk >= firstChunk && chunk <= lastChunk) {
984 			*chunkPointer = chunk->next;
985 			chunk->reference = 1;
986 			remainingChunks--;
987 		} else
988 			chunkPointer = &chunk->next;
989 	}
990 
991 	// allocate the chunks
992 	metaChunk->usedChunkCount += chunkCount;
993 	if (metaChunk->usedChunkCount == metaChunk->chunkCount) {
994 		// meta chunk is full now -- remove it from its list
995 		if (metaChunkList != NULL)
996 			metaChunkList->Remove(metaChunk);
997 	}
998 
999 	// update the free range
1000 	metaChunk->firstFreeChunk += chunkCount;
1001 
1002 	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1003 
1004 	_chunk = firstChunk;
1005 	_metaChunk = metaChunk;
1006 
1007 	T(AllocateChunks(chunkSize, chunkCount, metaChunk, firstChunk));
1008 
1009 	return true;
1010 }
1011 
1012 
1013 /*static*/ bool
1014 MemoryManager::_GetChunk(MetaChunkList* metaChunkList, size_t chunkSize,
1015 	MetaChunk*& _metaChunk, Chunk*& _chunk)
1016 {
1017 	MetaChunk* metaChunk = metaChunkList != NULL
1018 		? metaChunkList->Head() : NULL;
1019 	if (metaChunk == NULL) {
1020 		// no partial meta chunk -- maybe there's a free one
1021 		if (chunkSize == SLAB_CHUNK_SIZE_LARGE) {
1022 			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1023 		} else {
1024 			metaChunk = sFreeShortMetaChunks.RemoveHead();
1025 			if (metaChunk == NULL)
1026 				metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1027 			if (metaChunk != NULL)
1028 				metaChunkList->Add(metaChunk);
1029 		}
1030 
1031 		if (metaChunk == NULL)
1032 			return false;
1033 
1034 		metaChunk->GetArea()->usedMetaChunkCount++;
1035 		_PrepareMetaChunk(metaChunk, chunkSize);
1036 
1037 		T(AllocateMetaChunk(metaChunk));
1038 	}
1039 
1040 	// allocate the chunk
1041 	if (++metaChunk->usedChunkCount == metaChunk->chunkCount) {
1042 		// meta chunk is full now -- remove it from its list
1043 		if (metaChunkList != NULL)
1044 			metaChunkList->Remove(metaChunk);
1045 	}
1046 
1047 	_chunk = _pop(metaChunk->freeChunks);
1048 	_metaChunk = metaChunk;
1049 
1050 	_chunk->reference = 1;
1051 
1052 	// update the free range
1053 	uint32 chunkIndex = _chunk - metaChunk->chunks;
1054 	if (chunkIndex >= metaChunk->firstFreeChunk
1055 			&& chunkIndex <= metaChunk->lastFreeChunk) {
1056 		if (chunkIndex - metaChunk->firstFreeChunk
1057 				<= metaChunk->lastFreeChunk - chunkIndex) {
1058 			metaChunk->firstFreeChunk = chunkIndex + 1;
1059 		} else
1060 			metaChunk->lastFreeChunk = chunkIndex - 1;
1061 	}
1062 
1063 	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1064 
1065 	T(AllocateChunk(chunkSize, metaChunk, _chunk));
1066 
1067 	return true;
1068 }
1069 
1070 
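/*!	Releases a single chunk: unmaps it (unless \a alreadyUnmapped), puts it
	back on the meta chunk's free list, and updates the partial/free meta
	chunk lists. If the meta chunk, or even the whole area, becomes unused,
	it is freed as well. Called with sLock held; the lock is dropped while
	unmapping.
*/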
1071 /*static*/ void
1072 MemoryManager::_FreeChunk(Area* area, MetaChunk* metaChunk, Chunk* chunk,
1073 	addr_t chunkAddress, bool alreadyUnmapped, uint32 flags)
1074 {
1075 	// unmap the chunk
1076 	if (!alreadyUnmapped) {
1077 		mutex_unlock(&sLock);
1078 		_UnmapChunk(area->vmArea, chunkAddress, metaChunk->chunkSize, flags);
1079 		mutex_lock(&sLock);
1080 	}
1081 
1082 	T(FreeChunk(metaChunk, chunk));
1083 
1084 	_push(metaChunk->freeChunks, chunk);
1085 
1086 	uint32 chunkIndex = chunk - metaChunk->chunks;
1087 
1088 	// free the meta chunk, if it is unused now
1089 	PARANOID_CHECKS_ONLY(bool areaDeleted = false;)
1090 	ASSERT(metaChunk->usedChunkCount > 0);
1091 	if (--metaChunk->usedChunkCount == 0) {
1092 		T(FreeMetaChunk(metaChunk));
1093 
1094 		// remove from partial meta chunk list
1095 		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
1096 			sPartialMetaChunksSmall.Remove(metaChunk);
1097 		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
1098 			sPartialMetaChunksMedium.Remove(metaChunk);
1099 
1100 		// mark empty
1101 		metaChunk->chunkSize = 0;
1102 
1103 		// add to free list
1104 		if (metaChunk == area->metaChunks)
1105 			sFreeShortMetaChunks.Add(metaChunk, false);
1106 		else
1107 			sFreeCompleteMetaChunks.Add(metaChunk, false);
1108 
1109 		// free the area, if it is unused now
1110 		ASSERT(area->usedMetaChunkCount > 0);
1111 		if (--area->usedMetaChunkCount == 0) {
1112 			_FreeArea(area, false, flags);
1113 			PARANOID_CHECKS_ONLY(areaDeleted = true;)
1114 		}
1115 	} else if (metaChunk->usedChunkCount == metaChunk->chunkCount - 1) {
1116 		// the meta chunk was full before -- add it back to its partial chunk
1117 		// list
1118 		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
1119 			sPartialMetaChunksSmall.Add(metaChunk, false);
1120 		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
1121 			sPartialMetaChunksMedium.Add(metaChunk, false);
1122 
1123 		metaChunk->firstFreeChunk = chunkIndex;
1124 		metaChunk->lastFreeChunk = chunkIndex;
1125 	} else {
1126 		// extend the free range, if the freed chunk adjoins it
1127 		if (chunkIndex + 1 == metaChunk->firstFreeChunk) {
1128 			uint32 firstFree = chunkIndex;
1129 			for (; firstFree > 0; firstFree--) {
1130 				Chunk* previousChunk = &metaChunk->chunks[firstFree - 1];
1131 				if (!_IsChunkFree(metaChunk, previousChunk))
1132 					break;
1133 			}
1134 			metaChunk->firstFreeChunk = firstFree;
1135 		} else if (chunkIndex == (uint32)metaChunk->lastFreeChunk + 1) {
1136 			uint32 lastFree = chunkIndex;
1137 			for (; lastFree + 1 < metaChunk->chunkCount; lastFree++) {
1138 				Chunk* nextChunk = &metaChunk->chunks[lastFree + 1];
1139 				if (!_IsChunkFree(metaChunk, nextChunk))
1140 					break;
1141 			}
1142 			metaChunk->lastFreeChunk = lastFree;
1143 		}
1144 	}
1145 
1146 	PARANOID_CHECKS_ONLY(
1147 		if (!areaDeleted)
1148 			_CheckMetaChunk(metaChunk);
1149 	)
1150 }
1151 
1152 
1153 /*static*/ void
1154 MemoryManager::_PrepareMetaChunk(MetaChunk* metaChunk, size_t chunkSize)
1155 {
1156 	Area* area = metaChunk->GetArea();
1157 
1158 	if (metaChunk == area->metaChunks) {
1159 		// the first meta chunk is shorter -- the area header is at its start
1160 		size_t unusableSize = ROUNDUP(SLAB_AREA_STRUCT_OFFSET + kAreaAdminSize,
1161 			chunkSize);
1162 		metaChunk->chunkBase = area->BaseAddress() + unusableSize;
1163 		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE - unusableSize;
1164 	}
1165 
1166 	metaChunk->chunkSize = chunkSize;
1167 	metaChunk->chunkCount = metaChunk->totalSize / chunkSize;
1168 	metaChunk->usedChunkCount = 0;
1169 
1170 	metaChunk->freeChunks = NULL;
1171 	for (int32 i = metaChunk->chunkCount - 1; i >= 0; i--)
1172 		_push(metaChunk->freeChunks, metaChunk->chunks + i);
1173 
1174 	metaChunk->firstFreeChunk = 0;
1175 	metaChunk->lastFreeChunk = metaChunk->chunkCount - 1;
1176 
1177 	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1178 }
1179 
1180 
1181 /*static*/ void
1182 MemoryManager::_AddArea(Area* area)
1183 {
1184 	T(AddArea(area));
1185 
1186 	// add the area to the hash table
1187 	WriteLocker writeLocker(sAreaTableLock);
1188 	sAreaTable.InsertUnchecked(area);
1189 	writeLocker.Unlock();
1190 
1191 	// add the area's meta chunks to the free lists
1192 	sFreeShortMetaChunks.Add(&area->metaChunks[0]);
1193 	for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++)
1194 		sFreeCompleteMetaChunks.Add(&area->metaChunks[i]);
1195 }
1196 
1197 
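/*!	Allocates a new slab area. During early boot (sKernelArgs != NULL) the
	memory comes from vm_allocate_early() and is fully mapped; later a null
	area is created and only the administrative part is mapped. Must not be
	called with CACHE_DONT_LOCK_KERNEL_SPACE; sLock is dropped for the
	duration of the allocation.
*/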
1198 /*static*/ status_t
1199 MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
1200 {
1201 	TRACE("MemoryManager::_AllocateArea(%#" B_PRIx32 ")\n", flags);
1202 
1203 	ASSERT((flags & CACHE_DONT_LOCK_KERNEL_SPACE) == 0);
1204 
1205 	mutex_unlock(&sLock);
1206 
1207 	size_t pagesNeededToMap = 0;
1208 	void* areaBase;
1209 	Area* area;
1210 	VMArea* vmArea = NULL;
1211 
1212 	if (sKernelArgs == NULL) {
1213 		// create an area
1214 		uint32 areaCreationFlags = (flags & CACHE_PRIORITY_VIP) != 0
1215 			? CREATE_AREA_PRIORITY_VIP : 0;
1216 		area_id areaID = vm_create_null_area(B_SYSTEM_TEAM, kSlabAreaName,
1217 			&areaBase, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
1218 			areaCreationFlags);
1219 		if (areaID < 0) {
1220 			mutex_lock(&sLock);
1221 			return areaID;
1222 		}
1223 
1224 		area = _AreaForAddress((addr_t)areaBase);
1225 
1226 		// map the memory for the administrative structure
1227 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1228 		VMTranslationMap* translationMap = addressSpace->TranslationMap();
1229 
1230 		pagesNeededToMap = translationMap->MaxPagesNeededToMap(
1231 			(addr_t)area, (addr_t)areaBase + SLAB_AREA_SIZE - 1);
1232 
1233 		vmArea = VMAreaHash::Lookup(areaID);
1234 		status_t error = _MapChunk(vmArea, (addr_t)area, kAreaAdminSize,
1235 			pagesNeededToMap, flags);
1236 		if (error != B_OK) {
1237 			delete_area(areaID);
1238 			mutex_lock(&sLock);
1239 			return error;
1240 		}
1241 
1242 		dprintf("slab memory manager: created area %p (%" B_PRId32 ")\n", area,
1243 			areaID);
1244 	} else {
1245 		// no areas yet -- allocate raw memory
1246 		areaBase = (void*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
1247 			SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
1248 			SLAB_AREA_SIZE);
1249 		if (areaBase == NULL) {
1250 			mutex_lock(&sLock);
1251 			return B_NO_MEMORY;
1252 		}
1253 		area = _AreaForAddress((addr_t)areaBase);
1254 
1255 		TRACE("MemoryManager::_AllocateArea(): allocated early area %p\n",
1256 			area);
1257 	}
1258 
1259 	// init the area structure
1260 	area->vmArea = vmArea;
1261 	area->reserved_memory_for_mapping = pagesNeededToMap * B_PAGE_SIZE;
1262 	area->usedMetaChunkCount = 0;
1263 	area->fullyMapped = vmArea == NULL;
1264 
1265 	// init the meta chunks
1266 	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1267 		MetaChunk* metaChunk = area->metaChunks + i;
1268 		metaChunk->chunkSize = 0;
1269 		metaChunk->chunkBase = (addr_t)areaBase + i * SLAB_CHUNK_SIZE_LARGE;
1270 		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE;
1271 			// Note: chunkBase and totalSize aren't correct for the first
1272 			// meta chunk. They will be set in _PrepareMetaChunk().
1273 		metaChunk->chunkCount = 0;
1274 		metaChunk->usedChunkCount = 0;
1275 		metaChunk->freeChunks = NULL;
1276 	}
1277 
1278 	mutex_lock(&sLock);
1279 	_area = area;
1280 
1281 	T(AllocateArea(area, flags));
1282 
1283 	return B_OK;
1284 }
1285 
1286 
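/*!	Releases an area whose meta chunks are all unused: unless \a areaRemoved
	indicates this has happened already, the area is removed from the free
	lists and the hash table. It is then either kept as part of the one-or-
	two area reserve, or its VM area is deleted and the memory reserved for
	mapping is unreserved.
*/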
1287 /*static*/ void
1288 MemoryManager::_FreeArea(Area* area, bool areaRemoved, uint32 flags)
1289 {
1290 	TRACE("MemoryManager::_FreeArea(%p, %#" B_PRIx32 ")\n", area, flags);
1291 
1292 	T(FreeArea(area, areaRemoved, flags));
1293 
1294 	ASSERT(area->usedMetaChunkCount == 0);
1295 
1296 	if (!areaRemoved) {
1297 		// remove the area's meta chunks from the free lists
1298 		ASSERT(area->metaChunks[0].usedChunkCount == 0);
1299 		sFreeShortMetaChunks.Remove(&area->metaChunks[0]);
1300 
1301 		for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1302 			ASSERT(area->metaChunks[i].usedChunkCount == 0);
1303 			sFreeCompleteMetaChunks.Remove(&area->metaChunks[i]);
1304 		}
1305 
1306 		// remove the area from the hash table
1307 		WriteLocker writeLocker(sAreaTableLock);
1308 		sAreaTable.RemoveUnchecked(area);
1309 		writeLocker.Unlock();
1310 	}
1311 
1312 	// We want to keep one or two free areas as a reserve.
1313 	if (sFreeAreaCount <= 1) {
1314 		_push(sFreeAreas, area);
1315 		sFreeAreaCount++;
1316 		return;
1317 	}
1318 
1319 	if (area->vmArea == NULL || (flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
1320 		// This is either early in the boot process or we aren't allowed to
1321 		// delete the area now.
1322 		_push(sFreeAreas, area);
1323 		sFreeAreaCount++;
1324 		_RequestMaintenance();
1325 		return;
1326 	}
1327 
1328 	mutex_unlock(&sLock);
1329 
1330 	dprintf("slab memory manager: deleting area %p (%" B_PRId32 ")\n", area,
1331 		area->vmArea->id);
1332 
1333 	size_t memoryToUnreserve = area->reserved_memory_for_mapping;
1334 	delete_area(area->vmArea->id);
1335 	vm_unreserve_memory(memoryToUnreserve);
1336 
1337 	mutex_lock(&sLock);
1338 }
1339 
1340 
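/*!	Backs the range [\a address, \a address + \a size) of \a vmArea with
	wired pages: memory and pages are reserved (honoring
	CACHE_DONT_WAIT_FOR_MEMORY and CACHE_PRIORITY_VIP), the pages are inserted
	into the area's cache, and then mapped. A NULL \a vmArea denotes an early
	area that is fully mapped already.
*/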
1341 /*static*/ status_t
1342 MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
1343 	size_t reserveAdditionalMemory, uint32 flags)
1344 {
1345 	TRACE("MemoryManager::_MapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
1346 		")\n", vmArea, address, size);
1347 
1348 	T(Map(address, size, flags));
1349 
1350 	if (vmArea == NULL) {
1351 		// everything is mapped anyway
1352 		return B_OK;
1353 	}
1354 
1355 	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1356 	VMTranslationMap* translationMap = addressSpace->TranslationMap();
1357 
1358 	// reserve memory for the chunk
1359 	int priority = (flags & CACHE_PRIORITY_VIP) != 0
1360 		? VM_PRIORITY_VIP : VM_PRIORITY_SYSTEM;
1361 	size_t reservedMemory = size + reserveAdditionalMemory;
1362 	status_t error = vm_try_reserve_memory(size, priority,
1363 		(flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? 0 : 1000000);
1364 	if (error != B_OK)
1365 		return error;
1366 
1367 	// reserve the pages we need now
1368 	size_t reservedPages = size / B_PAGE_SIZE
1369 		+ translationMap->MaxPagesNeededToMap(address, address + size - 1);
1370 	vm_page_reservation reservation;
1371 	if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0) {
1372 		if (!vm_page_try_reserve_pages(&reservation, reservedPages, priority)) {
1373 			vm_unreserve_memory(reservedMemory);
1374 			return B_WOULD_BLOCK;
1375 		}
1376 	} else
1377 		vm_page_reserve_pages(&reservation, reservedPages, priority);
1378 
1379 	VMCache* cache = vm_area_get_locked_cache(vmArea);
1380 
1381 	// map the pages
1382 	translationMap->Lock();
1383 
1384 	addr_t areaOffset = address - vmArea->Base();
1385 	addr_t endAreaOffset = areaOffset + size;
1386 	for (size_t offset = areaOffset; offset < endAreaOffset;
1387 			offset += B_PAGE_SIZE) {
1388 		vm_page* page = vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
1389 		cache->InsertPage(page, offset);
1390 
1391 		page->IncrementWiredCount();
1392 		atomic_add(&gMappedPagesCount, 1);
1393 		DEBUG_PAGE_ACCESS_END(page);
1394 
1395 		translationMap->Map(vmArea->Base() + offset,
1396 			page->physical_page_number * B_PAGE_SIZE,
1397 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
1398 			vmArea->MemoryType(), &reservation);
1399 	}
1400 
1401 	translationMap->Unlock();
1402 
1403 	cache->ReleaseRefAndUnlock();
1404 
1405 	vm_page_unreserve_pages(&reservation);
1406 
1407 	return B_OK;
1408 }
1409 
1410 
1411 /*static*/ status_t
1412 MemoryManager::_UnmapChunk(VMArea* vmArea, addr_t address, size_t size,
1413 	uint32 flags)
1414 {
1415 	T(Unmap(address, size, flags));
1416 
1417 	if (vmArea == NULL)
1418 		return B_ERROR;
1419 
1420 	TRACE("MemoryManager::_UnmapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
1421 		")\n", vmArea, address, size);
1422 
1423 	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1424 	VMTranslationMap* translationMap = addressSpace->TranslationMap();
1425 	VMCache* cache = vm_area_get_locked_cache(vmArea);
1426 
1427 	// unmap the pages
1428 	translationMap->Lock();
1429 	translationMap->Unmap(address, address + size - 1);
1430 	atomic_add(&gMappedPagesCount, -(size / B_PAGE_SIZE));
1431 	translationMap->Unlock();
1432 
1433 	// free the pages
1434 	addr_t areaPageOffset = (address - vmArea->Base()) / B_PAGE_SIZE;
1435 	addr_t areaPageEndOffset = areaPageOffset + size / B_PAGE_SIZE;
1436 	VMCachePagesTree::Iterator it = cache->pages.GetIterator(
1437 		areaPageOffset, true, true);
1438 	while (vm_page* page = it.Next()) {
1439 		if (page->cache_offset >= areaPageEndOffset)
1440 			break;
1441 
1442 		DEBUG_PAGE_ACCESS_START(page);
1443 
1444 		page->DecrementWiredCount();
1445 
1446 		cache->RemovePage(page);
1447 			// the iterator is remove-safe
1448 		vm_page_free(cache, page);
1449 	}
1450 
1451 	cache->ReleaseRefAndUnlock();
1452 
1453 	vm_unreserve_memory(size);
1454 
1455 	return B_OK;
1456 }
1457 
1458 
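/*!	For an area that was fully mapped during early boot: unmaps everything
	that isn't actually in use -- the space before the Area structure,
	completely free meta chunks, and the free chunks (plus any unused gap
	after the admin block) of used meta chunks -- and clears fullyMapped.
*/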
1459 /*static*/ void
1460 MemoryManager::_UnmapFreeChunksEarly(Area* area)
1461 {
1462 	if (!area->fullyMapped)
1463 		return;
1464 
1465 	TRACE("MemoryManager::_UnmapFreeChunksEarly(%p)\n", area);
1466 
1467 	// unmap the space before the Area structure
1468 	#if SLAB_AREA_STRUCT_OFFSET > 0
1469 		_UnmapChunk(area->vmArea, area->BaseAddress(), SLAB_AREA_STRUCT_OFFSET,
1470 			0);
1471 	#endif
1472 
1473 	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1474 		MetaChunk* metaChunk = area->metaChunks + i;
1475 		if (metaChunk->chunkSize == 0) {
1476 			// meta chunk is free -- unmap it completely
1477 			if (i == 0) {
1478 				_UnmapChunk(area->vmArea, (addr_t)area + kAreaAdminSize,
1479 					SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize, 0);
1480 			} else {
1481 				_UnmapChunk(area->vmArea,
1482 					area->BaseAddress() + i * SLAB_CHUNK_SIZE_LARGE,
1483 					SLAB_CHUNK_SIZE_LARGE, 0);
1484 			}
1485 		} else {
1486 			// unmap free chunks
1487 			for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
1488 					chunk = chunk->next) {
1489 				_UnmapChunk(area->vmArea, _ChunkAddress(metaChunk, chunk),
1490 					metaChunk->chunkSize, 0);
1491 			}
1492 
1493 			// The first meta chunk might have space before its first chunk.
1494 			if (i == 0) {
1495 				addr_t unusedStart = (addr_t)area + kAreaAdminSize;
1496 				if (unusedStart < metaChunk->chunkBase) {
1497 					_UnmapChunk(area->vmArea, unusedStart,
1498 						metaChunk->chunkBase - unusedStart, 0);
1499 				}
1500 			}
1501 		}
1502 	}
1503 
1504 	area->fullyMapped = false;
1505 }
1506 
1507 
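/*!	Turns an early, vm_allocate_early()-backed slab area into a real VM area
	by creating a B_ALREADY_WIRED area over its address range and storing the
	resulting VMArea in area->vmArea.
*/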
1508 /*static*/ void
1509 MemoryManager::_ConvertEarlyArea(Area* area)
1510 {
1511 	void* address = (void*)area->BaseAddress();
1512 	area_id areaID = create_area(kSlabAreaName, &address, B_EXACT_ADDRESS,
1513 		SLAB_AREA_SIZE, B_ALREADY_WIRED,
1514 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1515 	if (areaID < 0)
1516 		panic("out of memory");
1517 
1518 	area->vmArea = VMAreaHash::Lookup(areaID);
1519 }
1520 
1521 
1522 /*static*/ void
1523 MemoryManager::_RequestMaintenance()
1524 {
1525 	if ((sFreeAreaCount > 0 && sFreeAreaCount <= 2) || sMaintenanceNeeded)
1526 		return;
1527 
1528 	sMaintenanceNeeded = true;
1529 	request_memory_manager_maintenance();
1530 }
1531 
1532 
1533 /*static*/ bool
1534 MemoryManager::_IsChunkInFreeList(const MetaChunk* metaChunk,
1535 	const Chunk* chunk)
1536 {
1537 	Chunk* freeChunk = metaChunk->freeChunks;
1538 	while (freeChunk != NULL) {
1539 		if (freeChunk == chunk)
1540 			return true;
1541 		freeChunk = freeChunk->next;
1542 	}
1543 
1544 	return false;
1545 }
1546 
1547 
1548 #if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
1549 
1550 /*static*/ void
1551 MemoryManager::_CheckMetaChunk(MetaChunk* metaChunk)
1552 {
1553 	Area* area = metaChunk->GetArea();
1554 	int32 metaChunkIndex = metaChunk - area->metaChunks;
1555 	if (metaChunkIndex < 0 || metaChunkIndex >= SLAB_META_CHUNKS_PER_AREA) {
1556 		panic("invalid meta chunk %p!", metaChunk);
1557 		return;
1558 	}
1559 
1560 	switch (metaChunk->chunkSize) {
1561 		case 0:
1562 			// unused
1563 			return;
1564 		case SLAB_CHUNK_SIZE_SMALL:
1565 		case SLAB_CHUNK_SIZE_MEDIUM:
1566 		case SLAB_CHUNK_SIZE_LARGE:
1567 			break;
1568 		default:
1569 			panic("meta chunk %p has invalid chunk size: %" B_PRIuSIZE,
1570 				metaChunk, metaChunk->chunkSize);
1571 			return;
1572 	}
1573 
1574 	if (metaChunk->totalSize > SLAB_CHUNK_SIZE_LARGE) {
1575 		panic("meta chunk %p has invalid total size: %" B_PRIuSIZE,
1576 			metaChunk, metaChunk->totalSize);
1577 		return;
1578 	}
1579 
1580 	addr_t expectedBase = area->BaseAddress()
1581 		+ metaChunkIndex * SLAB_CHUNK_SIZE_LARGE;
1582 	if (metaChunk->chunkBase < expectedBase
1583 		|| metaChunk->chunkBase - expectedBase + metaChunk->totalSize
1584 			> SLAB_CHUNK_SIZE_LARGE) {
1585 		panic("meta chunk %p has invalid base address: %" B_PRIxADDR, metaChunk,
1586 			metaChunk->chunkBase);
1587 		return;
1588 	}
1589 
1590 	if (metaChunk->chunkCount != metaChunk->totalSize / metaChunk->chunkSize) {
1591 		panic("meta chunk %p has invalid chunk count: %u", metaChunk,
1592 			metaChunk->chunkCount);
1593 		return;
1594 	}
1595 
1596 	if (metaChunk->usedChunkCount > metaChunk->chunkCount) {
1597 		panic("meta chunk %p has invalid used chunk count: %u", metaChunk,
1598 			metaChunk->usedChunkCount);
1599 		return;
1600 	}
1601 
1602 	if (metaChunk->firstFreeChunk > metaChunk->chunkCount) {
1603 		panic("meta chunk %p has invalid first free chunk: %u", metaChunk,
1604 			metaChunk->firstFreeChunk);
1605 		return;
1606 	}
1607 
1608 	if (metaChunk->lastFreeChunk >= metaChunk->chunkCount) {
1609 		panic("meta chunk %p has invalid last free chunk: %u", metaChunk,
1610 			metaChunk->lastFreeChunk);
1611 		return;
1612 	}
1613 
1614 	// check free list for structural sanity
1615 	uint32 freeChunks = 0;
1616 	for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
1617 			chunk = chunk->next) {
1618 		if ((addr_t)chunk % sizeof(Chunk) != 0 || chunk < metaChunk->chunks
1619 			|| chunk >= metaChunk->chunks + metaChunk->chunkCount) {
1620 			panic("meta chunk %p has invalid element in free list, chunk: %p",
1621 				metaChunk, chunk);
1622 			return;
1623 		}
1624 
1625 		if (++freeChunks > metaChunk->chunkCount) {
1626 			panic("meta chunk %p has cyclic free list", metaChunk);
1627 			return;
1628 		}
1629 	}
1630 
1631 	if (freeChunks + metaChunk->usedChunkCount > metaChunk->chunkCount) {
1632 		panic("meta chunk %p has mismatching free/used chunk counts: total: "
1633 			"%u, used: %u, free: %" B_PRIu32, metaChunk, metaChunk->chunkCount,
1634 			metaChunk->usedChunkCount, freeChunks);
1635 		return;
1636 	}
1637 
1638 	// count used chunks by looking at their reference/next field
1639 	uint32 usedChunks = 0;
1640 	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
1641 		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i))
1642 			usedChunks++;
1643 	}
1644 
1645 	if (usedChunks != metaChunk->usedChunkCount) {
1646 		panic("meta chunk %p has used chunks that appear free: total: "
1647 			"%u, used: %u, appearing used: %" B_PRIu32, metaChunk,
1648 			metaChunk->chunkCount, metaChunk->usedChunkCount, usedChunks);
1649 		return;
1650 	}
1651 
1652 	// check free range
1653 	for (uint32 i = metaChunk->firstFreeChunk; i < metaChunk->lastFreeChunk;
1654 			i++) {
1655 		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i)) {
1656 			panic("meta chunk %p has used chunk in free range, chunk: %p (%"
1657 				B_PRIu32 ", free range: %u - %u)", metaChunk,
1658 				metaChunk->chunks + i, i, metaChunk->firstFreeChunk,
1659 				metaChunk->lastFreeChunk);
1660 			return;
1661 		}
1662 	}
1663 }
1664 
1665 #endif	// DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
1666 
1667 
1668 /*static*/ int
1669 MemoryManager::_DumpRawAllocations(int argc, char** argv)
1670 {
1671 	kprintf("area        meta chunk  chunk  base        size (KB)\n");
1672 
1673 	size_t totalSize = 0;
1674 
1675 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
1676 			Area* area = it.Next();) {
1677 		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1678 			MetaChunk* metaChunk = area->metaChunks + i;
1679 			if (metaChunk->chunkSize == 0)
1680 				continue;
1681 			for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
1682 				Chunk* chunk = metaChunk->chunks + k;
1683 
1684 				// skip free chunks
1685 				if (_IsChunkFree(metaChunk, chunk))
1686 					continue;
1687 
1688 				addr_t reference = chunk->reference;
1689 				if ((reference & 1) == 0 || reference == 1)
1690 					continue;
1691 
1692 				addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
1693 				size_t size = reference - chunkAddress + 1;
1694 				totalSize += size;
1695 
1696 				kprintf("%p  %10" B_PRId32 "  %5" B_PRIu32 "  %p  %9"
1697 					B_PRIuSIZE "\n", area, i, k, (void*)chunkAddress,
1698 					size / 1024);
1699 			}
1700 		}
1701 	}
1702 
1703 	kprintf("total:                                     %9" B_PRIuSIZE "\n",
1704 		totalSize / 1024);
1705 
1706 	return 0;
1707 }
1708 
1709 
1710 /*static*/ void
1711 MemoryManager::_PrintMetaChunkTableHeader(bool printChunks)
1712 {
1713 	if (printChunks)
1714 		kprintf("chunk        base       cache  object size  cache name\n");
1715 	else
1716 		kprintf("chunk        base\n");
1717 }
1718 

1719 /*static*/ void
1720 MemoryManager::_DumpMetaChunk(MetaChunk* metaChunk, bool printChunks,
1721 	bool printHeader)
1722 {
1723 	if (printHeader)
1724 		_PrintMetaChunkTableHeader(printChunks);
1725 
1726 	const char* type = "empty";
1727 	if (metaChunk->chunkSize != 0) {
1728 		switch (metaChunk->chunkSize) {
1729 			case SLAB_CHUNK_SIZE_SMALL:
1730 				type = "small";
1731 				break;
1732 			case SLAB_CHUNK_SIZE_MEDIUM:
1733 				type = "medium";
1734 				break;
1735 			case SLAB_CHUNK_SIZE_LARGE:
1736 				type = "large";
1737 				break;
1738 		}
1739 	}
1740 
1741 	int metaChunkIndex = metaChunk - metaChunk->GetArea()->metaChunks;
1742 	kprintf("%5d  %p  --- %6s meta chunk", metaChunkIndex,
1743 		(void*)metaChunk->chunkBase, type);
1744 	if (metaChunk->chunkSize != 0) {
1745 		kprintf(": %4u/%4u used, %-4u-%4u free ------------\n",
1746 			metaChunk->usedChunkCount, metaChunk->chunkCount,
1747 			metaChunk->firstFreeChunk, metaChunk->lastFreeChunk);
1748 	} else
1749 		kprintf(" --------------------------------------------\n");
1750 
1751 	if (metaChunk->chunkSize == 0 || !printChunks)
1752 		return;
1753 
1754 	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
1755 		Chunk* chunk = metaChunk->chunks + i;
1756 
1757 		// skip free chunks
1758 		if (_IsChunkFree(metaChunk, chunk)) {
1759 			if (!_IsChunkInFreeList(metaChunk, chunk)) {
1760 				kprintf("%5" B_PRIu32 "  %p  appears free, but isn't in free "
1761 					"list!\n", i, (void*)_ChunkAddress(metaChunk, chunk));
1762 			}
1763 
1764 			continue;
1765 		}
1766 
1767 		addr_t reference = chunk->reference;
1768 		if ((reference & 1) == 0) {
1769 			ObjectCache* cache = (ObjectCache*)reference;
1770 			kprintf("%5" B_PRIu32 "  %p  %p  %11" B_PRIuSIZE "  %s\n", i,
1771 				(void*)_ChunkAddress(metaChunk, chunk), cache,
1772 				cache != NULL ? cache->object_size : 0,
1773 				cache != NULL ? cache->name : "");
1774 		} else if (reference != 1) {
1775 			kprintf("%5" B_PRIu32 "  %p  raw allocation up to %p\n", i,
1776 				(void*)_ChunkAddress(metaChunk, chunk), (void*)reference);
1777 		}
1778 	}
1779 }
1780 
1781 
1782 /*static*/ int
1783 MemoryManager::_DumpMetaChunk(int argc, char** argv)
1784 {
1785 	if (argc != 2) {
1786 		print_debugger_command_usage(argv[0]);
1787 		return 0;
1788 	}
1789 
1790 	uint64 address;
1791 	if (!evaluate_debug_expression(argv[1], &address, false))
1792 		return 0;
1793 
1794 	Area* area = _AreaForAddress(address);
1795 
1796 	MetaChunk* metaChunk;
1797 	if ((addr_t)address >= (addr_t)area->metaChunks
1798 		&& (addr_t)address
1799 			< (addr_t)(area->metaChunks + SLAB_META_CHUNKS_PER_AREA)) {
1800 		metaChunk = (MetaChunk*)(addr_t)address;
1801 	} else {
1802 		metaChunk = area->metaChunks
1803 			+ (address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE;
1804 	}
1805 
1806 	_DumpMetaChunk(metaChunk, true, true);
1807 
1808 	return 0;
1809 }
1810 
1811 
1812 /*static*/ void
1813 MemoryManager::_DumpMetaChunks(const char* name, MetaChunkList& metaChunkList,
1814 	bool printChunks)
1815 {
1816 	kprintf("%s:\n", name);
1817 
1818 	for (MetaChunkList::Iterator it = metaChunkList.GetIterator();
1819 			MetaChunk* metaChunk = it.Next();) {
1820 		_DumpMetaChunk(metaChunk, printChunks, false);
1821 	}
1822 }
1823 
1824 
1825 /*static*/ int
1826 MemoryManager::_DumpMetaChunks(int argc, char** argv)
1827 {
1828 	bool printChunks = argc > 1 && strcmp(argv[1], "-c") == 0;
1829 
1830 	_PrintMetaChunkTableHeader(printChunks);
1831 	_DumpMetaChunks("free complete", sFreeCompleteMetaChunks, printChunks);
1832 	_DumpMetaChunks("free short", sFreeShortMetaChunks, printChunks);
1833 	_DumpMetaChunks("partial small", sPartialMetaChunksSmall, printChunks);
1834 	_DumpMetaChunks("partial medium", sPartialMetaChunksMedium, printChunks);
1835 
1836 	return 0;
1837 }
1838 
1839 
1840 /*static*/ int
1841 MemoryManager::_DumpArea(int argc, char** argv)
1842 {
1843 	bool printChunks = false;
1844 
1845 	int argi = 1;
1846 	while (argi < argc) {
1847 		if (argv[argi][0] != '-')
1848 			break;
1849 		const char* arg = argv[argi++];
1850 		if (strcmp(arg, "-c") == 0) {
1851 			printChunks = true;
1852 		} else {
1853 			print_debugger_command_usage(argv[0]);
1854 			return 0;
1855 		}
1856 	}
1857 
1858 	if (argi + 1 != argc) {
1859 		print_debugger_command_usage(argv[0]);
1860 		return 0;
1861 	}
1862 
1863 	uint64 address;
1864 	if (!evaluate_debug_expression(argv[argi], &address, false))
1865 		return 0;
1866 
1867 	Area* area = _AreaForAddress((addr_t)address);
1868 
1869 	for (uint32 k = 0; k < SLAB_META_CHUNKS_PER_AREA; k++) {
1870 		MetaChunk* metaChunk = area->metaChunks + k;
1871 		_DumpMetaChunk(metaChunk, printChunks, k == 0);
1872 	}
1873 
1874 	return 0;
1875 }
1876 
1877 
1878 /*static*/ int
1879 MemoryManager::_DumpAreas(int argc, char** argv)
1880 {
1881 	kprintf("      base        area   meta      small   medium  large\n");
1882 
1883 	size_t totalTotalSmall = 0;
1884 	size_t totalUsedSmall = 0;
1885 	size_t totalTotalMedium = 0;
1886 	size_t totalUsedMedium = 0;
1887 	size_t totalUsedLarge = 0;
1888 	uint32 areaCount = 0;
1889 
1890 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
1891 			Area* area = it.Next();) {
1892 		areaCount++;
1893 
1894 		// sum up the free/used counts for the chunk sizes
1895 		int totalSmall = 0;
1896 		int usedSmall = 0;
1897 		int totalMedium = 0;
1898 		int usedMedium = 0;
1899 		int usedLarge = 0;
1900 
1901 		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1902 			MetaChunk* metaChunk = area->metaChunks + i;
1903 			if (metaChunk->chunkSize == 0)
1904 				continue;
1905 
1906 			switch (metaChunk->chunkSize) {
1907 				case SLAB_CHUNK_SIZE_SMALL:
1908 					totalSmall += metaChunk->chunkCount;
1909 					usedSmall += metaChunk->usedChunkCount;
1910 					break;
1911 				case SLAB_CHUNK_SIZE_MEDIUM:
1912 					totalMedium += metaChunk->chunkCount;
1913 					usedMedium += metaChunk->usedChunkCount;
1914 					break;
1915 				case SLAB_CHUNK_SIZE_LARGE:
1916 					usedLarge += metaChunk->usedChunkCount;
1917 					break;
1918 			}
1919 		}
1920 
1921 		kprintf("%p  %p  %2u/%2u  %4d/%4d  %3d/%3d  %5d\n",
1922 			area, area->vmArea, area->usedMetaChunkCount,
1923 			SLAB_META_CHUNKS_PER_AREA, usedSmall, totalSmall, usedMedium,
1924 			totalMedium, usedLarge);
1925 
1926 		totalTotalSmall += totalSmall;
1927 		totalUsedSmall += usedSmall;
1928 		totalTotalMedium += totalMedium;
1929 		totalUsedMedium += usedMedium;
1930 		totalUsedLarge += usedLarge;
1931 	}
1932 
1933 	kprintf("%d free area%s:\n", sFreeAreaCount,
1934 		sFreeAreaCount == 1 ? "" : "s");
1935 	for (Area* area = sFreeAreas; area != NULL; area = area->next) {
1936 		areaCount++;
1937 		kprintf("%p  %p\n", area, area->vmArea);
1938 	}
1939 
1940 	kprintf("total usage:\n");
1941 	kprintf("  small:    %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedSmall,
1942 		totalTotalSmall);
1943 	kprintf("  medium:   %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedMedium,
1944 		totalTotalMedium);
1945 	kprintf("  large:    %" B_PRIuSIZE "\n", totalUsedLarge);
1946 	kprintf("  memory:   %" B_PRIuSIZE "/%" B_PRIuSIZE " KB\n",
1947 		(totalUsedSmall * SLAB_CHUNK_SIZE_SMALL
1948 			+ totalUsedMedium * SLAB_CHUNK_SIZE_MEDIUM
1949 			+ totalUsedLarge * SLAB_CHUNK_SIZE_LARGE) / 1024,
1950 		areaCount * SLAB_AREA_SIZE / 1024);
1951 	kprintf("  overhead: %" B_PRIuSIZE " KB\n",
1952 		areaCount * kAreaAdminSize / 1024);
1953 
1954 	return 0;
1955 }
1956