xref: /haiku/src/system/kernel/slab/MemoryManager.cpp (revision 71f92c6439bddce17ccd7121d4ba7ff716617b1c)
1 /*
2  * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include "MemoryManager.h"
8 
9 #include <algorithm>
10 
11 #include <debug.h>
12 #include <tracing.h>
13 #include <util/AutoLock.h>
14 #include <vm/vm.h>
15 #include <vm/vm_page.h>
16 #include <vm/vm_priv.h>
17 #include <vm/VMAddressSpace.h>
18 #include <vm/VMArea.h>
19 #include <vm/VMCache.h>
20 #include <vm/VMTranslationMap.h>
21 
22 #include "kernel_debug_config.h"
23 
24 #include "ObjectCache.h"
25 #include "slab_private.h"
26 
27 
28 //#define TRACE_MEMORY_MANAGER
29 #ifdef TRACE_MEMORY_MANAGER
30 #	define TRACE(x...)	dprintf(x)
31 #else
32 #	define TRACE(x...)	do {} while (false)
33 #endif
34 
35 #if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
36 #	define PARANOID_CHECKS_ONLY(x)	x
37 #else
38 #	define PARANOID_CHECKS_ONLY(x)
39 #endif
40 
41 
42 static const char* const kSlabAreaName = "slab area";
43 
44 static void* sAreaTableBuffer[1024];
45 
46 mutex MemoryManager::sLock;
47 rw_lock MemoryManager::sAreaTableLock;
48 kernel_args* MemoryManager::sKernelArgs;
49 MemoryManager::AreaTable MemoryManager::sAreaTable;
50 MemoryManager::Area* MemoryManager::sFreeAreas;
51 int MemoryManager::sFreeAreaCount;
52 MemoryManager::MetaChunkList MemoryManager::sFreeCompleteMetaChunks;
53 MemoryManager::MetaChunkList MemoryManager::sFreeShortMetaChunks;
54 MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksSmall;
55 MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksMedium;
56 MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryCanWait;
57 MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryDontWait;
58 bool MemoryManager::sMaintenanceNeeded;
59 
60 
61 // #pragma mark - kernel tracing
62 
63 
64 #if SLAB_MEMORY_MANAGER_TRACING
65 
66 
67 //namespace SlabMemoryManagerCacheTracing {
68 struct MemoryManager::Tracing {
69 
70 class MemoryManagerTraceEntry : public AbstractTraceEntry {
71 public:
72 	MemoryManagerTraceEntry()
73 	{
74 	}
75 };
76 
77 
78 class Allocate : public MemoryManagerTraceEntry {
79 public:
80 	Allocate(ObjectCache* cache, uint32 flags)
81 		:
82 		MemoryManagerTraceEntry(),
83 		fCache(cache),
84 		fFlags(flags)
85 	{
86 		Initialized();
87 	}
88 
89 	virtual void AddDump(TraceOutput& out)
90 	{
91 		out.Print("slab memory manager alloc: cache: %p, flags: %#" B_PRIx32,
92 			fCache, fFlags);
93 	}
94 
95 private:
96 	ObjectCache*	fCache;
97 	uint32			fFlags;
98 };
99 
100 
101 class Free : public MemoryManagerTraceEntry {
102 public:
103 	Free(void* address, uint32 flags)
104 		:
105 		MemoryManagerTraceEntry(),
106 		fAddress(address),
107 		fFlags(flags)
108 	{
109 		Initialized();
110 	}
111 
112 	virtual void AddDump(TraceOutput& out)
113 	{
114 		out.Print("slab memory manager free: address: %p, flags: %#" B_PRIx32,
115 			fAddress, fFlags);
116 	}
117 
118 private:
119 	void*	fAddress;
120 	uint32	fFlags;
121 };
122 
123 
124 class AllocateRaw : public MemoryManagerTraceEntry {
125 public:
126 	AllocateRaw(size_t size, uint32 flags)
127 		:
128 		MemoryManagerTraceEntry(),
129 		fSize(size),
130 		fFlags(flags)
131 	{
132 		Initialized();
133 	}
134 
135 	virtual void AddDump(TraceOutput& out)
136 	{
137 		out.Print("slab memory manager alloc raw: size: %" B_PRIuSIZE
138 			", flags: %#" B_PRIx32, fSize, fFlags);
139 	}
140 
141 private:
142 	size_t	fSize;
143 	uint32	fFlags;
144 };
145 
146 
147 class FreeRawOrReturnCache : public MemoryManagerTraceEntry {
148 public:
149 	FreeRawOrReturnCache(void* address, uint32 flags)
150 		:
151 		MemoryManagerTraceEntry(),
152 		fAddress(address),
153 		fFlags(flags)
154 	{
155 		Initialized();
156 	}
157 
158 	virtual void AddDump(TraceOutput& out)
159 	{
160 		out.Print("slab memory manager free raw/return: address: %p, flags: %#"
161 			B_PRIx32, fAddress, fFlags);
162 	}
163 
164 private:
165 	void*	fAddress;
166 	uint32	fFlags;
167 };
168 
169 
170 class AllocateArea : public MemoryManagerTraceEntry {
171 public:
172 	AllocateArea(Area* area, uint32 flags)
173 		:
174 		MemoryManagerTraceEntry(),
175 		fArea(area),
176 		fFlags(flags)
177 	{
178 		Initialized();
179 	}
180 
181 	virtual void AddDump(TraceOutput& out)
182 	{
183 		out.Print("slab memory manager alloc area: flags: %#" B_PRIx32
184 			" -> %p", fFlags, fArea);
185 	}
186 
187 private:
188 	Area*	fArea;
189 	uint32	fFlags;
190 };
191 
192 
193 class AddArea : public MemoryManagerTraceEntry {
194 public:
195 	AddArea(Area* area)
196 		:
197 		MemoryManagerTraceEntry(),
198 		fArea(area)
199 	{
200 		Initialized();
201 	}
202 
203 	virtual void AddDump(TraceOutput& out)
204 	{
205 		out.Print("slab memory manager add area: %p", fArea);
206 	}
207 
208 private:
209 	Area*	fArea;
210 };
211 
212 
213 class FreeArea : public MemoryManagerTraceEntry {
214 public:
215 	FreeArea(Area* area, bool areaRemoved, uint32 flags)
216 		:
217 		MemoryManagerTraceEntry(),
218 		fArea(area),
219 		fFlags(flags),
220 		fRemoved(areaRemoved)
221 	{
222 		Initialized();
223 	}
224 
225 	virtual void AddDump(TraceOutput& out)
226 	{
227 		out.Print("slab memory manager free area: %p%s, flags: %#" B_PRIx32,
228 			fArea, fRemoved ? " (removed)" : "", fFlags);
229 	}
230 
231 private:
232 	Area*	fArea;
233 	uint32	fFlags;
234 	bool	fRemoved;
235 };
236 
237 
238 class AllocateMetaChunk : public MemoryManagerTraceEntry {
239 public:
240 	AllocateMetaChunk(MetaChunk* metaChunk)
241 		:
242 		MemoryManagerTraceEntry(),
243 		fMetaChunk(metaChunk->chunkBase)
244 	{
245 		Initialized();
246 	}
247 
248 	virtual void AddDump(TraceOutput& out)
249 	{
250 		out.Print("slab memory manager alloc meta chunk: %#" B_PRIxADDR,
251 			fMetaChunk);
252 	}
253 
254 private:
255 	addr_t	fMetaChunk;
256 };
257 
258 
259 class FreeMetaChunk : public MemoryManagerTraceEntry {
260 public:
261 	FreeMetaChunk(MetaChunk* metaChunk)
262 		:
263 		MemoryManagerTraceEntry(),
264 		fMetaChunk(metaChunk->chunkBase)
265 	{
266 		Initialized();
267 	}
268 
269 	virtual void AddDump(TraceOutput& out)
270 	{
271 		out.Print("slab memory manager free meta chunk: %#" B_PRIxADDR,
272 			fMetaChunk);
273 	}
274 
275 private:
276 	addr_t	fMetaChunk;
277 };
278 
279 
280 class AllocateChunk : public MemoryManagerTraceEntry {
281 public:
282 	AllocateChunk(size_t chunkSize, MetaChunk* metaChunk, Chunk* chunk)
283 		:
284 		MemoryManagerTraceEntry(),
285 		fChunkSize(chunkSize),
286 		fMetaChunk(metaChunk->chunkBase),
287 		fChunk(chunk - metaChunk->chunks)
288 	{
289 		Initialized();
290 	}
291 
292 	virtual void AddDump(TraceOutput& out)
293 	{
294 		out.Print("slab memory manager alloc chunk: size: %" B_PRIuSIZE
295 			" -> meta chunk: %#" B_PRIxADDR ", chunk: %" B_PRIu32, fChunkSize,
296 			fMetaChunk, fChunk);
297 	}
298 
299 private:
300 	size_t	fChunkSize;
301 	addr_t	fMetaChunk;
302 	uint32	fChunk;
303 };
304 
305 
306 class AllocateChunks : public MemoryManagerTraceEntry {
307 public:
308 	AllocateChunks(size_t chunkSize, uint32 chunkCount, MetaChunk* metaChunk,
309 		Chunk* chunk)
310 		:
311 		MemoryManagerTraceEntry(),
312 		fMetaChunk(metaChunk->chunkBase),
313 		fChunkSize(chunkSize),
314 		fChunkCount(chunkCount),
315 		fChunk(chunk - metaChunk->chunks)
316 	{
317 		Initialized();
318 	}
319 
320 	virtual void AddDump(TraceOutput& out)
321 	{
322 		out.Print("slab memory manager alloc chunks: size: %" B_PRIuSIZE
323 			", count %" B_PRIu32 " -> meta chunk: %#" B_PRIxADDR ", chunk: %"
324 			B_PRIu32, fChunkSize, fChunkCount, fMetaChunk, fChunk);
325 	}
326 
327 private:
328 	addr_t	fMetaChunk;
329 	size_t	fChunkSize;
330 	uint32	fChunkCount;
331 	uint32	fChunk;
332 };
333 
334 
335 class FreeChunk : public MemoryManagerTraceEntry {
336 public:
337 	FreeChunk(MetaChunk* metaChunk, Chunk* chunk)
338 		:
339 		MemoryManagerTraceEntry(),
340 		fMetaChunk(metaChunk->chunkBase),
341 		fChunk(chunk - metaChunk->chunks)
342 	{
343 		Initialized();
344 	}
345 
346 	virtual void AddDump(TraceOutput& out)
347 	{
348 		out.Print("slab memory manager free chunk: meta chunk: %#" B_PRIxADDR
349 			", chunk: %" B_PRIu32, fMetaChunk, fChunk);
350 	}
351 
352 private:
353 	addr_t	fMetaChunk;
354 	uint32	fChunk;
355 };
356 
357 
358 class Map : public MemoryManagerTraceEntry {
359 public:
360 	Map(addr_t address, size_t size, uint32 flags)
361 		:
362 		MemoryManagerTraceEntry(),
363 		fAddress(address),
364 		fSize(size),
365 		fFlags(flags)
366 	{
367 		Initialized();
368 	}
369 
370 	virtual void AddDump(TraceOutput& out)
371 	{
372 		out.Print("slab memory manager map: %#" B_PRIxADDR ", size: %"
373 			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
374 	}
375 
376 private:
377 	addr_t	fAddress;
378 	size_t	fSize;
379 	uint32	fFlags;
380 };
381 
382 
383 class Unmap : public MemoryManagerTraceEntry {
384 public:
385 	Unmap(addr_t address, size_t size, uint32 flags)
386 		:
387 		MemoryManagerTraceEntry(),
388 		fAddress(address),
389 		fSize(size),
390 		fFlags(flags)
391 	{
392 		Initialized();
393 	}
394 
395 	virtual void AddDump(TraceOutput& out)
396 	{
397 		out.Print("slab memory manager unmap: %#" B_PRIxADDR ", size: %"
398 			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
399 	}
400 
401 private:
402 	addr_t	fAddress;
403 	size_t	fSize;
404 	uint32	fFlags;
405 };
406 
407 
408 //}	// namespace SlabMemoryManagerCacheTracing
409 };	// struct MemoryManager::Tracing
410 
411 
412 //#	define T(x)	new(std::nothrow) SlabMemoryManagerCacheTracing::x
413 #	define T(x)	new(std::nothrow) MemoryManager::Tracing::x
414 
415 #else
416 #	define T(x)
417 #endif	// SLAB_MEMORY_MANAGER_TRACING
418 
419 
420 // #pragma mark - MemoryManager
421 
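/*!	The MemoryManager provides the backing memory for the slab allocator. It
	carves virtual memory out of areas of SLAB_AREA_SIZE bytes. Each area is
	divided into SLAB_META_CHUNKS_PER_AREA meta chunks of SLAB_CHUNK_SIZE_LARGE
	bytes; the first meta chunk is shorter, since the area's administrative
	Area structure (kAreaAdminSize bytes starting at SLAB_AREA_STRUCT_OFFSET)
	occupies the beginning of the area. A meta chunk in use is uniformly
	divided into chunks of SLAB_CHUNK_SIZE_SMALL, SLAB_CHUNK_SIZE_MEDIUM, or
	SLAB_CHUNK_SIZE_LARGE bytes.

	A used chunk's reference field encodes what the chunk is used for: the
	ObjectCache pointer for slab allocations (an even value), the address of
	the allocation's last byte for raw allocations (an odd value), or 1 for a
	chunk that has been allocated but not tagged yet. A free chunk is linked
	into its meta chunk's free list via its next field instead.

	Minimal usage sketch -- assuming a valid ObjectCache* "cache"; the actual
	callers are the object cache implementations:

		void* pages;
		if (MemoryManager::Allocate(cache, flags, pages) == B_OK) {
			// use the cache->slab_size bytes at "pages" for a slab ...
			MemoryManager::Free(pages, flags);
		}
*/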
422 
423 /*static*/ void
424 MemoryManager::Init(kernel_args* args)
425 {
426 	mutex_init(&sLock, "slab memory manager");
427 	rw_lock_init(&sAreaTableLock, "slab memory manager area table");
428 	sKernelArgs = args;
429 
430 	new(&sFreeCompleteMetaChunks) MetaChunkList;
431 	new(&sFreeShortMetaChunks) MetaChunkList;
432 	new(&sPartialMetaChunksSmall) MetaChunkList;
433 	new(&sPartialMetaChunksMedium) MetaChunkList;
434 
435 	new(&sAreaTable) AreaTable;
436 	sAreaTable.Resize(sAreaTableBuffer, sizeof(sAreaTableBuffer), true);
437 		// A bit hacky: The table now owns the memory. Since we never resize or
438 		// free it, that's not a problem, though.
439 
440 	sFreeAreas = NULL;
441 	sFreeAreaCount = 0;
442 	sMaintenanceNeeded = false;
443 }
444 
445 
446 /*static*/ void
447 MemoryManager::InitPostArea()
448 {
449 	sKernelArgs = NULL;
450 
451 	// Convert all early areas to real VM areas. This loop might look a bit
452 	// weird, but it is necessary, since creating an actual area involves memory
453 	// allocations, which in turn can change the state we are iterating over.
454 	bool done;
455 	do {
456 		done = true;
457 
458 		for (AreaTable::Iterator it = sAreaTable.GetIterator();
459 				Area* area = it.Next();) {
460 			if (area->vmArea == NULL) {
461 				_ConvertEarlyArea(area);
462 				done = false;
463 				break;
464 			}
465 		}
466 	} while (!done);
467 
468 	// unmap and free unused pages
469 	if (sFreeAreas != NULL) {
470 		// Just "leak" all but the first of the free areas -- the VM will
471 		// automatically free all unclaimed memory.
472 		sFreeAreas->next = NULL;
473 		sFreeAreaCount = 1;
474 
475 		Area* area = sFreeAreas;
476 		_ConvertEarlyArea(area);
477 		_UnmapFreeChunksEarly(area);
478 	}
479 
480 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
481 			Area* area = it.Next();) {
482 		_UnmapFreeChunksEarly(area);
483 	}
484 
485 	sMaintenanceNeeded = true;
486 		// might not be necessary, but it doesn't hurt
487 
488 	add_debugger_command_etc("slab_area", &_DumpArea,
489 		"Dump information on a given slab area",
490 		"[ -c ] <area>\n"
491 		"Dump information on a given slab area specified by its base "
492 			"address.\n"
493 		"If \"-c\" is given, the chunks of all meta chunks are printed as "
494 			"well.\n", 0);
495 	add_debugger_command_etc("slab_areas", &_DumpAreas,
496 		"List all slab areas",
497 		"\n"
498 		"Lists all slab areas.\n", 0);
499 	add_debugger_command_etc("slab_meta_chunk", &_DumpMetaChunk,
500 		"Dump information on a given slab meta chunk",
501 		"<meta chunk>\n"
502 		"Dump information on a given slab meta chunk specified by its base "
503 			"or object address.\n", 0);
504 	add_debugger_command_etc("slab_meta_chunks", &_DumpMetaChunks,
505 		"List all non-full slab meta chunks",
506 		"[ -c ]\n"
507 		"Lists all non-full slab meta chunks.\n"
508 		"If \"-c\" is given, the chunks of all meta chunks are printed as "
509 			"well.\n", 0);
510 	add_debugger_command_etc("slab_raw_allocations", &_DumpRawAllocations,
511 		"List all raw allocations in slab areas",
512 		"\n"
513 		"Lists all raw allocations in slab areas.\n", 0);
514 }
515 
516 
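/*!	Allocates a single chunk of \c cache->slab_size bytes for \a cache, maps
	it, and returns its address in \a _pages. The chunk's reference is set to
	the cache pointer, so CacheForAddress() and FreeRawOrReturnCache() can
	attribute the memory to the cache later.
*/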
517 /*static*/ status_t
518 MemoryManager::Allocate(ObjectCache* cache, uint32 flags, void*& _pages)
519 {
520 	// TODO: Support CACHE_UNLOCKED_PAGES!
521 
522 	T(Allocate(cache, flags));
523 
524 	size_t chunkSize = cache->slab_size;
525 
526 	TRACE("MemoryManager::Allocate(%p, %#" B_PRIx32 "): chunkSize: %"
527 		B_PRIuSIZE "\n", cache, flags, chunkSize);
528 
529 	MutexLocker locker(sLock);
530 
531 	// allocate a chunk
532 	MetaChunk* metaChunk;
533 	Chunk* chunk;
534 	status_t error = _AllocateChunks(chunkSize, 1, flags, metaChunk, chunk);
535 	if (error != B_OK)
536 		return error;
537 
538 	// map the chunk
539 	Area* area = metaChunk->GetArea();
540 	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
541 
542 	locker.Unlock();
543 	error = _MapChunk(area->vmArea, chunkAddress, chunkSize, 0, flags);
544 	locker.Lock();
545 	if (error != B_OK) {
546 		// something failed -- free the chunk
547 		_FreeChunk(area, metaChunk, chunk, chunkAddress, true, flags);
548 		return error;
549 	}
550 
551 	chunk->reference = (addr_t)cache;
552 	_pages = (void*)chunkAddress;
553 
554 	TRACE("MemoryManager::Allocate() done: %p (meta chunk: %d, chunk %d)\n",
555 		_pages, int(metaChunk - area->metaChunks),
556 		int(chunk - metaChunk->chunks));
557 	return B_OK;
558 }
559 
560 
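/*!	Frees a chunk previously obtained via Allocate(). The area and meta chunk
	are derived from \a pages; the chunk is unmapped and returned to its meta
	chunk's free list by _FreeChunk().
*/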
561 /*static*/ void
562 MemoryManager::Free(void* pages, uint32 flags)
563 {
564 	TRACE("MemoryManager::Free(%p, %#" B_PRIx32 ")\n", pages, flags);
565 
566 	T(Free(pages, flags));
567 
568 	// get the area and the meta chunk
569 	Area* area = _AreaForAddress((addr_t)pages);
570 	MetaChunk* metaChunk = &area->metaChunks[
571 		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
572 
573 	ASSERT(metaChunk->chunkSize > 0);
574 	ASSERT((addr_t)pages >= metaChunk->chunkBase);
575 	ASSERT(((addr_t)pages % metaChunk->chunkSize) == 0);
576 
577 	// get the chunk
578 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
579 	Chunk* chunk = &metaChunk->chunks[chunkIndex];
580 
581 	ASSERT(chunk->next != NULL);
582 	ASSERT(chunk->next < metaChunk->chunks
583 		|| chunk->next
584 			>= metaChunk->chunks + SLAB_SMALL_CHUNKS_PER_META_CHUNK);
585 
586 	// and free it
587 	MutexLocker locker(sLock);
588 	_FreeChunk(area, metaChunk, chunk, (addr_t)pages, false, flags);
589 }
590 
591 
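/*!	Allocates \a size bytes (rounded up to a multiple of
	SLAB_CHUNK_SIZE_SMALL) not associated with an object cache. Requests
	larger than SLAB_CHUNK_SIZE_LARGE, or with CACHE_ALIGN_ON_SIZE set, are
	served by a dedicated VM area; everything else by a contiguous run of
	small or medium chunks. For chunk-based allocations the first chunk's
	reference stores the address of the allocation's last byte, so
	FreeRawOrReturnCache() can recover the allocation size.
*/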
592 /*static*/ status_t
593 MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
594 {
595 	T(AllocateRaw(size, flags));
596 
597 	size = ROUNDUP(size, SLAB_CHUNK_SIZE_SMALL);
598 
599 	TRACE("MemoryManager::AllocateRaw(%" B_PRIuSIZE ", %#" B_PRIx32 ")\n", size,
600 		flags);
601 
602 	if (size > SLAB_CHUNK_SIZE_LARGE || (flags & CACHE_ALIGN_ON_SIZE) != 0) {
603 		// Requested size greater than a large chunk or an aligned allocation.
604 		// Allocate as an area.
605 		if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0)
606 			return B_WOULD_BLOCK;
607 
608 		virtual_address_restrictions virtualRestrictions = {};
609 		virtualRestrictions.address_specification
610 			= (flags & CACHE_ALIGN_ON_SIZE) != 0
611 				? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS;
612 		physical_address_restrictions physicalRestrictions = {};
613 		area_id area = create_area_etc(VMAddressSpace::KernelID(),
614 			"slab large raw allocation", size, B_FULL_LOCK,
615 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
616 			((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
617 					? CREATE_AREA_DONT_WAIT : 0)
618 				| CREATE_AREA_DONT_CLEAR,
619 			&virtualRestrictions, &physicalRestrictions, &_pages);
620 
621 		status_t result = area >= 0 ? B_OK : area;
622 		if (result == B_OK)
623 			fill_allocated_block(_pages, size);
624 		return result;
625 	}
626 
627 	// determine chunk size (small or medium)
628 	size_t chunkSize = SLAB_CHUNK_SIZE_SMALL;
629 	uint32 chunkCount = size / SLAB_CHUNK_SIZE_SMALL;
630 
631 	if (size % SLAB_CHUNK_SIZE_MEDIUM == 0) {
632 		chunkSize = SLAB_CHUNK_SIZE_MEDIUM;
633 		chunkCount = size / SLAB_CHUNK_SIZE_MEDIUM;
634 	}
635 
636 	MutexLocker locker(sLock);
637 
638 	// allocate the chunks
639 	MetaChunk* metaChunk;
640 	Chunk* chunk;
641 	status_t error = _AllocateChunks(chunkSize, chunkCount, flags, metaChunk,
642 		chunk);
643 	if (error != B_OK)
644 		return error;
645 
646 	// map the chunks
647 	Area* area = metaChunk->GetArea();
648 	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
649 
650 	locker.Unlock();
651 	error = _MapChunk(area->vmArea, chunkAddress, size, 0, flags);
652 	locker.Lock();
653 	if (error != B_OK) {
654 		// something failed -- free the chunks
655 		for (uint32 i = 0; i < chunkCount; i++)
656 			_FreeChunk(area, metaChunk, chunk + i, chunkAddress, true, flags);
657 		return error;
658 	}
659 
660 	chunk->reference = (addr_t)chunkAddress + size - 1;
661 	_pages = (void*)chunkAddress;
662 
663 	fill_allocated_block(_pages, size);
664 
665 	TRACE("MemoryManager::AllocateRaw() done: %p (meta chunk: %d, chunk %d)\n",
666 		_pages, int(metaChunk - area->metaChunks),
667 		int(chunk - metaChunk->chunks));
668 	return B_OK;
669 }
670 
671 
672 /*static*/ ObjectCache*
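/*!	Frees memory allocated via AllocateRaw() or, if \a pages belongs to an
	object cache, returns that cache without freeing anything -- the caller is
	then responsible for returning the memory to the cache. Dedicated areas
	(large raw allocations) are deleted directly; raw chunk runs are unmapped
	and their chunks freed. Returns NULL whenever the memory was freed here.
*/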
673 MemoryManager::FreeRawOrReturnCache(void* pages, uint32 flags)
674 {
675 	TRACE("MemoryManager::FreeRawOrReturnCache(%p, %#" B_PRIx32 ")\n", pages,
676 		flags);
677 
678 	T(FreeRawOrReturnCache(pages, flags));
679 
680 	// get the area
681 	addr_t areaBase = _AreaBaseAddressForAddress((addr_t)pages);
682 
683 	ReadLocker readLocker(sAreaTableLock);
684 	Area* area = sAreaTable.Lookup(areaBase);
685 	readLocker.Unlock();
686 
687 	if (area == NULL) {
688 		// Probably a large allocation. Look up the VM area.
689 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
690 		addressSpace->ReadLock();
691 		VMArea* area = addressSpace->LookupArea((addr_t)pages);
692 		addressSpace->ReadUnlock();
693 
694 		if (area != NULL && (addr_t)pages == area->Base())
695 			delete_area(area->id);
696 		else
697 			panic("freeing unknown block %p from area %p", pages, area);
698 
699 		return NULL;
700 	}
701 
702 	MetaChunk* metaChunk = &area->metaChunks[
703 		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
704 
705 	// get the chunk
706 	ASSERT(metaChunk->chunkSize > 0);
707 	ASSERT((addr_t)pages >= metaChunk->chunkBase);
708 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
709 	Chunk* chunk = &metaChunk->chunks[chunkIndex];
710 
711 	addr_t reference = chunk->reference;
712 	if ((reference & 1) == 0)
713 		return (ObjectCache*)reference;
714 
715 	// Seems we have a raw chunk allocation.
716 	ASSERT((addr_t)pages == _ChunkAddress(metaChunk, chunk));
717 	ASSERT(reference > (addr_t)pages);
718 	ASSERT(reference <= areaBase + SLAB_AREA_SIZE - 1);
719 	size_t size = reference - (addr_t)pages + 1;
720 	ASSERT((size % SLAB_CHUNK_SIZE_SMALL) == 0);
721 
722 	// unmap the chunks
723 	_UnmapChunk(area->vmArea, (addr_t)pages, size, flags);
724 
725 	// and free them
726 	MutexLocker locker(sLock);
727 	uint32 chunkCount = size / metaChunk->chunkSize;
728 	for (uint32 i = 0; i < chunkCount; i++)
729 		_FreeChunk(area, metaChunk, chunk + i, (addr_t)pages, true, flags);
730 
731 	return NULL;
732 }
733 
734 
735 /*static*/ size_t
736 MemoryManager::AcceptableChunkSize(size_t size)
737 {
738 	if (size <= SLAB_CHUNK_SIZE_SMALL)
739 		return SLAB_CHUNK_SIZE_SMALL;
740 	if (size <= SLAB_CHUNK_SIZE_MEDIUM)
741 		return SLAB_CHUNK_SIZE_MEDIUM;
742 	return SLAB_CHUNK_SIZE_LARGE;
743 }
744 
745 
746 /*static*/ ObjectCache*
747 MemoryManager::GetAllocationInfo(void* address, size_t& _size)
748 {
749 	// get the area
750 	ReadLocker readLocker(sAreaTableLock);
751 	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
752 	readLocker.Unlock();
753 
754 	if (area == NULL) {
755 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
756 		addressSpace->ReadLock();
757 		VMArea* area = addressSpace->LookupArea((addr_t)address);
758 		if (area != NULL && (addr_t)address == area->Base())
759 			_size = area->Size();
760 		else
761 			_size = 0;
762 		addressSpace->ReadUnlock();
763 
764 		return NULL;
765 	}
766 
767 	MetaChunk* metaChunk = &area->metaChunks[
768 		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
769 
770 	// get the chunk
771 	ASSERT(metaChunk->chunkSize > 0);
772 	ASSERT((addr_t)address >= metaChunk->chunkBase);
773 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
774 
775 	addr_t reference = metaChunk->chunks[chunkIndex].reference;
776 	if ((reference & 1) == 0) {
777 		ObjectCache* cache = (ObjectCache*)reference;
778 		_size = cache->object_size;
779 		return cache;
780 	}
781 
782 	_size = reference - (addr_t)address + 1;
783 	return NULL;
784 }
785 
786 
787 /*static*/ ObjectCache*
788 MemoryManager::CacheForAddress(void* address)
789 {
790 	// get the area
791 	ReadLocker readLocker(sAreaTableLock);
792 	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
793 	readLocker.Unlock();
794 
795 	if (area == NULL)
796 		return NULL;
797 
798 	MetaChunk* metaChunk = &area->metaChunks[
799 		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
800 
801 	// get the chunk
802 	ASSERT(metaChunk->chunkSize > 0);
803 	ASSERT((addr_t)address >= metaChunk->chunkBase);
804 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
805 
806 	addr_t reference = metaChunk->chunks[chunkIndex].reference;
807 	return (reference & 1) == 0 ? (ObjectCache*)reference : NULL;
808 }
809 
810 
811 /*static*/ void
812 MemoryManager::PerformMaintenance()
813 {
814 	MutexLocker locker(sLock);
815 
816 	while (sMaintenanceNeeded) {
817 		sMaintenanceNeeded = false;
818 
819 		// We want to keep one or two areas as a reserve. This way we always
820 		// have at least one area to fall back on in situations where we aren't
821 		// allowed to allocate one, and we also avoid ping-pong effects.
822 		if (sFreeAreaCount > 0 && sFreeAreaCount <= 2)
823 			return;
824 
825 		if (sFreeAreaCount == 0) {
826 			// try to allocate one
827 			Area* area;
828 			if (_AllocateArea(0, area) != B_OK)
829 				return;
830 
831 			_push(sFreeAreas, area);
832 			if (++sFreeAreaCount > 2)
833 				sMaintenanceNeeded = true;
834 		} else {
835 			// free until we only have two free ones
836 			while (sFreeAreaCount > 2) {
837 				Area* area = _pop(sFreeAreas);
838 				_FreeArea(area, true, 0);
839 			}
840 
841 			if (sFreeAreaCount == 0)
842 				sMaintenanceNeeded = true;
843 		}
844 	}
845 }
846 
847 
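/*!	Allocates \a chunkCount contiguous chunks of \a chunkSize bytes. The
	strategy, in order: take chunks from a matching partial meta chunk, claim
	a free meta chunk, pull an area from the free area reserve, and finally
	allocate a new area. While an area allocation is in progress, other
	allocating threads wait on an AllocationEntry condition variable (one
	entry for callers that may wait for memory, one for those that may not).
	If CACHE_DONT_LOCK_KERNEL_SPACE is set and a new area would be needed,
	B_WOULD_BLOCK is returned instead. Called with sLock held.
*/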
848 /*static*/ status_t
849 MemoryManager::_AllocateChunks(size_t chunkSize, uint32 chunkCount,
850 	uint32 flags, MetaChunk*& _metaChunk, Chunk*& _chunk)
851 {
852 	MetaChunkList* metaChunkList = NULL;
853 	if (chunkSize == SLAB_CHUNK_SIZE_SMALL) {
854 		metaChunkList = &sPartialMetaChunksSmall;
855 	} else if (chunkSize == SLAB_CHUNK_SIZE_MEDIUM) {
856 		metaChunkList = &sPartialMetaChunksMedium;
857 	} else if (chunkSize != SLAB_CHUNK_SIZE_LARGE) {
858 		panic("MemoryManager::_AllocateChunks(): Unsupported chunk size: %"
859 			B_PRIuSIZE, chunkSize);
860 		return B_BAD_VALUE;
861 	}
862 
863 	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk))
864 		return B_OK;
865 
866 	if (sFreeAreas != NULL) {
867 		_AddArea(_pop(sFreeAreas));
868 		sFreeAreaCount--;
869 		_RequestMaintenance();
870 
871 		_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
872 		return B_OK;
873 	}
874 
875 	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
876 		// We can't create an area under this restriction, and we must not
877 		// wait for someone else to do so either.
878 		return B_WOULD_BLOCK;
879 	}
880 
881 	// We need to allocate a new area. Wait if someone else is already trying
882 	// to do the same.
883 	while (true) {
884 		AllocationEntry* allocationEntry = NULL;
885 		if (sAllocationEntryDontWait != NULL) {
886 			allocationEntry = sAllocationEntryDontWait;
887 		} else if (sAllocationEntryCanWait != NULL
888 				&& (flags & CACHE_DONT_WAIT_FOR_MEMORY) == 0) {
889 			allocationEntry = sAllocationEntryCanWait;
890 		} else
891 			break;
892 
893 		ConditionVariableEntry entry;
894 		allocationEntry->condition.Add(&entry);
895 
896 		mutex_unlock(&sLock);
897 		entry.Wait();
898 		mutex_lock(&sLock);
899 
900 		if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
901 				_chunk)) {
902 			return B_OK;
903 		}
904 	}
905 
906 	// prepare the allocation entry others can wait on
907 	AllocationEntry*& allocationEntry
908 		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
909 			? sAllocationEntryDontWait : sAllocationEntryCanWait;
910 
911 	AllocationEntry myResizeEntry;
912 	allocationEntry = &myResizeEntry;
913 	allocationEntry->condition.Init(metaChunkList, "wait for slab area");
914 	allocationEntry->thread = find_thread(NULL);
915 
916 	Area* area;
917 	status_t error = _AllocateArea(flags, area);
918 
919 	allocationEntry->condition.NotifyAll();
920 	allocationEntry = NULL;
921 
922 	if (error != B_OK)
923 		return error;
924 
925 	// Try again to get a meta chunk -- something might have been freed in the
926 	// meantime. If that succeeds now, the new area isn't needed and we free it.
927 	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk)) {
928 		_FreeArea(area, true, flags);
929 		return B_OK;
930 	}
931 
932 	_AddArea(area);
933 	_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
934 	return B_OK;
935 }
936 
937 
938 /*static*/ bool
939 MemoryManager::_GetChunks(MetaChunkList* metaChunkList, size_t chunkSize,
940 	uint32 chunkCount, MetaChunk*& _metaChunk, Chunk*& _chunk)
941 {
942 	// the common and less complicated special case
943 	if (chunkCount == 1)
944 		return _GetChunk(metaChunkList, chunkSize, _metaChunk, _chunk);
945 
946 	ASSERT(metaChunkList != NULL);
947 
948 	// Iterate through the partial meta chunk list and try to find a free
949 	// range that is large enough.
950 	MetaChunk* metaChunk = NULL;
951 	for (MetaChunkList::Iterator it = metaChunkList->GetIterator();
952 			(metaChunk = it.Next()) != NULL;) {
953 		if (metaChunk->firstFreeChunk + chunkCount - 1
954 				<= metaChunk->lastFreeChunk) {
955 			break;
956 		}
957 	}
958 
959 	if (metaChunk == NULL) {
960 		// try to get a free meta chunk
961 		if ((SLAB_CHUNK_SIZE_LARGE - SLAB_AREA_STRUCT_OFFSET - kAreaAdminSize)
962 				/ chunkSize >= chunkCount) {
963 			metaChunk = sFreeShortMetaChunks.RemoveHead();
964 		}
965 		if (metaChunk == NULL)
966 			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
967 
968 		if (metaChunk == NULL)
969 			return false;
970 
971 		metaChunkList->Add(metaChunk);
972 		metaChunk->GetArea()->usedMetaChunkCount++;
973 		_PrepareMetaChunk(metaChunk, chunkSize);
974 
975 		T(AllocateMetaChunk(metaChunk));
976 	}
977 
978 	// pull the chunks out of the free list
979 	Chunk* firstChunk = metaChunk->chunks + metaChunk->firstFreeChunk;
980 	Chunk* lastChunk = firstChunk + (chunkCount - 1);
981 	Chunk** chunkPointer = &metaChunk->freeChunks;
982 	uint32 remainingChunks = chunkCount;
983 	while (remainingChunks > 0) {
984 		ASSERT_PRINT(chunkPointer, "remaining: %" B_PRIu32 "/%" B_PRIu32
985 			", area: %p, meta chunk: %" B_PRIdSSIZE "\n", remainingChunks,
986 			chunkCount, metaChunk->GetArea(),
987 			metaChunk - metaChunk->GetArea()->metaChunks);
988 		Chunk* chunk = *chunkPointer;
989 		if (chunk >= firstChunk && chunk <= lastChunk) {
990 			*chunkPointer = chunk->next;
991 			chunk->reference = 1;
992 			remainingChunks--;
993 		} else
994 			chunkPointer = &chunk->next;
995 	}
996 
997 	// allocate the chunks
998 	metaChunk->usedChunkCount += chunkCount;
999 	if (metaChunk->usedChunkCount == metaChunk->chunkCount) {
1000 		// meta chunk is full now -- remove it from its list
1001 		if (metaChunkList != NULL)
1002 			metaChunkList->Remove(metaChunk);
1003 	}
1004 
1005 	// update the free range
1006 	metaChunk->firstFreeChunk += chunkCount;
1007 
1008 	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1009 
1010 	_chunk = firstChunk;
1011 	_metaChunk = metaChunk;
1012 
1013 	T(AllocateChunks(chunkSize, chunkCount, metaChunk, firstChunk));
1014 
1015 	return true;
1016 }
1017 
1018 
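/*!	Allocates a single chunk of \a chunkSize bytes, taking it from the meta
	chunk at the head of \a metaChunkList if possible, and otherwise from a
	freshly prepared free meta chunk. The [firstFreeChunk, lastFreeChunk]
	range, which tracks a contiguous span of free chunks, is shrunk from
	whichever end lies closer to the allocated chunk.
*/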
1019 /*static*/ bool
1020 MemoryManager::_GetChunk(MetaChunkList* metaChunkList, size_t chunkSize,
1021 	MetaChunk*& _metaChunk, Chunk*& _chunk)
1022 {
1023 	MetaChunk* metaChunk = metaChunkList != NULL
1024 		? metaChunkList->Head() : NULL;
1025 	if (metaChunk == NULL) {
1026 		// no partial meta chunk -- maybe there's a free one
1027 		if (chunkSize == SLAB_CHUNK_SIZE_LARGE) {
1028 			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1029 		} else {
1030 			metaChunk = sFreeShortMetaChunks.RemoveHead();
1031 			if (metaChunk == NULL)
1032 				metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1033 			if (metaChunk != NULL)
1034 				metaChunkList->Add(metaChunk);
1035 		}
1036 
1037 		if (metaChunk == NULL)
1038 			return false;
1039 
1040 		metaChunk->GetArea()->usedMetaChunkCount++;
1041 		_PrepareMetaChunk(metaChunk, chunkSize);
1042 
1043 		T(AllocateMetaChunk(metaChunk));
1044 	}
1045 
1046 	// allocate the chunk
1047 	if (++metaChunk->usedChunkCount == metaChunk->chunkCount) {
1048 		// meta chunk is full now -- remove it from its list
1049 		if (metaChunkList != NULL)
1050 			metaChunkList->Remove(metaChunk);
1051 	}
1052 
1053 	_chunk = _pop(metaChunk->freeChunks);
1054 	_metaChunk = metaChunk;
1055 
1056 	_chunk->reference = 1;
1057 
1058 	// update the free range
1059 	uint32 chunkIndex = _chunk - metaChunk->chunks;
1060 	if (chunkIndex >= metaChunk->firstFreeChunk
1061 			&& chunkIndex <= metaChunk->lastFreeChunk) {
1062 		if (chunkIndex - metaChunk->firstFreeChunk
1063 				<= metaChunk->lastFreeChunk - chunkIndex) {
1064 			metaChunk->firstFreeChunk = chunkIndex + 1;
1065 		} else
1066 			metaChunk->lastFreeChunk = chunkIndex - 1;
1067 	}
1068 
1069 	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1070 
1071 	T(AllocateChunk(chunkSize, metaChunk, _chunk));
1072 
1073 	return true;
1074 }
1075 
1076 
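/*!	Returns \a chunk to \a metaChunk's free list, unmapping it first unless
	\a alreadyUnmapped. A meta chunk that becomes entirely unused is moved
	back to the free meta chunk lists (and the area is freed via _FreeArea()
	once it has no used meta chunks left); a previously full meta chunk goes
	back onto its partial list. Otherwise the contiguous free range is
	extended if the freed chunk adjoins it. Called with sLock held; the lock
	is dropped temporarily for unmapping.
*/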
1077 /*static*/ void
1078 MemoryManager::_FreeChunk(Area* area, MetaChunk* metaChunk, Chunk* chunk,
1079 	addr_t chunkAddress, bool alreadyUnmapped, uint32 flags)
1080 {
1081 	// unmap the chunk
1082 	if (!alreadyUnmapped) {
1083 		mutex_unlock(&sLock);
1084 		_UnmapChunk(area->vmArea, chunkAddress, metaChunk->chunkSize, flags);
1085 		mutex_lock(&sLock);
1086 	}
1087 
1088 	T(FreeChunk(metaChunk, chunk));
1089 
1090 	_push(metaChunk->freeChunks, chunk);
1091 
1092 	uint32 chunkIndex = chunk - metaChunk->chunks;
1093 
1094 	// free the meta chunk, if it is unused now
1095 	PARANOID_CHECKS_ONLY(bool areaDeleted = false;)
1096 	ASSERT(metaChunk->usedChunkCount > 0);
1097 	if (--metaChunk->usedChunkCount == 0) {
1098 		T(FreeMetaChunk(metaChunk));
1099 
1100 		// remove from partial meta chunk list
1101 		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
1102 			sPartialMetaChunksSmall.Remove(metaChunk);
1103 		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
1104 			sPartialMetaChunksMedium.Remove(metaChunk);
1105 
1106 		// mark empty
1107 		metaChunk->chunkSize = 0;
1108 
1109 		// add to free list
1110 		if (metaChunk == area->metaChunks)
1111 			sFreeShortMetaChunks.Add(metaChunk, false);
1112 		else
1113 			sFreeCompleteMetaChunks.Add(metaChunk, false);
1114 
1115 		// free the area, if it is unused now
1116 		ASSERT(area->usedMetaChunkCount > 0);
1117 		if (--area->usedMetaChunkCount == 0) {
1118 			_FreeArea(area, false, flags);
1119 			PARANOID_CHECKS_ONLY(areaDeleted = true;)
1120 		}
1121 	} else if (metaChunk->usedChunkCount == metaChunk->chunkCount - 1) {
1122 		// the meta chunk was full before -- add it back to its partial chunk
1123 		// list
1124 		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
1125 			sPartialMetaChunksSmall.Add(metaChunk, false);
1126 		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
1127 			sPartialMetaChunksMedium.Add(metaChunk, false);
1128 
1129 		metaChunk->firstFreeChunk = chunkIndex;
1130 		metaChunk->lastFreeChunk = chunkIndex;
1131 	} else {
1132 		// extend the free range, if the freed chunk adjoins it
1133 		if (chunkIndex + 1 == metaChunk->firstFreeChunk) {
1134 			uint32 firstFree = chunkIndex;
1135 			for (; firstFree > 0; firstFree--) {
1136 				Chunk* previousChunk = &metaChunk->chunks[firstFree - 1];
1137 				if (!_IsChunkFree(metaChunk, previousChunk))
1138 					break;
1139 			}
1140 			metaChunk->firstFreeChunk = firstFree;
1141 		} else if (chunkIndex == (uint32)metaChunk->lastFreeChunk + 1) {
1142 			uint32 lastFree = chunkIndex;
1143 			for (; lastFree + 1 < metaChunk->chunkCount; lastFree++) {
1144 				Chunk* nextChunk = &metaChunk->chunks[lastFree + 1];
1145 				if (!_IsChunkFree(metaChunk, nextChunk))
1146 					break;
1147 			}
1148 			metaChunk->lastFreeChunk = lastFree;
1149 		}
1150 	}
1151 
1152 	PARANOID_CHECKS_ONLY(
1153 		if (!areaDeleted)
1154 			_CheckMetaChunk(metaChunk);
1155 	)
1156 }
1157 
1158 
1159 /*static*/ void
1160 MemoryManager::_PrepareMetaChunk(MetaChunk* metaChunk, size_t chunkSize)
1161 {
1162 	Area* area = metaChunk->GetArea();
1163 
1164 	if (metaChunk == area->metaChunks) {
1165 		// the first chunk is shorter
1166 		size_t unusableSize = ROUNDUP(SLAB_AREA_STRUCT_OFFSET + kAreaAdminSize,
1167 			chunkSize);
1168 		metaChunk->chunkBase = area->BaseAddress() + unusableSize;
1169 		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE - unusableSize;
1170 	}
1171 
1172 	metaChunk->chunkSize = chunkSize;
1173 	metaChunk->chunkCount = metaChunk->totalSize / chunkSize;
1174 	metaChunk->usedChunkCount = 0;
1175 
1176 	metaChunk->freeChunks = NULL;
1177 	for (int32 i = metaChunk->chunkCount - 1; i >= 0; i--)
1178 		_push(metaChunk->freeChunks, metaChunk->chunks + i);
1179 
1180 	metaChunk->firstFreeChunk = 0;
1181 	metaChunk->lastFreeChunk = metaChunk->chunkCount - 1;
1182 
1183 	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1184 }
1185 
1186 
1187 /*static*/ void
1188 MemoryManager::_AddArea(Area* area)
1189 {
1190 	T(AddArea(area));
1191 
1192 	// add the area to the hash table
1193 	WriteLocker writeLocker(sAreaTableLock);
1194 	sAreaTable.InsertUnchecked(area);
1195 	writeLocker.Unlock();
1196 
1197 	// add the area's meta chunks to the free lists
1198 	sFreeShortMetaChunks.Add(&area->metaChunks[0]);
1199 	for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++)
1200 		sFreeCompleteMetaChunks.Add(&area->metaChunks[i]);
1201 }
1202 
1203 
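/*!	Allocates a new Area. Called with sLock held; the lock is dropped while
	the work is done. During early boot (sKernelArgs != NULL) the memory comes
	from vm_allocate_early() and is fully mapped. Later, a "null" VM area of
	SLAB_AREA_SIZE bytes is created and only the administrative structure
	(kAreaAdminSize bytes) is mapped up front -- chunks are mapped on demand
	via _MapChunk(). Must not be called with CACHE_DONT_LOCK_KERNEL_SPACE set.
*/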
1204 /*static*/ status_t
1205 MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
1206 {
1207 	TRACE("MemoryManager::_AllocateArea(%#" B_PRIx32 ")\n", flags);
1208 
1209 	ASSERT((flags & CACHE_DONT_LOCK_KERNEL_SPACE) == 0);
1210 
1211 	mutex_unlock(&sLock);
1212 
1213 	size_t pagesNeededToMap = 0;
1214 	void* areaBase;
1215 	Area* area;
1216 	VMArea* vmArea = NULL;
1217 
1218 	if (sKernelArgs == NULL) {
1219 		// create an area
1220 		uint32 areaCreationFlags = (flags & CACHE_PRIORITY_VIP) != 0
1221 			? CREATE_AREA_PRIORITY_VIP : 0;
1222 		area_id areaID = vm_create_null_area(B_SYSTEM_TEAM, kSlabAreaName,
1223 			&areaBase, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
1224 			areaCreationFlags);
1225 		if (areaID < 0) {
1226 			mutex_lock(&sLock);
1227 			return areaID;
1228 		}
1229 
1230 		area = _AreaForAddress((addr_t)areaBase);
1231 
1232 		// map the memory for the administrative structure
1233 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1234 		VMTranslationMap* translationMap = addressSpace->TranslationMap();
1235 
1236 		pagesNeededToMap = translationMap->MaxPagesNeededToMap(
1237 			(addr_t)area, (addr_t)areaBase + SLAB_AREA_SIZE - 1);
1238 
1239 		vmArea = VMAreaHash::Lookup(areaID);
1240 		status_t error = _MapChunk(vmArea, (addr_t)area, kAreaAdminSize,
1241 			pagesNeededToMap, flags);
1242 		if (error != B_OK) {
1243 			delete_area(areaID);
1244 			mutex_lock(&sLock);
1245 			return error;
1246 		}
1247 
1248 		dprintf("slab memory manager: created area %p (%" B_PRId32 ")\n", area,
1249 			areaID);
1250 	} else {
1251 		// no areas yet -- allocate raw memory
1252 		areaBase = (void*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
1253 			SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
1254 			SLAB_AREA_SIZE);
1255 		if (areaBase == NULL) {
1256 			mutex_lock(&sLock);
1257 			return B_NO_MEMORY;
1258 		}
1259 		area = _AreaForAddress((addr_t)areaBase);
1260 
1261 		TRACE("MemoryManager::_AllocateArea(): allocated early area %p\n",
1262 			area);
1263 	}
1264 
1265 	// init the area structure
1266 	area->vmArea = vmArea;
1267 	area->reserved_memory_for_mapping = pagesNeededToMap * B_PAGE_SIZE;
1268 	area->usedMetaChunkCount = 0;
1269 	area->fullyMapped = vmArea == NULL;
1270 
1271 	// init the meta chunks
1272 	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1273 		MetaChunk* metaChunk = area->metaChunks + i;
1274 		metaChunk->chunkSize = 0;
1275 		metaChunk->chunkBase = (addr_t)areaBase + i * SLAB_CHUNK_SIZE_LARGE;
1276 		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE;
1277 			// Note: chunkBase and totalSize aren't correct for the first
1278 			// meta chunk. They will be set in _PrepareMetaChunk().
1279 		metaChunk->chunkCount = 0;
1280 		metaChunk->usedChunkCount = 0;
1281 		metaChunk->freeChunks = NULL;
1282 	}
1283 
1284 	mutex_lock(&sLock);
1285 	_area = area;
1286 
1287 	T(AllocateArea(area, flags));
1288 
1289 	return B_OK;
1290 }
1291 
1292 
1293 /*static*/ void
1294 MemoryManager::_FreeArea(Area* area, bool areaRemoved, uint32 flags)
1295 {
1296 	TRACE("MemoryManager::_FreeArea(%p, %#" B_PRIx32 ")\n", area, flags);
1297 
1298 	T(FreeArea(area, areaRemoved, flags));
1299 
1300 	ASSERT(area->usedMetaChunkCount == 0);
1301 
1302 	if (!areaRemoved) {
1303 		// remove the area's meta chunks from the free lists
1304 		ASSERT(area->metaChunks[0].usedChunkCount == 0);
1305 		sFreeShortMetaChunks.Remove(&area->metaChunks[0]);
1306 
1307 		for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1308 			ASSERT(area->metaChunks[i].usedChunkCount == 0);
1309 			sFreeCompleteMetaChunks.Remove(&area->metaChunks[i]);
1310 		}
1311 
1312 		// remove the area from the hash table
1313 		WriteLocker writeLocker(sAreaTableLock);
1314 		sAreaTable.RemoveUnchecked(area);
1315 		writeLocker.Unlock();
1316 	}
1317 
1318 	// We want to keep one or two free areas as a reserve.
1319 	if (sFreeAreaCount <= 1) {
1320 		_push(sFreeAreas, area);
1321 		sFreeAreaCount++;
1322 		return;
1323 	}
1324 
1325 	if (area->vmArea == NULL || (flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
1326 		// This is either early in the boot process or we aren't allowed to
1327 		// delete the area now.
1328 		_push(sFreeAreas, area);
1329 		sFreeAreaCount++;
1330 		_RequestMaintenance();
1331 		return;
1332 	}
1333 
1334 	mutex_unlock(&sLock);
1335 
1336 	dprintf("slab memory manager: deleting area %p (%" B_PRId32 ")\n", area,
1337 		area->vmArea->id);
1338 
1339 	size_t memoryToUnreserve = area->reserved_memory_for_mapping;
1340 	delete_area(area->vmArea->id);
1341 	vm_unreserve_memory(memoryToUnreserve);
1342 
1343 	mutex_lock(&sLock);
1344 }
1345 
1346 
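/*!	Maps \a size bytes at \a address within \a vmArea, first reserving memory
	and pages (honoring CACHE_DONT_WAIT_FOR_MEMORY and CACHE_PRIORITY_VIP),
	then backing the range with wired pages inserted into the area's VMCache.
	For early areas (\a vmArea == NULL) everything is mapped already and the
	function is a no-op.
*/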
1347 /*static*/ status_t
1348 MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
1349 	size_t reserveAdditionalMemory, uint32 flags)
1350 {
1351 	TRACE("MemoryManager::_MapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
1352 		")\n", vmArea, address, size);
1353 
1354 	T(Map(address, size, flags));
1355 
1356 	if (vmArea == NULL) {
1357 		// everything is mapped anyway
1358 		return B_OK;
1359 	}
1360 
1361 	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1362 	VMTranslationMap* translationMap = addressSpace->TranslationMap();
1363 
1364 	// reserve memory for the chunk
1365 	int priority = (flags & CACHE_PRIORITY_VIP) != 0
1366 		? VM_PRIORITY_VIP : VM_PRIORITY_SYSTEM;
1367 	size_t reservedMemory = size + reserveAdditionalMemory;
1368 	status_t error = vm_try_reserve_memory(size, priority,
1369 		(flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? 0 : 1000000);
1370 	if (error != B_OK)
1371 		return error;
1372 
1373 	// reserve the pages we need now
1374 	size_t reservedPages = size / B_PAGE_SIZE
1375 		+ translationMap->MaxPagesNeededToMap(address, address + size - 1);
1376 	vm_page_reservation reservation;
1377 	if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0) {
1378 		if (!vm_page_try_reserve_pages(&reservation, reservedPages, priority)) {
1379 			vm_unreserve_memory(reservedMemory);
1380 			return B_WOULD_BLOCK;
1381 		}
1382 	} else
1383 		vm_page_reserve_pages(&reservation, reservedPages, priority);
1384 
1385 	VMCache* cache = vm_area_get_locked_cache(vmArea);
1386 
1387 	// map the pages
1388 	translationMap->Lock();
1389 
1390 	addr_t areaOffset = address - vmArea->Base();
1391 	addr_t endAreaOffset = areaOffset + size;
1392 	for (size_t offset = areaOffset; offset < endAreaOffset;
1393 			offset += B_PAGE_SIZE) {
1394 		vm_page* page = vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
1395 		cache->InsertPage(page, offset);
1396 
1397 		page->IncrementWiredCount();
1398 		atomic_add(&gMappedPagesCount, 1);
1399 		DEBUG_PAGE_ACCESS_END(page);
1400 
1401 		translationMap->Map(vmArea->Base() + offset,
1402 			page->physical_page_number * B_PAGE_SIZE,
1403 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
1404 			vmArea->MemoryType(), &reservation);
1405 	}
1406 
1407 	translationMap->Unlock();
1408 
1409 	cache->ReleaseRefAndUnlock();
1410 
1411 	vm_page_unreserve_pages(&reservation);
1412 
1413 	return B_OK;
1414 }
1415 
1416 
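/*!	Unmaps \a size bytes at \a address within \a vmArea, frees the backing
	pages from the area's VMCache, and unreserves the memory. Returns B_ERROR
	for early areas that don't have a VM area yet.
*/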
1417 /*static*/ status_t
1418 MemoryManager::_UnmapChunk(VMArea* vmArea, addr_t address, size_t size,
1419 	uint32 flags)
1420 {
1421 	T(Unmap(address, size, flags));
1422 
1423 	if (vmArea == NULL)
1424 		return B_ERROR;
1425 
1426 	TRACE("MemoryManager::_UnmapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
1427 		")\n", vmArea, address, size);
1428 
1429 	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1430 	VMTranslationMap* translationMap = addressSpace->TranslationMap();
1431 	VMCache* cache = vm_area_get_locked_cache(vmArea);
1432 
1433 	// unmap the pages
1434 	translationMap->Lock();
1435 	translationMap->Unmap(address, address + size - 1);
1436 	atomic_add(&gMappedPagesCount, -(size / B_PAGE_SIZE));
1437 	translationMap->Unlock();
1438 
1439 	// free the pages
1440 	addr_t areaPageOffset = (address - vmArea->Base()) / B_PAGE_SIZE;
1441 	addr_t areaPageEndOffset = areaPageOffset + size / B_PAGE_SIZE;
1442 	VMCachePagesTree::Iterator it = cache->pages.GetIterator(
1443 		areaPageOffset, true, true);
1444 	while (vm_page* page = it.Next()) {
1445 		if (page->cache_offset >= areaPageEndOffset)
1446 			break;
1447 
1448 		DEBUG_PAGE_ACCESS_START(page);
1449 
1450 		page->DecrementWiredCount();
1451 
1452 		cache->RemovePage(page);
1453 			// the iterator is remove-safe
1454 		vm_page_free(cache, page);
1455 	}
1456 
1457 	cache->ReleaseRefAndUnlock();
1458 
1459 	vm_unreserve_memory(size);
1460 
1461 	return B_OK;
1462 }
1463 
1464 
1465 /*static*/ void
1466 MemoryManager::_UnmapFreeChunksEarly(Area* area)
1467 {
1468 	if (!area->fullyMapped)
1469 		return;
1470 
1471 	TRACE("MemoryManager::_UnmapFreeChunksEarly(%p)\n", area);
1472 
1473 	// unmap the space before the Area structure
1474 	#if SLAB_AREA_STRUCT_OFFSET > 0
1475 		_UnmapChunk(area->vmArea, area->BaseAddress(), SLAB_AREA_STRUCT_OFFSET,
1476 			0);
1477 	#endif
1478 
1479 	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1480 		MetaChunk* metaChunk = area->metaChunks + i;
1481 		if (metaChunk->chunkSize == 0) {
1482 			// meta chunk is free -- unmap it completely
1483 			if (i == 0) {
1484 				_UnmapChunk(area->vmArea, (addr_t)area + kAreaAdminSize,
1485 					SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize, 0);
1486 			} else {
1487 				_UnmapChunk(area->vmArea,
1488 					area->BaseAddress() + i * SLAB_CHUNK_SIZE_LARGE,
1489 					SLAB_CHUNK_SIZE_LARGE, 0);
1490 			}
1491 		} else {
1492 			// unmap free chunks
1493 			for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
1494 					chunk = chunk->next) {
1495 				_UnmapChunk(area->vmArea, _ChunkAddress(metaChunk, chunk),
1496 					metaChunk->chunkSize, 0);
1497 			}
1498 
1499 			// The first meta chunk might have space before its first chunk.
1500 			if (i == 0) {
1501 				addr_t unusedStart = (addr_t)area + kAreaAdminSize;
1502 				if (unusedStart < metaChunk->chunkBase) {
1503 					_UnmapChunk(area->vmArea, unusedStart,
1504 						metaChunk->chunkBase - unusedStart, 0);
1505 				}
1506 			}
1507 		}
1508 	}
1509 
1510 	area->fullyMapped = false;
1511 }
1512 
1513 
1514 /*static*/ void
1515 MemoryManager::_ConvertEarlyArea(Area* area)
1516 {
1517 	void* address = (void*)area->BaseAddress();
1518 	area_id areaID = create_area(kSlabAreaName, &address, B_EXACT_ADDRESS,
1519 		SLAB_AREA_SIZE, B_ALREADY_WIRED,
1520 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1521 	if (areaID < 0)
1522 		panic("out of memory");
1523 
1524 	area->vmArea = VMAreaHash::Lookup(areaID);
1525 }
1526 
1527 
1528 /*static*/ void
1529 MemoryManager::_RequestMaintenance()
1530 {
1531 	if ((sFreeAreaCount > 0 && sFreeAreaCount <= 2) || sMaintenanceNeeded)
1532 		return;
1533 
1534 	sMaintenanceNeeded = true;
1535 	request_memory_manager_maintenance();
1536 }
1537 
1538 
1539 /*static*/ bool
1540 MemoryManager::_IsChunkInFreeList(const MetaChunk* metaChunk,
1541 	const Chunk* chunk)
1542 {
1543 	Chunk* freeChunk = metaChunk->freeChunks;
1544 	while (freeChunk != NULL) {
1545 		if (freeChunk == chunk)
1546 			return true;
1547 		freeChunk = freeChunk->next;
1548 	}
1549 
1550 	return false;
1551 }
1552 
1553 
1554 #if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
1555 
1556 /*static*/ void
1557 MemoryManager::_CheckMetaChunk(MetaChunk* metaChunk)
1558 {
1559 	Area* area = metaChunk->GetArea();
1560 	int32 metaChunkIndex = metaChunk - area->metaChunks;
1561 	if (metaChunkIndex < 0 || metaChunkIndex >= SLAB_META_CHUNKS_PER_AREA) {
1562 		panic("invalid meta chunk %p!", metaChunk);
1563 		return;
1564 	}
1565 
1566 	switch (metaChunk->chunkSize) {
1567 		case 0:
1568 			// unused
1569 			return;
1570 		case SLAB_CHUNK_SIZE_SMALL:
1571 		case SLAB_CHUNK_SIZE_MEDIUM:
1572 		case SLAB_CHUNK_SIZE_LARGE:
1573 			break;
1574 		default:
1575 			panic("meta chunk %p has invalid chunk size: %" B_PRIuSIZE,
1576 				metaChunk, metaChunk->chunkSize);
1577 			return;
1578 	}
1579 
1580 	if (metaChunk->totalSize > SLAB_CHUNK_SIZE_LARGE) {
1581 		panic("meta chunk %p has invalid total size: %" B_PRIuSIZE,
1582 			metaChunk, metaChunk->totalSize);
1583 		return;
1584 	}
1585 
1586 	addr_t expectedBase = area->BaseAddress()
1587 		+ metaChunkIndex * SLAB_CHUNK_SIZE_LARGE;
1588 	if (metaChunk->chunkBase < expectedBase
1589 		|| metaChunk->chunkBase - expectedBase + metaChunk->totalSize
1590 			> SLAB_CHUNK_SIZE_LARGE) {
1591 		panic("meta chunk %p has invalid base address: %" B_PRIxADDR, metaChunk,
1592 			metaChunk->chunkBase);
1593 		return;
1594 	}
1595 
1596 	if (metaChunk->chunkCount != metaChunk->totalSize / metaChunk->chunkSize) {
1597 		panic("meta chunk %p has invalid chunk count: %u", metaChunk,
1598 			metaChunk->chunkCount);
1599 		return;
1600 	}
1601 
1602 	if (metaChunk->usedChunkCount > metaChunk->chunkCount) {
1603 		panic("meta chunk %p has invalid used chunk count: %u", metaChunk,
1604 			metaChunk->usedChunkCount);
1605 		return;
1606 	}
1607 
1608 	if (metaChunk->firstFreeChunk > metaChunk->chunkCount) {
1609 		panic("meta chunk %p has invalid first free chunk: %u", metaChunk,
1610 			metaChunk->firstFreeChunk);
1611 		return;
1612 	}
1613 
1614 	if (metaChunk->lastFreeChunk >= metaChunk->chunkCount) {
1615 		panic("meta chunk %p has invalid last free chunk: %u", metaChunk,
1616 			metaChunk->lastFreeChunk);
1617 		return;
1618 	}
1619 
1620 	// check free list for structural sanity
1621 	uint32 freeChunks = 0;
1622 	for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
1623 			chunk = chunk->next) {
1624 		if ((addr_t)chunk % sizeof(Chunk) != 0 || chunk < metaChunk->chunks
1625 			|| chunk >= metaChunk->chunks + metaChunk->chunkCount) {
1626 			panic("meta chunk %p has invalid element in free list, chunk: %p",
1627 				metaChunk, chunk);
1628 			return;
1629 		}
1630 
1631 		if (++freeChunks > metaChunk->chunkCount) {
1632 			panic("meta chunk %p has cyclic free list", metaChunk);
1633 			return;
1634 		}
1635 	}
1636 
1637 	if (freeChunks + metaChunk->usedChunkCount > metaChunk->chunkCount) {
1638 		panic("meta chunk %p has mismatching free/used chunk counts: total: "
1639 			"%u, used: %u, free: %" B_PRIu32, metaChunk, metaChunk->chunkCount,
1640 			metaChunk->usedChunkCount, freeChunks);
1641 		return;
1642 	}
1643 
1644 	// count used chunks by looking at their reference/next field
1645 	uint32 usedChunks = 0;
1646 	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
1647 		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i))
1648 			usedChunks++;
1649 	}
1650 
1651 	if (usedChunks != metaChunk->usedChunkCount) {
1652 		panic("meta chunk %p has used chunks that appear free: total: "
1653 			"%u, used: %u, appearing used: %" B_PRIu32, metaChunk,
1654 			metaChunk->chunkCount, metaChunk->usedChunkCount, usedChunks);
1655 		return;
1656 	}
1657 
1658 	// check free range
1659 	for (uint32 i = metaChunk->firstFreeChunk; i < metaChunk->lastFreeChunk;
1660 			i++) {
1661 		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i)) {
1662 			panic("meta chunk %p has used chunk in free range, chunk: %p (%"
1663 				B_PRIu32 ", free range: %u - %u)", metaChunk,
1664 				metaChunk->chunks + i, i, metaChunk->firstFreeChunk,
1665 				metaChunk->lastFreeChunk);
1666 			return;
1667 		}
1668 	}
1669 }
1670 
1671 #endif	// DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
1672 
1673 
1674 /*static*/ int
1675 MemoryManager::_DumpRawAllocations(int argc, char** argv)
1676 {
1677 	kprintf("area        meta chunk  chunk  base        size (KB)\n");
1678 
1679 	size_t totalSize = 0;
1680 
1681 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
1682 			Area* area = it.Next();) {
1683 		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1684 			MetaChunk* metaChunk = area->metaChunks + i;
1685 			if (metaChunk->chunkSize == 0)
1686 				continue;
1687 			for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
1688 				Chunk* chunk = metaChunk->chunks + k;
1689 
1690 				// skip free chunks
1691 				if (_IsChunkFree(metaChunk, chunk))
1692 					continue;
1693 
1694 				addr_t reference = chunk->reference;
1695 				if ((reference & 1) == 0 || reference == 1)
1696 					continue;
1697 
1698 				addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
1699 				size_t size = reference - chunkAddress + 1;
1700 				totalSize += size;
1701 
1702 				kprintf("%p  %10" B_PRId32 "  %5" B_PRIu32 "  %p  %9"
1703 					B_PRIuSIZE "\n", area, i, k, (void*)chunkAddress,
1704 					size / 1024);
1705 			}
1706 		}
1707 	}
1708 
1709 	kprintf("total:                                     %9" B_PRIuSIZE "\n",
1710 		totalSize / 1024);
1711 
1712 	return 0;
1713 }
1714 
1715 
1716 /*static*/ void
1717 MemoryManager::_PrintMetaChunkTableHeader(bool printChunks)
1718 {
1719 	if (printChunks)
1720 		kprintf("chunk        base       cache  object size  cache name\n");
1721 	else
1722 		kprintf("chunk        base\n");
1723 }
1724 
1725 /*static*/ void
1726 MemoryManager::_DumpMetaChunk(MetaChunk* metaChunk, bool printChunks,
1727 	bool printHeader)
1728 {
1729 	if (printHeader)
1730 		_PrintMetaChunkTableHeader(printChunks);
1731 
1732 	const char* type = "empty";
1733 	if (metaChunk->chunkSize != 0) {
1734 		switch (metaChunk->chunkSize) {
1735 			case SLAB_CHUNK_SIZE_SMALL:
1736 				type = "small";
1737 				break;
1738 			case SLAB_CHUNK_SIZE_MEDIUM:
1739 				type = "medium";
1740 				break;
1741 			case SLAB_CHUNK_SIZE_LARGE:
1742 				type = "large";
1743 				break;
1744 		}
1745 	}
1746 
1747 	int metaChunkIndex = metaChunk - metaChunk->GetArea()->metaChunks;
1748 	kprintf("%5d  %p  --- %6s meta chunk", metaChunkIndex,
1749 		(void*)metaChunk->chunkBase, type);
1750 	if (metaChunk->chunkSize != 0) {
1751 		kprintf(": %4u/%4u used, %-4u-%4u free ------------\n",
1752 			metaChunk->usedChunkCount, metaChunk->chunkCount,
1753 			metaChunk->firstFreeChunk, metaChunk->lastFreeChunk);
1754 	} else
1755 		kprintf(" --------------------------------------------\n");
1756 
1757 	if (metaChunk->chunkSize == 0 || !printChunks)
1758 		return;
1759 
1760 	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
1761 		Chunk* chunk = metaChunk->chunks + i;
1762 
1763 		// skip free chunks
1764 		if (_IsChunkFree(metaChunk, chunk)) {
1765 			if (!_IsChunkInFreeList(metaChunk, chunk)) {
1766 				kprintf("%5" B_PRIu32 "  %p  appears free, but isn't in free "
1767 					"list!\n", i, (void*)_ChunkAddress(metaChunk, chunk));
1768 			}
1769 
1770 			continue;
1771 		}
1772 
1773 		addr_t reference = chunk->reference;
1774 		if ((reference & 1) == 0) {
1775 			ObjectCache* cache = (ObjectCache*)reference;
1776 			kprintf("%5" B_PRIu32 "  %p  %p  %11" B_PRIuSIZE "  %s\n", i,
1777 				(void*)_ChunkAddress(metaChunk, chunk), cache,
1778 				cache != NULL ? cache->object_size : 0,
1779 				cache != NULL ? cache->name : "");
1780 		} else if (reference != 1) {
1781 			kprintf("%5" B_PRIu32 "  %p  raw allocation up to %p\n", i,
1782 				(void*)_ChunkAddress(metaChunk, chunk), (void*)reference);
1783 		}
1784 	}
1785 }
1786 
1787 
1788 /*static*/ int
1789 MemoryManager::_DumpMetaChunk(int argc, char** argv)
1790 {
1791 	if (argc != 2) {
1792 		print_debugger_command_usage(argv[0]);
1793 		return 0;
1794 	}
1795 
1796 	uint64 address;
1797 	if (!evaluate_debug_expression(argv[1], &address, false))
1798 		return 0;
1799 
1800 	Area* area = _AreaForAddress(address);
1801 
1802 	MetaChunk* metaChunk;
1803 	if ((addr_t)address >= (addr_t)area->metaChunks
1804 		&& (addr_t)address
1805 			< (addr_t)(area->metaChunks + SLAB_META_CHUNKS_PER_AREA)) {
1806 		metaChunk = (MetaChunk*)(addr_t)address;
1807 	} else {
1808 		metaChunk = area->metaChunks
1809 			+ (address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE;
1810 	}
1811 
1812 	_DumpMetaChunk(metaChunk, true, true);
1813 
1814 	return 0;
1815 }
1816 
1817 
1818 /*static*/ void
1819 MemoryManager::_DumpMetaChunks(const char* name, MetaChunkList& metaChunkList,
1820 	bool printChunks)
1821 {
1822 	kprintf("%s:\n", name);
1823 
1824 	for (MetaChunkList::Iterator it = metaChunkList.GetIterator();
1825 			MetaChunk* metaChunk = it.Next();) {
1826 		_DumpMetaChunk(metaChunk, printChunks, false);
1827 	}
1828 }
1829 
1830 
1831 /*static*/ int
1832 MemoryManager::_DumpMetaChunks(int argc, char** argv)
1833 {
1834 	bool printChunks = argc > 1 && strcmp(argv[1], "-c") == 0;
1835 
1836 	_PrintMetaChunkTableHeader(printChunks);
1837 	_DumpMetaChunks("free complete", sFreeCompleteMetaChunks, printChunks);
1838 	_DumpMetaChunks("free short", sFreeShortMetaChunks, printChunks);
1839 	_DumpMetaChunks("partial small", sPartialMetaChunksSmall, printChunks);
1840 	_DumpMetaChunks("partial medium", sPartialMetaChunksMedium, printChunks);
1841 
1842 	return 0;
1843 }
1844 
1845 
1846 /*static*/ int
1847 MemoryManager::_DumpArea(int argc, char** argv)
1848 {
1849 	bool printChunks = false;
1850 
1851 	int argi = 1;
1852 	while (argi < argc) {
1853 		if (argv[argi][0] != '-')
1854 			break;
1855 		const char* arg = argv[argi++];
1856 		if (strcmp(arg, "-c") == 0) {
1857 			printChunks = true;
1858 		} else {
1859 			print_debugger_command_usage(argv[0]);
1860 			return 0;
1861 		}
1862 	}
1863 
1864 	if (argi + 1 != argc) {
1865 		print_debugger_command_usage(argv[0]);
1866 		return 0;
1867 	}
1868 
1869 	uint64 address;
1870 	if (!evaluate_debug_expression(argv[argi], &address, false))
1871 		return 0;
1872 
1873 	Area* area = _AreaForAddress((addr_t)address);
1874 
1875 	for (uint32 k = 0; k < SLAB_META_CHUNKS_PER_AREA; k++) {
1876 		MetaChunk* metaChunk = area->metaChunks + k;
1877 		_DumpMetaChunk(metaChunk, printChunks, k == 0);
1878 	}
1879 
1880 	return 0;
1881 }
1882 
1883 
1884 /*static*/ int
1885 MemoryManager::_DumpAreas(int argc, char** argv)
1886 {
1887 	kprintf("      base        area   meta      small   medium  large\n");
1888 
1889 	size_t totalTotalSmall = 0;
1890 	size_t totalUsedSmall = 0;
1891 	size_t totalTotalMedium = 0;
1892 	size_t totalUsedMedium = 0;
1893 	size_t totalUsedLarge = 0;
1894 	uint32 areaCount = 0;
1895 
1896 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
1897 			Area* area = it.Next();) {
1898 		areaCount++;
1899 
1900 		// sum up the free/used counts for the chunk sizes
1901 		int totalSmall = 0;
1902 		int usedSmall = 0;
1903 		int totalMedium = 0;
1904 		int usedMedium = 0;
1905 		int usedLarge = 0;
1906 
1907 		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1908 			MetaChunk* metaChunk = area->metaChunks + i;
1909 			if (metaChunk->chunkSize == 0)
1910 				continue;
1911 
1912 			switch (metaChunk->chunkSize) {
1913 				case SLAB_CHUNK_SIZE_SMALL:
1914 					totalSmall += metaChunk->chunkCount;
1915 					usedSmall += metaChunk->usedChunkCount;
1916 					break;
1917 				case SLAB_CHUNK_SIZE_MEDIUM:
1918 					totalMedium += metaChunk->chunkCount;
1919 					usedMedium += metaChunk->usedChunkCount;
1920 					break;
1921 				case SLAB_CHUNK_SIZE_LARGE:
1922 					usedLarge += metaChunk->usedChunkCount;
1923 					break;
1924 			}
1925 		}
1926 
1927 		kprintf("%p  %p  %2u/%2u  %4d/%4d  %3d/%3d  %5d\n",
1928 			area, area->vmArea, area->usedMetaChunkCount,
1929 			SLAB_META_CHUNKS_PER_AREA, usedSmall, totalSmall, usedMedium,
1930 			totalMedium, usedLarge);
1931 
1932 		totalTotalSmall += totalSmall;
1933 		totalUsedSmall += usedSmall;
1934 		totalTotalMedium += totalMedium;
1935 		totalUsedMedium += usedMedium;
1936 		totalUsedLarge += usedLarge;
1937 	}
1938 
1939 	kprintf("%d free area%s:\n", sFreeAreaCount,
1940 		sFreeAreaCount == 1 ? "" : "s");
1941 	for (Area* area = sFreeAreas; area != NULL; area = area->next) {
1942 		areaCount++;
1943 		kprintf("%p  %p\n", area, area->vmArea);
1944 	}
1945 
1946 	kprintf("total usage:\n");
1947 	kprintf("  small:    %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedSmall,
1948 		totalTotalSmall);
1949 	kprintf("  medium:   %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedMedium,
1950 		totalTotalMedium);
1951 	kprintf("  large:    %" B_PRIuSIZE "\n", totalUsedLarge);
1952 	kprintf("  memory:   %" B_PRIuSIZE "/%" B_PRIuSIZE " KB\n",
1953 		(totalUsedSmall * SLAB_CHUNK_SIZE_SMALL
1954 			+ totalUsedMedium * SLAB_CHUNK_SIZE_MEDIUM
1955 			+ totalUsedLarge * SLAB_CHUNK_SIZE_LARGE) / 1024,
1956 		areaCount * SLAB_AREA_SIZE / 1024);
1957 	kprintf("  overhead: %" B_PRIuSIZE " KB\n",
1958 		areaCount * kAreaAdminSize / 1024);
1959 
1960 	return 0;
1961 }
1962