xref: /haiku/src/system/kernel/slab/MemoryManager.cpp (revision 1294543de9ac0eff000eaea1b18368c36435d08e)
1 /*
2  * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include "MemoryManager.h"
8 
9 #include <algorithm>
10 
11 #include <debug.h>
12 #include <tracing.h>
13 #include <util/AutoLock.h>
14 #include <vm/vm.h>
15 #include <vm/vm_page.h>
16 #include <vm/vm_priv.h>
17 #include <vm/VMAddressSpace.h>
18 #include <vm/VMArea.h>
19 #include <vm/VMCache.h>
20 #include <vm/VMTranslationMap.h>
21 
22 #include "kernel_debug_config.h"
23 
24 #include "ObjectCache.h"
25 #include "slab_private.h"
26 
27 
28 //#define TRACE_MEMORY_MANAGER
29 #ifdef TRACE_MEMORY_MANAGER
30 #	define TRACE(x...)	dprintf(x)
31 #else
32 #	define TRACE(x...)	do {} while (false)
33 #endif
34 
35 #if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
36 #	define PARANOID_CHECKS_ONLY(x)	x
37 #else
38 #	define PARANOID_CHECKS_ONLY(x)
39 #endif
40 
41 
42 static const char* const kSlabAreaName = "slab area";
43 
44 static void* sAreaTableBuffer[1024];
45 
46 mutex MemoryManager::sLock;
47 rw_lock MemoryManager::sAreaTableLock;
48 kernel_args* MemoryManager::sKernelArgs;
49 MemoryManager::AreaTable MemoryManager::sAreaTable;
50 MemoryManager::Area* MemoryManager::sFreeAreas;
51 int MemoryManager::sFreeAreaCount;
52 MemoryManager::MetaChunkList MemoryManager::sFreeCompleteMetaChunks;
53 MemoryManager::MetaChunkList MemoryManager::sFreeShortMetaChunks;
54 MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksSmall;
55 MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksMedium;
56 MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryCanWait;
57 MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryDontWait;
58 bool MemoryManager::sMaintenanceNeeded;
59 
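// Overview of the layout, as implied by the code in this file: memory is
// managed in areas of SLAB_AREA_SIZE bytes, each divided into
// SLAB_META_CHUNKS_PER_AREA meta chunks of SLAB_CHUNK_SIZE_LARGE bytes. A meta
// chunk in use hands out chunks of a single size (small, medium, or large).
// The first meta chunk of an area is shorter, since the area's administrative
// structure lives at the area's start (see _PrepareMetaChunk()). sFreeAreas
// keeps a small reserve of completely unused areas, while the four
// MetaChunkLists sort meta chunks by state: free short/complete and partially
// used small/medium.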
60 
61 // #pragma mark - kernel tracing
62 
63 
64 #if SLAB_MEMORY_MANAGER_TRACING
65 
66 
67 //namespace SlabMemoryManagerCacheTracing {
68 struct MemoryManager::Tracing {
69 
70 class MemoryManagerTraceEntry : public AbstractTraceEntry {
71 public:
72 	MemoryManagerTraceEntry()
73 	{
74 	}
75 };
76 
77 
78 class Allocate : public MemoryManagerTraceEntry {
79 public:
80 	Allocate(ObjectCache* cache, uint32 flags)
81 		:
82 		MemoryManagerTraceEntry(),
83 		fCache(cache),
84 		fFlags(flags)
85 	{
86 		Initialized();
87 	}
88 
89 	virtual void AddDump(TraceOutput& out)
90 	{
91 		out.Print("slab memory manager alloc: cache: %p, flags: %#" B_PRIx32,
92 			fCache, fFlags);
93 	}
94 
95 private:
96 	ObjectCache*	fCache;
97 	uint32			fFlags;
98 };
99 
100 
101 class Free : public MemoryManagerTraceEntry {
102 public:
103 	Free(void* address, uint32 flags)
104 		:
105 		MemoryManagerTraceEntry(),
106 		fAddress(address),
107 		fFlags(flags)
108 	{
109 		Initialized();
110 	}
111 
112 	virtual void AddDump(TraceOutput& out)
113 	{
114 		out.Print("slab memory manager free: address: %p, flags: %#" B_PRIx32,
115 			fAddress, fFlags);
116 	}
117 
118 private:
119 	void*	fAddress;
120 	uint32	fFlags;
121 };
122 
123 
124 class AllocateRaw : public MemoryManagerTraceEntry {
125 public:
126 	AllocateRaw(size_t size, uint32 flags)
127 		:
128 		MemoryManagerTraceEntry(),
129 		fSize(size),
130 		fFlags(flags)
131 	{
132 		Initialized();
133 	}
134 
135 	virtual void AddDump(TraceOutput& out)
136 	{
137 		out.Print("slab memory manager alloc raw: size: %" B_PRIuSIZE
138 			", flags: %#" B_PRIx32, fSize, fFlags);
139 	}
140 
141 private:
142 	size_t	fSize;
143 	uint32	fFlags;
144 };
145 
146 
147 class FreeRawOrReturnCache : public MemoryManagerTraceEntry {
148 public:
149 	FreeRawOrReturnCache(void* address, uint32 flags)
150 		:
151 		MemoryManagerTraceEntry(),
152 		fAddress(address),
153 		fFlags(flags)
154 	{
155 		Initialized();
156 	}
157 
158 	virtual void AddDump(TraceOutput& out)
159 	{
160 		out.Print("slab memory manager free raw/return: address: %p, flags: %#"
161 			B_PRIx32, fAddress, fFlags);
162 	}
163 
164 private:
165 	void*	fAddress;
166 	uint32	fFlags;
167 };
168 
169 
170 class AllocateArea : public MemoryManagerTraceEntry {
171 public:
172 	AllocateArea(Area* area, uint32 flags)
173 		:
174 		MemoryManagerTraceEntry(),
175 		fArea(area),
176 		fFlags(flags)
177 	{
178 		Initialized();
179 	}
180 
181 	virtual void AddDump(TraceOutput& out)
182 	{
183 		out.Print("slab memory manager alloc area: flags: %#" B_PRIx32
184 			" -> %p", fFlags, fArea);
185 	}
186 
187 private:
188 	Area*	fArea;
189 	uint32	fFlags;
190 };
191 
192 
193 class AddArea : public MemoryManagerTraceEntry {
194 public:
195 	AddArea(Area* area)
196 		:
197 		MemoryManagerTraceEntry(),
198 		fArea(area)
199 	{
200 		Initialized();
201 	}
202 
203 	virtual void AddDump(TraceOutput& out)
204 	{
205 		out.Print("slab memory manager add area: %p", fArea);
206 	}
207 
208 private:
209 	Area*	fArea;
210 };
211 
212 
213 class FreeArea : public MemoryManagerTraceEntry {
214 public:
215 	FreeArea(Area* area, bool areaRemoved, uint32 flags)
216 		:
217 		MemoryManagerTraceEntry(),
218 		fArea(area),
219 		fFlags(flags),
220 		fRemoved(areaRemoved)
221 	{
222 		Initialized();
223 	}
224 
225 	virtual void AddDump(TraceOutput& out)
226 	{
227 		out.Print("slab memory manager free area: %p%s, flags: %#" B_PRIx32,
228 			fArea, fRemoved ? " (removed)" : "", fFlags);
229 	}
230 
231 private:
232 	Area*	fArea;
233 	uint32	fFlags;
234 	bool	fRemoved;
235 };
236 
237 
238 class AllocateMetaChunk : public MemoryManagerTraceEntry {
239 public:
240 	AllocateMetaChunk(MetaChunk* metaChunk)
241 		:
242 		MemoryManagerTraceEntry(),
243 		fMetaChunk(metaChunk->chunkBase)
244 	{
245 		Initialized();
246 	}
247 
248 	virtual void AddDump(TraceOutput& out)
249 	{
250 		out.Print("slab memory manager alloc meta chunk: %#" B_PRIxADDR,
251 			fMetaChunk);
252 	}
253 
254 private:
255 	addr_t	fMetaChunk;
256 };
257 
258 
259 class FreeMetaChunk : public MemoryManagerTraceEntry {
260 public:
261 	FreeMetaChunk(MetaChunk* metaChunk)
262 		:
263 		MemoryManagerTraceEntry(),
264 		fMetaChunk(metaChunk->chunkBase)
265 	{
266 		Initialized();
267 	}
268 
269 	virtual void AddDump(TraceOutput& out)
270 	{
271 		out.Print("slab memory manager free meta chunk: %#" B_PRIxADDR,
272 			fMetaChunk);
273 	}
274 
275 private:
276 	addr_t	fMetaChunk;
277 };
278 
279 
280 class AllocateChunk : public MemoryManagerTraceEntry {
281 public:
282 	AllocateChunk(size_t chunkSize, MetaChunk* metaChunk, Chunk* chunk)
283 		:
284 		MemoryManagerTraceEntry(),
285 		fChunkSize(chunkSize),
286 		fMetaChunk(metaChunk->chunkBase),
287 		fChunk(chunk - metaChunk->chunks)
288 	{
289 		Initialized();
290 	}
291 
292 	virtual void AddDump(TraceOutput& out)
293 	{
294 		out.Print("slab memory manager alloc chunk: size: %" B_PRIuSIZE
295 			" -> meta chunk: %#" B_PRIxADDR ", chunk: %" B_PRIu32, fChunkSize,
296 			fMetaChunk, fChunk);
297 	}
298 
299 private:
300 	size_t	fChunkSize;
301 	addr_t	fMetaChunk;
302 	uint32	fChunk;
303 };
304 
305 
306 class AllocateChunks : public MemoryManagerTraceEntry {
307 public:
308 	AllocateChunks(size_t chunkSize, uint32 chunkCount, MetaChunk* metaChunk,
309 		Chunk* chunk)
310 		:
311 		MemoryManagerTraceEntry(),
312 		fMetaChunk(metaChunk->chunkBase),
313 		fChunkSize(chunkSize),
314 		fChunkCount(chunkCount),
315 		fChunk(chunk - metaChunk->chunks)
316 	{
317 		Initialized();
318 	}
319 
320 	virtual void AddDump(TraceOutput& out)
321 	{
322 		out.Print("slab memory manager alloc chunks: size: %" B_PRIuSIZE
323 			", count %" B_PRIu32 " -> meta chunk: %#" B_PRIxADDR ", chunk: %"
324 			B_PRIu32, fChunkSize, fChunkCount, fMetaChunk, fChunk);
325 	}
326 
327 private:
328 	addr_t	fMetaChunk;
329 	size_t	fChunkSize;
330 	uint32	fChunkCount;
331 	uint32	fChunk;
332 };
333 
334 
335 class FreeChunk : public MemoryManagerTraceEntry {
336 public:
337 	FreeChunk(MetaChunk* metaChunk, Chunk* chunk)
338 		:
339 		MemoryManagerTraceEntry(),
340 		fMetaChunk(metaChunk->chunkBase),
341 		fChunk(chunk - metaChunk->chunks)
342 	{
343 		Initialized();
344 	}
345 
346 	virtual void AddDump(TraceOutput& out)
347 	{
348 		out.Print("slab memory manager free chunk: meta chunk: %#" B_PRIxADDR
349 			", chunk: %" B_PRIu32, fMetaChunk, fChunk);
350 	}
351 
352 private:
353 	addr_t	fMetaChunk;
354 	uint32	fChunk;
355 };
356 
357 
358 class Map : public MemoryManagerTraceEntry {
359 public:
360 	Map(addr_t address, size_t size, uint32 flags)
361 		:
362 		MemoryManagerTraceEntry(),
363 		fAddress(address),
364 		fSize(size),
365 		fFlags(flags)
366 	{
367 		Initialized();
368 	}
369 
370 	virtual void AddDump(TraceOutput& out)
371 	{
372 		out.Print("slab memory manager map: %#" B_PRIxADDR ", size: %"
373 			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
374 	}
375 
376 private:
377 	addr_t	fAddress;
378 	size_t	fSize;
379 	uint32	fFlags;
380 };
381 
382 
383 class Unmap : public MemoryManagerTraceEntry {
384 public:
385 	Unmap(addr_t address, size_t size, uint32 flags)
386 		:
387 		MemoryManagerTraceEntry(),
388 		fAddress(address),
389 		fSize(size),
390 		fFlags(flags)
391 	{
392 		Initialized();
393 	}
394 
395 	virtual void AddDump(TraceOutput& out)
396 	{
397 		out.Print("slab memory manager unmap: %#" B_PRIxADDR ", size: %"
398 			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
399 	}
400 
401 private:
402 	addr_t	fAddress;
403 	size_t	fSize;
404 	uint32	fFlags;
405 };
406 
407 
408 //}	// namespace SlabMemoryManagerCacheTracing
409 };	// struct MemoryManager::Tracing
410 
411 
412 //#	define T(x)	new(std::nothrow) SlabMemoryManagerCacheTracing::x
413 #	define T(x)	new(std::nothrow) MemoryManager::Tracing::x
414 
415 #else
416 #	define T(x)
417 #endif	// SLAB_MEMORY_MANAGER_TRACING
418 
419 
420 // #pragma mark - MemoryManager
421 
422 
423 /*static*/ void
424 MemoryManager::Init(kernel_args* args)
425 {
426 	mutex_init(&sLock, "slab memory manager");
427 	rw_lock_init(&sAreaTableLock, "slab memory manager area table");
428 	sKernelArgs = args;
429 
430 	new(&sFreeCompleteMetaChunks) MetaChunkList;
431 	new(&sFreeShortMetaChunks) MetaChunkList;
432 	new(&sPartialMetaChunksSmall) MetaChunkList;
433 	new(&sPartialMetaChunksMedium) MetaChunkList;
434 
435 	new(&sAreaTable) AreaTable;
436 	sAreaTable.Resize(sAreaTableBuffer, sizeof(sAreaTableBuffer), true);
437 		// A bit hacky: The table now owns the memory. Since we never resize or
438 		// free it, that's not a problem, though.
439 
440 	sFreeAreas = NULL;
441 	sFreeAreaCount = 0;
442 	sMaintenanceNeeded = false;
443 }
444 
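// Note: Init() runs before the VM is fully up. Until InitPostArea() clears
// sKernelArgs, new areas are carved out of boot memory via vm_allocate_early()
// and are fully mapped; InitPostArea() then converts them into real VM areas
// and unmaps their unused chunks.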
445 
446 /*static*/ void
447 MemoryManager::InitPostArea()
448 {
449 	sKernelArgs = NULL;
450 
451 	// Convert all early areas into real VM areas. This loop might look a bit
452 	// weird, but it is necessary, since creating the actual area involves
453 	// memory allocations, which in turn can change the situation.
454 	bool done;
455 	do {
456 		done = true;
457 
458 		for (AreaTable::Iterator it = sAreaTable.GetIterator();
459 				Area* area = it.Next();) {
460 			if (area->vmArea == NULL) {
461 				_ConvertEarlyArea(area);
462 				done = false;
463 				break;
464 			}
465 		}
466 	} while (!done);
467 
468 	// unmap and free unused pages
469 	if (sFreeAreas != NULL) {
470 		// Just "leak" all but the first of the free areas -- the VM will
471 		// automatically free all unclaimed memory.
472 		sFreeAreas->next = NULL;
473 		sFreeAreaCount = 1;
474 
475 		Area* area = sFreeAreas;
476 		_ConvertEarlyArea(area);
477 		_UnmapFreeChunksEarly(area);
478 	}
479 
480 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
481 			Area* area = it.Next();) {
482 		_UnmapFreeChunksEarly(area);
483 	}
484 
485 	sMaintenanceNeeded = true;
486 		// might not be necessary, but it does no harm
487 
488 	add_debugger_command_etc("slab_area", &_DumpArea,
489 		"Dump information on a given slab area",
490 		"[ -c ] <area>\n"
491 		"Dump information on a given slab area specified by its base "
492 			"address.\n"
493 		"If \"-c\" is given, the chunks of all meta chunks are printed as "
494 			"well.\n", 0);
495 	add_debugger_command_etc("slab_areas", &_DumpAreas,
496 		"List all slab areas",
497 		"\n"
498 		"Lists all slab areas.\n", 0);
499 	add_debugger_command_etc("slab_meta_chunk", &_DumpMetaChunk,
500 		"Dump information on a given slab meta chunk",
501 		"<meta chunk>\n"
502 		"Dump information on a given slab meta chunk specified by its base "
503 			"or object address.\n", 0);
504 	add_debugger_command_etc("slab_meta_chunks", &_DumpMetaChunks,
505 		"List all non-full slab meta chunks",
506 		"[ -c ]\n"
507 		"Lists all non-full slab meta chunks.\n"
508 		"If \"-c\" is given, the chunks of all meta chunks are printed as "
509 			"well.\n", 0);
510 	add_debugger_command_etc("slab_raw_allocations", &_DumpRawAllocations,
511 		"List all raw allocations in slab areas",
512 		"\n"
513 		"Lists all raw allocations in slab areas.\n", 0);
514 }
515 
516 
517 /*static*/ status_t
518 MemoryManager::Allocate(ObjectCache* cache, uint32 flags, void*& _pages)
519 {
520 	// TODO: Support CACHE_UNLOCKED_PAGES!
521 
522 	T(Allocate(cache, flags));
523 
524 	size_t chunkSize = cache->slab_size;
525 
526 	TRACE("MemoryManager::Allocate(%p, %#" B_PRIx32 "): chunkSize: %"
527 		B_PRIuSIZE "\n", cache, flags, chunkSize);
528 
529 	MutexLocker locker(sLock);
530 
531 	// allocate a chunk
532 	MetaChunk* metaChunk;
533 	Chunk* chunk;
534 	status_t error = _AllocateChunks(chunkSize, 1, flags, metaChunk, chunk);
535 	if (error != B_OK)
536 		return error;
537 
538 	// map the chunk
539 	Area* area = metaChunk->GetArea();
540 	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
541 
542 	locker.Unlock();
543 	error = _MapChunk(area->vmArea, chunkAddress, chunkSize, 0, flags);
544 	locker.Lock();
545 	if (error != B_OK) {
546 		// something failed -- free the chunk
547 		_FreeChunk(area, metaChunk, chunk, chunkAddress, true, flags);
548 		return error;
549 	}
550 
551 	chunk->reference = (addr_t)cache;
552 	_pages = (void*)chunkAddress;
553 
554 	TRACE("MemoryManager::Allocate() done: %p (meta chunk: %d, chunk %d)\n",
555 		_pages, int(metaChunk - area->metaChunks),
556 		int(chunk - metaChunk->chunks));
557 	return B_OK;
558 }
559 
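// Illustrative caller (hypothetical, not part of this file), sketching how
// Allocate()/Free() are meant to be used by the object cache code: the
// backing pages for one slab are obtained for a specific ObjectCache and
// later returned via Free().
//
//	void* pages;
//	status_t error = MemoryManager::Allocate(cache, flags, pages);
//	if (error == B_OK) {
//		// ... carve the cache->slab_size bytes at "pages" into objects ...
//		MemoryManager::Free(pages, flags);
//	}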
560 
561 /*static*/ void
562 MemoryManager::Free(void* pages, uint32 flags)
563 {
564 	TRACE("MemoryManager::Free(%p, %#" B_PRIx32 ")\n", pages, flags);
565 
566 	T(Free(pages, flags));
567 
568 	// get the area and the meta chunk
569 	Area* area = _AreaForAddress((addr_t)pages);
570 	MetaChunk* metaChunk = &area->metaChunks[
571 		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
572 
573 	ASSERT(metaChunk->chunkSize > 0);
574 	ASSERT((addr_t)pages >= metaChunk->chunkBase);
575 	ASSERT(((addr_t)pages % metaChunk->chunkSize) == 0);
576 
577 	// get the chunk
578 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
579 	Chunk* chunk = &metaChunk->chunks[chunkIndex];
580 
581 	ASSERT(chunk->next != NULL);
582 	ASSERT(chunk->next < metaChunk->chunks
583 		|| chunk->next
584 			>= metaChunk->chunks + SLAB_SMALL_CHUNKS_PER_META_CHUNK);
585 
586 	// and free it
587 	MutexLocker locker(sLock);
588 	_FreeChunk(area, metaChunk, chunk, (addr_t)pages, false, flags);
589 }
590 
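// Note on the address arithmetic above: slab areas are aligned to
// SLAB_AREA_SIZE (both the early allocation and the B_ANY_KERNEL_BLOCK_ADDRESS
// area creation in _AllocateArea() ensure this), so
// "(addr_t)pages % SLAB_AREA_SIZE" is the offset within the area, and dividing
// by SLAB_CHUNK_SIZE_LARGE selects the meta chunk covering that offset.
// _ChunkIndexForAddress() presumably derives the chunk index from the offset
// within the meta chunk in the same way.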
591 
592 /*static*/ status_t
593 MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
594 {
595 	T(AllocateRaw(size, flags));
596 
597 	size = ROUNDUP(size, SLAB_CHUNK_SIZE_SMALL);
598 
599 	TRACE("MemoryManager::AllocateRaw(%" B_PRIuSIZE ", %#" B_PRIx32 ")\n", size,
600 		  flags);
601 
602 	if (size > SLAB_CHUNK_SIZE_LARGE || (flags & CACHE_ALIGN_ON_SIZE) != 0) {
603 		// The requested size is greater than a large chunk, or an aligned
604 		// allocation was requested -- allocate it as a dedicated area.
605 		if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0)
606 			return B_WOULD_BLOCK;
607 
608 		virtual_address_restrictions virtualRestrictions = {};
609 		virtualRestrictions.address_specification
610 			= (flags & CACHE_ALIGN_ON_SIZE) != 0
611 				? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS;
612 		physical_address_restrictions physicalRestrictions = {};
613 		area_id area = create_area_etc(VMAddressSpace::KernelID(),
614 			"slab large raw allocation", size, B_FULL_LOCK,
615 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
616 			((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
617 					? CREATE_AREA_DONT_WAIT : 0)
618 				| CREATE_AREA_DONT_CLEAR,
619 			&virtualRestrictions, &physicalRestrictions, &_pages);
620 		return area >= 0 ? B_OK : area;
621 	}
622 
623 	// determine chunk size (small or medium)
624 	size_t chunkSize = SLAB_CHUNK_SIZE_SMALL;
625 	uint32 chunkCount = size / SLAB_CHUNK_SIZE_SMALL;
626 
627 	if (size % SLAB_CHUNK_SIZE_MEDIUM == 0) {
628 		chunkSize = SLAB_CHUNK_SIZE_MEDIUM;
629 		chunkCount = size / SLAB_CHUNK_SIZE_MEDIUM;
630 	}
631 
632 	MutexLocker locker(sLock);
633 
634 	// allocate the chunks
635 	MetaChunk* metaChunk;
636 	Chunk* chunk;
637 	status_t error = _AllocateChunks(chunkSize, chunkCount, flags, metaChunk,
638 		chunk);
639 	if (error != B_OK)
640 		return error;
641 
642 	// map the chunks
643 	Area* area = metaChunk->GetArea();
644 	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
645 
646 	locker.Unlock();
647 	error = _MapChunk(area->vmArea, chunkAddress, size, 0, flags);
648 	locker.Lock();
649 	if (error != B_OK) {
650 		// something failed -- free the chunks
651 		for (uint32 i = 0; i < chunkCount; i++)
652 			_FreeChunk(area, metaChunk, chunk + i, chunkAddress, true, flags);
653 		return error;
654 	}
655 
656 	chunk->reference = (addr_t)chunkAddress + size - 1;
657 	_pages = (void*)chunkAddress;
658 
659 	TRACE("MemoryManager::AllocateRaw() done: %p (meta chunk: %d, chunk %d)\n",
660 		_pages, int(metaChunk - area->metaChunks),
661 		int(chunk - metaChunk->chunks));
662 	return B_OK;
663 }
664 
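// Worked example for the chunk size selection above: after rounding up to a
// multiple of SLAB_CHUNK_SIZE_SMALL, a size of 3 * SLAB_CHUNK_SIZE_MEDIUM is
// served as 3 medium chunks, while SLAB_CHUNK_SIZE_MEDIUM
// + SLAB_CHUNK_SIZE_SMALL is not a multiple of the medium size and is
// therefore served as small chunks. Anything larger than
// SLAB_CHUNK_SIZE_LARGE, or any CACHE_ALIGN_ON_SIZE request, gets its own VM
// area instead.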
665 
666 /*static*/ ObjectCache*
667 MemoryManager::FreeRawOrReturnCache(void* pages, uint32 flags)
668 {
669 	TRACE("MemoryManager::FreeRawOrReturnCache(%p, %#" B_PRIx32 ")\n", pages,
670 		flags);
671 
672 	T(FreeRawOrReturnCache(pages, flags));
673 
674 	// get the area
675 	addr_t areaBase = _AreaBaseAddressForAddress((addr_t)pages);
676 
677 	ReadLocker readLocker(sAreaTableLock);
678 	Area* area = sAreaTable.Lookup(areaBase);
679 	readLocker.Unlock();
680 
681 	if (area == NULL) {
682 		// Probably a large allocation. Look up the VM area.
683 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
684 		addressSpace->ReadLock();
685 		VMArea* area = addressSpace->LookupArea((addr_t)pages);
686 		addressSpace->ReadUnlock();
687 
688 		if (area != NULL && (addr_t)pages == area->Base())
689 			delete_area(area->id);
690 		else
691 			panic("freeing unknown block %p from area %p", pages, area);
692 
693 		return NULL;
694 	}
695 
696 	MetaChunk* metaChunk = &area->metaChunks[
697 		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
698 
699 	// get the chunk
700 	ASSERT(metaChunk->chunkSize > 0);
701 	ASSERT((addr_t)pages >= metaChunk->chunkBase);
702 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
703 	Chunk* chunk = &metaChunk->chunks[chunkIndex];
704 
705 	addr_t reference = chunk->reference;
706 	if ((reference & 1) == 0)
707 		return (ObjectCache*)reference;
708 
709 	// Seems we have a raw chunk allocation.
710 	ASSERT((addr_t)pages == _ChunkAddress(metaChunk, chunk));
711 	ASSERT(reference > (addr_t)pages);
712 	ASSERT(reference <= areaBase + SLAB_AREA_SIZE - 1);
713 	size_t size = reference - (addr_t)pages + 1;
714 	ASSERT((size % SLAB_CHUNK_SIZE_SMALL) == 0);
715 
716 	// unmap the chunks
717 	_UnmapChunk(area->vmArea, (addr_t)pages, size, flags);
718 
719 	// and free them
720 	MutexLocker locker(sLock);
721 	uint32 chunkCount = size / metaChunk->chunkSize;
722 	for (uint32 i = 0; i < chunkCount; i++)
723 		_FreeChunk(area, metaChunk, chunk + i, (addr_t)pages, true, flags);
724 
725 	return NULL;
726 }
727 
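// Summary of the Chunk "reference" encoding used throughout this file: while a
// chunk is free, the same storage appears to hold the free list link ("next",
// see the ASSERTs in Free()); a value of 1 marks a chunk just handed out by
// _GetChunk()/_GetChunks() but not yet tagged by its caller; an even value is
// the ObjectCache* owning the chunk (caches are pointer-aligned, so the low
// bit is clear); and an odd value greater than 1 is the address of the last
// byte of a raw allocation that starts at this chunk (the following chunks of
// the run keep the value 1).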
728 
729 /*static*/ size_t
730 MemoryManager::AcceptableChunkSize(size_t size)
731 {
732 	if (size <= SLAB_CHUNK_SIZE_SMALL)
733 		return SLAB_CHUNK_SIZE_SMALL;
734 	if (size <= SLAB_CHUNK_SIZE_MEDIUM)
735 		return SLAB_CHUNK_SIZE_MEDIUM;
736 	return SLAB_CHUNK_SIZE_LARGE;
737 }
738 
739 
740 /*static*/ ObjectCache*
741 MemoryManager::GetAllocationInfo(void* address, size_t& _size)
742 {
743 	// get the area
744 	ReadLocker readLocker(sAreaTableLock);
745 	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
746 	readLocker.Unlock();
747 
748 	if (area == NULL) {
749 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
750 		addressSpace->ReadLock();
751 		VMArea* area = addressSpace->LookupArea((addr_t)address);
752 		if (area != NULL && (addr_t)address == area->Base())
753 			_size = area->Size();
754 		else
755 			_size = 0;
756 		addressSpace->ReadUnlock();
757 
758 		return NULL;
759 	}
760 
761 	MetaChunk* metaChunk = &area->metaChunks[
762 		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
763 
764 	// get the chunk
765 	ASSERT(metaChunk->chunkSize > 0);
766 	ASSERT((addr_t)address >= metaChunk->chunkBase);
767 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
768 
769 	addr_t reference = metaChunk->chunks[chunkIndex].reference;
770 	if ((reference & 1) == 0) {
771 		ObjectCache* cache = (ObjectCache*)reference;
772 		_size = cache->object_size;
773 		return cache;
774 	}
775 
776 	_size = reference - (addr_t)address + 1;
777 	return NULL;
778 }
779 
780 
781 /*static*/ ObjectCache*
782 MemoryManager::CacheForAddress(void* address)
783 {
784 	// get the area
785 	ReadLocker readLocker(sAreaTableLock);
786 	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
787 	readLocker.Unlock();
788 
789 	if (area == NULL)
790 		return NULL;
791 
792 	MetaChunk* metaChunk = &area->metaChunks[
793 		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
794 
795 	// get the chunk
796 	ASSERT(metaChunk->chunkSize > 0);
797 	ASSERT((addr_t)address >= metaChunk->chunkBase);
798 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
799 
800 	addr_t reference = metaChunk->chunks[chunkIndex].reference;
801 	return (reference & 1) == 0 ? (ObjectCache*)reference : NULL;
802 }
803 
804 
805 /*static*/ void
806 MemoryManager::PerformMaintenance()
807 {
808 	MutexLocker locker(sLock);
809 
810 	while (sMaintenanceNeeded) {
811 		sMaintenanceNeeded = false;
812 
813 		// We want to keep one or two areas as a reserve. This way we have at
814 		// least one area to use in situations where we aren't allowed to
815 		// allocate one, and we also avoid ping-pong effects.
816 		if (sFreeAreaCount > 0 && sFreeAreaCount <= 2)
817 			return;
818 
819 		if (sFreeAreaCount == 0) {
820 			// try to allocate one
821 			Area* area;
822 			if (_AllocateArea(0, area) != B_OK)
823 				return;
824 
825 			_push(sFreeAreas, area);
826 			if (++sFreeAreaCount > 2)
827 				sMaintenanceNeeded = true;
828 		} else {
829 			// free until we only have two free ones
830 			while (sFreeAreaCount > 2) {
831 				Area* area = _pop(sFreeAreas);
832 				_FreeArea(area, true, 0);
833 			}
834 
835 			if (sFreeAreaCount == 0)
836 				sMaintenanceNeeded = true;
837 		}
838 	}
839 }
840 
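// PerformMaintenance() (triggered via _RequestMaintenance(), see below) tops
// the reserve up to one free area when it is empty and trims it back down to
// two when it has grown larger. The loop re-checks sMaintenanceNeeded because
// _AllocateArea() and _FreeArea() temporarily drop sLock, so other threads may
// have changed the free area count in the meantime.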
841 
842 /*static*/ status_t
843 MemoryManager::_AllocateChunks(size_t chunkSize, uint32 chunkCount,
844 	uint32 flags, MetaChunk*& _metaChunk, Chunk*& _chunk)
845 {
846 	MetaChunkList* metaChunkList = NULL;
847 	if (chunkSize == SLAB_CHUNK_SIZE_SMALL) {
848 		metaChunkList = &sPartialMetaChunksSmall;
849 	} else if (chunkSize == SLAB_CHUNK_SIZE_MEDIUM) {
850 		metaChunkList = &sPartialMetaChunksMedium;
851 	} else if (chunkSize != SLAB_CHUNK_SIZE_LARGE) {
852 		panic("MemoryManager::_AllocateChunks(): Unsupported chunk size: %"
853 			B_PRIuSIZE, chunkSize);
854 		return B_BAD_VALUE;
855 	}
856 
857 	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk))
858 		return B_OK;
859 
860 	if (sFreeAreas != NULL) {
861 		_AddArea(_pop(sFreeAreas));
862 		sFreeAreaCount--;
863 		_RequestMaintenance();
864 
865 		_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
866 		return B_OK;
867 	}
868 
869 	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
870 		// We can't create an area under this restriction, and we must not
871 		// wait for someone else to do it either.
872 		return B_WOULD_BLOCK;
873 	}
874 
875 	// We need to allocate a new area. Wait if someone else is already trying
876 	// to do the same.
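	// Note: Two wait entries are kept (see below), one published by an area
	// allocation that may wait for memory and one by an allocation that may
	// not. Presumably this lets CACHE_DONT_WAIT_FOR_MEMORY requests piggyback
	// only on an area allocation that won't itself block while reserving
	// memory.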
877 	while (true) {
878 		AllocationEntry* allocationEntry = NULL;
879 		if (sAllocationEntryDontWait != NULL) {
880 			allocationEntry = sAllocationEntryDontWait;
881 		} else if (sAllocationEntryCanWait != NULL
882 				&& (flags & CACHE_DONT_WAIT_FOR_MEMORY) == 0) {
883 			allocationEntry = sAllocationEntryCanWait;
884 		} else
885 			break;
886 
887 		ConditionVariableEntry entry;
888 		allocationEntry->condition.Add(&entry);
889 
890 		mutex_unlock(&sLock);
891 		entry.Wait();
892 		mutex_lock(&sLock);
893 
894 		if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
895 				_chunk)) {
896 			return B_OK;
897 		}
898 	}
899 
900 	// prepare the allocation entry others can wait on
901 	AllocationEntry*& allocationEntry
902 		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
903 			? sAllocationEntryDontWait : sAllocationEntryCanWait;
904 
905 	AllocationEntry myResizeEntry;
906 	allocationEntry = &myResizeEntry;
907 	allocationEntry->condition.Init(metaChunkList, "wait for slab area");
908 	allocationEntry->thread = find_thread(NULL);
909 
910 	Area* area;
911 	status_t error = _AllocateArea(flags, area);
912 
913 	allocationEntry->condition.NotifyAll();
914 	allocationEntry = NULL;
915 
916 	if (error != B_OK)
917 		return error;
918 
919 	// Try again to get a meta chunk -- something might have been freed in the
920 	// meantime. In that case we can free the area we just allocated.
921 	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk)) {
922 		_FreeArea(area, true, flags);
923 		return B_OK;
924 	}
925 
926 	_AddArea(area);
927 	_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
928 	return B_OK;
929 }
930 
931 
932 /*static*/ bool
933 MemoryManager::_GetChunks(MetaChunkList* metaChunkList, size_t chunkSize,
934 	uint32 chunkCount, MetaChunk*& _metaChunk, Chunk*& _chunk)
935 {
936 	// the common and less complicated special case
937 	if (chunkCount == 1)
938 		return _GetChunk(metaChunkList, chunkSize, _metaChunk, _chunk);
939 
940 	ASSERT(metaChunkList != NULL);
941 
942 	// Iterate through the partial meta chunk list and try to find a free
943 	// range that is large enough.
944 	MetaChunk* metaChunk = NULL;
945 	for (MetaChunkList::Iterator it = metaChunkList->GetIterator();
946 			(metaChunk = it.Next()) != NULL;) {
947 		if (metaChunk->firstFreeChunk + chunkCount - 1
948 				<= metaChunk->lastFreeChunk) {
949 			break;
950 		}
951 	}
952 
953 	if (metaChunk == NULL) {
954 		// try to get a free meta chunk
955 		if ((SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize) / chunkSize >= chunkCount)
956 			metaChunk = sFreeShortMetaChunks.RemoveHead();
957 		if (metaChunk == NULL)
958 			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
959 
960 		if (metaChunk == NULL)
961 			return false;
962 
963 		metaChunkList->Add(metaChunk);
964 		metaChunk->GetArea()->usedMetaChunkCount++;
965 		_PrepareMetaChunk(metaChunk, chunkSize);
966 
967 		T(AllocateMetaChunk(metaChunk));
968 	}
969 
970 	// pull the chunks out of the free list
971 	Chunk* firstChunk = metaChunk->chunks + metaChunk->firstFreeChunk;
972 	Chunk* lastChunk = firstChunk + (chunkCount - 1);
973 	Chunk** chunkPointer = &metaChunk->freeChunks;
974 	uint32 remainingChunks = chunkCount;
975 	while (remainingChunks > 0) {
976 		ASSERT_PRINT(chunkPointer, "remaining: %" B_PRIu32 "/%" B_PRIu32
977 			", area: %p, meta chunk: %" B_PRIdSSIZE "\n", remainingChunks,
978 			chunkCount, metaChunk->GetArea(),
979 			metaChunk - metaChunk->GetArea()->metaChunks);
980 		Chunk* chunk = *chunkPointer;
981 		if (chunk >= firstChunk && chunk <= lastChunk) {
982 			*chunkPointer = chunk->next;
983 			chunk->reference = 1;
984 			remainingChunks--;
985 		} else
986 			chunkPointer = &chunk->next;
987 	}
988 
989 	// allocate the chunks
990 	metaChunk->usedChunkCount += chunkCount;
991 	if (metaChunk->usedChunkCount == metaChunk->chunkCount) {
992 		// meta chunk is full now -- remove it from its list
993 		if (metaChunkList != NULL)
994 			metaChunkList->Remove(metaChunk);
995 	}
996 
997 	// update the free range
998 	metaChunk->firstFreeChunk += chunkCount;
999 
1000 	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1001 
1002 	_chunk = firstChunk;
1003 	_metaChunk = metaChunk;
1004 
1005 	T(AllocateChunks(chunkSize, chunkCount, metaChunk, firstChunk));
1006 
1007 	return true;
1008 }
1009 
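// A multi-chunk allocation must occupy consecutive chunk slots, since it is a
// single contiguous address range of chunkCount * chunkSize bytes and only the
// first chunk carries the allocation's reference. That is why the search above
// only considers the contiguous [firstFreeChunk, lastFreeChunk] range instead
// of the whole free list.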
1010 
1011 /*static*/ bool
1012 MemoryManager::_GetChunk(MetaChunkList* metaChunkList, size_t chunkSize,
1013 	MetaChunk*& _metaChunk, Chunk*& _chunk)
1014 {
1015 	MetaChunk* metaChunk = metaChunkList != NULL
1016 		? metaChunkList->Head() : NULL;
1017 	if (metaChunk == NULL) {
1018 		// no partial meta chunk -- maybe there's a free one
1019 		if (chunkSize == SLAB_CHUNK_SIZE_LARGE) {
1020 			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1021 		} else {
1022 			metaChunk = sFreeShortMetaChunks.RemoveHead();
1023 			if (metaChunk == NULL)
1024 				metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1025 			if (metaChunk != NULL)
1026 				metaChunkList->Add(metaChunk);
1027 		}
1028 
1029 		if (metaChunk == NULL)
1030 			return false;
1031 
1032 		metaChunk->GetArea()->usedMetaChunkCount++;
1033 		_PrepareMetaChunk(metaChunk, chunkSize);
1034 
1035 		T(AllocateMetaChunk(metaChunk));
1036 	}
1037 
1038 	// allocate the chunk
1039 	if (++metaChunk->usedChunkCount == metaChunk->chunkCount) {
1040 		// meta chunk is full now -- remove it from its list
1041 		if (metaChunkList != NULL)
1042 			metaChunkList->Remove(metaChunk);
1043 	}
1044 
1045 	_chunk = _pop(metaChunk->freeChunks);
1046 	_metaChunk = metaChunk;
1047 
1048 	_chunk->reference = 1;
1049 
1050 	// update the free range
1051 	uint32 chunkIndex = _chunk - metaChunk->chunks;
1052 	if (chunkIndex >= metaChunk->firstFreeChunk
1053 			&& chunkIndex <= metaChunk->lastFreeChunk) {
1054 		if (chunkIndex - metaChunk->firstFreeChunk
1055 				<= metaChunk->lastFreeChunk - chunkIndex) {
1056 			metaChunk->firstFreeChunk = chunkIndex + 1;
1057 		} else
1058 			metaChunk->lastFreeChunk = chunkIndex - 1;
1059 	}
1060 
1061 	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1062 
1063 	T(AllocateChunk(chunkSize, metaChunk, _chunk));
1064 
1065 	return true;
1066 }
1067 
1068 
1069 /*static*/ void
1070 MemoryManager::_FreeChunk(Area* area, MetaChunk* metaChunk, Chunk* chunk,
1071 	addr_t chunkAddress, bool alreadyUnmapped, uint32 flags)
1072 {
1073 	// unmap the chunk
1074 	if (!alreadyUnmapped) {
1075 		mutex_unlock(&sLock);
1076 		_UnmapChunk(area->vmArea, chunkAddress, metaChunk->chunkSize, flags);
1077 		mutex_lock(&sLock);
1078 	}
1079 
1080 	T(FreeChunk(metaChunk, chunk));
1081 
1082 	_push(metaChunk->freeChunks, chunk);
1083 
1084 	uint32 chunkIndex = chunk - metaChunk->chunks;
1085 
1086 	// free the meta chunk, if it is unused now
1087 	PARANOID_CHECKS_ONLY(bool areaDeleted = false;)
1088 	ASSERT(metaChunk->usedChunkCount > 0);
1089 	if (--metaChunk->usedChunkCount == 0) {
1090 		T(FreeMetaChunk(metaChunk));
1091 
1092 		// remove from partial meta chunk list
1093 		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
1094 			sPartialMetaChunksSmall.Remove(metaChunk);
1095 		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
1096 			sPartialMetaChunksMedium.Remove(metaChunk);
1097 
1098 		// mark empty
1099 		metaChunk->chunkSize = 0;
1100 
1101 		// add to free list
1102 		if (metaChunk == area->metaChunks)
1103 			sFreeShortMetaChunks.Add(metaChunk, false);
1104 		else
1105 			sFreeCompleteMetaChunks.Add(metaChunk, false);
1106 
1107 		// free the area, if it is unused now
1108 		ASSERT(area->usedMetaChunkCount > 0);
1109 		if (--area->usedMetaChunkCount == 0) {
1110 			_FreeArea(area, false, flags);
1111 			PARANOID_CHECKS_ONLY(areaDeleted = true;)
1112 		}
1113 	} else if (metaChunk->usedChunkCount == metaChunk->chunkCount - 1) {
1114 		// the meta chunk was full before -- add it back to its partial chunk
1115 		// list
1116 		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
1117 			sPartialMetaChunksSmall.Add(metaChunk, false);
1118 		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
1119 			sPartialMetaChunksMedium.Add(metaChunk, false);
1120 
1121 		metaChunk->firstFreeChunk = chunkIndex;
1122 		metaChunk->lastFreeChunk = chunkIndex;
1123 	} else {
1124 		// extend the free range, if the chunk adjoins
1125 		if (chunkIndex + 1 == metaChunk->firstFreeChunk) {
1126 			uint32 firstFree = chunkIndex;
1127 			for (; firstFree > 0; firstFree--) {
1128 				Chunk* previousChunk = &metaChunk->chunks[firstFree - 1];
1129 				if (!_IsChunkFree(metaChunk, previousChunk))
1130 					break;
1131 			}
1132 			metaChunk->firstFreeChunk = firstFree;
1133 		} else if (chunkIndex == (uint32)metaChunk->lastFreeChunk + 1) {
1134 			uint32 lastFree = chunkIndex;
1135 			for (; lastFree + 1 < metaChunk->chunkCount; lastFree++) {
1136 				Chunk* nextChunk = &metaChunk->chunks[lastFree + 1];
1137 				if (!_IsChunkFree(metaChunk, nextChunk))
1138 					break;
1139 			}
1140 			metaChunk->lastFreeChunk = lastFree;
1141 		}
1142 	}
1143 
1144 	PARANOID_CHECKS_ONLY(
1145 		if (!areaDeleted)
1146 			_CheckMetaChunk(metaChunk);
1147 	)
1148 }
1149 
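// Note that the [firstFreeChunk, lastFreeChunk] range is only extended when
// the freed chunk directly adjoins it. For example, with a free range of
// [5, 9], freeing chunk 4 extends it to [4, 9] (assuming chunk 3 is still in
// use), while freeing chunk 2 leaves the range untouched even though chunk 2
// is now on the free list. The range is therefore a conservative estimate, and
// _GetChunks() may fail to find a contiguous run even when enough scattered
// chunks are free.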
1150 
1151 /*static*/ void
1152 MemoryManager::_PrepareMetaChunk(MetaChunk* metaChunk, size_t chunkSize)
1153 {
1154 	Area* area = metaChunk->GetArea();
1155 
1156 	if (metaChunk == area->metaChunks) {
1157 		// the first chunk is shorter
1158 		size_t unusableSize = ROUNDUP(SLAB_AREA_STRUCT_OFFSET + kAreaAdminSize,
1159 			chunkSize);
1160 		metaChunk->chunkBase = area->BaseAddress() + unusableSize;
1161 		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE - unusableSize;
1162 	}
1163 
1164 	metaChunk->chunkSize = chunkSize;
1165 	metaChunk->chunkCount = metaChunk->totalSize / chunkSize;
1166 	metaChunk->usedChunkCount = 0;
1167 
1168 	metaChunk->freeChunks = NULL;
1169 	for (int32 i = metaChunk->chunkCount - 1; i >= 0; i--)
1170 		_push(metaChunk->freeChunks, metaChunk->chunks + i);
1171 
1172 	metaChunk->firstFreeChunk = 0;
1173 	metaChunk->lastFreeChunk = metaChunk->chunkCount - 1;
1174 
1175 	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1176 }
1177 
1178 
1179 /*static*/ void
1180 MemoryManager::_AddArea(Area* area)
1181 {
1182 	T(AddArea(area));
1183 
1184 	// add the area to the hash table
1185 	WriteLocker writeLocker(sAreaTableLock);
1186 	sAreaTable.InsertUnchecked(area);
1187 	writeLocker.Unlock();
1188 
1189 	// add the area's meta chunks to the free lists
1190 	sFreeShortMetaChunks.Add(&area->metaChunks[0]);
1191 	for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++)
1192 		sFreeCompleteMetaChunks.Add(&area->metaChunks[i]);
1193 }
1194 
1195 
1196 /*static*/ status_t
1197 MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
1198 {
1199 	TRACE("MemoryManager::_AllocateArea(%#" B_PRIx32 ")\n", flags);
1200 
1201 	ASSERT((flags & CACHE_DONT_LOCK_KERNEL_SPACE) == 0);
1202 
1203 	mutex_unlock(&sLock);
1204 
1205 	size_t pagesNeededToMap = 0;
1206 	void* areaBase;
1207 	Area* area;
1208 	VMArea* vmArea = NULL;
1209 
1210 	if (sKernelArgs == NULL) {
1211 		// create an area
1212 		uint32 areaCreationFlags = (flags & CACHE_PRIORITY_VIP) != 0
1213 			? CREATE_AREA_PRIORITY_VIP : 0;
1214 		area_id areaID = vm_create_null_area(B_SYSTEM_TEAM, kSlabAreaName,
1215 			&areaBase, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
1216 			areaCreationFlags);
1217 		if (areaID < 0) {
1218 			mutex_lock(&sLock);
1219 			return areaID;
1220 		}
1221 
1222 		area = _AreaForAddress((addr_t)areaBase);
1223 
1224 		// map the memory for the administrative structure
1225 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1226 		VMTranslationMap* translationMap = addressSpace->TranslationMap();
1227 
1228 		pagesNeededToMap = translationMap->MaxPagesNeededToMap(
1229 			(addr_t)area, (addr_t)areaBase + SLAB_AREA_SIZE - 1);
1230 
1231 		vmArea = VMAreaHash::Lookup(areaID);
1232 		status_t error = _MapChunk(vmArea, (addr_t)area, kAreaAdminSize,
1233 			pagesNeededToMap, flags);
1234 		if (error != B_OK) {
1235 			delete_area(areaID);
1236 			mutex_lock(&sLock);
1237 			return error;
1238 		}
1239 
1240 		dprintf("slab memory manager: created area %p (%" B_PRId32 ")\n", area,
1241 			areaID);
1242 	} else {
1243 		// no areas yet -- allocate raw memory
1244 		areaBase = (void*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
1245 			SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
1246 			SLAB_AREA_SIZE);
1247 		if (areaBase == NULL) {
1248 			mutex_lock(&sLock);
1249 			return B_NO_MEMORY;
1250 		}
1251 		area = _AreaForAddress((addr_t)areaBase);
1252 
1253 		TRACE("MemoryManager::_AllocateArea(): allocated early area %p\n",
1254 			area);
1255 	}
1256 
1257 	// init the area structure
1258 	area->vmArea = vmArea;
1259 	area->reserved_memory_for_mapping = pagesNeededToMap * B_PAGE_SIZE;
1260 	area->usedMetaChunkCount = 0;
1261 	area->fullyMapped = vmArea == NULL;
1262 
1263 	// init the meta chunks
1264 	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1265 		MetaChunk* metaChunk = area->metaChunks + i;
1266 		metaChunk->chunkSize = 0;
1267 		metaChunk->chunkBase = (addr_t)areaBase + i * SLAB_CHUNK_SIZE_LARGE;
1268 		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE;
1269 			// Note: chunkBase and totalSize aren't correct for the first
1270 			// meta chunk. They will be set in _PrepareMetaChunk().
1271 		metaChunk->chunkCount = 0;
1272 		metaChunk->usedChunkCount = 0;
1273 		metaChunk->freeChunks = NULL;
1274 	}
1275 
1276 	mutex_lock(&sLock);
1277 	_area = area;
1278 
1279 	T(AllocateArea(area, flags));
1280 
1281 	return B_OK;
1282 }
1283 
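// _AllocateArea() has two paths: during early boot (sKernelArgs != NULL) the
// area is plain memory from vm_allocate_early() and is fully mapped; later,
// only an address-space reservation is created via vm_create_null_area(), the
// administrative structure at its start is mapped explicitly, and all further
// pages are mapped on demand by _MapChunk() when chunks are handed out.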
1284 
1285 /*static*/ void
1286 MemoryManager::_FreeArea(Area* area, bool areaRemoved, uint32 flags)
1287 {
1288 	TRACE("MemoryManager::_FreeArea(%p, %#" B_PRIx32 ")\n", area, flags);
1289 
1290 	T(FreeArea(area, areaRemoved, flags));
1291 
1292 	ASSERT(area->usedMetaChunkCount == 0);
1293 
1294 	if (!areaRemoved) {
1295 		// remove the area's meta chunks from the free lists
1296 		ASSERT(area->metaChunks[0].usedChunkCount == 0);
1297 		sFreeShortMetaChunks.Remove(&area->metaChunks[0]);
1298 
1299 		for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1300 			ASSERT(area->metaChunks[i].usedChunkCount == 0);
1301 			sFreeCompleteMetaChunks.Remove(&area->metaChunks[i]);
1302 		}
1303 
1304 		// remove the area from the hash table
1305 		WriteLocker writeLocker(sAreaTableLock);
1306 		sAreaTable.RemoveUnchecked(area);
1307 		writeLocker.Unlock();
1308 	}
1309 
1310 	// We want to keep one or two free areas as a reserve.
1311 	if (sFreeAreaCount <= 1) {
1312 		_push(sFreeAreas, area);
1313 		sFreeAreaCount++;
1314 		return;
1315 	}
1316 
1317 	if (area->vmArea == NULL || (flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
1318 		// This is either early in the boot process or we aren't allowed to
1319 		// delete the area now.
1320 		_push(sFreeAreas, area);
1321 		sFreeAreaCount++;
1322 		_RequestMaintenance();
1323 		return;
1324 	}
1325 
1326 	mutex_unlock(&sLock);
1327 
1328 	dprintf("slab memory manager: deleting area %p (%" B_PRId32 ")\n", area,
1329 		area->vmArea->id);
1330 
1331 	size_t memoryToUnreserve = area->reserved_memory_for_mapping;
1332 	delete_area(area->vmArea->id);
1333 	vm_unreserve_memory(memoryToUnreserve);
1334 
1335 	mutex_lock(&sLock);
1336 }
1337 
1338 
1339 /*static*/ status_t
1340 MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
1341 	size_t reserveAdditionalMemory, uint32 flags)
1342 {
1343 	TRACE("MemoryManager::_MapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
1344 		")\n", vmArea, address, size);
1345 
1346 	T(Map(address, size, flags));
1347 
1348 	if (vmArea == NULL) {
1349 		// everything is mapped anyway
1350 		return B_OK;
1351 	}
1352 
1353 	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1354 	VMTranslationMap* translationMap = addressSpace->TranslationMap();
1355 
1356 	// reserve memory for the chunk
1357 	int priority = (flags & CACHE_PRIORITY_VIP) != 0
1358 		? VM_PRIORITY_VIP : VM_PRIORITY_SYSTEM;
1359 	size_t reservedMemory = size + reserveAdditionalMemory;
1360 	status_t error = vm_try_reserve_memory(size, priority,
1361 		(flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? 0 : 1000000);
1362 	if (error != B_OK)
1363 		return error;
1364 
1365 	// reserve the pages we need now
1366 	size_t reservedPages = size / B_PAGE_SIZE
1367 		+ translationMap->MaxPagesNeededToMap(address, address + size - 1);
1368 	vm_page_reservation reservation;
1369 	if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0) {
1370 		if (!vm_page_try_reserve_pages(&reservation, reservedPages, priority)) {
1371 			vm_unreserve_memory(reservedMemory);
1372 			return B_WOULD_BLOCK;
1373 		}
1374 	} else
1375 		vm_page_reserve_pages(&reservation, reservedPages, priority);
1376 
1377 	VMCache* cache = vm_area_get_locked_cache(vmArea);
1378 
1379 	// map the pages
1380 	translationMap->Lock();
1381 
1382 	addr_t areaOffset = address - vmArea->Base();
1383 	addr_t endAreaOffset = areaOffset + size;
1384 	for (size_t offset = areaOffset; offset < endAreaOffset;
1385 			offset += B_PAGE_SIZE) {
1386 		vm_page* page = vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
1387 		cache->InsertPage(page, offset);
1388 
1389 		page->IncrementWiredCount();
1390 		atomic_add(&gMappedPagesCount, 1);
1391 		DEBUG_PAGE_ACCESS_END(page);
1392 
1393 		translationMap->Map(vmArea->Base() + offset,
1394 			page->physical_page_number * B_PAGE_SIZE,
1395 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
1396 			vmArea->MemoryType(), &reservation);
1397 	}
1398 
1399 	translationMap->Unlock();
1400 
1401 	cache->ReleaseRefAndUnlock();
1402 
1403 	vm_page_unreserve_pages(&reservation);
1404 
1405 	return B_OK;
1406 }
1407 
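// _MapChunk() reserves in two steps: first a memory quota via
// vm_try_reserve_memory() (waiting up to a second unless
// CACHE_DONT_WAIT_FOR_MEMORY is set), then the physical pages plus whatever
// the translation map may need for page tables. The pages are inserted into
// the area's VMCache as wired pages, so they are never paged out.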
1408 
1409 /*static*/ status_t
1410 MemoryManager::_UnmapChunk(VMArea* vmArea, addr_t address, size_t size,
1411 	uint32 flags)
1412 {
1413 	T(Unmap(address, size, flags));
1414 
1415 	if (vmArea == NULL)
1416 		return B_ERROR;
1417 
1418 	TRACE("MemoryManager::_UnmapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
1419 		")\n", vmArea, address, size);
1420 
1421 	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1422 	VMTranslationMap* translationMap = addressSpace->TranslationMap();
1423 	VMCache* cache = vm_area_get_locked_cache(vmArea);
1424 
1425 	// unmap the pages
1426 	translationMap->Lock();
1427 	translationMap->Unmap(address, address + size - 1);
1428 	atomic_add(&gMappedPagesCount, -(size / B_PAGE_SIZE));
1429 	translationMap->Unlock();
1430 
1431 	// free the pages
1432 	addr_t areaPageOffset = (address - vmArea->Base()) / B_PAGE_SIZE;
1433 	addr_t areaPageEndOffset = areaPageOffset + size / B_PAGE_SIZE;
1434 	VMCachePagesTree::Iterator it = cache->pages.GetIterator(
1435 		areaPageOffset, true, true);
1436 	while (vm_page* page = it.Next()) {
1437 		if (page->cache_offset >= areaPageEndOffset)
1438 			break;
1439 
1440 		DEBUG_PAGE_ACCESS_START(page);
1441 
1442 		page->DecrementWiredCount();
1443 
1444 		cache->RemovePage(page);
1445 			// the iterator is remove-safe
1446 		vm_page_free(cache, page);
1447 	}
1448 
1449 	cache->ReleaseRefAndUnlock();
1450 
1451 	vm_unreserve_memory(size);
1452 
1453 	return B_OK;
1454 }
1455 
1456 
1457 /*static*/ void
1458 MemoryManager::_UnmapFreeChunksEarly(Area* area)
1459 {
1460 	if (!area->fullyMapped)
1461 		return;
1462 
1463 	TRACE("MemoryManager::_UnmapFreeChunksEarly(%p)\n", area);
1464 
1465 	// unmap the space before the Area structure
1466 	#if SLAB_AREA_STRUCT_OFFSET > 0
1467 		_UnmapChunk(area->vmArea, area->BaseAddress(), SLAB_AREA_STRUCT_OFFSET,
1468 			0);
1469 	#endif
1470 
1471 	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1472 		MetaChunk* metaChunk = area->metaChunks + i;
1473 		if (metaChunk->chunkSize == 0) {
1474 			// meta chunk is free -- unmap it completely
1475 			if (i == 0) {
1476 				_UnmapChunk(area->vmArea, (addr_t)area + kAreaAdminSize,
1477 					SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize, 0);
1478 			} else {
1479 				_UnmapChunk(area->vmArea,
1480 					area->BaseAddress() + i * SLAB_CHUNK_SIZE_LARGE,
1481 					SLAB_CHUNK_SIZE_LARGE, 0);
1482 			}
1483 		} else {
1484 			// unmap free chunks
1485 			for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
1486 					chunk = chunk->next) {
1487 				_UnmapChunk(area->vmArea, _ChunkAddress(metaChunk, chunk),
1488 					metaChunk->chunkSize, 0);
1489 			}
1490 
1491 			// The first meta chunk might have space before its first chunk.
1492 			if (i == 0) {
1493 				addr_t unusedStart = (addr_t)area + kAreaAdminSize;
1494 				if (unusedStart < metaChunk->chunkBase) {
1495 					_UnmapChunk(area->vmArea, unusedStart,
1496 						metaChunk->chunkBase - unusedStart, 0);
1497 				}
1498 			}
1499 		}
1500 	}
1501 
1502 	area->fullyMapped = false;
1503 }
1504 
1505 
1506 /*static*/ void
1507 MemoryManager::_ConvertEarlyArea(Area* area)
1508 {
1509 	void* address = (void*)area->BaseAddress();
1510 	area_id areaID = create_area(kSlabAreaName, &address, B_EXACT_ADDRESS,
1511 		SLAB_AREA_SIZE, B_ALREADY_WIRED,
1512 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1513 	if (areaID < 0)
1514 		panic("out of memory");
1515 
1516 	area->vmArea = VMAreaHash::Lookup(areaID);
1517 }
1518 
1519 
1520 /*static*/ void
1521 MemoryManager::_RequestMaintenance()
1522 {
1523 	if ((sFreeAreaCount > 0 && sFreeAreaCount <= 2) || sMaintenanceNeeded)
1524 		return;
1525 
1526 	sMaintenanceNeeded = true;
1527 	request_memory_manager_maintenance();
1528 }
1529 
1530 
1531 /*static*/ bool
1532 MemoryManager::_IsChunkInFreeList(const MetaChunk* metaChunk,
1533 	const Chunk* chunk)
1534 {
1535 	Chunk* freeChunk = metaChunk->freeChunks;
1536 	while (freeChunk != NULL) {
1537 		if (freeChunk == chunk)
1538 			return true;
1539 		freeChunk = freeChunk->next;
1540 	}
1541 
1542 	return false;
1543 }
1544 
1545 
1546 #if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
1547 
1548 /*static*/ void
1549 MemoryManager::_CheckMetaChunk(MetaChunk* metaChunk)
1550 {
1551 	Area* area = metaChunk->GetArea();
1552 	int32 metaChunkIndex = metaChunk - area->metaChunks;
1553 	if (metaChunkIndex < 0 || metaChunkIndex >= SLAB_META_CHUNKS_PER_AREA) {
1554 		panic("invalid meta chunk %p!", metaChunk);
1555 		return;
1556 	}
1557 
1558 	switch (metaChunk->chunkSize) {
1559 		case 0:
1560 			// unused
1561 			return;
1562 		case SLAB_CHUNK_SIZE_SMALL:
1563 		case SLAB_CHUNK_SIZE_MEDIUM:
1564 		case SLAB_CHUNK_SIZE_LARGE:
1565 			break;
1566 		default:
1567 			panic("meta chunk %p has invalid chunk size: %" B_PRIuSIZE,
1568 				metaChunk, metaChunk->chunkSize);
1569 			return;
1570 	}
1571 
1572 	if (metaChunk->totalSize > SLAB_CHUNK_SIZE_LARGE) {
1573 		panic("meta chunk %p has invalid total size: %" B_PRIuSIZE,
1574 			metaChunk, metaChunk->totalSize);
1575 		return;
1576 	}
1577 
1578 	addr_t expectedBase = area->BaseAddress()
1579 		+ metaChunkIndex * SLAB_CHUNK_SIZE_LARGE;
1580 	if (metaChunk->chunkBase < expectedBase
1581 		|| metaChunk->chunkBase - expectedBase + metaChunk->totalSize
1582 			> SLAB_CHUNK_SIZE_LARGE) {
1583 		panic("meta chunk %p has invalid base address: %" B_PRIxADDR, metaChunk,
1584 			metaChunk->chunkBase);
1585 		return;
1586 	}
1587 
1588 	if (metaChunk->chunkCount != metaChunk->totalSize / metaChunk->chunkSize) {
1589 		panic("meta chunk %p has invalid chunk count: %u", metaChunk,
1590 			metaChunk->chunkCount);
1591 		return;
1592 	}
1593 
1594 	if (metaChunk->usedChunkCount > metaChunk->chunkCount) {
1595 		panic("meta chunk %p has invalid used chunk count: %u", metaChunk,
1596 			metaChunk->usedChunkCount);
1597 		return;
1598 	}
1599 
1600 	if (metaChunk->firstFreeChunk > metaChunk->chunkCount) {
1601 		panic("meta chunk %p has invalid first free chunk: %u", metaChunk,
1602 			metaChunk->firstFreeChunk);
1603 		return;
1604 	}
1605 
1606 	if (metaChunk->lastFreeChunk >= metaChunk->chunkCount) {
1607 		panic("meta chunk %p has invalid last free chunk: %u", metaChunk,
1608 			metaChunk->lastFreeChunk);
1609 		return;
1610 	}
1611 
1612 	// check free list for structural sanity
1613 	uint32 freeChunks = 0;
1614 	for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
1615 			chunk = chunk->next) {
1616 		if ((addr_t)chunk % sizeof(Chunk) != 0 || chunk < metaChunk->chunks
1617 			|| chunk >= metaChunk->chunks + metaChunk->chunkCount) {
1618 			panic("meta chunk %p has invalid element in free list, chunk: %p",
1619 				metaChunk, chunk);
1620 			return;
1621 		}
1622 
1623 		if (++freeChunks > metaChunk->chunkCount) {
1624 			panic("meta chunk %p has cyclic free list", metaChunk);
1625 			return;
1626 		}
1627 	}
1628 
1629 	if (freeChunks + metaChunk->usedChunkCount > metaChunk->chunkCount) {
1630 		panic("meta chunk %p has mismatching free/used chunk counts: total: "
1631 			"%u, used: %u, free: %" B_PRIu32, metaChunk, metaChunk->chunkCount,
1632 			metaChunk->usedChunkCount, freeChunks);
1633 		return;
1634 	}
1635 
1636 	// count used chunks by looking at their reference/next field
1637 	uint32 usedChunks = 0;
1638 	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
1639 		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i))
1640 			usedChunks++;
1641 	}
1642 
1643 	if (usedChunks != metaChunk->usedChunkCount) {
1644 		panic("meta chunk %p has used chunks that appear free: total: "
1645 			"%u, used: %u, appearing used: %" B_PRIu32, metaChunk,
1646 			metaChunk->chunkCount, metaChunk->usedChunkCount, usedChunks);
1647 		return;
1648 	}
1649 
1650 	// check free range
1651 	for (uint32 i = metaChunk->firstFreeChunk; i < metaChunk->lastFreeChunk;
1652 			i++) {
1653 		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i)) {
1654 			panic("meta chunk %p has used chunk in free range, chunk: %p (%"
1655 				B_PRIu32 ", free range: %u - %u)", metaChunk,
1656 				metaChunk->chunks + i, i, metaChunk->firstFreeChunk,
1657 				metaChunk->lastFreeChunk);
1658 			return;
1659 		}
1660 	}
1661 }
1662 
1663 #endif	// DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
1664 
1665 
1666 /*static*/ int
1667 MemoryManager::_DumpRawAllocations(int argc, char** argv)
1668 {
1669 	kprintf("area        meta chunk  chunk  base        size (KB)\n");
1670 
1671 	size_t totalSize = 0;
1672 
1673 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
1674 			Area* area = it.Next();) {
1675 		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1676 			MetaChunk* metaChunk = area->metaChunks + i;
1677 			if (metaChunk->chunkSize == 0)
1678 				continue;
1679 			for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
1680 				Chunk* chunk = metaChunk->chunks + k;
1681 
1682 				// skip free chunks
1683 				if (_IsChunkFree(metaChunk, chunk))
1684 					continue;
1685 
1686 				addr_t reference = chunk->reference;
1687 				if ((reference & 1) == 0 || reference == 1)
1688 					continue;
1689 
1690 				addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
1691 				size_t size = reference - chunkAddress + 1;
1692 				totalSize += size;
1693 
1694 				kprintf("%p  %10" B_PRId32 "  %5" B_PRIu32 "  %p  %9"
1695 					B_PRIuSIZE "\n", area, i, k, (void*)chunkAddress,
1696 					size / 1024);
1697 			}
1698 		}
1699 	}
1700 
1701 	kprintf("total:                                     %9" B_PRIuSIZE "\n",
1702 		totalSize / 1024);
1703 
1704 	return 0;
1705 }
1706 
1707 
1708 /*static*/ void
1709 MemoryManager::_PrintMetaChunkTableHeader(bool printChunks)
1710 {
1711 	if (printChunks)
1712 		kprintf("chunk        base       cache  object size  cache name\n");
1713 	else
1714 		kprintf("chunk        base\n");
1715 }
1716 
1717 /*static*/ void
1718 MemoryManager::_DumpMetaChunk(MetaChunk* metaChunk, bool printChunks,
1719 	bool printHeader)
1720 {
1721 	if (printHeader)
1722 		_PrintMetaChunkTableHeader(printChunks);
1723 
1724 	const char* type = "empty";
1725 	if (metaChunk->chunkSize != 0) {
1726 		switch (metaChunk->chunkSize) {
1727 			case SLAB_CHUNK_SIZE_SMALL:
1728 				type = "small";
1729 				break;
1730 			case SLAB_CHUNK_SIZE_MEDIUM:
1731 				type = "medium";
1732 				break;
1733 			case SLAB_CHUNK_SIZE_LARGE:
1734 				type = "large";
1735 				break;
1736 		}
1737 	}
1738 
1739 	int metaChunkIndex = metaChunk - metaChunk->GetArea()->metaChunks;
1740 	kprintf("%5d  %p  --- %6s meta chunk", metaChunkIndex,
1741 		(void*)metaChunk->chunkBase, type);
1742 	if (metaChunk->chunkSize != 0) {
1743 		kprintf(": %4u/%4u used, %-4u-%4u free ------------\n",
1744 			metaChunk->usedChunkCount, metaChunk->chunkCount,
1745 			metaChunk->firstFreeChunk, metaChunk->lastFreeChunk);
1746 	} else
1747 		kprintf(" --------------------------------------------\n");
1748 
1749 	if (metaChunk->chunkSize == 0 || !printChunks)
1750 		return;
1751 
1752 	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
1753 		Chunk* chunk = metaChunk->chunks + i;
1754 
1755 		// skip free chunks
1756 		if (_IsChunkFree(metaChunk, chunk)) {
1757 			if (!_IsChunkInFreeList(metaChunk, chunk)) {
1758 				kprintf("%5" B_PRIu32 "  %p  appears free, but isn't in free "
1759 					"list!\n", i, (void*)_ChunkAddress(metaChunk, chunk));
1760 			}
1761 
1762 			continue;
1763 		}
1764 
1765 		addr_t reference = chunk->reference;
1766 		if ((reference & 1) == 0) {
1767 			ObjectCache* cache = (ObjectCache*)reference;
1768 			kprintf("%5" B_PRIu32 "  %p  %p  %11" B_PRIuSIZE "  %s\n", i,
1769 				(void*)_ChunkAddress(metaChunk, chunk), cache,
1770 				cache != NULL ? cache->object_size : 0,
1771 				cache != NULL ? cache->name : "");
1772 		} else if (reference != 1) {
1773 			kprintf("%5" B_PRIu32 "  %p  raw allocation up to %p\n", i,
1774 				(void*)_ChunkAddress(metaChunk, chunk), (void*)reference);
1775 		}
1776 	}
1777 }
1778 
1779 
1780 /*static*/ int
1781 MemoryManager::_DumpMetaChunk(int argc, char** argv)
1782 {
1783 	if (argc != 2) {
1784 		print_debugger_command_usage(argv[0]);
1785 		return 0;
1786 	}
1787 
1788 	uint64 address;
1789 	if (!evaluate_debug_expression(argv[1], &address, false))
1790 		return 0;
1791 
1792 	Area* area = _AreaForAddress(address);
1793 
1794 	MetaChunk* metaChunk;
1795 	if ((addr_t)address >= (addr_t)area->metaChunks
1796 		&& (addr_t)address
1797 			< (addr_t)(area->metaChunks + SLAB_META_CHUNKS_PER_AREA)) {
1798 		metaChunk = (MetaChunk*)(addr_t)address;
1799 	} else {
1800 		metaChunk = area->metaChunks
1801 			+ (address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE;
1802 	}
1803 
1804 	_DumpMetaChunk(metaChunk, true, true);
1805 
1806 	return 0;
1807 }
1808 
1809 
1810 /*static*/ void
1811 MemoryManager::_DumpMetaChunks(const char* name, MetaChunkList& metaChunkList,
1812 	bool printChunks)
1813 {
1814 	kprintf("%s:\n", name);
1815 
1816 	for (MetaChunkList::Iterator it = metaChunkList.GetIterator();
1817 			MetaChunk* metaChunk = it.Next();) {
1818 		_DumpMetaChunk(metaChunk, printChunks, false);
1819 	}
1820 }
1821 
1822 
1823 /*static*/ int
1824 MemoryManager::_DumpMetaChunks(int argc, char** argv)
1825 {
1826 	bool printChunks = argc > 1 && strcmp(argv[1], "-c") == 0;
1827 
1828 	_PrintMetaChunkTableHeader(printChunks);
1829 	_DumpMetaChunks("free complete", sFreeCompleteMetaChunks, printChunks);
1830 	_DumpMetaChunks("free short", sFreeShortMetaChunks, printChunks);
1831 	_DumpMetaChunks("partial small", sPartialMetaChunksSmall, printChunks);
1832 	_DumpMetaChunks("partial medium", sPartialMetaChunksMedium, printChunks);
1833 
1834 	return 0;
1835 }
1836 
1837 
1838 /*static*/ int
1839 MemoryManager::_DumpArea(int argc, char** argv)
1840 {
1841 	bool printChunks = false;
1842 
1843 	int argi = 1;
1844 	while (argi < argc) {
1845 		if (argv[argi][0] != '-')
1846 			break;
1847 		const char* arg = argv[argi++];
1848 		if (strcmp(arg, "-c") == 0) {
1849 			printChunks = true;
1850 		} else {
1851 			print_debugger_command_usage(argv[0]);
1852 			return 0;
1853 		}
1854 	}
1855 
1856 	if (argi + 1 != argc) {
1857 		print_debugger_command_usage(argv[0]);
1858 		return 0;
1859 	}
1860 
1861 	uint64 address;
1862 	if (!evaluate_debug_expression(argv[argi], &address, false))
1863 		return 0;
1864 
1865 	Area* area = _AreaForAddress((addr_t)address);
1866 
1867 	for (uint32 k = 0; k < SLAB_META_CHUNKS_PER_AREA; k++) {
1868 		MetaChunk* metaChunk = area->metaChunks + k;
1869 		_DumpMetaChunk(metaChunk, printChunks, k == 0);
1870 	}
1871 
1872 	return 0;
1873 }
1874 
1875 
1876 /*static*/ int
1877 MemoryManager::_DumpAreas(int argc, char** argv)
1878 {
1879 	kprintf("      base        area   meta      small   medium  large\n");
1880 
1881 	size_t totalTotalSmall = 0;
1882 	size_t totalUsedSmall = 0;
1883 	size_t totalTotalMedium = 0;
1884 	size_t totalUsedMedium = 0;
1885 	size_t totalUsedLarge = 0;
1886 	uint32 areaCount = 0;
1887 
1888 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
1889 			Area* area = it.Next();) {
1890 		areaCount++;
1891 
1892 		// sum up the free/used counts for the chunk sizes
1893 		int totalSmall = 0;
1894 		int usedSmall = 0;
1895 		int totalMedium = 0;
1896 		int usedMedium = 0;
1897 		int usedLarge = 0;
1898 
1899 		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1900 			MetaChunk* metaChunk = area->metaChunks + i;
1901 			if (metaChunk->chunkSize == 0)
1902 				continue;
1903 
1904 			switch (metaChunk->chunkSize) {
1905 				case SLAB_CHUNK_SIZE_SMALL:
1906 					totalSmall += metaChunk->chunkCount;
1907 					usedSmall += metaChunk->usedChunkCount;
1908 					break;
1909 				case SLAB_CHUNK_SIZE_MEDIUM:
1910 					totalMedium += metaChunk->chunkCount;
1911 					usedMedium += metaChunk->usedChunkCount;
1912 					break;
1913 				case SLAB_CHUNK_SIZE_LARGE:
1914 					usedLarge += metaChunk->usedChunkCount;
1915 					break;
1916 			}
1917 		}
1918 
1919 		kprintf("%p  %p  %2u/%2u  %4d/%4d  %3d/%3d  %5d\n",
1920 			area, area->vmArea, area->usedMetaChunkCount,
1921 			SLAB_META_CHUNKS_PER_AREA, usedSmall, totalSmall, usedMedium,
1922 			totalMedium, usedLarge);
1923 
1924 		totalTotalSmall += totalSmall;
1925 		totalUsedSmall += usedSmall;
1926 		totalTotalMedium += totalMedium;
1927 		totalUsedMedium += usedMedium;
1928 		totalUsedLarge += usedLarge;
1929 	}
1930 
1931 	kprintf("%d free area%s:\n", sFreeAreaCount,
1932 		sFreeAreaCount == 1 ? "" : "s");
1933 	for (Area* area = sFreeAreas; area != NULL; area = area->next) {
1934 		areaCount++;
1935 		kprintf("%p  %p\n", area, area->vmArea);
1936 	}
1937 
1938 	kprintf("total usage:\n");
1939 	kprintf("  small:    %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedSmall,
1940 		totalTotalSmall);
1941 	kprintf("  medium:   %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedMedium,
1942 		totalTotalMedium);
1943 	kprintf("  large:    %" B_PRIuSIZE "\n", totalUsedLarge);
1944 	kprintf("  memory:   %" B_PRIuSIZE "/%" B_PRIuSIZE " KB\n",
1945 		(totalUsedSmall * SLAB_CHUNK_SIZE_SMALL
1946 			+ totalUsedMedium * SLAB_CHUNK_SIZE_MEDIUM
1947 			+ totalUsedLarge * SLAB_CHUNK_SIZE_LARGE) / 1024,
1948 		areaCount * SLAB_AREA_SIZE / 1024);
1949 	kprintf("  overhead: %" B_PRIuSIZE " KB\n",
1950 		areaCount * kAreaAdminSize / 1024);
1951 
1952 	return 0;
1953 }
1954