xref: /haiku/src/system/kernel/slab/MemoryManager.cpp (revision 02354704729d38c3b078c696adc1bbbd33cbcf72)
1 /*
2  * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include "MemoryManager.h"
8 
9 #include <algorithm>
10 
11 #include <debug.h>
12 #include <tracing.h>
13 #include <util/AutoLock.h>
14 #include <vm/vm.h>
15 #include <vm/vm_page.h>
16 #include <vm/vm_priv.h>
17 #include <vm/VMAddressSpace.h>
18 #include <vm/VMArea.h>
19 #include <vm/VMCache.h>
20 #include <vm/VMTranslationMap.h>
21 
22 #include "kernel_debug_config.h"
23 
24 #include "ObjectCache.h"
25 
26 
27 //#define TRACE_MEMORY_MANAGER
28 #ifdef TRACE_MEMORY_MANAGER
29 #	define TRACE(x...)	dprintf(x)
30 #else
31 #	define TRACE(x...)	do {} while (false)
32 #endif
33 
34 #if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
35 #	define PARANOID_CHECKS_ONLY(x)	x
36 #else
37 #	define PARANOID_CHECKS_ONLY(x)
38 #endif
39 
40 
41 static const char* const kSlabAreaName = "slab area";
42 
43 static void* sAreaTableBuffer[1024];
44 
45 mutex MemoryManager::sLock;
46 rw_lock MemoryManager::sAreaTableLock;
47 kernel_args* MemoryManager::sKernelArgs;
48 MemoryManager::AreaTable MemoryManager::sAreaTable;
49 MemoryManager::Area* MemoryManager::sFreeAreas;
50 int MemoryManager::sFreeAreaCount;
51 MemoryManager::MetaChunkList MemoryManager::sFreeCompleteMetaChunks;
52 MemoryManager::MetaChunkList MemoryManager::sFreeShortMetaChunks;
53 MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksSmall;
54 MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksMedium;
55 MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryCanWait;
56 MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryDontWait;
57 bool MemoryManager::sMaintenanceNeeded;
58 
59 
60 RANGE_MARKER_FUNCTION_BEGIN(SlabMemoryManager)
61 
62 
63 // #pragma mark - kernel tracing
64 
65 
66 #if SLAB_MEMORY_MANAGER_TRACING
67 
68 
69 //namespace SlabMemoryManagerCacheTracing {
70 struct MemoryManager::Tracing {
71 
72 class MemoryManagerTraceEntry
73 	: public TRACE_ENTRY_SELECTOR(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE) {
74 public:
75 	MemoryManagerTraceEntry()
76 		:
77 		TraceEntryBase(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE, 0, true)
78 	{
79 	}
80 };
81 
82 
83 class Allocate : public MemoryManagerTraceEntry {
84 public:
85 	Allocate(ObjectCache* cache, uint32 flags)
86 		:
87 		MemoryManagerTraceEntry(),
88 		fCache(cache),
89 		fFlags(flags)
90 	{
91 		Initialized();
92 	}
93 
94 	virtual void AddDump(TraceOutput& out)
95 	{
96 		out.Print("slab memory manager alloc: cache: %p, flags: %#" B_PRIx32,
97 			fCache, fFlags);
98 	}
99 
100 private:
101 	ObjectCache*	fCache;
102 	uint32			fFlags;
103 };
104 
105 
106 class Free : public MemoryManagerTraceEntry {
107 public:
108 	Free(void* address, uint32 flags)
109 		:
110 		MemoryManagerTraceEntry(),
111 		fAddress(address),
112 		fFlags(flags)
113 	{
114 		Initialized();
115 	}
116 
117 	virtual void AddDump(TraceOutput& out)
118 	{
119 		out.Print("slab memory manager free: address: %p, flags: %#" B_PRIx32,
120 			fAddress, fFlags);
121 	}
122 
123 private:
124 	void*	fAddress;
125 	uint32	fFlags;
126 };
127 
128 
129 class AllocateRaw : public MemoryManagerTraceEntry {
130 public:
131 	AllocateRaw(size_t size, uint32 flags)
132 		:
133 		MemoryManagerTraceEntry(),
134 		fSize(size),
135 		fFlags(flags)
136 	{
137 		Initialized();
138 	}
139 
140 	virtual void AddDump(TraceOutput& out)
141 	{
142 		out.Print("slab memory manager alloc raw: size: %" B_PRIuSIZE
143 			", flags: %#" B_PRIx32, fSize, fFlags);
144 	}
145 
146 private:
147 	size_t	fSize;
148 	uint32	fFlags;
149 };
150 
151 
152 class FreeRawOrReturnCache : public MemoryManagerTraceEntry {
153 public:
154 	FreeRawOrReturnCache(void* address, uint32 flags)
155 		:
156 		MemoryManagerTraceEntry(),
157 		fAddress(address),
158 		fFlags(flags)
159 	{
160 		Initialized();
161 	}
162 
163 	virtual void AddDump(TraceOutput& out)
164 	{
165 		out.Print("slab memory manager free raw/return: address: %p, flags: %#"
166 			B_PRIx32, fAddress, fFlags);
167 	}
168 
169 private:
170 	void*	fAddress;
171 	uint32	fFlags;
172 };
173 
174 
175 class AllocateArea : public MemoryManagerTraceEntry {
176 public:
177 	AllocateArea(Area* area, uint32 flags)
178 		:
179 		MemoryManagerTraceEntry(),
180 		fArea(area),
181 		fFlags(flags)
182 	{
183 		Initialized();
184 	}
185 
186 	virtual void AddDump(TraceOutput& out)
187 	{
188 		out.Print("slab memory manager alloc area: flags: %#" B_PRIx32
189 			" -> %p", fFlags, fArea);
190 	}
191 
192 private:
193 	Area*	fArea;
194 	uint32	fFlags;
195 };
196 
197 
198 class AddArea : public MemoryManagerTraceEntry {
199 public:
200 	AddArea(Area* area)
201 		:
202 		MemoryManagerTraceEntry(),
203 		fArea(area)
204 	{
205 		Initialized();
206 	}
207 
208 	virtual void AddDump(TraceOutput& out)
209 	{
210 		out.Print("slab memory manager add area: %p", fArea);
211 	}
212 
213 private:
214 	Area*	fArea;
215 };
216 
217 
218 class FreeArea : public MemoryManagerTraceEntry {
219 public:
220 	FreeArea(Area* area, bool areaRemoved, uint32 flags)
221 		:
222 		MemoryManagerTraceEntry(),
223 		fArea(area),
224 		fFlags(flags),
225 		fRemoved(areaRemoved)
226 	{
227 		Initialized();
228 	}
229 
230 	virtual void AddDump(TraceOutput& out)
231 	{
232 		out.Print("slab memory manager free area: %p%s, flags: %#" B_PRIx32,
233 			fArea, fRemoved ? " (removed)" : "", fFlags);
234 	}
235 
236 private:
237 	Area*	fArea;
238 	uint32	fFlags;
239 	bool	fRemoved;
240 };
241 
242 
243 class AllocateMetaChunk : public MemoryManagerTraceEntry {
244 public:
245 	AllocateMetaChunk(MetaChunk* metaChunk)
246 		:
247 		MemoryManagerTraceEntry(),
248 		fMetaChunk(metaChunk->chunkBase)
249 	{
250 		Initialized();
251 	}
252 
253 	virtual void AddDump(TraceOutput& out)
254 	{
255 		out.Print("slab memory manager alloc meta chunk: %#" B_PRIxADDR,
256 			fMetaChunk);
257 	}
258 
259 private:
260 	addr_t	fMetaChunk;
261 };
262 
263 
264 class FreeMetaChunk : public MemoryManagerTraceEntry {
265 public:
266 	FreeMetaChunk(MetaChunk* metaChunk)
267 		:
268 		MemoryManagerTraceEntry(),
269 		fMetaChunk(metaChunk->chunkBase)
270 	{
271 		Initialized();
272 	}
273 
274 	virtual void AddDump(TraceOutput& out)
275 	{
276 		out.Print("slab memory manager free meta chunk: %#" B_PRIxADDR,
277 			fMetaChunk);
278 	}
279 
280 private:
281 	addr_t	fMetaChunk;
282 };
283 
284 
285 class AllocateChunk : public MemoryManagerTraceEntry {
286 public:
287 	AllocateChunk(size_t chunkSize, MetaChunk* metaChunk, Chunk* chunk)
288 		:
289 		MemoryManagerTraceEntry(),
290 		fChunkSize(chunkSize),
291 		fMetaChunk(metaChunk->chunkBase),
292 		fChunk(chunk - metaChunk->chunks)
293 	{
294 		Initialized();
295 	}
296 
297 	virtual void AddDump(TraceOutput& out)
298 	{
299 		out.Print("slab memory manager alloc chunk: size: %" B_PRIuSIZE
300 			" -> meta chunk: %#" B_PRIxADDR ", chunk: %" B_PRIu32, fChunkSize,
301 			fMetaChunk, fChunk);
302 	}
303 
304 private:
305 	size_t	fChunkSize;
306 	addr_t	fMetaChunk;
307 	uint32	fChunk;
308 };
309 
310 
311 class AllocateChunks : public MemoryManagerTraceEntry {
312 public:
313 	AllocateChunks(size_t chunkSize, uint32 chunkCount, MetaChunk* metaChunk,
314 		Chunk* chunk)
315 		:
316 		MemoryManagerTraceEntry(),
317 		fMetaChunk(metaChunk->chunkBase),
318 		fChunkSize(chunkSize),
319 		fChunkCount(chunkCount),
320 		fChunk(chunk - metaChunk->chunks)
321 	{
322 		Initialized();
323 	}
324 
325 	virtual void AddDump(TraceOutput& out)
326 	{
327 		out.Print("slab memory manager alloc chunks: size: %" B_PRIuSIZE
328 			", count %" B_PRIu32 " -> meta chunk: %#" B_PRIxADDR ", chunk: %"
329 			B_PRIu32, fChunkSize, fChunkCount, fMetaChunk, fChunk);
330 	}
331 
332 private:
333 	addr_t	fMetaChunk;
334 	size_t	fChunkSize;
335 	uint32	fChunkCount;
336 	uint32	fChunk;
337 };
338 
339 
340 class FreeChunk : public MemoryManagerTraceEntry {
341 public:
342 	FreeChunk(MetaChunk* metaChunk, Chunk* chunk)
343 		:
344 		MemoryManagerTraceEntry(),
345 		fMetaChunk(metaChunk->chunkBase),
346 		fChunk(chunk - metaChunk->chunks)
347 	{
348 		Initialized();
349 	}
350 
351 	virtual void AddDump(TraceOutput& out)
352 	{
353 		out.Print("slab memory manager free chunk: meta chunk: %#" B_PRIxADDR
354 			", chunk: %" B_PRIu32, fMetaChunk, fChunk);
355 	}
356 
357 private:
358 	addr_t	fMetaChunk;
359 	uint32	fChunk;
360 };
361 
362 
363 class Map : public MemoryManagerTraceEntry {
364 public:
365 	Map(addr_t address, size_t size, uint32 flags)
366 		:
367 		MemoryManagerTraceEntry(),
368 		fAddress(address),
369 		fSize(size),
370 		fFlags(flags)
371 	{
372 		Initialized();
373 	}
374 
375 	virtual void AddDump(TraceOutput& out)
376 	{
377 		out.Print("slab memory manager map: %#" B_PRIxADDR ", size: %"
378 			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
379 	}
380 
381 private:
382 	addr_t	fAddress;
383 	size_t	fSize;
384 	uint32	fFlags;
385 };
386 
387 
388 class Unmap : public MemoryManagerTraceEntry {
389 public:
390 	Unmap(addr_t address, size_t size, uint32 flags)
391 		:
392 		MemoryManagerTraceEntry(),
393 		fAddress(address),
394 		fSize(size),
395 		fFlags(flags)
396 	{
397 		Initialized();
398 	}
399 
400 	virtual void AddDump(TraceOutput& out)
401 	{
402 		out.Print("slab memory manager unmap: %#" B_PRIxADDR ", size: %"
403 			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
404 	}
405 
406 private:
407 	addr_t	fAddress;
408 	size_t	fSize;
409 	uint32	fFlags;
410 };
411 
412 
413 //}	// namespace SlabMemoryManagerCacheTracing
414 };	// struct MemoryManager::Tracing
415 
416 
417 //#	define T(x)	new(std::nothrow) SlabMemoryManagerCacheTracing::x
418 #	define T(x)	new(std::nothrow) MemoryManager::Tracing::x
419 
420 #else
421 #	define T(x)
422 #endif	// SLAB_MEMORY_MANAGER_TRACING
423 
424 
425 // #pragma mark - MemoryManager
426 
427 
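/*!	Initializes the memory manager's locks, free lists, and area hash table.
	Called during early boot; \a args is kept so that areas can be carved out
	of early (pre-VM) memory until InitPostArea() is called.
*/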
428 /*static*/ void
429 MemoryManager::Init(kernel_args* args)
430 {
431 	mutex_init(&sLock, "slab memory manager");
432 	rw_lock_init(&sAreaTableLock, "slab memory manager area table");
433 	sKernelArgs = args;
434 
435 	new(&sFreeCompleteMetaChunks) MetaChunkList;
436 	new(&sFreeShortMetaChunks) MetaChunkList;
437 	new(&sPartialMetaChunksSmall) MetaChunkList;
438 	new(&sPartialMetaChunksMedium) MetaChunkList;
439 
440 	new(&sAreaTable) AreaTable;
441 	sAreaTable.Resize(sAreaTableBuffer, sizeof(sAreaTableBuffer), true);
442 		// A bit hacky: The table now owns the memory. Since we never resize or
443 		// free it, that's not a problem, though.
444 
445 	sFreeAreas = NULL;
446 	sFreeAreaCount = 0;
447 	sMaintenanceNeeded = false;
448 }
449 
450 
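/*!	Finishes initialization once real VM areas can be created: converts all
	early areas into actual areas, unmaps their unused chunks, and registers
	the slab KDL debugger commands.
*/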
451 /*static*/ void
452 MemoryManager::InitPostArea()
453 {
454 	sKernelArgs = NULL;
455 
456 	// Convert all early areas to actual VM areas. This loop might look a bit
457 	// weird, but it is necessary since creating the actual area involves
458 	// memory allocations, which in turn can change the situation.
459 	bool done;
460 	do {
461 		done = true;
462 
463 		for (AreaTable::Iterator it = sAreaTable.GetIterator();
464 				Area* area = it.Next();) {
465 			if (area->vmArea == NULL) {
466 				_ConvertEarlyArea(area);
467 				done = false;
468 				break;
469 			}
470 		}
471 	} while (!done);
472 
473 	// unmap and free unused pages
474 	if (sFreeAreas != NULL) {
475 		// Just "leak" all but the first of the free areas -- the VM will
476 		// automatically free all unclaimed memory.
477 		sFreeAreas->next = NULL;
478 		sFreeAreaCount = 1;
479 
480 		Area* area = sFreeAreas;
481 		_ConvertEarlyArea(area);
482 		_UnmapFreeChunksEarly(area);
483 	}
484 
485 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
486 			Area* area = it.Next();) {
487 		_UnmapFreeChunksEarly(area);
488 	}
489 
490 	sMaintenanceNeeded = true;
491 		// might not be necessary, but it does no harm
492 
493 	add_debugger_command_etc("slab_area", &_DumpArea,
494 		"Dump information on a given slab area",
495 		"[ -c ] <area>\n"
496 		"Dump information on a given slab area specified by its base "
497 			"address.\n"
498 		"If \"-c\" is given, the chunks of all meta chunks are printed as "
499 			"well.\n", 0);
500 	add_debugger_command_etc("slab_areas", &_DumpAreas,
501 		"List all slab areas",
502 		"\n"
503 		"Lists all slab areas.\n", 0);
504 	add_debugger_command_etc("slab_meta_chunk", &_DumpMetaChunk,
505 		"Dump information on a given slab meta chunk",
506 		"<meta chunk>\n"
507 		"Dump information on a given slab meta chunk specified by its base "
508 			"or object address.\n", 0);
509 	add_debugger_command_etc("slab_meta_chunks", &_DumpMetaChunks,
510 		"List all non-full slab meta chunks",
511 		"[ -c ]\n"
512 		"Lists all non-full slab meta chunks.\n"
513 		"If \"-c\" is given, the chunks of all meta chunks are printed as "
514 			"well.\n", 0);
515 	add_debugger_command_etc("slab_raw_allocations", &_DumpRawAllocations,
516 		"List all raw allocations in slab areas",
517 		"\n"
518 		"Lists all raw allocations in slab areas.\n", 0);
519 }
520 
521 
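/*!	Allocates and maps one chunk of \a cache's slab size. On success the
	chunk's base address is returned in \a _pages and the chunk is tagged with
	the owning cache.
*/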
522 /*static*/ status_t
523 MemoryManager::Allocate(ObjectCache* cache, uint32 flags, void*& _pages)
524 {
525 	// TODO: Support CACHE_UNLOCKED_PAGES!
526 
527 	T(Allocate(cache, flags));
528 
529 	size_t chunkSize = cache->slab_size;
530 
531 	TRACE("MemoryManager::Allocate(%p, %#" B_PRIx32 "): chunkSize: %"
532 		B_PRIuSIZE "\n", cache, flags, chunkSize);
533 
534 	MutexLocker locker(sLock);
535 
536 	// allocate a chunk
537 	MetaChunk* metaChunk;
538 	Chunk* chunk;
539 	status_t error = _AllocateChunks(chunkSize, 1, flags, metaChunk, chunk);
540 	if (error != B_OK)
541 		return error;
542 
543 	// map the chunk
544 	Area* area = metaChunk->GetArea();
545 	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
546 
547 	locker.Unlock();
548 	error = _MapChunk(area->vmArea, chunkAddress, chunkSize, 0, flags);
549 	locker.Lock();
550 	if (error != B_OK) {
551 		// something failed -- free the chunk
552 		_FreeChunk(area, metaChunk, chunk, chunkAddress, true, flags);
553 		return error;
554 	}
555 
556 	chunk->reference = (addr_t)cache;
557 	_pages = (void*)chunkAddress;
558 
559 	TRACE("MemoryManager::Allocate() done: %p (meta chunk: %d, chunk %d)\n",
560 		_pages, int(metaChunk - area->metaChunks),
561 		int(chunk - metaChunk->chunks));
562 	return B_OK;
563 }
564 
565 
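/*!	Frees a chunk previously allocated via Allocate(). \a pages must be the
	chunk's base address.
*/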
566 /*static*/ void
567 MemoryManager::Free(void* pages, uint32 flags)
568 {
569 	TRACE("MemoryManager::Free(%p, %#" B_PRIx32 ")\n", pages, flags);
570 
571 	T(Free(pages, flags));
572 
573 	// get the area and the meta chunk
574 	Area* area = _AreaForAddress((addr_t)pages);
575 	MetaChunk* metaChunk = &area->metaChunks[
576 		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
577 
578 	ASSERT(metaChunk->chunkSize > 0);
579 	ASSERT((addr_t)pages >= metaChunk->chunkBase);
580 	ASSERT(((addr_t)pages % metaChunk->chunkSize) == 0);
581 
582 	// get the chunk
583 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
584 	Chunk* chunk = &metaChunk->chunks[chunkIndex];
585 
586 	ASSERT(chunk->next != NULL);
587 	ASSERT(chunk->next < metaChunk->chunks
588 		|| chunk->next
589 			>= metaChunk->chunks + SLAB_SMALL_CHUNKS_PER_META_CHUNK);
590 
591 	// and free it
592 	MutexLocker locker(sLock);
593 	_FreeChunk(area, metaChunk, chunk, (addr_t)pages, false, flags);
594 }
595 
596 
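/*!	Allocates \a size bytes (rounded up to a multiple of the small chunk size)
	that are not associated with an object cache. Requests larger than a large
	chunk, or with CACHE_ALIGN_ON_SIZE set, get a dedicated VM area; all other
	requests are served from small or medium chunks.

	A minimal usage sketch (hypothetical caller; \c neededSize and the flag
	value are assumptions for illustration only):
	\code
	void* pages;
	if (MemoryManager::AllocateRaw(neededSize, 0, pages) == B_OK) {
		// ... use the memory ...
		MemoryManager::FreeRawOrReturnCache(pages, 0);
	}
	\endcode
*/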
597 /*static*/ status_t
598 MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
599 {
600 #if SLAB_MEMORY_MANAGER_TRACING
601 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
602 	AbstractTraceEntryWithStackTrace* traceEntry = T(AllocateRaw(size, flags));
603 	size += sizeof(AllocationTrackingInfo);
604 #else
605 	T(AllocateRaw(size, flags));
606 #endif
607 #endif
608 
609 	size = ROUNDUP(size, SLAB_CHUNK_SIZE_SMALL);
610 
611 	TRACE("MemoryManager::AllocateRaw(%" B_PRIuSIZE ", %#" B_PRIx32 ")\n", size,
612 		  flags);
613 
614 	if (size > SLAB_CHUNK_SIZE_LARGE || (flags & CACHE_ALIGN_ON_SIZE) != 0) {
615 		// The requested size exceeds a large chunk, or the allocation must be
616 		// aligned to its size. Allocate a dedicated area.
617 		if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0)
618 			return B_WOULD_BLOCK;
619 
620 		virtual_address_restrictions virtualRestrictions = {};
621 		virtualRestrictions.address_specification
622 			= (flags & CACHE_ALIGN_ON_SIZE) != 0
623 				? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS;
624 		physical_address_restrictions physicalRestrictions = {};
625 		area_id area = create_area_etc(VMAddressSpace::KernelID(),
626 			"slab large raw allocation", size, B_FULL_LOCK,
627 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
628 			((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
629 					? CREATE_AREA_DONT_WAIT : 0)
630 				| CREATE_AREA_DONT_CLEAR, 0,
631 			&virtualRestrictions, &physicalRestrictions, &_pages);
632 
633 		status_t result = area >= 0 ? B_OK : area;
634 		if (result == B_OK) {
635 			fill_allocated_block(_pages, size);
636 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
637 			_AddTrackingInfo(_pages, size, traceEntry);
638 #endif
639 		}
640 
641 		return result;
642 	}
643 
644 	// determine chunk size (small or medium)
645 	size_t chunkSize = SLAB_CHUNK_SIZE_SMALL;
646 	uint32 chunkCount = size / SLAB_CHUNK_SIZE_SMALL;
647 
648 	if (size % SLAB_CHUNK_SIZE_MEDIUM == 0) {
649 		chunkSize = SLAB_CHUNK_SIZE_MEDIUM;
650 		chunkCount = size / SLAB_CHUNK_SIZE_MEDIUM;
651 	}
652 
653 	MutexLocker locker(sLock);
654 
655 	// allocate the chunks
656 	MetaChunk* metaChunk;
657 	Chunk* chunk;
658 	status_t error = _AllocateChunks(chunkSize, chunkCount, flags, metaChunk,
659 		chunk);
660 	if (error != B_OK)
661 		return error;
662 
663 	// map the chunks
664 	Area* area = metaChunk->GetArea();
665 	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
666 
667 	locker.Unlock();
668 	error = _MapChunk(area->vmArea, chunkAddress, size, 0, flags);
669 	locker.Lock();
670 	if (error != B_OK) {
671 		// something failed -- free the chunks
672 		for (uint32 i = 0; i < chunkCount; i++)
673 			_FreeChunk(area, metaChunk, chunk + i, chunkAddress, true, flags);
674 		return error;
675 	}
676 
677 	chunk->reference = (addr_t)chunkAddress + size - 1;
678 	_pages = (void*)chunkAddress;
679 
680 	fill_allocated_block(_pages, size);
681 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
682 	_AddTrackingInfo(_pages, size, traceEntry);
683 #endif
684 
685 	TRACE("MemoryManager::AllocateRaw() done: %p (meta chunk: %d, chunk %d)\n",
686 		_pages, int(metaChunk - area->metaChunks),
687 		int(chunk - metaChunk->chunks));
688 	return B_OK;
689 }
690 
691 
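/*!	Frees a raw allocation made by AllocateRaw(), or, if \a pages belongs to
	an object cache chunk, returns that cache without freeing anything.
	Returns NULL when the memory was freed (or queued for deferred freeing).
*/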
692 /*static*/ ObjectCache*
693 MemoryManager::FreeRawOrReturnCache(void* pages, uint32 flags)
694 {
695 	TRACE("MemoryManager::FreeRawOrReturnCache(%p, %#" B_PRIx32 ")\n", pages,
696 		flags);
697 
698 	T(FreeRawOrReturnCache(pages, flags));
699 
700 	// get the area
701 	addr_t areaBase = _AreaBaseAddressForAddress((addr_t)pages);
702 
703 	ReadLocker readLocker(sAreaTableLock);
704 	Area* area = sAreaTable.Lookup(areaBase);
705 	readLocker.Unlock();
706 
707 	if (area == NULL) {
708 		// Not a slab area -- probably a large raw allocation in its own area.
709 		if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
710 			// We cannot delete areas without locking the kernel address space,
711 			// so defer the free until we can do that.
712 			deferred_free(pages);
713 			return NULL;
714 		}
715 
716 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
717 		addressSpace->ReadLock();
718 		VMArea* area = addressSpace->LookupArea((addr_t)pages);
719 		addressSpace->ReadUnlock();
720 
721 		if (area != NULL && (addr_t)pages == area->Base())
722 			delete_area(area->id);
723 		else
724 			panic("freeing unknown block %p from area %p", pages, area);
725 
726 		return NULL;
727 	}
728 
729 	MetaChunk* metaChunk = &area->metaChunks[
730 		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
731 
732 	// get the chunk
733 	ASSERT(metaChunk->chunkSize > 0);
734 	ASSERT((addr_t)pages >= metaChunk->chunkBase);
735 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
736 	Chunk* chunk = &metaChunk->chunks[chunkIndex];
737 
738 	addr_t reference = chunk->reference;
739 	if ((reference & 1) == 0)
740 		return (ObjectCache*)reference;
741 
742 	// Seems we have a raw chunk allocation.
743 	ASSERT((addr_t)pages == _ChunkAddress(metaChunk, chunk));
744 	ASSERT(reference > (addr_t)pages);
745 	ASSERT(reference <= areaBase + SLAB_AREA_SIZE - 1);
746 	size_t size = reference - (addr_t)pages + 1;
747 	ASSERT((size % SLAB_CHUNK_SIZE_SMALL) == 0);
748 
749 	// Verify we can actually lock the kernel space before going further.
750 	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
751 		deferred_free(pages);
752 		return NULL;
753 	}
754 
755 	// unmap the chunks
756 	_UnmapChunk(area->vmArea, (addr_t)pages, size, flags);
757 
758 	// and free them
759 	MutexLocker locker(sLock);
760 	uint32 chunkCount = size / metaChunk->chunkSize;
761 	for (uint32 i = 0; i < chunkCount; i++)
762 		_FreeChunk(area, metaChunk, chunk + i, (addr_t)pages, true, flags);
763 
764 	return NULL;
765 }
766 
767 
768 /*static*/ size_t
769 MemoryManager::AcceptableChunkSize(size_t size)
770 {
771 	if (size <= SLAB_CHUNK_SIZE_SMALL)
772 		return SLAB_CHUNK_SIZE_SMALL;
773 	if (size <= SLAB_CHUNK_SIZE_MEDIUM)
774 		return SLAB_CHUNK_SIZE_MEDIUM;
775 	return SLAB_CHUNK_SIZE_LARGE;
776 }
777 
778 
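/*!	Returns the object cache \a address belongs to and sets \a _size to the
	cache's object size. For raw and area-backed allocations NULL is returned
	and \a _size is set to the allocation size (0 if the address is unknown).
*/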
779 /*static*/ ObjectCache*
780 MemoryManager::GetAllocationInfo(void* address, size_t& _size)
781 {
782 	// get the area
783 	ReadLocker readLocker(sAreaTableLock);
784 	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
785 	readLocker.Unlock();
786 
787 	if (area == NULL) {
788 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
789 		addressSpace->ReadLock();
790 		VMArea* area = addressSpace->LookupArea((addr_t)address);
791 		if (area != NULL && (addr_t)address == area->Base())
792 			_size = area->Size();
793 		else
794 			_size = 0;
795 		addressSpace->ReadUnlock();
796 
797 		return NULL;
798 	}
799 
800 	MetaChunk* metaChunk = &area->metaChunks[
801 		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
802 
803 	// get the chunk
804 	ASSERT(metaChunk->chunkSize > 0);
805 	ASSERT((addr_t)address >= metaChunk->chunkBase);
806 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
807 
808 	addr_t reference = metaChunk->chunks[chunkIndex].reference;
809 	if ((reference & 1) == 0) {
810 		ObjectCache* cache = (ObjectCache*)reference;
811 		_size = cache->object_size;
812 		return cache;
813 	}
814 
815 	_size = reference - (addr_t)address + 1;
816 	return NULL;
817 }
818 
819 
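/*!	Returns the object cache \a address belongs to, or NULL if the address
	refers to a raw allocation or isn't managed by the memory manager.
*/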
820 /*static*/ ObjectCache*
821 MemoryManager::CacheForAddress(void* address)
822 {
823 	// get the area
824 	ReadLocker readLocker(sAreaTableLock);
825 	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
826 	readLocker.Unlock();
827 
828 	if (area == NULL)
829 		return NULL;
830 
831 	MetaChunk* metaChunk = &area->metaChunks[
832 		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
833 
834 	// get the chunk
835 	ASSERT(metaChunk->chunkSize > 0);
836 	ASSERT((addr_t)address >= metaChunk->chunkBase);
837 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
838 
839 	addr_t reference = metaChunk->chunks[chunkIndex].reference;
840 	return (reference & 1) == 0 ? (ObjectCache*)reference : NULL;
841 }
842 
843 
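/*!	Performs the maintenance requested via _RequestMaintenance(): keeps the
	free area reserve at one or two areas, allocating or deleting areas as
	necessary.
*/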
844 /*static*/ void
845 MemoryManager::PerformMaintenance()
846 {
847 	MutexLocker locker(sLock);
848 
849 	while (sMaintenanceNeeded) {
850 		sMaintenanceNeeded = false;
851 
852 		// We want to keep one or two areas as a reserve. This way we have at
853 		// least one area to use in situations where we aren't allowed to
854 		// allocate one, and we also avoid ping-pong effects.
855 		if (sFreeAreaCount > 0 && sFreeAreaCount <= 2)
856 			return;
857 
858 		if (sFreeAreaCount == 0) {
859 			// try to allocate one
860 			Area* area;
861 			if (_AllocateArea(0, area) != B_OK)
862 				return;
863 
864 			_PushFreeArea(area);
865 			if (sFreeAreaCount > 2)
866 				sMaintenanceNeeded = true;
867 		} else {
868 			// free until we only have two free ones
869 			while (sFreeAreaCount > 2)
870 				_FreeArea(_PopFreeArea(), true, 0);
871 
872 			if (sFreeAreaCount == 0)
873 				sMaintenanceNeeded = true;
874 		}
875 	}
876 }
877 
878 
879 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
880 
881 /*static*/ bool
882 MemoryManager::AnalyzeAllocationCallers(AllocationTrackingCallback& callback)
883 {
884 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
885 			Area* area = it.Next();) {
886 		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
887 			MetaChunk* metaChunk = area->metaChunks + i;
888 			if (metaChunk->chunkSize == 0)
889 				continue;
890 
891 			for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
892 				Chunk* chunk = metaChunk->chunks + k;
893 
894 				// skip free chunks
895 				if (_IsChunkFree(metaChunk, chunk))
896 					continue;
897 
898 				addr_t reference = chunk->reference;
899 				if ((reference & 1) == 0 || reference == 1)
900 					continue;
901 
902 				addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
903 				size_t size = reference - chunkAddress + 1;
904 
905 				if (!callback.ProcessTrackingInfo(
906 						_TrackingInfoFor((void*)chunkAddress, size),
907 						(void*)chunkAddress, size)) {
908 					return false;
909 				}
910 			}
911 		}
912 	}
913 
914 	return true;
915 }
916 
917 #endif	// SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
918 
919 
920 /*static*/ ObjectCache*
921 MemoryManager::DebugObjectCacheForAddress(void* address)
922 {
923 	// get the area
924 	addr_t areaBase = _AreaBaseAddressForAddress((addr_t)address);
925 	Area* area = sAreaTable.Lookup(areaBase);
926 
927 	if (area == NULL)
928 		return NULL;
929 
930 	MetaChunk* metaChunk = &area->metaChunks[
931 		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
932 
933 	// get the chunk
934 	if (metaChunk->chunkSize == 0)
935 		return NULL;
936 	if ((addr_t)address < metaChunk->chunkBase)
937 		return NULL;
938 
939 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
940 	Chunk* chunk = &metaChunk->chunks[chunkIndex];
941 
942 	addr_t reference = chunk->reference;
943 	if ((reference & 1) == 0)
944 		return (ObjectCache*)reference;
945 
946 	return NULL;
947 }
948 
949 
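/*!	Allocates \a chunkCount contiguous chunks of \a chunkSize. If no suitable
	meta chunk is available, a new area is allocated (unless \a flags forbid
	locking the kernel address space), possibly waiting for a concurrent area
	allocation to finish. Called with sLock held; the lock may be released
	temporarily.
*/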
950 /*static*/ status_t
951 MemoryManager::_AllocateChunks(size_t chunkSize, uint32 chunkCount,
952 	uint32 flags, MetaChunk*& _metaChunk, Chunk*& _chunk)
953 {
954 	MetaChunkList* metaChunkList = NULL;
955 	if (chunkSize == SLAB_CHUNK_SIZE_SMALL) {
956 		metaChunkList = &sPartialMetaChunksSmall;
957 	} else if (chunkSize == SLAB_CHUNK_SIZE_MEDIUM) {
958 		metaChunkList = &sPartialMetaChunksMedium;
959 	} else if (chunkSize != SLAB_CHUNK_SIZE_LARGE) {
960 		panic("MemoryManager::_AllocateChunks(): Unsupported chunk size: %"
961 			B_PRIuSIZE, chunkSize);
962 		return B_BAD_VALUE;
963 	}
964 
965 	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk))
966 		return B_OK;
967 
968 	if (sFreeAreas != NULL) {
969 		_AddArea(_PopFreeArea());
970 		_RequestMaintenance();
971 
972 		return _GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
973 			_chunk) ? B_OK : B_NO_MEMORY;
974 	}
975 
976 	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
977 		// We can't create an area under this restriction, and we must not
978 		// wait for someone else to do so.
979 		return B_WOULD_BLOCK;
980 	}
981 
982 	// We need to allocate a new area. Wait if someone else is already trying
983 	// to do the same.
984 	while (true) {
985 		AllocationEntry* allocationEntry = NULL;
986 		if (sAllocationEntryDontWait != NULL) {
987 			allocationEntry = sAllocationEntryDontWait;
988 		} else if (sAllocationEntryCanWait != NULL
989 				&& (flags & CACHE_DONT_WAIT_FOR_MEMORY) == 0) {
990 			allocationEntry = sAllocationEntryCanWait;
991 		} else
992 			break;
993 
994 		ConditionVariableEntry entry;
995 		allocationEntry->condition.Add(&entry);
996 
997 		mutex_unlock(&sLock);
998 		entry.Wait();
999 		mutex_lock(&sLock);
1000 
1001 		if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
1002 				_chunk)) {
1003 			return B_OK;
1004 		}
1005 	}
1006 
1007 	// prepare the allocation entry others can wait on
1008 	AllocationEntry*& allocationEntry
1009 		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
1010 			? sAllocationEntryDontWait : sAllocationEntryCanWait;
1011 
1012 	AllocationEntry myResizeEntry;
1013 	allocationEntry = &myResizeEntry;
1014 	allocationEntry->condition.Init(metaChunkList, "wait for slab area");
1015 	allocationEntry->thread = find_thread(NULL);
1016 
1017 	Area* area;
1018 	status_t error = _AllocateArea(flags, area);
1019 
1020 	allocationEntry->condition.NotifyAll();
1021 	allocationEntry = NULL;
1022 
1023 	if (error != B_OK)
1024 		return error;
1025 
1026 	// Try again to get the chunks. Something might have been freed in the
1027 	// meantime; in that case we can free the newly allocated area again.
1028 	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk)) {
1029 		_FreeArea(area, true, flags);
1030 		return B_OK;
1031 	}
1032 
1033 	_AddArea(area);
1034 	return _GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
1035 		_chunk) ? B_OK : B_NO_MEMORY;
1036 }
1037 
1038 
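/*!	Tries to take \a chunkCount contiguous chunks of \a chunkSize from
	\a metaChunkList or from a free meta chunk. Returns false if no meta chunk
	with a sufficiently large free range is available.
*/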
1039 /*static*/ bool
1040 MemoryManager::_GetChunks(MetaChunkList* metaChunkList, size_t chunkSize,
1041 	uint32 chunkCount, MetaChunk*& _metaChunk, Chunk*& _chunk)
1042 {
1043 	// the common and less complicated special case
1044 	if (chunkCount == 1)
1045 		return _GetChunk(metaChunkList, chunkSize, _metaChunk, _chunk);
1046 
1047 	ASSERT(metaChunkList != NULL);
1048 
1049 	// Iterate through the partial meta chunk list and try to find a free
1050 	// range that is large enough.
1051 	MetaChunk* metaChunk = NULL;
1052 	for (MetaChunkList::Iterator it = metaChunkList->GetIterator();
1053 			(metaChunk = it.Next()) != NULL;) {
1054 		if (metaChunk->firstFreeChunk + chunkCount - 1
1055 				<= metaChunk->lastFreeChunk) {
1056 			break;
1057 		}
1058 	}
1059 
1060 	if (metaChunk == NULL) {
1061 		// try to get a free meta chunk
1062 		if ((SLAB_CHUNK_SIZE_LARGE - SLAB_AREA_STRUCT_OFFSET - kAreaAdminSize)
1063 				/ chunkSize >= chunkCount) {
1064 			metaChunk = sFreeShortMetaChunks.RemoveHead();
1065 		}
1066 		if (metaChunk == NULL)
1067 			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1068 
1069 		if (metaChunk == NULL)
1070 			return false;
1071 
1072 		metaChunkList->Add(metaChunk);
1073 		metaChunk->GetArea()->usedMetaChunkCount++;
1074 		_PrepareMetaChunk(metaChunk, chunkSize);
1075 
1076 		T(AllocateMetaChunk(metaChunk));
1077 	}
1078 
1079 	// pull the chunks out of the free list
1080 	Chunk* firstChunk = metaChunk->chunks + metaChunk->firstFreeChunk;
1081 	Chunk* lastChunk = firstChunk + (chunkCount - 1);
1082 	Chunk** chunkPointer = &metaChunk->freeChunks;
1083 	uint32 remainingChunks = chunkCount;
1084 	while (remainingChunks > 0) {
1085 		ASSERT_PRINT(chunkPointer, "remaining: %" B_PRIu32 "/%" B_PRIu32
1086 			", area: %p, meta chunk: %" B_PRIdSSIZE "\n", remainingChunks,
1087 			chunkCount, metaChunk->GetArea(),
1088 			metaChunk - metaChunk->GetArea()->metaChunks);
1089 		Chunk* chunk = *chunkPointer;
1090 		if (chunk >= firstChunk && chunk <= lastChunk) {
1091 			*chunkPointer = chunk->next;
1092 			chunk->reference = 1;
1093 			remainingChunks--;
1094 		} else
1095 			chunkPointer = &chunk->next;
1096 	}
1097 
1098 	// allocate the chunks
1099 	metaChunk->usedChunkCount += chunkCount;
1100 	if (metaChunk->usedChunkCount == metaChunk->chunkCount) {
1101 		// meta chunk is full now -- remove it from its list
1102 		if (metaChunkList != NULL)
1103 			metaChunkList->Remove(metaChunk);
1104 	}
1105 
1106 	// update the free range
1107 	metaChunk->firstFreeChunk += chunkCount;
1108 
1109 	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1110 
1111 	_chunk = firstChunk;
1112 	_metaChunk = metaChunk;
1113 
1114 	T(AllocateChunks(chunkSize, chunkCount, metaChunk, firstChunk));
1115 
1116 	return true;
1117 }
1118 
1119 
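/*!	Tries to take a single chunk of \a chunkSize from \a metaChunkList or from
	a free meta chunk. Returns false if none is available.
*/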
1120 /*static*/ bool
1121 MemoryManager::_GetChunk(MetaChunkList* metaChunkList, size_t chunkSize,
1122 	MetaChunk*& _metaChunk, Chunk*& _chunk)
1123 {
1124 	MetaChunk* metaChunk = metaChunkList != NULL
1125 		? metaChunkList->Head() : NULL;
1126 	if (metaChunk == NULL) {
1127 		// no partial meta chunk -- maybe there's a free one
1128 		if (chunkSize == SLAB_CHUNK_SIZE_LARGE) {
1129 			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1130 		} else {
1131 			metaChunk = sFreeShortMetaChunks.RemoveHead();
1132 			if (metaChunk == NULL)
1133 				metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1134 			if (metaChunk != NULL)
1135 				metaChunkList->Add(metaChunk);
1136 		}
1137 
1138 		if (metaChunk == NULL)
1139 			return false;
1140 
1141 		metaChunk->GetArea()->usedMetaChunkCount++;
1142 		_PrepareMetaChunk(metaChunk, chunkSize);
1143 
1144 		T(AllocateMetaChunk(metaChunk));
1145 	}
1146 
1147 	// allocate the chunk
1148 	if (++metaChunk->usedChunkCount == metaChunk->chunkCount) {
1149 		// meta chunk is full now -- remove it from its list
1150 		if (metaChunkList != NULL)
1151 			metaChunkList->Remove(metaChunk);
1152 	}
1153 
1154 	_chunk = _pop(metaChunk->freeChunks);
1155 	_metaChunk = metaChunk;
1156 
1157 	_chunk->reference = 1;
1158 
1159 	// update the free range
1160 	uint32 chunkIndex = _chunk - metaChunk->chunks;
1161 	if (chunkIndex >= metaChunk->firstFreeChunk
1162 			&& chunkIndex <= metaChunk->lastFreeChunk) {
1163 		if (chunkIndex - metaChunk->firstFreeChunk
1164 				<= metaChunk->lastFreeChunk - chunkIndex) {
1165 			metaChunk->firstFreeChunk = chunkIndex + 1;
1166 		} else
1167 			metaChunk->lastFreeChunk = chunkIndex - 1;
1168 	}
1169 
1170 	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1171 
1172 	T(AllocateChunk(chunkSize, metaChunk, _chunk));
1173 
1174 	return true;
1175 }
1176 
1177 
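/*!	Returns \a chunk to its meta chunk's free list, unmapping it first unless
	\a alreadyUnmapped. Meta chunks that become empty are moved back to the
	free meta chunk lists, and a completely unused area is freed. Called with
	sLock held.
*/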
1178 /*static*/ void
1179 MemoryManager::_FreeChunk(Area* area, MetaChunk* metaChunk, Chunk* chunk,
1180 	addr_t chunkAddress, bool alreadyUnmapped, uint32 flags)
1181 {
1182 	// unmap the chunk
1183 	if (!alreadyUnmapped) {
1184 		mutex_unlock(&sLock);
1185 		_UnmapChunk(area->vmArea, chunkAddress, metaChunk->chunkSize, flags);
1186 		mutex_lock(&sLock);
1187 	}
1188 
1189 	T(FreeChunk(metaChunk, chunk));
1190 
1191 	_push(metaChunk->freeChunks, chunk);
1192 
1193 	uint32 chunkIndex = chunk - metaChunk->chunks;
1194 
1195 	// free the meta chunk, if it is unused now
1196 	PARANOID_CHECKS_ONLY(bool areaDeleted = false;)
1197 	ASSERT(metaChunk->usedChunkCount > 0);
1198 	if (--metaChunk->usedChunkCount == 0) {
1199 		T(FreeMetaChunk(metaChunk));
1200 
1201 		// remove from partial meta chunk list
1202 		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
1203 			sPartialMetaChunksSmall.Remove(metaChunk);
1204 		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
1205 			sPartialMetaChunksMedium.Remove(metaChunk);
1206 
1207 		// mark empty
1208 		metaChunk->chunkSize = 0;
1209 
1210 		// add to free list
1211 		if (metaChunk == area->metaChunks)
1212 			sFreeShortMetaChunks.Add(metaChunk, false);
1213 		else
1214 			sFreeCompleteMetaChunks.Add(metaChunk, false);
1215 
1216 		// free the area, if it is unused now
1217 		ASSERT(area->usedMetaChunkCount > 0);
1218 		if (--area->usedMetaChunkCount == 0) {
1219 			_FreeArea(area, false, flags);
1220 			PARANOID_CHECKS_ONLY(areaDeleted = true;)
1221 		}
1222 	} else if (metaChunk->usedChunkCount == metaChunk->chunkCount - 1) {
1223 		// the meta chunk was full before -- add it back to its partial chunk
1224 		// list
1225 		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
1226 			sPartialMetaChunksSmall.Add(metaChunk, false);
1227 		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
1228 			sPartialMetaChunksMedium.Add(metaChunk, false);
1229 
1230 		metaChunk->firstFreeChunk = chunkIndex;
1231 		metaChunk->lastFreeChunk = chunkIndex;
1232 	} else {
1233 		// extend the free range, if the freed chunk adjoins it
1234 		if (chunkIndex + 1 == metaChunk->firstFreeChunk) {
1235 			uint32 firstFree = chunkIndex;
1236 			for (; firstFree > 0; firstFree--) {
1237 				Chunk* previousChunk = &metaChunk->chunks[firstFree - 1];
1238 				if (!_IsChunkFree(metaChunk, previousChunk))
1239 					break;
1240 			}
1241 			metaChunk->firstFreeChunk = firstFree;
1242 		} else if (chunkIndex == (uint32)metaChunk->lastFreeChunk + 1) {
1243 			uint32 lastFree = chunkIndex;
1244 			for (; lastFree + 1 < metaChunk->chunkCount; lastFree++) {
1245 				Chunk* nextChunk = &metaChunk->chunks[lastFree + 1];
1246 				if (!_IsChunkFree(metaChunk, nextChunk))
1247 					break;
1248 			}
1249 			metaChunk->lastFreeChunk = lastFree;
1250 		}
1251 	}
1252 
1253 	PARANOID_CHECKS_ONLY(
1254 		if (!areaDeleted)
1255 			_CheckMetaChunk(metaChunk);
1256 	)
1257 }
1258 
1259 
1260 /*static*/ void
1261 MemoryManager::_PrepareMetaChunk(MetaChunk* metaChunk, size_t chunkSize)
1262 {
1263 	Area* area = metaChunk->GetArea();
1264 
1265 	if (metaChunk == area->metaChunks) {
1266 		// the first chunk is shorter
1267 		size_t unusableSize = ROUNDUP(SLAB_AREA_STRUCT_OFFSET + kAreaAdminSize,
1268 			chunkSize);
1269 		metaChunk->chunkBase = area->BaseAddress() + unusableSize;
1270 		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE - unusableSize;
1271 	}
1272 
1273 	metaChunk->chunkSize = chunkSize;
1274 	metaChunk->chunkCount = metaChunk->totalSize / chunkSize;
1275 	metaChunk->usedChunkCount = 0;
1276 
1277 	metaChunk->freeChunks = NULL;
1278 	for (int32 i = metaChunk->chunkCount - 1; i >= 0; i--)
1279 		_push(metaChunk->freeChunks, metaChunk->chunks + i);
1280 
1281 	metaChunk->firstFreeChunk = 0;
1282 	metaChunk->lastFreeChunk = metaChunk->chunkCount - 1;
1283 
1284 	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1285 }
1286 
1287 
1288 /*static*/ void
1289 MemoryManager::_AddArea(Area* area)
1290 {
1291 	T(AddArea(area));
1292 
1293 	// add the area to the hash table
1294 	WriteLocker writeLocker(sAreaTableLock);
1295 	sAreaTable.InsertUnchecked(area);
1296 	writeLocker.Unlock();
1297 
1298 	// add the area's meta chunks to the free lists
1299 	sFreeShortMetaChunks.Add(&area->metaChunks[0]);
1300 	for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++)
1301 		sFreeCompleteMetaChunks.Add(&area->metaChunks[i]);
1302 }
1303 
1304 
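/*!	Allocates a new area. During early boot the memory comes from
	vm_allocate_early(); afterwards a null area is created and only its
	administrative header is mapped. Called with sLock held; the lock is
	released while the area is set up.
*/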
1305 /*static*/ status_t
1306 MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
1307 {
1308 	TRACE("MemoryManager::_AllocateArea(%#" B_PRIx32 ")\n", flags);
1309 
1310 	ASSERT((flags & CACHE_DONT_LOCK_KERNEL_SPACE) == 0);
1311 
1312 	mutex_unlock(&sLock);
1313 
1314 	size_t pagesNeededToMap = 0;
1315 	void* areaBase;
1316 	Area* area;
1317 	VMArea* vmArea = NULL;
1318 
1319 	if (sKernelArgs == NULL) {
1320 		// create an area
1321 		uint32 areaCreationFlags = (flags & CACHE_PRIORITY_VIP) != 0
1322 			? CREATE_AREA_PRIORITY_VIP : 0;
1323 		area_id areaID = vm_create_null_area(B_SYSTEM_TEAM, kSlabAreaName,
1324 			&areaBase, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
1325 			areaCreationFlags);
1326 		if (areaID < 0) {
1327 			mutex_lock(&sLock);
1328 			return areaID;
1329 		}
1330 
1331 		area = _AreaForAddress((addr_t)areaBase);
1332 
1333 		// map the memory for the administrative structure
1334 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1335 		VMTranslationMap* translationMap = addressSpace->TranslationMap();
1336 
1337 		pagesNeededToMap = translationMap->MaxPagesNeededToMap(
1338 			(addr_t)area, (addr_t)areaBase + SLAB_AREA_SIZE - 1);
1339 
1340 		vmArea = VMAreaHash::Lookup(areaID);
1341 		status_t error = _MapChunk(vmArea, (addr_t)area, kAreaAdminSize,
1342 			pagesNeededToMap, flags);
1343 		if (error != B_OK) {
1344 			delete_area(areaID);
1345 			mutex_lock(&sLock);
1346 			return error;
1347 		}
1348 
1349 		dprintf("slab memory manager: created area %p (%" B_PRId32 ")\n", area,
1350 			areaID);
1351 	} else {
1352 		// no areas yet -- allocate raw memory
1353 		areaBase = (void*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
1354 			SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
1355 			SLAB_AREA_SIZE);
1356 		if (areaBase == NULL) {
1357 			mutex_lock(&sLock);
1358 			return B_NO_MEMORY;
1359 		}
1360 		area = _AreaForAddress((addr_t)areaBase);
1361 
1362 		TRACE("MemoryManager::_AllocateArea(): allocated early area %p\n",
1363 			area);
1364 	}
1365 
1366 	// init the area structure
1367 	area->vmArea = vmArea;
1368 	area->reserved_memory_for_mapping = pagesNeededToMap * B_PAGE_SIZE;
1369 	area->usedMetaChunkCount = 0;
1370 	area->fullyMapped = vmArea == NULL;
1371 
1372 	// init the meta chunks
1373 	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1374 		MetaChunk* metaChunk = area->metaChunks + i;
1375 		metaChunk->chunkSize = 0;
1376 		metaChunk->chunkBase = (addr_t)areaBase + i * SLAB_CHUNK_SIZE_LARGE;
1377 		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE;
1378 			// Note: chunkBase and totalSize aren't correct for the first
1379 			// meta chunk. They will be set in _PrepareMetaChunk().
1380 		metaChunk->chunkCount = 0;
1381 		metaChunk->usedChunkCount = 0;
1382 		metaChunk->freeChunks = NULL;
1383 	}
1384 
1385 	mutex_lock(&sLock);
1386 	_area = area;
1387 
1388 	T(AllocateArea(area, flags));
1389 
1390 	return B_OK;
1391 }
1392 
1393 
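/*!	Frees \a area. The area is kept in the free area reserve if the reserve is
	low, or if it cannot be deleted right now (early boot, or the kernel
	address space must not be locked). Called with sLock held.
*/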
1394 /*static*/ void
1395 MemoryManager::_FreeArea(Area* area, bool areaRemoved, uint32 flags)
1396 {
1397 	TRACE("MemoryManager::_FreeArea(%p, %#" B_PRIx32 ")\n", area, flags);
1398 
1399 	T(FreeArea(area, areaRemoved, flags));
1400 
1401 	ASSERT(area->usedMetaChunkCount == 0);
1402 
1403 	if (!areaRemoved) {
1404 		// remove the area's meta chunks from the free lists
1405 		ASSERT(area->metaChunks[0].usedChunkCount == 0);
1406 		sFreeShortMetaChunks.Remove(&area->metaChunks[0]);
1407 
1408 		for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1409 			ASSERT(area->metaChunks[i].usedChunkCount == 0);
1410 			sFreeCompleteMetaChunks.Remove(&area->metaChunks[i]);
1411 		}
1412 
1413 		// remove the area from the hash table
1414 		WriteLocker writeLocker(sAreaTableLock);
1415 		sAreaTable.RemoveUnchecked(area);
1416 		writeLocker.Unlock();
1417 	}
1418 
1419 	// We want to keep one or two free areas as a reserve.
1420 	if (sFreeAreaCount <= 1) {
1421 		_PushFreeArea(area);
1422 		return;
1423 	}
1424 
1425 	if (area->vmArea == NULL || (flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
1426 		// This is either early in the boot process or we aren't allowed to
1427 		// delete the area now.
1428 		_PushFreeArea(area);
1429 		_RequestMaintenance();
1430 		return;
1431 	}
1432 
1433 	mutex_unlock(&sLock);
1434 
1435 	dprintf("slab memory manager: deleting area %p (%" B_PRId32 ")\n", area,
1436 		area->vmArea->id);
1437 
1438 	size_t memoryToUnreserve = area->reserved_memory_for_mapping;
1439 	delete_area(area->vmArea->id);
1440 	vm_unreserve_memory(memoryToUnreserve);
1441 
1442 	mutex_lock(&sLock);
1443 }
1444 
1445 
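/*!	Maps \a size bytes starting at \a address within \a vmArea, reserving the
	required memory and pages and wiring the newly allocated pages. A NULL
	\a vmArea indicates an early area that is fully mapped already.
*/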
1446 /*static*/ status_t
1447 MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
1448 	size_t reserveAdditionalMemory, uint32 flags)
1449 {
1450 	TRACE("MemoryManager::_MapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
1451 		")\n", vmArea, address, size);
1452 
1453 	T(Map(address, size, flags));
1454 
1455 	if (vmArea == NULL) {
1456 		// everything is mapped anyway
1457 		return B_OK;
1458 	}
1459 
1460 	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1461 	VMTranslationMap* translationMap = addressSpace->TranslationMap();
1462 
1463 	// reserve memory for the chunk
1464 	int priority = (flags & CACHE_PRIORITY_VIP) != 0
1465 		? VM_PRIORITY_VIP : VM_PRIORITY_SYSTEM;
1466 	size_t reservedMemory = size + reserveAdditionalMemory;
1467 	status_t error = vm_try_reserve_memory(size, priority,
1468 		(flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? 0 : 1000000);
1469 	if (error != B_OK)
1470 		return error;
1471 
1472 	// reserve the pages we need now
1473 	size_t reservedPages = size / B_PAGE_SIZE
1474 		+ translationMap->MaxPagesNeededToMap(address, address + size - 1);
1475 	vm_page_reservation reservation;
1476 	if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0) {
1477 		if (!vm_page_try_reserve_pages(&reservation, reservedPages, priority)) {
1478 			vm_unreserve_memory(reservedMemory);
1479 			return B_WOULD_BLOCK;
1480 		}
1481 	} else
1482 		vm_page_reserve_pages(&reservation, reservedPages, priority);
1483 
1484 	VMCache* cache = vm_area_get_locked_cache(vmArea);
1485 
1486 	// map the pages
1487 	translationMap->Lock();
1488 
1489 	addr_t areaOffset = address - vmArea->Base();
1490 	addr_t endAreaOffset = areaOffset + size;
1491 	for (size_t offset = areaOffset; offset < endAreaOffset;
1492 			offset += B_PAGE_SIZE) {
1493 		vm_page* page = vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
1494 		cache->InsertPage(page, offset);
1495 
1496 		page->IncrementWiredCount();
1497 		atomic_add(&gMappedPagesCount, 1);
1498 		DEBUG_PAGE_ACCESS_END(page);
1499 
1500 		translationMap->Map(vmArea->Base() + offset,
1501 			page->physical_page_number * B_PAGE_SIZE,
1502 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
1503 			vmArea->MemoryType(), &reservation);
1504 	}
1505 
1506 	translationMap->Unlock();
1507 
1508 	cache->ReleaseRefAndUnlock();
1509 
1510 	vm_page_unreserve_pages(&reservation);
1511 
1512 	return B_OK;
1513 }
1514 
1515 
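/*!	Unmaps \a size bytes starting at \a address within \a vmArea, frees the
	backing pages, and unreserves the corresponding memory. Returns B_ERROR if
	\a vmArea is NULL.
*/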
1516 /*static*/ status_t
1517 MemoryManager::_UnmapChunk(VMArea* vmArea, addr_t address, size_t size,
1518 	uint32 flags)
1519 {
1520 	T(Unmap(address, size, flags));
1521 
1522 	if (vmArea == NULL)
1523 		return B_ERROR;
1524 
1525 	TRACE("MemoryManager::_UnmapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
1526 		")\n", vmArea, address, size);
1527 
1528 	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1529 	VMTranslationMap* translationMap = addressSpace->TranslationMap();
1530 	VMCache* cache = vm_area_get_locked_cache(vmArea);
1531 
1532 	// unmap the pages
1533 	translationMap->Lock();
1534 	translationMap->Unmap(address, address + size - 1);
1535 	atomic_add(&gMappedPagesCount, -(size / B_PAGE_SIZE));
1536 	translationMap->Unlock();
1537 
1538 	// free the pages
1539 	addr_t areaPageOffset = (address - vmArea->Base()) / B_PAGE_SIZE;
1540 	addr_t areaPageEndOffset = areaPageOffset + size / B_PAGE_SIZE;
1541 	VMCachePagesTree::Iterator it = cache->pages.GetIterator(
1542 		areaPageOffset, true, true);
1543 	while (vm_page* page = it.Next()) {
1544 		if (page->cache_offset >= areaPageEndOffset)
1545 			break;
1546 
1547 		DEBUG_PAGE_ACCESS_START(page);
1548 
1549 		page->DecrementWiredCount();
1550 
1551 		cache->RemovePage(page);
1552 			// the iterator is remove-safe
1553 		vm_page_free(cache, page);
1554 	}
1555 
1556 	cache->ReleaseRefAndUnlock();
1557 
1558 	vm_unreserve_memory(size);
1559 
1560 	return B_OK;
1561 }
1562 
1563 
1564 /*static*/ void
1565 MemoryManager::_UnmapFreeChunksEarly(Area* area)
1566 {
1567 	if (!area->fullyMapped)
1568 		return;
1569 
1570 	TRACE("MemoryManager::_UnmapFreeChunksEarly(%p)\n", area);
1571 
1572 	// unmap the space before the Area structure
1573 	#if SLAB_AREA_STRUCT_OFFSET > 0
1574 		_UnmapChunk(area->vmArea, area->BaseAddress(), SLAB_AREA_STRUCT_OFFSET,
1575 			0);
1576 	#endif
1577 
1578 	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1579 		MetaChunk* metaChunk = area->metaChunks + i;
1580 		if (metaChunk->chunkSize == 0) {
1581 			// meta chunk is free -- unmap it completely
1582 			if (i == 0) {
1583 				_UnmapChunk(area->vmArea, (addr_t)area + kAreaAdminSize,
1584 					SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize, 0);
1585 			} else {
1586 				_UnmapChunk(area->vmArea,
1587 					area->BaseAddress() + i * SLAB_CHUNK_SIZE_LARGE,
1588 					SLAB_CHUNK_SIZE_LARGE, 0);
1589 			}
1590 		} else {
1591 			// unmap free chunks
1592 			for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
1593 					chunk = chunk->next) {
1594 				_UnmapChunk(area->vmArea, _ChunkAddress(metaChunk, chunk),
1595 					metaChunk->chunkSize, 0);
1596 			}
1597 
1598 			// The first meta chunk might have space before its first chunk.
1599 			if (i == 0) {
1600 				addr_t unusedStart = (addr_t)area + kAreaAdminSize;
1601 				if (unusedStart < metaChunk->chunkBase) {
1602 					_UnmapChunk(area->vmArea, unusedStart,
1603 						metaChunk->chunkBase - unusedStart, 0);
1604 				}
1605 			}
1606 		}
1607 	}
1608 
1609 	area->fullyMapped = false;
1610 }
1611 
1612 
1613 /*static*/ void
1614 MemoryManager::_ConvertEarlyArea(Area* area)
1615 {
1616 	void* address = (void*)area->BaseAddress();
1617 	area_id areaID = create_area(kSlabAreaName, &address, B_EXACT_ADDRESS,
1618 		SLAB_AREA_SIZE, B_ALREADY_WIRED,
1619 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1620 	if (areaID < 0)
1621 		panic("out of memory");
1622 
1623 	area->vmArea = VMAreaHash::Lookup(areaID);
1624 }
1625 
1626 
1627 /*static*/ void
1628 MemoryManager::_RequestMaintenance()
1629 {
1630 	if ((sFreeAreaCount > 0 && sFreeAreaCount <= 2) || sMaintenanceNeeded)
1631 		return;
1632 
1633 	sMaintenanceNeeded = true;
1634 	request_memory_manager_maintenance();
1635 }
1636 
1637 
1638 /*static*/ bool
1639 MemoryManager::_IsChunkInFreeList(const MetaChunk* metaChunk,
1640 	const Chunk* chunk)
1641 {
1642 	Chunk* freeChunk = metaChunk->freeChunks;
1643 	while (freeChunk != NULL) {
1644 		if (freeChunk == chunk)
1645 			return true;
1646 		freeChunk = freeChunk->next;
1647 	}
1648 
1649 	return false;
1650 }
1651 
1652 
1653 #if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
1654 
1655 /*static*/ void
1656 MemoryManager::_CheckMetaChunk(MetaChunk* metaChunk)
1657 {
1658 	Area* area = metaChunk->GetArea();
1659 	int32 metaChunkIndex = metaChunk - area->metaChunks;
1660 	if (metaChunkIndex < 0 || metaChunkIndex >= SLAB_META_CHUNKS_PER_AREA) {
1661 		panic("invalid meta chunk %p!", metaChunk);
1662 		return;
1663 	}
1664 
1665 	switch (metaChunk->chunkSize) {
1666 		case 0:
1667 			// unused
1668 			return;
1669 		case SLAB_CHUNK_SIZE_SMALL:
1670 		case SLAB_CHUNK_SIZE_MEDIUM:
1671 		case SLAB_CHUNK_SIZE_LARGE:
1672 			break;
1673 		default:
1674 			panic("meta chunk %p has invalid chunk size: %" B_PRIuSIZE,
1675 				metaChunk, metaChunk->chunkSize);
1676 			return;
1677 	}
1678 
1679 	if (metaChunk->totalSize > SLAB_CHUNK_SIZE_LARGE) {
1680 		panic("meta chunk %p has invalid total size: %" B_PRIuSIZE,
1681 			metaChunk, metaChunk->totalSize);
1682 		return;
1683 	}
1684 
1685 	addr_t expectedBase = area->BaseAddress()
1686 		+ metaChunkIndex * SLAB_CHUNK_SIZE_LARGE;
1687 	if (metaChunk->chunkBase < expectedBase
1688 		|| metaChunk->chunkBase - expectedBase + metaChunk->totalSize
1689 			> SLAB_CHUNK_SIZE_LARGE) {
1690 		panic("meta chunk %p has invalid base address: %" B_PRIxADDR, metaChunk,
1691 			metaChunk->chunkBase);
1692 		return;
1693 	}
1694 
1695 	if (metaChunk->chunkCount != metaChunk->totalSize / metaChunk->chunkSize) {
1696 		panic("meta chunk %p has invalid chunk count: %u", metaChunk,
1697 			metaChunk->chunkCount);
1698 		return;
1699 	}
1700 
1701 	if (metaChunk->usedChunkCount > metaChunk->chunkCount) {
1702 		panic("meta chunk %p has invalid used chunk count: %u", metaChunk,
1703 			metaChunk->usedChunkCount);
1704 		return;
1705 	}
1706 
1707 	if (metaChunk->firstFreeChunk > metaChunk->chunkCount) {
1708 		panic("meta chunk %p has invalid first free chunk: %u", metaChunk,
1709 			metaChunk->firstFreeChunk);
1710 		return;
1711 	}
1712 
1713 	if (metaChunk->lastFreeChunk >= metaChunk->chunkCount) {
1714 		panic("meta chunk %p has invalid last free chunk: %u", metaChunk,
1715 			metaChunk->lastFreeChunk);
1716 		return;
1717 	}
1718 
1719 	// check free list for structural sanity
1720 	uint32 freeChunks = 0;
1721 	for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
1722 			chunk = chunk->next) {
1723 		if ((addr_t)chunk % sizeof(Chunk) != 0 || chunk < metaChunk->chunks
1724 			|| chunk >= metaChunk->chunks + metaChunk->chunkCount) {
1725 			panic("meta chunk %p has invalid element in free list, chunk: %p",
1726 				metaChunk, chunk);
1727 			return;
1728 		}
1729 
1730 		if (++freeChunks > metaChunk->chunkCount) {
1731 			panic("meta chunk %p has cyclic free list", metaChunk);
1732 			return;
1733 		}
1734 	}
1735 
1736 	if (freeChunks + metaChunk->usedChunkCount > metaChunk->chunkCount) {
1737 		panic("meta chunk %p has mismatching free/used chunk counts: total: "
1738 			"%u, used: %u, free: %" B_PRIu32, metaChunk, metaChunk->chunkCount,
1739 			metaChunk->usedChunkCount, freeChunks);
1740 		return;
1741 	}
1742 
1743 	// count used chunks by looking at their reference/next field
1744 	uint32 usedChunks = 0;
1745 	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
1746 		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i))
1747 			usedChunks++;
1748 	}
1749 
1750 	if (usedChunks != metaChunk->usedChunkCount) {
1751 		panic("meta chunk %p has used chunks that appear free: total: "
1752 			"%u, used: %u, appearing used: %" B_PRIu32, metaChunk,
1753 			metaChunk->chunkCount, metaChunk->usedChunkCount, usedChunks);
1754 		return;
1755 	}
1756 
1757 	// check free range
1758 	for (uint32 i = metaChunk->firstFreeChunk; i < metaChunk->lastFreeChunk;
1759 			i++) {
1760 		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i)) {
1761 			panic("meta chunk %p has used chunk in free range, chunk: %p (%"
1762 				B_PRIu32 ", free range: %u - %u)", metaChunk,
1763 				metaChunk->chunks + i, i, metaChunk->firstFreeChunk,
1764 				metaChunk->lastFreeChunk);
1765 			return;
1766 		}
1767 	}
1768 }
1769 
1770 #endif	// DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
1771 
1772 
1773 /*static*/ int
1774 MemoryManager::_DumpRawAllocations(int argc, char** argv)
1775 {
1776 	kprintf("%-*s    meta chunk  chunk  %-*s    size (KB)\n",
1777 		B_PRINTF_POINTER_WIDTH, "area", B_PRINTF_POINTER_WIDTH, "base");
1778 
1779 	size_t totalSize = 0;
1780 
1781 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
1782 			Area* area = it.Next();) {
1783 		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1784 			MetaChunk* metaChunk = area->metaChunks + i;
1785 			if (metaChunk->chunkSize == 0)
1786 				continue;
1787 			for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
1788 				Chunk* chunk = metaChunk->chunks + k;
1789 
1790 				// skip free chunks
1791 				if (_IsChunkFree(metaChunk, chunk))
1792 					continue;
1793 
1794 				addr_t reference = chunk->reference;
1795 				if ((reference & 1) == 0 || reference == 1)
1796 					continue;
1797 
1798 				addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
1799 				size_t size = reference - chunkAddress + 1;
1800 				totalSize += size;
1801 
1802 				kprintf("%p  %10" B_PRId32 "  %5" B_PRIu32 "  %p  %9"
1803 					B_PRIuSIZE "\n", area, i, k, (void*)chunkAddress,
1804 					size / 1024);
1805 			}
1806 		}
1807 	}
1808 
1809 	kprintf("total:%*s%9" B_PRIuSIZE "\n", (2 * B_PRINTF_POINTER_WIDTH) + 21,
1810 		"", totalSize / 1024);
1811 
1812 	return 0;
1813 }
1814 
1815 
1816 /*static*/ void
1817 MemoryManager::_PrintMetaChunkTableHeader(bool printChunks)
1818 {
1819 	if (printChunks)
1820 		kprintf("chunk        base       cache  object size  cache name\n");
1821 	else
1822 		kprintf("chunk        base\n");
1823 }
1824 
1825 

1826 MemoryManager::_DumpMetaChunk(MetaChunk* metaChunk, bool printChunks,
1827 	bool printHeader)
1828 {
1829 	if (printHeader)
1830 		_PrintMetaChunkTableHeader(printChunks);
1831 
1832 	const char* type = "empty";
1833 	if (metaChunk->chunkSize != 0) {
1834 		switch (metaChunk->chunkSize) {
1835 			case SLAB_CHUNK_SIZE_SMALL:
1836 				type = "small";
1837 				break;
1838 			case SLAB_CHUNK_SIZE_MEDIUM:
1839 				type = "medium";
1840 				break;
1841 			case SLAB_CHUNK_SIZE_LARGE:
1842 				type = "large";
1843 				break;
1844 		}
1845 	}
1846 
1847 	int metaChunkIndex = metaChunk - metaChunk->GetArea()->metaChunks;
1848 	kprintf("%5d  %p  --- %6s meta chunk", metaChunkIndex,
1849 		(void*)metaChunk->chunkBase, type);
1850 	if (metaChunk->chunkSize != 0) {
1851 		kprintf(": %4u/%4u used, %-4u-%4u free ------------\n",
1852 			metaChunk->usedChunkCount, metaChunk->chunkCount,
1853 			metaChunk->firstFreeChunk, metaChunk->lastFreeChunk);
1854 	} else
1855 		kprintf(" --------------------------------------------\n");
1856 
1857 	if (metaChunk->chunkSize == 0 || !printChunks)
1858 		return;
1859 
1860 	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
1861 		Chunk* chunk = metaChunk->chunks + i;
1862 
1863 		// skip free chunks
1864 		if (_IsChunkFree(metaChunk, chunk)) {
1865 			if (!_IsChunkInFreeList(metaChunk, chunk)) {
1866 				kprintf("%5" B_PRIu32 "  %p  appears free, but isn't in free "
1867 					"list!\n", i, (void*)_ChunkAddress(metaChunk, chunk));
1868 			}
1869 
1870 			continue;
1871 		}
1872 
1873 		addr_t reference = chunk->reference;
1874 		if ((reference & 1) == 0) {
1875 			ObjectCache* cache = (ObjectCache*)reference;
1876 			kprintf("%5" B_PRIu32 "  %p  %p  %11" B_PRIuSIZE "  %s\n", i,
1877 				(void*)_ChunkAddress(metaChunk, chunk), cache,
1878 				cache != NULL ? cache->object_size : 0,
1879 				cache != NULL ? cache->name : "");
1880 		} else if (reference != 1) {
1881 			kprintf("%5" B_PRIu32 "  %p  raw allocation up to %p\n", i,
1882 				(void*)_ChunkAddress(metaChunk, chunk), (void*)reference);
1883 		}
1884 	}
1885 }
1886 
1887 
1888 /*static*/ int
1889 MemoryManager::_DumpMetaChunk(int argc, char** argv)
1890 {
1891 	if (argc != 2) {
1892 		print_debugger_command_usage(argv[0]);
1893 		return 0;
1894 	}
1895 
1896 	uint64 address;
1897 	if (!evaluate_debug_expression(argv[1], &address, false))
1898 		return 0;
1899 
1900 	Area* area = _AreaForAddress(address);
1901 
1902 	MetaChunk* metaChunk;
1903 	if ((addr_t)address >= (addr_t)area->metaChunks
1904 		&& (addr_t)address
1905 			< (addr_t)(area->metaChunks + SLAB_META_CHUNKS_PER_AREA)) {
1906 		metaChunk = (MetaChunk*)(addr_t)address;
1907 	} else {
1908 		metaChunk = area->metaChunks
1909 			+ (address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE;
1910 	}
1911 
1912 	_DumpMetaChunk(metaChunk, true, true);
1913 
1914 	return 0;
1915 }
1916 
1917 
1918 /*static*/ void
1919 MemoryManager::_DumpMetaChunks(const char* name, MetaChunkList& metaChunkList,
1920 	bool printChunks)
1921 {
1922 	kprintf("%s:\n", name);
1923 
1924 	for (MetaChunkList::Iterator it = metaChunkList.GetIterator();
1925 			MetaChunk* metaChunk = it.Next();) {
1926 		_DumpMetaChunk(metaChunk, printChunks, false);
1927 	}
1928 }
1929 
1930 
1931 /*static*/ int
1932 MemoryManager::_DumpMetaChunks(int argc, char** argv)
1933 {
1934 	bool printChunks = argc > 1 && strcmp(argv[1], "-c") == 0;
1935 
1936 	_PrintMetaChunkTableHeader(printChunks);
1937 	_DumpMetaChunks("free complete", sFreeCompleteMetaChunks, printChunks);
1938 	_DumpMetaChunks("free short", sFreeShortMetaChunks, printChunks);
1939 	_DumpMetaChunks("partial small", sPartialMetaChunksSmall, printChunks);
1940 	_DumpMetaChunks("partial medium", sPartialMetaChunksMedium, printChunks);
1941 
1942 	return 0;
1943 }
1944 
1945 
1946 /*static*/ int
1947 MemoryManager::_DumpArea(int argc, char** argv)
1948 {
1949 	bool printChunks = false;
1950 
1951 	int argi = 1;
1952 	while (argi < argc) {
1953 		if (argv[argi][0] != '-')
1954 			break;
1955 		const char* arg = argv[argi++];
1956 		if (strcmp(arg, "-c") == 0) {
1957 			printChunks = true;
1958 		} else {
1959 			print_debugger_command_usage(argv[0]);
1960 			return 0;
1961 		}
1962 	}
1963 
1964 	if (argi + 1 != argc) {
1965 		print_debugger_command_usage(argv[0]);
1966 		return 0;
1967 	}
1968 
1969 	uint64 address;
1970 	if (!evaluate_debug_expression(argv[argi], &address, false))
1971 		return 0;
1972 
1973 	Area* area = _AreaForAddress((addr_t)address);
1974 
1975 	for (uint32 k = 0; k < SLAB_META_CHUNKS_PER_AREA; k++) {
1976 		MetaChunk* metaChunk = area->metaChunks + k;
1977 		_DumpMetaChunk(metaChunk, printChunks, k == 0);
1978 	}
1979 
1980 	return 0;
1981 }
1982 
1983 
1984 /*static*/ int
1985 MemoryManager::_DumpAreas(int argc, char** argv)
1986 {
1987 	kprintf("  %*s    %*s   meta      small   medium  large\n",
1988 		B_PRINTF_POINTER_WIDTH, "base", B_PRINTF_POINTER_WIDTH, "area");
1989 
1990 	size_t totalTotalSmall = 0;
1991 	size_t totalUsedSmall = 0;
1992 	size_t totalTotalMedium = 0;
1993 	size_t totalUsedMedium = 0;
1994 	size_t totalUsedLarge = 0;
1995 	uint32 areaCount = 0;
1996 
1997 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
1998 			Area* area = it.Next();) {
1999 		areaCount++;
2000 
2001 		// sum up the free/used counts for the chunk sizes
2002 		int totalSmall = 0;
2003 		int usedSmall = 0;
2004 		int totalMedium = 0;
2005 		int usedMedium = 0;
2006 		int usedLarge = 0;
2007 
2008 		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
2009 			MetaChunk* metaChunk = area->metaChunks + i;
2010 			if (metaChunk->chunkSize == 0)
2011 				continue;
2012 
2013 			switch (metaChunk->chunkSize) {
2014 				case SLAB_CHUNK_SIZE_SMALL:
2015 					totalSmall += metaChunk->chunkCount;
2016 					usedSmall += metaChunk->usedChunkCount;
2017 					break;
2018 				case SLAB_CHUNK_SIZE_MEDIUM:
2019 					totalMedium += metaChunk->chunkCount;
2020 					usedMedium += metaChunk->usedChunkCount;
2021 					break;
2022 				case SLAB_CHUNK_SIZE_LARGE:
2023 					usedLarge += metaChunk->usedChunkCount;
2024 					break;
2025 			}
2026 		}
2027 
2028 		kprintf("%p  %p  %2u/%2u  %4d/%4d  %3d/%3d  %5d\n",
2029 			area, area->vmArea, area->usedMetaChunkCount,
2030 			SLAB_META_CHUNKS_PER_AREA, usedSmall, totalSmall, usedMedium,
2031 			totalMedium, usedLarge);
2032 
2033 		totalTotalSmall += totalSmall;
2034 		totalUsedSmall += usedSmall;
2035 		totalTotalMedium += totalMedium;
2036 		totalUsedMedium += usedMedium;
2037 		totalUsedLarge += usedLarge;
2038 	}
2039 
2040 	kprintf("%d free area%s:\n", sFreeAreaCount,
2041 		sFreeAreaCount == 1 ? "" : "s");
2042 	for (Area* area = sFreeAreas; area != NULL; area = area->next) {
2043 		areaCount++;
2044 		kprintf("%p  %p\n", area, area->vmArea);
2045 	}
2046 
2047 	kprintf("total usage:\n");
2048 	kprintf("  small:    %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedSmall,
2049 		totalTotalSmall);
2050 	kprintf("  medium:   %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedMedium,
2051 		totalTotalMedium);
2052 	kprintf("  large:    %" B_PRIuSIZE "\n", totalUsedLarge);
2053 	kprintf("  memory:   %" B_PRIuSIZE "/%" B_PRIu32 " KB\n",
2054 		(totalUsedSmall * SLAB_CHUNK_SIZE_SMALL
2055 			+ totalUsedMedium * SLAB_CHUNK_SIZE_MEDIUM
2056 			+ totalUsedLarge * SLAB_CHUNK_SIZE_LARGE) / 1024,
2057 		areaCount * SLAB_AREA_SIZE / 1024);
2058 	kprintf("  overhead: %" B_PRIuSIZE " KB\n",
2059 		areaCount * kAreaAdminSize / 1024);
2060 
2061 	return 0;
2062 }
2063 
2064 
2065 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
2066 
2067 void
2068 MemoryManager::_AddTrackingInfo(void* allocation, size_t size,
2069 	AbstractTraceEntryWithStackTrace* traceEntry)
2070 {
2071 	_TrackingInfoFor(allocation, size)->Init(traceEntry);
2072 }
2073 
2074 #endif // SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
2075 
2076 
2077 RANGE_MARKER_FUNCTION_END(SlabMemoryManager)
2078