xref: /haiku/src/system/kernel/slab/MemoryManager.cpp (revision 7a74a5df454197933bc6e80a542102362ee98703)
1 /*
2  * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include "MemoryManager.h"
8 
9 #include <algorithm>
10 
11 #include <debug.h>
12 #include <tracing.h>
13 #include <util/AutoLock.h>
14 #include <vm/vm.h>
15 #include <vm/vm_page.h>
16 #include <vm/vm_priv.h>
17 #include <vm/VMAddressSpace.h>
18 #include <vm/VMArea.h>
19 #include <vm/VMCache.h>
20 #include <vm/VMTranslationMap.h>
21 
22 #include "kernel_debug_config.h"
23 
24 #include "ObjectCache.h"
25 #include "slab_private.h"
26 
27 
28 //#define TRACE_MEMORY_MANAGER
29 #ifdef TRACE_MEMORY_MANAGER
30 #	define TRACE(x...)	dprintf(x)
31 #else
32 #	define TRACE(x...)	do {} while (false)
33 #endif
34 
35 #if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
36 #	define PARANOID_CHECKS_ONLY(x)	x
37 #else
38 #	define PARANOID_CHECKS_ONLY(x)
39 #endif
40 
41 
42 static const char* const kSlabAreaName = "slab area";
43 
44 static void* sAreaTableBuffer[1024];
45 
46 mutex MemoryManager::sLock;
47 rw_lock MemoryManager::sAreaTableLock;
48 kernel_args* MemoryManager::sKernelArgs;
49 MemoryManager::AreaTable MemoryManager::sAreaTable;
50 MemoryManager::Area* MemoryManager::sFreeAreas;
51 int MemoryManager::sFreeAreaCount;
52 MemoryManager::MetaChunkList MemoryManager::sFreeCompleteMetaChunks;
53 MemoryManager::MetaChunkList MemoryManager::sFreeShortMetaChunks;
54 MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksSmall;
55 MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksMedium;
56 MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryCanWait;
57 MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryDontWait;
58 bool MemoryManager::sMaintenanceNeeded;
59 
60 
61 RANGE_MARKER_FUNCTION_BEGIN(SlabMemoryManager)
62 
63 
64 // #pragma mark - kernel tracing
65 
66 
67 #if SLAB_MEMORY_MANAGER_TRACING
68 
69 
70 //namespace SlabMemoryManagerCacheTracing {
71 struct MemoryManager::Tracing {
72 
73 class MemoryManagerTraceEntry
74 	: public TRACE_ENTRY_SELECTOR(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE) {
75 public:
76 	MemoryManagerTraceEntry()
77 		:
78 		TraceEntryBase(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE, 0, true)
79 	{
80 	}
81 };
82 
83 
84 class Allocate : public MemoryManagerTraceEntry {
85 public:
86 	Allocate(ObjectCache* cache, uint32 flags)
87 		:
88 		MemoryManagerTraceEntry(),
89 		fCache(cache),
90 		fFlags(flags)
91 	{
92 		Initialized();
93 	}
94 
95 	virtual void AddDump(TraceOutput& out)
96 	{
97 		out.Print("slab memory manager alloc: cache: %p, flags: %#" B_PRIx32,
98 			fCache, fFlags);
99 	}
100 
101 private:
102 	ObjectCache*	fCache;
103 	uint32			fFlags;
104 };
105 
106 
107 class Free : public MemoryManagerTraceEntry {
108 public:
109 	Free(void* address, uint32 flags)
110 		:
111 		MemoryManagerTraceEntry(),
112 		fAddress(address),
113 		fFlags(flags)
114 	{
115 		Initialized();
116 	}
117 
118 	virtual void AddDump(TraceOutput& out)
119 	{
120 		out.Print("slab memory manager free: address: %p, flags: %#" B_PRIx32,
121 			fAddress, fFlags);
122 	}
123 
124 private:
125 	void*	fAddress;
126 	uint32	fFlags;
127 };
128 
129 
130 class AllocateRaw : public MemoryManagerTraceEntry {
131 public:
132 	AllocateRaw(size_t size, uint32 flags)
133 		:
134 		MemoryManagerTraceEntry(),
135 		fSize(size),
136 		fFlags(flags)
137 	{
138 		Initialized();
139 	}
140 
141 	virtual void AddDump(TraceOutput& out)
142 	{
143 		out.Print("slab memory manager alloc raw: size: %" B_PRIuSIZE
144 			", flags: %#" B_PRIx32, fSize, fFlags);
145 	}
146 
147 private:
148 	size_t	fSize;
149 	uint32	fFlags;
150 };
151 
152 
153 class FreeRawOrReturnCache : public MemoryManagerTraceEntry {
154 public:
155 	FreeRawOrReturnCache(void* address, uint32 flags)
156 		:
157 		MemoryManagerTraceEntry(),
158 		fAddress(address),
159 		fFlags(flags)
160 	{
161 		Initialized();
162 	}
163 
164 	virtual void AddDump(TraceOutput& out)
165 	{
166 		out.Print("slab memory manager free raw/return: address: %p, flags: %#"
167 			B_PRIx32, fAddress, fFlags);
168 	}
169 
170 private:
171 	void*	fAddress;
172 	uint32	fFlags;
173 };
174 
175 
176 class AllocateArea : public MemoryManagerTraceEntry {
177 public:
178 	AllocateArea(Area* area, uint32 flags)
179 		:
180 		MemoryManagerTraceEntry(),
181 		fArea(area),
182 		fFlags(flags)
183 	{
184 		Initialized();
185 	}
186 
187 	virtual void AddDump(TraceOutput& out)
188 	{
189 		out.Print("slab memory manager alloc area: flags: %#" B_PRIx32
190 			" -> %p", fFlags, fArea);
191 	}
192 
193 private:
194 	Area*	fArea;
195 	uint32	fFlags;
196 };
197 
198 
199 class AddArea : public MemoryManagerTraceEntry {
200 public:
201 	AddArea(Area* area)
202 		:
203 		MemoryManagerTraceEntry(),
204 		fArea(area)
205 	{
206 		Initialized();
207 	}
208 
209 	virtual void AddDump(TraceOutput& out)
210 	{
211 		out.Print("slab memory manager add area: %p", fArea);
212 	}
213 
214 private:
215 	Area*	fArea;
216 };
217 
218 
219 class FreeArea : public MemoryManagerTraceEntry {
220 public:
221 	FreeArea(Area* area, bool areaRemoved, uint32 flags)
222 		:
223 		MemoryManagerTraceEntry(),
224 		fArea(area),
225 		fFlags(flags),
226 		fRemoved(areaRemoved)
227 	{
228 		Initialized();
229 	}
230 
231 	virtual void AddDump(TraceOutput& out)
232 	{
233 		out.Print("slab memory manager free area: %p%s, flags: %#" B_PRIx32,
234 			fArea, fRemoved ? " (removed)" : "", fFlags);
235 	}
236 
237 private:
238 	Area*	fArea;
239 	uint32	fFlags;
240 	bool	fRemoved;
241 };
242 
243 
244 class AllocateMetaChunk : public MemoryManagerTraceEntry {
245 public:
246 	AllocateMetaChunk(MetaChunk* metaChunk)
247 		:
248 		MemoryManagerTraceEntry(),
249 		fMetaChunk(metaChunk->chunkBase)
250 	{
251 		Initialized();
252 	}
253 
254 	virtual void AddDump(TraceOutput& out)
255 	{
256 		out.Print("slab memory manager alloc meta chunk: %#" B_PRIxADDR,
257 			fMetaChunk);
258 	}
259 
260 private:
261 	addr_t	fMetaChunk;
262 };
263 
264 
265 class FreeMetaChunk : public MemoryManagerTraceEntry {
266 public:
267 	FreeMetaChunk(MetaChunk* metaChunk)
268 		:
269 		MemoryManagerTraceEntry(),
270 		fMetaChunk(metaChunk->chunkBase)
271 	{
272 		Initialized();
273 	}
274 
275 	virtual void AddDump(TraceOutput& out)
276 	{
277 		out.Print("slab memory manager free meta chunk: %#" B_PRIxADDR,
278 			fMetaChunk);
279 	}
280 
281 private:
282 	addr_t	fMetaChunk;
283 };
284 
285 
286 class AllocateChunk : public MemoryManagerTraceEntry {
287 public:
288 	AllocateChunk(size_t chunkSize, MetaChunk* metaChunk, Chunk* chunk)
289 		:
290 		MemoryManagerTraceEntry(),
291 		fChunkSize(chunkSize),
292 		fMetaChunk(metaChunk->chunkBase),
293 		fChunk(chunk - metaChunk->chunks)
294 	{
295 		Initialized();
296 	}
297 
298 	virtual void AddDump(TraceOutput& out)
299 	{
300 		out.Print("slab memory manager alloc chunk: size: %" B_PRIuSIZE
301 			" -> meta chunk: %#" B_PRIxADDR ", chunk: %" B_PRIu32, fChunkSize,
302 			fMetaChunk, fChunk);
303 	}
304 
305 private:
306 	size_t	fChunkSize;
307 	addr_t	fMetaChunk;
308 	uint32	fChunk;
309 };
310 
311 
312 class AllocateChunks : public MemoryManagerTraceEntry {
313 public:
314 	AllocateChunks(size_t chunkSize, uint32 chunkCount, MetaChunk* metaChunk,
315 		Chunk* chunk)
316 		:
317 		MemoryManagerTraceEntry(),
318 		fMetaChunk(metaChunk->chunkBase),
319 		fChunkSize(chunkSize),
320 		fChunkCount(chunkCount),
321 		fChunk(chunk - metaChunk->chunks)
322 	{
323 		Initialized();
324 	}
325 
326 	virtual void AddDump(TraceOutput& out)
327 	{
328 		out.Print("slab memory manager alloc chunks: size: %" B_PRIuSIZE
329 			", count %" B_PRIu32 " -> meta chunk: %#" B_PRIxADDR ", chunk: %"
330 			B_PRIu32, fChunkSize, fChunkCount, fMetaChunk, fChunk);
331 	}
332 
333 private:
334 	addr_t	fMetaChunk;
335 	size_t	fChunkSize;
336 	uint32	fChunkCount;
337 	uint32	fChunk;
338 };
339 
340 
341 class FreeChunk : public MemoryManagerTraceEntry {
342 public:
343 	FreeChunk(MetaChunk* metaChunk, Chunk* chunk)
344 		:
345 		MemoryManagerTraceEntry(),
346 		fMetaChunk(metaChunk->chunkBase),
347 		fChunk(chunk - metaChunk->chunks)
348 	{
349 		Initialized();
350 	}
351 
352 	virtual void AddDump(TraceOutput& out)
353 	{
354 		out.Print("slab memory manager free chunk: meta chunk: %#" B_PRIxADDR
355 			", chunk: %" B_PRIu32, fMetaChunk, fChunk);
356 	}
357 
358 private:
359 	addr_t	fMetaChunk;
360 	uint32	fChunk;
361 };
362 
363 
364 class Map : public MemoryManagerTraceEntry {
365 public:
366 	Map(addr_t address, size_t size, uint32 flags)
367 		:
368 		MemoryManagerTraceEntry(),
369 		fAddress(address),
370 		fSize(size),
371 		fFlags(flags)
372 	{
373 		Initialized();
374 	}
375 
376 	virtual void AddDump(TraceOutput& out)
377 	{
378 		out.Print("slab memory manager map: %#" B_PRIxADDR ", size: %"
379 			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
380 	}
381 
382 private:
383 	addr_t	fAddress;
384 	size_t	fSize;
385 	uint32	fFlags;
386 };
387 
388 
389 class Unmap : public MemoryManagerTraceEntry {
390 public:
391 	Unmap(addr_t address, size_t size, uint32 flags)
392 		:
393 		MemoryManagerTraceEntry(),
394 		fAddress(address),
395 		fSize(size),
396 		fFlags(flags)
397 	{
398 		Initialized();
399 	}
400 
401 	virtual void AddDump(TraceOutput& out)
402 	{
403 		out.Print("slab memory manager unmap: %#" B_PRIxADDR ", size: %"
404 			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
405 	}
406 
407 private:
408 	addr_t	fAddress;
409 	size_t	fSize;
410 	uint32	fFlags;
411 };
412 
413 
414 //}	// namespace SlabMemoryManagerCacheTracing
415 };	// struct MemoryManager::Tracing
416 
417 
418 //#	define T(x)	new(std::nothrow) SlabMemoryManagerCacheTracing::x
419 #	define T(x)	new(std::nothrow) MemoryManager::Tracing::x
420 
421 #else
422 #	define T(x)
423 #endif	// SLAB_MEMORY_MANAGER_TRACING
424 
425 
426 // #pragma mark - MemoryManager
427 
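// Layout note (a summary of the definitions in MemoryManager.h as used
// below): memory is managed in areas of SLAB_AREA_SIZE bytes, each divided
// into SLAB_META_CHUNKS_PER_AREA meta chunks of SLAB_CHUNK_SIZE_LARGE bytes,
// which are in turn carved into small, medium, or large chunks. A chunk's
// "reference" field encodes its state: an even value is the owning
// ObjectCache pointer, 1 marks a chunk that has been allocated but not yet
// attributed, and any other odd value is the address of the last byte of a
// raw (cache-less) allocation.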
428 
429 /*static*/ void
430 MemoryManager::Init(kernel_args* args)
431 {
432 	mutex_init(&sLock, "slab memory manager");
433 	rw_lock_init(&sAreaTableLock, "slab memory manager area table");
434 	sKernelArgs = args;
435 
436 	new(&sFreeCompleteMetaChunks) MetaChunkList;
437 	new(&sFreeShortMetaChunks) MetaChunkList;
438 	new(&sPartialMetaChunksSmall) MetaChunkList;
439 	new(&sPartialMetaChunksMedium) MetaChunkList;
440 
441 	new(&sAreaTable) AreaTable;
442 	sAreaTable.Resize(sAreaTableBuffer, sizeof(sAreaTableBuffer), true);
443 		// A bit hacky: The table now owns the memory. Since we never resize or
444 		// free it, that's not a problem, though.
445 
446 	sFreeAreas = NULL;
447 	sFreeAreaCount = 0;
448 	sMaintenanceNeeded = false;
449 }
450 
451 
452 /*static*/ void
453 MemoryManager::InitPostArea()
454 {
455 	sKernelArgs = NULL;
456 
457 	// Convert all early areas into actual VM areas. This loop might look a bit
458 	// weird, but it is necessary, since creating an actual area involves memory
459 	// allocations, which in turn can change the situation.
460 	bool done;
461 	do {
462 		done = true;
463 
464 		for (AreaTable::Iterator it = sAreaTable.GetIterator();
465 				Area* area = it.Next();) {
466 			if (area->vmArea == NULL) {
467 				_ConvertEarlyArea(area);
468 				done = false;
469 				break;
470 			}
471 		}
472 	} while (!done);
473 
474 	// unmap and free unused pages
475 	if (sFreeAreas != NULL) {
476 		// Just "leak" all but the first of the free areas -- the VM will
477 		// automatically free all unclaimed memory.
478 		sFreeAreas->next = NULL;
479 		sFreeAreaCount = 1;
480 
481 		Area* area = sFreeAreas;
482 		_ConvertEarlyArea(area);
483 		_UnmapFreeChunksEarly(area);
484 	}
485 
486 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
487 			Area* area = it.Next();) {
488 		_UnmapFreeChunksEarly(area);
489 	}
490 
491 	sMaintenanceNeeded = true;
492 		// might not be necessary, but it does no harm
493 
494 	add_debugger_command_etc("slab_area", &_DumpArea,
495 		"Dump information on a given slab area",
496 		"[ -c ] <area>\n"
497 		"Dump information on a given slab area specified by its base "
498 			"address.\n"
499 		"If \"-c\" is given, the chunks of all meta chunks are printed as "
500 			"well.\n", 0);
501 	add_debugger_command_etc("slab_areas", &_DumpAreas,
502 		"List all slab areas",
503 		"\n"
504 		"Lists all slab areas.\n", 0);
505 	add_debugger_command_etc("slab_meta_chunk", &_DumpMetaChunk,
506 		"Dump information on a given slab meta chunk",
507 		"<meta chunk>\n"
508 		"Dump information on a given slab meta chunk specified by its base "
509 			"or object address.\n", 0);
510 	add_debugger_command_etc("slab_meta_chunks", &_DumpMetaChunks,
511 		"List all non-full slab meta chunks",
512 		"[ -c ]\n"
513 		"Lists all non-full slab meta chunks.\n"
514 		"If \"-c\" is given, the chunks of all meta chunks are printed as "
515 			"well.\n", 0);
516 	add_debugger_command_etc("slab_raw_allocations", &_DumpRawAllocations,
517 		"List all raw allocations in slab areas",
518 		"\n"
519 		"Lists all raw allocations in slab areas.\n", 0);
520 }
521 
522 
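// Allocates and maps a single chunk of cache->slab_size bytes for the given
// object cache and stores the cache pointer in the chunk's "reference" field,
// which is how CacheForAddress() and FreeRawOrReturnCache() later identify
// the owner.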
523 /*static*/ status_t
524 MemoryManager::Allocate(ObjectCache* cache, uint32 flags, void*& _pages)
525 {
526 	// TODO: Support CACHE_UNLOCKED_PAGES!
527 
528 	T(Allocate(cache, flags));
529 
530 	size_t chunkSize = cache->slab_size;
531 
532 	TRACE("MemoryManager::Allocate(%p, %#" B_PRIx32 "): chunkSize: %"
533 		B_PRIuSIZE "\n", cache, flags, chunkSize);
534 
535 	MutexLocker locker(sLock);
536 
537 	// allocate a chunk
538 	MetaChunk* metaChunk;
539 	Chunk* chunk;
540 	status_t error = _AllocateChunks(chunkSize, 1, flags, metaChunk, chunk);
541 	if (error != B_OK)
542 		return error;
543 
544 	// map the chunk
545 	Area* area = metaChunk->GetArea();
546 	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
547 
548 	locker.Unlock();
549 	error = _MapChunk(area->vmArea, chunkAddress, chunkSize, 0, flags);
550 	locker.Lock();
551 	if (error != B_OK) {
552 		// something failed -- free the chunk
553 		_FreeChunk(area, metaChunk, chunk, chunkAddress, true, flags);
554 		return error;
555 	}
556 
557 	chunk->reference = (addr_t)cache;
558 	_pages = (void*)chunkAddress;
559 
560 	TRACE("MemoryManager::Allocate() done: %p (meta chunk: %d, chunk %d)\n",
561 		_pages, int(metaChunk - area->metaChunks),
562 		int(chunk - metaChunk->chunks));
563 	return B_OK;
564 }
565 
566 
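// Frees a chunk previously returned by Allocate(). The owning area and meta
// chunk are derived from the address alone, relying on the fixed
// SLAB_AREA_SIZE / SLAB_CHUNK_SIZE_LARGE layout.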
567 /*static*/ void
568 MemoryManager::Free(void* pages, uint32 flags)
569 {
570 	TRACE("MemoryManager::Free(%p, %#" B_PRIx32 ")\n", pages, flags);
571 
572 	T(Free(pages, flags));
573 
574 	// get the area and the meta chunk
575 	Area* area = _AreaForAddress((addr_t)pages);
576 	MetaChunk* metaChunk = &area->metaChunks[
577 		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
578 
579 	ASSERT(metaChunk->chunkSize > 0);
580 	ASSERT((addr_t)pages >= metaChunk->chunkBase);
581 	ASSERT(((addr_t)pages % metaChunk->chunkSize) == 0);
582 
583 	// get the chunk
584 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
585 	Chunk* chunk = &metaChunk->chunks[chunkIndex];
586 
587 	ASSERT(chunk->next != NULL);
588 	ASSERT(chunk->next < metaChunk->chunks
589 		|| chunk->next
590 			>= metaChunk->chunks + SLAB_SMALL_CHUNKS_PER_META_CHUNK);
591 
592 	// and free it
593 	MutexLocker locker(sLock);
594 	_FreeChunk(area, metaChunk, chunk, (addr_t)pages, false, flags);
595 }
596 
597 
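// Allocates "size" bytes (rounded up to SLAB_CHUNK_SIZE_SMALL) that are not
// tied to an object cache. Requests larger than a large chunk, or ones that
// must be aligned to their size, get a dedicated VM area instead. For
// chunk-backed allocations the end address is recorded (as an odd value) in
// the first chunk's "reference" field.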
598 /*static*/ status_t
599 MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
600 {
601 #if SLAB_MEMORY_MANAGER_TRACING
602 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
603 	AbstractTraceEntryWithStackTrace* traceEntry = T(AllocateRaw(size, flags));
604 	size += sizeof(AllocationTrackingInfo);
605 #else
606 	T(AllocateRaw(size, flags));
607 #endif
608 #endif
609 
610 	size = ROUNDUP(size, SLAB_CHUNK_SIZE_SMALL);
611 
612 	TRACE("MemoryManager::AllocateRaw(%" B_PRIuSIZE ", %#" B_PRIx32 ")\n", size,
613 		  flags);
614 
615 	if (size > SLAB_CHUNK_SIZE_LARGE || (flags & CACHE_ALIGN_ON_SIZE) != 0) {
616 		// The requested size exceeds a large chunk, or the allocation has to be
617 		// aligned to its size. Allocate it as a dedicated area.
618 		if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0)
619 			return B_WOULD_BLOCK;
620 
621 		virtual_address_restrictions virtualRestrictions = {};
622 		virtualRestrictions.address_specification
623 			= (flags & CACHE_ALIGN_ON_SIZE) != 0
624 				? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS;
625 		physical_address_restrictions physicalRestrictions = {};
626 		area_id area = create_area_etc(VMAddressSpace::KernelID(),
627 			"slab large raw allocation", size, B_FULL_LOCK,
628 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
629 			((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
630 					? CREATE_AREA_DONT_WAIT : 0)
631 				| CREATE_AREA_DONT_CLEAR,
632 			&virtualRestrictions, &physicalRestrictions, &_pages);
633 
634 		status_t result = area >= 0 ? B_OK : area;
635 		if (result == B_OK) {
636 			fill_allocated_block(_pages, size);
637 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
638 			_AddTrackingInfo(_pages, size, traceEntry);
639 #endif
640 		}
641 
642 		return result;
643 	}
644 
645 	// determine chunk size (small or medium)
646 	size_t chunkSize = SLAB_CHUNK_SIZE_SMALL;
647 	uint32 chunkCount = size / SLAB_CHUNK_SIZE_SMALL;
648 
649 	if (size % SLAB_CHUNK_SIZE_MEDIUM == 0) {
650 		chunkSize = SLAB_CHUNK_SIZE_MEDIUM;
651 		chunkCount = size / SLAB_CHUNK_SIZE_MEDIUM;
652 	}
653 
654 	MutexLocker locker(sLock);
655 
656 	// allocate the chunks
657 	MetaChunk* metaChunk;
658 	Chunk* chunk;
659 	status_t error = _AllocateChunks(chunkSize, chunkCount, flags, metaChunk,
660 		chunk);
661 	if (error != B_OK)
662 		return error;
663 
664 	// map the chunks
665 	Area* area = metaChunk->GetArea();
666 	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
667 
668 	locker.Unlock();
669 	error = _MapChunk(area->vmArea, chunkAddress, size, 0, flags);
670 	locker.Lock();
671 	if (error != B_OK) {
672 		// something failed -- free the chunks
673 		for (uint32 i = 0; i < chunkCount; i++)
674 			_FreeChunk(area, metaChunk, chunk + i, chunkAddress, true, flags);
675 		return error;
676 	}
677 
678 	chunk->reference = (addr_t)chunkAddress + size - 1;
679 	_pages = (void*)chunkAddress;
680 
681 	fill_allocated_block(_pages, size);
682 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
683 	_AddTrackingInfo(_pages, size, traceEntry);
684 #endif
685 
686 	TRACE("MemoryManager::AllocateRaw() done: %p (meta chunk: %d, chunk %d)\n",
687 		_pages, int(metaChunk - area->metaChunks),
688 		int(chunk - metaChunk->chunks));
689 	return B_OK;
690 }
691 
692 
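// Counterpart to Allocate()/AllocateRaw() for callers that only have an
// address: if the chunk belongs to an object cache (even "reference" value),
// that cache is returned and nothing is freed here; otherwise the raw chunk
// allocation or dedicated VM area is freed and NULL is returned.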
693 /*static*/ ObjectCache*
694 MemoryManager::FreeRawOrReturnCache(void* pages, uint32 flags)
695 {
696 	TRACE("MemoryManager::FreeRawOrReturnCache(%p, %#" B_PRIx32 ")\n", pages,
697 		flags);
698 
699 	T(FreeRawOrReturnCache(pages, flags));
700 
701 	// get the area
702 	addr_t areaBase = _AreaBaseAddressForAddress((addr_t)pages);
703 
704 	ReadLocker readLocker(sAreaTableLock);
705 	Area* area = sAreaTable.Lookup(areaBase);
706 	readLocker.Unlock();
707 
708 	if (area == NULL) {
709 		// Probably a large allocation. Look up the VM area.
710 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
711 		addressSpace->ReadLock();
712 		VMArea* area = addressSpace->LookupArea((addr_t)pages);
713 		addressSpace->ReadUnlock();
714 
715 		if (area != NULL && (addr_t)pages == area->Base())
716 			delete_area(area->id);
717 		else
718 			panic("freeing unknown block %p from area %p", pages, area);
719 
720 		return NULL;
721 	}
722 
723 	MetaChunk* metaChunk = &area->metaChunks[
724 		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
725 
726 	// get the chunk
727 	ASSERT(metaChunk->chunkSize > 0);
728 	ASSERT((addr_t)pages >= metaChunk->chunkBase);
729 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
730 	Chunk* chunk = &metaChunk->chunks[chunkIndex];
731 
732 	addr_t reference = chunk->reference;
733 	if ((reference & 1) == 0)
734 		return (ObjectCache*)reference;
735 
736 	// Seems we have a raw chunk allocation.
737 	ASSERT((addr_t)pages == _ChunkAddress(metaChunk, chunk));
738 	ASSERT(reference > (addr_t)pages);
739 	ASSERT(reference <= areaBase + SLAB_AREA_SIZE - 1);
740 	size_t size = reference - (addr_t)pages + 1;
741 	ASSERT((size % SLAB_CHUNK_SIZE_SMALL) == 0);
742 
743 	// unmap the chunks
744 	_UnmapChunk(area->vmArea, (addr_t)pages, size, flags);
745 
746 	// and free them
747 	MutexLocker locker(sLock);
748 	uint32 chunkCount = size / metaChunk->chunkSize;
749 	for (uint32 i = 0; i < chunkCount; i++)
750 		_FreeChunk(area, metaChunk, chunk + i, (addr_t)pages, true, flags);
751 
752 	return NULL;
753 }
754 
755 
756 /*static*/ size_t
757 MemoryManager::AcceptableChunkSize(size_t size)
758 {
759 	if (size <= SLAB_CHUNK_SIZE_SMALL)
760 		return SLAB_CHUNK_SIZE_SMALL;
761 	if (size <= SLAB_CHUNK_SIZE_MEDIUM)
762 		return SLAB_CHUNK_SIZE_MEDIUM;
763 	return SLAB_CHUNK_SIZE_LARGE;
764 }
765 
766 
767 /*static*/ ObjectCache*
768 MemoryManager::GetAllocationInfo(void* address, size_t& _size)
769 {
770 	// get the area
771 	ReadLocker readLocker(sAreaTableLock);
772 	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
773 	readLocker.Unlock();
774 
775 	if (area == NULL) {
776 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
777 		addressSpace->ReadLock();
778 		VMArea* area = addressSpace->LookupArea((addr_t)address);
779 		if (area != NULL && (addr_t)address == area->Base())
780 			_size = area->Size();
781 		else
782 			_size = 0;
783 		addressSpace->ReadUnlock();
784 
785 		return NULL;
786 	}
787 
788 	MetaChunk* metaChunk = &area->metaChunks[
789 		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
790 
791 	// get the chunk
792 	ASSERT(metaChunk->chunkSize > 0);
793 	ASSERT((addr_t)address >= metaChunk->chunkBase);
794 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
795 
796 	addr_t reference = metaChunk->chunks[chunkIndex].reference;
797 	if ((reference & 1) == 0) {
798 		ObjectCache* cache = (ObjectCache*)reference;
799 		_size = cache->object_size;
800 		return cache;
801 	}
802 
803 	_size = reference - (addr_t)address + 1;
804 	return NULL;
805 }
806 
807 
808 /*static*/ ObjectCache*
809 MemoryManager::CacheForAddress(void* address)
810 {
811 	// get the area
812 	ReadLocker readLocker(sAreaTableLock);
813 	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
814 	readLocker.Unlock();
815 
816 	if (area == NULL)
817 		return NULL;
818 
819 	MetaChunk* metaChunk = &area->metaChunks[
820 		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
821 
822 	// get the chunk
823 	ASSERT(metaChunk->chunkSize > 0);
824 	ASSERT((addr_t)address >= metaChunk->chunkBase);
825 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
826 
827 	addr_t reference = metaChunk->chunks[chunkIndex].reference;
828 	return (reference & 1) == 0 ? (ObjectCache*)reference : NULL;
829 }
830 
831 
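// Performs the deferred work requested via _RequestMaintenance(): brings the
// free area reserve back to one or two areas, allocating or deleting areas as
// needed.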
832 /*static*/ void
833 MemoryManager::PerformMaintenance()
834 {
835 	MutexLocker locker(sLock);
836 
837 	while (sMaintenanceNeeded) {
838 		sMaintenanceNeeded = false;
839 
840 		// We want to keep one or two areas as a reserve. This way we have at
841 		// least one area to use in situations where we aren't allowed to
842 		// allocate one, and we also avoid ping-pong effects.
843 		if (sFreeAreaCount > 0 && sFreeAreaCount <= 2)
844 			return;
845 
846 		if (sFreeAreaCount == 0) {
847 			// try to allocate one
848 			Area* area;
849 			if (_AllocateArea(0, area) != B_OK)
850 				return;
851 
852 			_push(sFreeAreas, area);
853 			if (++sFreeAreaCount > 2)
854 				sMaintenanceNeeded = true;
855 		} else {
856 			// free until we only have two free ones
857 			while (sFreeAreaCount > 2) {
858 				Area* area = _pop(sFreeAreas);
859 				_FreeArea(area, true, 0);
860 			}
861 
862 			if (sFreeAreaCount == 0)
863 				sMaintenanceNeeded = true;
864 		}
865 	}
866 }
867 
868 
869 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
870 
871 /*static*/ bool
872 MemoryManager::AnalyzeAllocationCallers(AllocationTrackingCallback& callback)
873 {
874 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
875 			Area* area = it.Next();) {
876 		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
877 			MetaChunk* metaChunk = area->metaChunks + i;
878 			if (metaChunk->chunkSize == 0)
879 				continue;
880 
881 			for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
882 				Chunk* chunk = metaChunk->chunks + k;
883 
884 				// skip free chunks
885 				if (_IsChunkFree(metaChunk, chunk))
886 					continue;
887 
888 				addr_t reference = chunk->reference;
889 				if ((reference & 1) == 0 || reference == 1)
890 					continue;
891 
892 				addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
893 				size_t size = reference - chunkAddress + 1;
894 
895 				if (!callback.ProcessTrackingInfo(
896 						_TrackingInfoFor((void*)chunkAddress, size),
897 						(void*)chunkAddress, size)) {
898 					return false;
899 				}
900 			}
901 		}
902 	}
903 
904 	return true;
905 }
906 
907 #endif	// SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
908 
909 
910 /*static*/ ObjectCache*
911 MemoryManager::DebugObjectCacheForAddress(void* address)
912 {
913 	// get the area
914 	addr_t areaBase = _AreaBaseAddressForAddress((addr_t)address);
915 	Area* area = sAreaTable.Lookup(areaBase);
916 
917 	if (area == NULL)
918 		return NULL;
919 
920 	MetaChunk* metaChunk = &area->metaChunks[
921 		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
922 
923 	// get the chunk
924 	if (metaChunk->chunkSize == 0)
925 		return NULL;
926 	if ((addr_t)address < metaChunk->chunkBase)
927 		return NULL;
928 
929 	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
930 	Chunk* chunk = &metaChunk->chunks[chunkIndex];
931 
932 	addr_t reference = chunk->reference;
933 	if ((reference & 1) == 0)
934 		return (ObjectCache*)reference;
935 
936 	return NULL;
937 }
938 
939 
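// Allocates chunkCount contiguous chunks of the given size, trying the
// matching partial meta chunk list first, then the reserved free areas, and
// finally a newly created area. Concurrent callers that would each need a new
// area wait on a shared AllocationEntry instead of all allocating one.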
940 /*static*/ status_t
941 MemoryManager::_AllocateChunks(size_t chunkSize, uint32 chunkCount,
942 	uint32 flags, MetaChunk*& _metaChunk, Chunk*& _chunk)
943 {
944 	MetaChunkList* metaChunkList = NULL;
945 	if (chunkSize == SLAB_CHUNK_SIZE_SMALL) {
946 		metaChunkList = &sPartialMetaChunksSmall;
947 	} else if (chunkSize == SLAB_CHUNK_SIZE_MEDIUM) {
948 		metaChunkList = &sPartialMetaChunksMedium;
949 	} else if (chunkSize != SLAB_CHUNK_SIZE_LARGE) {
950 		panic("MemoryManager::_AllocateChunks(): Unsupported chunk size: %"
951 			B_PRIuSIZE, chunkSize);
952 		return B_BAD_VALUE;
953 	}
954 
955 	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk))
956 		return B_OK;
957 
958 	if (sFreeAreas != NULL) {
959 		_AddArea(_pop(sFreeAreas));
960 		sFreeAreaCount--;
961 		_RequestMaintenance();
962 
963 		_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
964 		return B_OK;
965 	}
966 
967 	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
968 		// We can't create an area under this restriction, and we must not wait
969 		// for someone else to do it.
970 		return B_WOULD_BLOCK;
971 	}
972 
973 	// We need to allocate a new area. Wait if someone else is already trying
974 	// to do the same.
975 	while (true) {
976 		AllocationEntry* allocationEntry = NULL;
977 		if (sAllocationEntryDontWait != NULL) {
978 			allocationEntry = sAllocationEntryDontWait;
979 		} else if (sAllocationEntryCanWait != NULL
980 				&& (flags & CACHE_DONT_WAIT_FOR_MEMORY) == 0) {
981 			allocationEntry = sAllocationEntryCanWait;
982 		} else
983 			break;
984 
985 		ConditionVariableEntry entry;
986 		allocationEntry->condition.Add(&entry);
987 
988 		mutex_unlock(&sLock);
989 		entry.Wait();
990 		mutex_lock(&sLock);
991 
992 		if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
993 				_chunk)) {
994 			return B_OK;
995 		}
996 	}
997 
998 	// prepare the allocation entry others can wait on
999 	AllocationEntry*& allocationEntry
1000 		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
1001 			? sAllocationEntryDontWait : sAllocationEntryCanWait;
1002 
1003 	AllocationEntry myResizeEntry;
1004 	allocationEntry = &myResizeEntry;
1005 	allocationEntry->condition.Init(metaChunkList, "wait for slab area");
1006 	allocationEntry->thread = find_thread(NULL);
1007 
1008 	Area* area;
1009 	status_t error = _AllocateArea(flags, area);
1010 
1011 	allocationEntry->condition.NotifyAll();
1012 	allocationEntry = NULL;
1013 
1014 	if (error != B_OK)
1015 		return error;
1016 
1017 	// Try again to get a meta chunk. Something might have been freed in the
1018 	// meantime, in which case we can free the area we just allocated.
1019 	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk)) {
1020 		_FreeArea(area, true, flags);
1021 		return B_OK;
1022 	}
1023 
1024 	_AddArea(area);
1025 	_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
1026 	return B_OK;
1027 }
1028 
1029 
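// Tries to carve chunkCount contiguous chunks out of an existing partial or
// free meta chunk. Returns false if no meta chunk offers a sufficiently large
// free range; the single-chunk case is delegated to _GetChunk().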
1030 /*static*/ bool
1031 MemoryManager::_GetChunks(MetaChunkList* metaChunkList, size_t chunkSize,
1032 	uint32 chunkCount, MetaChunk*& _metaChunk, Chunk*& _chunk)
1033 {
1034 	// the common and less complicated special case
1035 	if (chunkCount == 1)
1036 		return _GetChunk(metaChunkList, chunkSize, _metaChunk, _chunk);
1037 
1038 	ASSERT(metaChunkList != NULL);
1039 
1040 	// Iterate through the partial meta chunk list and try to find a free
1041 	// range that is large enough.
1042 	MetaChunk* metaChunk = NULL;
1043 	for (MetaChunkList::Iterator it = metaChunkList->GetIterator();
1044 			(metaChunk = it.Next()) != NULL;) {
1045 		if (metaChunk->firstFreeChunk + chunkCount - 1
1046 				<= metaChunk->lastFreeChunk) {
1047 			break;
1048 		}
1049 	}
1050 
1051 	if (metaChunk == NULL) {
1052 		// try to get a free meta chunk
1053 		if ((SLAB_CHUNK_SIZE_LARGE - SLAB_AREA_STRUCT_OFFSET - kAreaAdminSize)
1054 				/ chunkSize >= chunkCount) {
1055 			metaChunk = sFreeShortMetaChunks.RemoveHead();
1056 		}
1057 		if (metaChunk == NULL)
1058 			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1059 
1060 		if (metaChunk == NULL)
1061 			return false;
1062 
1063 		metaChunkList->Add(metaChunk);
1064 		metaChunk->GetArea()->usedMetaChunkCount++;
1065 		_PrepareMetaChunk(metaChunk, chunkSize);
1066 
1067 		T(AllocateMetaChunk(metaChunk));
1068 	}
1069 
1070 	// pull the chunks out of the free list
1071 	Chunk* firstChunk = metaChunk->chunks + metaChunk->firstFreeChunk;
1072 	Chunk* lastChunk = firstChunk + (chunkCount - 1);
1073 	Chunk** chunkPointer = &metaChunk->freeChunks;
1074 	uint32 remainingChunks = chunkCount;
1075 	while (remainingChunks > 0) {
1076 		ASSERT_PRINT(chunkPointer, "remaining: %" B_PRIu32 "/%" B_PRIu32
1077 			", area: %p, meta chunk: %" B_PRIdSSIZE "\n", remainingChunks,
1078 			chunkCount, metaChunk->GetArea(),
1079 			metaChunk - metaChunk->GetArea()->metaChunks);
1080 		Chunk* chunk = *chunkPointer;
1081 		if (chunk >= firstChunk && chunk <= lastChunk) {
1082 			*chunkPointer = chunk->next;
1083 			chunk->reference = 1;
1084 			remainingChunks--;
1085 		} else
1086 			chunkPointer = &chunk->next;
1087 	}
1088 
1089 	// allocate the chunks
1090 	metaChunk->usedChunkCount += chunkCount;
1091 	if (metaChunk->usedChunkCount == metaChunk->chunkCount) {
1092 		// meta chunk is full now -- remove it from its list
1093 		if (metaChunkList != NULL)
1094 			metaChunkList->Remove(metaChunk);
1095 	}
1096 
1097 	// update the free range
1098 	metaChunk->firstFreeChunk += chunkCount;
1099 
1100 	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1101 
1102 	_chunk = firstChunk;
1103 	_metaChunk = metaChunk;
1104 
1105 	T(AllocateChunks(chunkSize, chunkCount, metaChunk, firstChunk));
1106 
1107 	return true;
1108 }
1109 
1110 
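// Single-chunk counterpart of _GetChunks(): takes the head of the partial
// list (or initializes a free meta chunk), pops one chunk off the free list,
// and shrinks the recorded free range from whichever end is closer to the
// taken chunk.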
1111 /*static*/ bool
1112 MemoryManager::_GetChunk(MetaChunkList* metaChunkList, size_t chunkSize,
1113 	MetaChunk*& _metaChunk, Chunk*& _chunk)
1114 {
1115 	MetaChunk* metaChunk = metaChunkList != NULL
1116 		? metaChunkList->Head() : NULL;
1117 	if (metaChunk == NULL) {
1118 		// no partial meta chunk -- maybe there's a free one
1119 		if (chunkSize == SLAB_CHUNK_SIZE_LARGE) {
1120 			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1121 		} else {
1122 			metaChunk = sFreeShortMetaChunks.RemoveHead();
1123 			if (metaChunk == NULL)
1124 				metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1125 			if (metaChunk != NULL)
1126 				metaChunkList->Add(metaChunk);
1127 		}
1128 
1129 		if (metaChunk == NULL)
1130 			return false;
1131 
1132 		metaChunk->GetArea()->usedMetaChunkCount++;
1133 		_PrepareMetaChunk(metaChunk, chunkSize);
1134 
1135 		T(AllocateMetaChunk(metaChunk));
1136 	}
1137 
1138 	// allocate the chunk
1139 	if (++metaChunk->usedChunkCount == metaChunk->chunkCount) {
1140 		// meta chunk is full now -- remove it from its list
1141 		if (metaChunkList != NULL)
1142 			metaChunkList->Remove(metaChunk);
1143 	}
1144 
1145 	_chunk = _pop(metaChunk->freeChunks);
1146 	_metaChunk = metaChunk;
1147 
1148 	_chunk->reference = 1;
1149 
1150 	// update the free range
1151 	uint32 chunkIndex = _chunk - metaChunk->chunks;
1152 	if (chunkIndex >= metaChunk->firstFreeChunk
1153 			&& chunkIndex <= metaChunk->lastFreeChunk) {
1154 		if (chunkIndex - metaChunk->firstFreeChunk
1155 				<= metaChunk->lastFreeChunk - chunkIndex) {
1156 			metaChunk->firstFreeChunk = chunkIndex + 1;
1157 		} else
1158 			metaChunk->lastFreeChunk = chunkIndex - 1;
1159 	}
1160 
1161 	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1162 
1163 	T(AllocateChunk(chunkSize, metaChunk, _chunk));
1164 
1165 	return true;
1166 }
1167 
1168 
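// Returns a chunk to its meta chunk's free list, unmapping it first unless
// the caller already did. A meta chunk that becomes empty goes back onto the
// free meta chunk lists, and an area whose last used meta chunk is freed is
// handed to _FreeArea().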
1169 /*static*/ void
1170 MemoryManager::_FreeChunk(Area* area, MetaChunk* metaChunk, Chunk* chunk,
1171 	addr_t chunkAddress, bool alreadyUnmapped, uint32 flags)
1172 {
1173 	// unmap the chunk
1174 	if (!alreadyUnmapped) {
1175 		mutex_unlock(&sLock);
1176 		_UnmapChunk(area->vmArea, chunkAddress, metaChunk->chunkSize, flags);
1177 		mutex_lock(&sLock);
1178 	}
1179 
1180 	T(FreeChunk(metaChunk, chunk));
1181 
1182 	_push(metaChunk->freeChunks, chunk);
1183 
1184 	uint32 chunkIndex = chunk - metaChunk->chunks;
1185 
1186 	// free the meta chunk, if it is unused now
1187 	PARANOID_CHECKS_ONLY(bool areaDeleted = false;)
1188 	ASSERT(metaChunk->usedChunkCount > 0);
1189 	if (--metaChunk->usedChunkCount == 0) {
1190 		T(FreeMetaChunk(metaChunk));
1191 
1192 		// remove from partial meta chunk list
1193 		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
1194 			sPartialMetaChunksSmall.Remove(metaChunk);
1195 		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
1196 			sPartialMetaChunksMedium.Remove(metaChunk);
1197 
1198 		// mark empty
1199 		metaChunk->chunkSize = 0;
1200 
1201 		// add to free list
1202 		if (metaChunk == area->metaChunks)
1203 			sFreeShortMetaChunks.Add(metaChunk, false);
1204 		else
1205 			sFreeCompleteMetaChunks.Add(metaChunk, false);
1206 
1207 		// free the area, if it is unused now
1208 		ASSERT(area->usedMetaChunkCount > 0);
1209 		if (--area->usedMetaChunkCount == 0) {
1210 			_FreeArea(area, false, flags);
1211 			PARANOID_CHECKS_ONLY(areaDeleted = true;)
1212 		}
1213 	} else if (metaChunk->usedChunkCount == metaChunk->chunkCount - 1) {
1214 		// the meta chunk was full before -- add it back to its partial chunk
1215 		// list
1216 		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
1217 			sPartialMetaChunksSmall.Add(metaChunk, false);
1218 		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
1219 			sPartialMetaChunksMedium.Add(metaChunk, false);
1220 
1221 		metaChunk->firstFreeChunk = chunkIndex;
1222 		metaChunk->lastFreeChunk = chunkIndex;
1223 	} else {
1224 		// extend the free range, if the chunk adjoins
1225 		if (chunkIndex + 1 == metaChunk->firstFreeChunk) {
1226 			uint32 firstFree = chunkIndex;
1227 			for (; firstFree > 0; firstFree--) {
1228 				Chunk* previousChunk = &metaChunk->chunks[firstFree - 1];
1229 				if (!_IsChunkFree(metaChunk, previousChunk))
1230 					break;
1231 			}
1232 			metaChunk->firstFreeChunk = firstFree;
1233 		} else if (chunkIndex == (uint32)metaChunk->lastFreeChunk + 1) {
1234 			uint32 lastFree = chunkIndex;
1235 			for (; lastFree + 1 < metaChunk->chunkCount; lastFree++) {
1236 				Chunk* nextChunk = &metaChunk->chunks[lastFree + 1];
1237 				if (!_IsChunkFree(metaChunk, nextChunk))
1238 					break;
1239 			}
1240 			metaChunk->lastFreeChunk = lastFree;
1241 		}
1242 	}
1243 
1244 	PARANOID_CHECKS_ONLY(
1245 		if (!areaDeleted)
1246 			_CheckMetaChunk(metaChunk);
1247 	)
1248 }
1249 
1250 
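// Sets up a previously unused meta chunk for the given chunk size. The area's
// first meta chunk starts behind the administrative data and therefore holds
// fewer chunks; all chunks are pushed onto the free list and the free range
// spans the whole meta chunk.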
1251 /*static*/ void
1252 MemoryManager::_PrepareMetaChunk(MetaChunk* metaChunk, size_t chunkSize)
1253 {
1254 	Area* area = metaChunk->GetArea();
1255 
1256 	if (metaChunk == area->metaChunks) {
1257 		// the first meta chunk is shorter, as it holds the area's admin data
1258 		size_t unusableSize = ROUNDUP(SLAB_AREA_STRUCT_OFFSET + kAreaAdminSize,
1259 			chunkSize);
1260 		metaChunk->chunkBase = area->BaseAddress() + unusableSize;
1261 		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE - unusableSize;
1262 	}
1263 
1264 	metaChunk->chunkSize = chunkSize;
1265 	metaChunk->chunkCount = metaChunk->totalSize / chunkSize;
1266 	metaChunk->usedChunkCount = 0;
1267 
1268 	metaChunk->freeChunks = NULL;
1269 	for (int32 i = metaChunk->chunkCount - 1; i >= 0; i--)
1270 		_push(metaChunk->freeChunks, metaChunk->chunks + i);
1271 
1272 	metaChunk->firstFreeChunk = 0;
1273 	metaChunk->lastFreeChunk = metaChunk->chunkCount - 1;
1274 
1275 	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1276 }
1277 
1278 
1279 /*static*/ void
1280 MemoryManager::_AddArea(Area* area)
1281 {
1282 	T(AddArea(area));
1283 
1284 	// add the area to the hash table
1285 	WriteLocker writeLocker(sAreaTableLock);
1286 	sAreaTable.InsertUnchecked(area);
1287 	writeLocker.Unlock();
1288 
1289 	// add the area's meta chunks to the free lists
1290 	sFreeShortMetaChunks.Add(&area->metaChunks[0]);
1291 	for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++)
1292 		sFreeCompleteMetaChunks.Add(&area->metaChunks[i]);
1293 }
1294 
1295 
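// Creates a new SLAB_AREA_SIZE area. Once the kernel args have been cleared
// this is a null area whose chunks are mapped on demand by _MapChunk(); during
// early boot, fully mapped memory is taken via vm_allocate_early() instead.
// Expects sLock to be held and temporarily drops it while talking to the VM.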
1296 /*static*/ status_t
1297 MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
1298 {
1299 	TRACE("MemoryManager::_AllocateArea(%#" B_PRIx32 ")\n", flags);
1300 
1301 	ASSERT((flags & CACHE_DONT_LOCK_KERNEL_SPACE) == 0);
1302 
1303 	mutex_unlock(&sLock);
1304 
1305 	size_t pagesNeededToMap = 0;
1306 	void* areaBase;
1307 	Area* area;
1308 	VMArea* vmArea = NULL;
1309 
1310 	if (sKernelArgs == NULL) {
1311 		// create an area
1312 		uint32 areaCreationFlags = (flags & CACHE_PRIORITY_VIP) != 0
1313 			? CREATE_AREA_PRIORITY_VIP : 0;
1314 		area_id areaID = vm_create_null_area(B_SYSTEM_TEAM, kSlabAreaName,
1315 			&areaBase, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
1316 			areaCreationFlags);
1317 		if (areaID < 0) {
1318 			mutex_lock(&sLock);
1319 			return areaID;
1320 		}
1321 
1322 		area = _AreaForAddress((addr_t)areaBase);
1323 
1324 		// map the memory for the administrative structure
1325 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1326 		VMTranslationMap* translationMap = addressSpace->TranslationMap();
1327 
1328 		pagesNeededToMap = translationMap->MaxPagesNeededToMap(
1329 			(addr_t)area, (addr_t)areaBase + SLAB_AREA_SIZE - 1);
1330 
1331 		vmArea = VMAreaHash::Lookup(areaID);
1332 		status_t error = _MapChunk(vmArea, (addr_t)area, kAreaAdminSize,
1333 			pagesNeededToMap, flags);
1334 		if (error != B_OK) {
1335 			delete_area(areaID);
1336 			mutex_lock(&sLock);
1337 			return error;
1338 		}
1339 
1340 		dprintf("slab memory manager: created area %p (%" B_PRId32 ")\n", area,
1341 			areaID);
1342 	} else {
1343 		// no areas yet -- allocate raw memory
1344 		areaBase = (void*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
1345 			SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
1346 			SLAB_AREA_SIZE);
1347 		if (areaBase == NULL) {
1348 			mutex_lock(&sLock);
1349 			return B_NO_MEMORY;
1350 		}
1351 		area = _AreaForAddress((addr_t)areaBase);
1352 
1353 		TRACE("MemoryManager::_AllocateArea(): allocated early area %p\n",
1354 			area);
1355 	}
1356 
1357 	// init the area structure
1358 	area->vmArea = vmArea;
1359 	area->reserved_memory_for_mapping = pagesNeededToMap * B_PAGE_SIZE;
1360 	area->usedMetaChunkCount = 0;
1361 	area->fullyMapped = vmArea == NULL;
1362 
1363 	// init the meta chunks
1364 	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1365 		MetaChunk* metaChunk = area->metaChunks + i;
1366 		metaChunk->chunkSize = 0;
1367 		metaChunk->chunkBase = (addr_t)areaBase + i * SLAB_CHUNK_SIZE_LARGE;
1368 		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE;
1369 			// Note: chunkBase and totalSize aren't correct for the first
1370 			// meta chunk. They will be set in _PrepareMetaChunk().
1371 		metaChunk->chunkCount = 0;
1372 		metaChunk->usedChunkCount = 0;
1373 		metaChunk->freeChunks = NULL;
1374 	}
1375 
1376 	mutex_lock(&sLock);
1377 	_area = area;
1378 
1379 	T(AllocateArea(area, flags));
1380 
1381 	return B_OK;
1382 }
1383 
1384 
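// Releases an area that no longer has any used meta chunks. Up to two areas
// are kept on the free list as a reserve (areas that may not be deleted right
// now are parked there as well); any other area is deleted and the memory
// reserved for its mappings is unreserved.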
1385 /*static*/ void
1386 MemoryManager::_FreeArea(Area* area, bool areaRemoved, uint32 flags)
1387 {
1388 	TRACE("MemoryManager::_FreeArea(%p, %#" B_PRIx32 ")\n", area, flags);
1389 
1390 	T(FreeArea(area, areaRemoved, flags));
1391 
1392 	ASSERT(area->usedMetaChunkCount == 0);
1393 
1394 	if (!areaRemoved) {
1395 		// remove the area's meta chunks from the free lists
1396 		ASSERT(area->metaChunks[0].usedChunkCount == 0);
1397 		sFreeShortMetaChunks.Remove(&area->metaChunks[0]);
1398 
1399 		for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1400 			ASSERT(area->metaChunks[i].usedChunkCount == 0);
1401 			sFreeCompleteMetaChunks.Remove(&area->metaChunks[i]);
1402 		}
1403 
1404 		// remove the area from the hash table
1405 		WriteLocker writeLocker(sAreaTableLock);
1406 		sAreaTable.RemoveUnchecked(area);
1407 		writeLocker.Unlock();
1408 	}
1409 
1410 	// We want to keep one or two free areas as a reserve.
1411 	if (sFreeAreaCount <= 1) {
1412 		_push(sFreeAreas, area);
1413 		sFreeAreaCount++;
1414 		return;
1415 	}
1416 
1417 	if (area->vmArea == NULL || (flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
1418 		// This is either early in the boot process or we aren't allowed to
1419 		// delete the area now.
1420 		_push(sFreeAreas, area);
1421 		sFreeAreaCount++;
1422 		_RequestMaintenance();
1423 		return;
1424 	}
1425 
1426 	mutex_unlock(&sLock);
1427 
1428 	dprintf("slab memory manager: deleting area %p (%" B_PRId32 ")\n", area,
1429 		area->vmArea->id);
1430 
1431 	size_t memoryToUnreserve = area->reserved_memory_for_mapping;
1432 	delete_area(area->vmArea->id);
1433 	vm_unreserve_memory(memoryToUnreserve);
1434 
1435 	mutex_lock(&sLock);
1436 }
1437 
1438 
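// Maps wired pages for [address, address + size) of the given slab VM area,
// reserving memory and pages up front. A NULL vmArea denotes an early boot
// area that is already fully mapped. Callers drop sLock around this call.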
1439 /*static*/ status_t
1440 MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
1441 	size_t reserveAdditionalMemory, uint32 flags)
1442 {
1443 	TRACE("MemoryManager::_MapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
1444 		")\n", vmArea, address, size);
1445 
1446 	T(Map(address, size, flags));
1447 
1448 	if (vmArea == NULL) {
1449 		// everything is mapped anyway
1450 		return B_OK;
1451 	}
1452 
1453 	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1454 	VMTranslationMap* translationMap = addressSpace->TranslationMap();
1455 
1456 	// reserve memory for the chunk
1457 	int priority = (flags & CACHE_PRIORITY_VIP) != 0
1458 		? VM_PRIORITY_VIP : VM_PRIORITY_SYSTEM;
1459 	size_t reservedMemory = size + reserveAdditionalMemory;
1460 	status_t error = vm_try_reserve_memory(size, priority,
1461 		(flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? 0 : 1000000);
1462 	if (error != B_OK)
1463 		return error;
1464 
1465 	// reserve the pages we need now
1466 	size_t reservedPages = size / B_PAGE_SIZE
1467 		+ translationMap->MaxPagesNeededToMap(address, address + size - 1);
1468 	vm_page_reservation reservation;
1469 	if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0) {
1470 		if (!vm_page_try_reserve_pages(&reservation, reservedPages, priority)) {
1471 			vm_unreserve_memory(reservedMemory);
1472 			return B_WOULD_BLOCK;
1473 		}
1474 	} else
1475 		vm_page_reserve_pages(&reservation, reservedPages, priority);
1476 
1477 	VMCache* cache = vm_area_get_locked_cache(vmArea);
1478 
1479 	// map the pages
1480 	translationMap->Lock();
1481 
1482 	addr_t areaOffset = address - vmArea->Base();
1483 	addr_t endAreaOffset = areaOffset + size;
1484 	for (size_t offset = areaOffset; offset < endAreaOffset;
1485 			offset += B_PAGE_SIZE) {
1486 		vm_page* page = vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
1487 		cache->InsertPage(page, offset);
1488 
1489 		page->IncrementWiredCount();
1490 		atomic_add(&gMappedPagesCount, 1);
1491 		DEBUG_PAGE_ACCESS_END(page);
1492 
1493 		translationMap->Map(vmArea->Base() + offset,
1494 			page->physical_page_number * B_PAGE_SIZE,
1495 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
1496 			vmArea->MemoryType(), &reservation);
1497 	}
1498 
1499 	translationMap->Unlock();
1500 
1501 	cache->ReleaseRefAndUnlock();
1502 
1503 	vm_page_unreserve_pages(&reservation);
1504 
1505 	return B_OK;
1506 }
1507 
1508 
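// Unmaps [address, address + size), frees the wired pages backing the range,
// and returns the reserved memory to the VM. Fails with B_ERROR if the area
// has no VM area yet (early boot).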
1509 /*static*/ status_t
1510 MemoryManager::_UnmapChunk(VMArea* vmArea, addr_t address, size_t size,
1511 	uint32 flags)
1512 {
1513 	T(Unmap(address, size, flags));
1514 
1515 	if (vmArea == NULL)
1516 		return B_ERROR;
1517 
1518 	TRACE("MemoryManager::_UnmapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
1519 		")\n", vmArea, address, size);
1520 
1521 	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1522 	VMTranslationMap* translationMap = addressSpace->TranslationMap();
1523 	VMCache* cache = vm_area_get_locked_cache(vmArea);
1524 
1525 	// unmap the pages
1526 	translationMap->Lock();
1527 	translationMap->Unmap(address, address + size - 1);
1528 	atomic_add(&gMappedPagesCount, -(size / B_PAGE_SIZE));
1529 	translationMap->Unlock();
1530 
1531 	// free the pages
1532 	addr_t areaPageOffset = (address - vmArea->Base()) / B_PAGE_SIZE;
1533 	addr_t areaPageEndOffset = areaPageOffset + size / B_PAGE_SIZE;
1534 	VMCachePagesTree::Iterator it = cache->pages.GetIterator(
1535 		areaPageOffset, true, true);
1536 	while (vm_page* page = it.Next()) {
1537 		if (page->cache_offset >= areaPageEndOffset)
1538 			break;
1539 
1540 		DEBUG_PAGE_ACCESS_START(page);
1541 
1542 		page->DecrementWiredCount();
1543 
1544 		cache->RemovePage(page);
1545 			// the iterator is remove-safe
1546 		vm_page_free(cache, page);
1547 	}
1548 
1549 	cache->ReleaseRefAndUnlock();
1550 
1551 	vm_unreserve_memory(size);
1552 
1553 	return B_OK;
1554 }
1555 
1556 
1557 /*static*/ void
1558 MemoryManager::_UnmapFreeChunksEarly(Area* area)
1559 {
1560 	if (!area->fullyMapped)
1561 		return;
1562 
1563 	TRACE("MemoryManager::_UnmapFreeChunksEarly(%p)\n", area);
1564 
1565 	// unmap the space before the Area structure
1566 	#if SLAB_AREA_STRUCT_OFFSET > 0
1567 		_UnmapChunk(area->vmArea, area->BaseAddress(), SLAB_AREA_STRUCT_OFFSET,
1568 			0);
1569 	#endif
1570 
1571 	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1572 		MetaChunk* metaChunk = area->metaChunks + i;
1573 		if (metaChunk->chunkSize == 0) {
1574 			// meta chunk is free -- unmap it completely
1575 			if (i == 0) {
1576 				_UnmapChunk(area->vmArea, (addr_t)area + kAreaAdminSize,
1577 					SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize, 0);
1578 			} else {
1579 				_UnmapChunk(area->vmArea,
1580 					area->BaseAddress() + i * SLAB_CHUNK_SIZE_LARGE,
1581 					SLAB_CHUNK_SIZE_LARGE, 0);
1582 			}
1583 		} else {
1584 			// unmap free chunks
1585 			for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
1586 					chunk = chunk->next) {
1587 				_UnmapChunk(area->vmArea, _ChunkAddress(metaChunk, chunk),
1588 					metaChunk->chunkSize, 0);
1589 			}
1590 
1591 			// The first meta chunk might have space before its first chunk.
1592 			if (i == 0) {
1593 				addr_t unusedStart = (addr_t)area + kAreaAdminSize;
1594 				if (unusedStart < metaChunk->chunkBase) {
1595 					_UnmapChunk(area->vmArea, unusedStart,
1596 						metaChunk->chunkBase - unusedStart, 0);
1597 				}
1598 			}
1599 		}
1600 	}
1601 
1602 	area->fullyMapped = false;
1603 }
1604 
1605 
1606 /*static*/ void
1607 MemoryManager::_ConvertEarlyArea(Area* area)
1608 {
1609 	void* address = (void*)area->BaseAddress();
1610 	area_id areaID = create_area(kSlabAreaName, &address, B_EXACT_ADDRESS,
1611 		SLAB_AREA_SIZE, B_ALREADY_WIRED,
1612 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1613 	if (areaID < 0)
1614 		panic("out of memory");
1615 
1616 	area->vmArea = VMAreaHash::Lookup(areaID);
1617 }
1618 
1619 
1620 /*static*/ void
1621 MemoryManager::_RequestMaintenance()
1622 {
1623 	if ((sFreeAreaCount > 0 && sFreeAreaCount <= 2) || sMaintenanceNeeded)
1624 		return;
1625 
1626 	sMaintenanceNeeded = true;
1627 	request_memory_manager_maintenance();
1628 }
1629 
1630 
1631 /*static*/ bool
1632 MemoryManager::_IsChunkInFreeList(const MetaChunk* metaChunk,
1633 	const Chunk* chunk)
1634 {
1635 	Chunk* freeChunk = metaChunk->freeChunks;
1636 	while (freeChunk != NULL) {
1637 		if (freeChunk == chunk)
1638 			return true;
1639 		freeChunk = freeChunk->next;
1640 	}
1641 
1642 	return false;
1643 }
1644 
1645 
1646 #if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
1647 
1648 /*static*/ void
1649 MemoryManager::_CheckMetaChunk(MetaChunk* metaChunk)
1650 {
1651 	Area* area = metaChunk->GetArea();
1652 	int32 metaChunkIndex = metaChunk - area->metaChunks;
1653 	if (metaChunkIndex < 0 || metaChunkIndex >= SLAB_META_CHUNKS_PER_AREA) {
1654 		panic("invalid meta chunk %p!", metaChunk);
1655 		return;
1656 	}
1657 
1658 	switch (metaChunk->chunkSize) {
1659 		case 0:
1660 			// unused
1661 			return;
1662 		case SLAB_CHUNK_SIZE_SMALL:
1663 		case SLAB_CHUNK_SIZE_MEDIUM:
1664 		case SLAB_CHUNK_SIZE_LARGE:
1665 			break;
1666 		default:
1667 			panic("meta chunk %p has invalid chunk size: %" B_PRIuSIZE,
1668 				metaChunk, metaChunk->chunkSize);
1669 			return;
1670 	}
1671 
1672 	if (metaChunk->totalSize > SLAB_CHUNK_SIZE_LARGE) {
1673 		panic("meta chunk %p has invalid total size: %" B_PRIuSIZE,
1674 			metaChunk, metaChunk->totalSize);
1675 		return;
1676 	}
1677 
1678 	addr_t expectedBase = area->BaseAddress()
1679 		+ metaChunkIndex * SLAB_CHUNK_SIZE_LARGE;
1680 	if (metaChunk->chunkBase < expectedBase
1681 		|| metaChunk->chunkBase - expectedBase + metaChunk->totalSize
1682 			> SLAB_CHUNK_SIZE_LARGE) {
1683 		panic("meta chunk %p has invalid base address: %" B_PRIxADDR, metaChunk,
1684 			metaChunk->chunkBase);
1685 		return;
1686 	}
1687 
1688 	if (metaChunk->chunkCount != metaChunk->totalSize / metaChunk->chunkSize) {
1689 		panic("meta chunk %p has invalid chunk count: %u", metaChunk,
1690 			metaChunk->chunkCount);
1691 		return;
1692 	}
1693 
1694 	if (metaChunk->usedChunkCount > metaChunk->chunkCount) {
1695 		panic("meta chunk %p has invalid used chunk count: %u", metaChunk,
1696 			metaChunk->usedChunkCount);
1697 		return;
1698 	}
1699 
1700 	if (metaChunk->firstFreeChunk > metaChunk->chunkCount) {
1701 		panic("meta chunk %p has invalid first free chunk: %u", metaChunk,
1702 			metaChunk->firstFreeChunk);
1703 		return;
1704 	}
1705 
1706 	if (metaChunk->lastFreeChunk >= metaChunk->chunkCount) {
1707 		panic("meta chunk %p has invalid last free chunk: %u", metaChunk,
1708 			metaChunk->lastFreeChunk);
1709 		return;
1710 	}
1711 
1712 	// check free list for structural sanity
1713 	uint32 freeChunks = 0;
1714 	for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
1715 			chunk = chunk->next) {
1716 		if ((addr_t)chunk % sizeof(Chunk) != 0 || chunk < metaChunk->chunks
1717 			|| chunk >= metaChunk->chunks + metaChunk->chunkCount) {
1718 			panic("meta chunk %p has invalid element in free list, chunk: %p",
1719 				metaChunk, chunk);
1720 			return;
1721 		}
1722 
1723 		if (++freeChunks > metaChunk->chunkCount) {
1724 			panic("meta chunk %p has cyclic free list", metaChunk);
1725 			return;
1726 		}
1727 	}
1728 
1729 	if (freeChunks + metaChunk->usedChunkCount > metaChunk->chunkCount) {
1730 		panic("meta chunk %p has mismatching free/used chunk counts: total: "
1731 			"%u, used: %u, free: %" B_PRIu32, metaChunk, metaChunk->chunkCount,
1732 			metaChunk->usedChunkCount, freeChunks);
1733 		return;
1734 	}
1735 
1736 	// count used chunks by looking at their reference/next field
1737 	uint32 usedChunks = 0;
1738 	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
1739 		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i))
1740 			usedChunks++;
1741 	}
1742 
1743 	if (usedChunks != metaChunk->usedChunkCount) {
1744 		panic("meta chunk %p has used chunks that appear free: total: "
1745 			"%u, used: %u, appearing used: %" B_PRIu32, metaChunk,
1746 			metaChunk->chunkCount, metaChunk->usedChunkCount, usedChunks);
1747 		return;
1748 	}
1749 
1750 	// check free range
1751 	for (uint32 i = metaChunk->firstFreeChunk; i < metaChunk->lastFreeChunk;
1752 			i++) {
1753 		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i)) {
1754 			panic("meta chunk %p has used chunk in free range, chunk: %p (%"
1755 				B_PRIu32 ", free range: %u - %u)", metaChunk,
1756 				metaChunk->chunks + i, i, metaChunk->firstFreeChunk,
1757 				metaChunk->lastFreeChunk);
1758 			return;
1759 		}
1760 	}
1761 }
1762 
1763 #endif	// DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
1764 
1765 
1766 /*static*/ int
1767 MemoryManager::_DumpRawAllocations(int argc, char** argv)
1768 {
1769 	kprintf("area        meta chunk  chunk  base        size (KB)\n");
1770 
1771 	size_t totalSize = 0;
1772 
1773 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
1774 			Area* area = it.Next();) {
1775 		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1776 			MetaChunk* metaChunk = area->metaChunks + i;
1777 			if (metaChunk->chunkSize == 0)
1778 				continue;
1779 			for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
1780 				Chunk* chunk = metaChunk->chunks + k;
1781 
1782 				// skip free chunks
1783 				if (_IsChunkFree(metaChunk, chunk))
1784 					continue;
1785 
1786 				addr_t reference = chunk->reference;
1787 				if ((reference & 1) == 0 || reference == 1)
1788 					continue;
1789 
1790 				addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
1791 				size_t size = reference - chunkAddress + 1;
1792 				totalSize += size;
1793 
1794 				kprintf("%p  %10" B_PRId32 "  %5" B_PRIu32 "  %p  %9"
1795 					B_PRIuSIZE "\n", area, i, k, (void*)chunkAddress,
1796 					size / 1024);
1797 			}
1798 		}
1799 	}
1800 
1801 	kprintf("total:                                     %9" B_PRIuSIZE "\n",
1802 		totalSize / 1024);
1803 
1804 	return 0;
1805 }
1806 
1807 
1808 /*static*/ void
1809 MemoryManager::_PrintMetaChunkTableHeader(bool printChunks)
1810 {
1811 	if (printChunks)
1812 		kprintf("chunk        base       cache  object size  cache name\n");
1813 	else
1814 		kprintf("chunk        base\n");
1815 }
1816 
1817 /*static*/ void
1818 MemoryManager::_DumpMetaChunk(MetaChunk* metaChunk, bool printChunks,
1819 	bool printHeader)
1820 {
1821 	if (printHeader)
1822 		_PrintMetaChunkTableHeader(printChunks);
1823 
1824 	const char* type = "empty";
1825 	if (metaChunk->chunkSize != 0) {
1826 		switch (metaChunk->chunkSize) {
1827 			case SLAB_CHUNK_SIZE_SMALL:
1828 				type = "small";
1829 				break;
1830 			case SLAB_CHUNK_SIZE_MEDIUM:
1831 				type = "medium";
1832 				break;
1833 			case SLAB_CHUNK_SIZE_LARGE:
1834 				type = "large";
1835 				break;
1836 		}
1837 	}
1838 
1839 	int metaChunkIndex = metaChunk - metaChunk->GetArea()->metaChunks;
1840 	kprintf("%5d  %p  --- %6s meta chunk", metaChunkIndex,
1841 		(void*)metaChunk->chunkBase, type);
1842 	if (metaChunk->chunkSize != 0) {
1843 		kprintf(": %4u/%4u used, %-4u-%4u free ------------\n",
1844 			metaChunk->usedChunkCount, metaChunk->chunkCount,
1845 			metaChunk->firstFreeChunk, metaChunk->lastFreeChunk);
1846 	} else
1847 		kprintf(" --------------------------------------------\n");
1848 
1849 	if (metaChunk->chunkSize == 0 || !printChunks)
1850 		return;
1851 
1852 	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
1853 		Chunk* chunk = metaChunk->chunks + i;
1854 
1855 		// skip free chunks
1856 		if (_IsChunkFree(metaChunk, chunk)) {
1857 			if (!_IsChunkInFreeList(metaChunk, chunk)) {
1858 				kprintf("%5" B_PRIu32 "  %p  appears free, but isn't in free "
1859 					"list!\n", i, (void*)_ChunkAddress(metaChunk, chunk));
1860 			}
1861 
1862 			continue;
1863 		}
1864 
1865 		addr_t reference = chunk->reference;
1866 		if ((reference & 1) == 0) {
1867 			ObjectCache* cache = (ObjectCache*)reference;
1868 			kprintf("%5" B_PRIu32 "  %p  %p  %11" B_PRIuSIZE "  %s\n", i,
1869 				(void*)_ChunkAddress(metaChunk, chunk), cache,
1870 				cache != NULL ? cache->object_size : 0,
1871 				cache != NULL ? cache->name : "");
1872 		} else if (reference != 1) {
1873 			kprintf("%5" B_PRIu32 "  %p  raw allocation up to %p\n", i,
1874 				(void*)_ChunkAddress(metaChunk, chunk), (void*)reference);
1875 		}
1876 	}
1877 }
1878 
1879 
1880 /*static*/ int
1881 MemoryManager::_DumpMetaChunk(int argc, char** argv)
1882 {
1883 	if (argc != 2) {
1884 		print_debugger_command_usage(argv[0]);
1885 		return 0;
1886 	}
1887 
1888 	uint64 address;
1889 	if (!evaluate_debug_expression(argv[1], &address, false))
1890 		return 0;
1891 
1892 	Area* area = _AreaForAddress(address);
1893 
1894 	MetaChunk* metaChunk;
1895 	if ((addr_t)address >= (addr_t)area->metaChunks
1896 		&& (addr_t)address
1897 			< (addr_t)(area->metaChunks + SLAB_META_CHUNKS_PER_AREA)) {
1898 		metaChunk = (MetaChunk*)(addr_t)address;
1899 	} else {
1900 		metaChunk = area->metaChunks
1901 			+ (address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE;
1902 	}
1903 
1904 	_DumpMetaChunk(metaChunk, true, true);
1905 
1906 	return 0;
1907 }
1908 
1909 
1910 /*static*/ void
1911 MemoryManager::_DumpMetaChunks(const char* name, MetaChunkList& metaChunkList,
1912 	bool printChunks)
1913 {
1914 	kprintf("%s:\n", name);
1915 
1916 	for (MetaChunkList::Iterator it = metaChunkList.GetIterator();
1917 			MetaChunk* metaChunk = it.Next();) {
1918 		_DumpMetaChunk(metaChunk, printChunks, false);
1919 	}
1920 }
1921 
1922 
1923 /*static*/ int
1924 MemoryManager::_DumpMetaChunks(int argc, char** argv)
1925 {
1926 	bool printChunks = argc > 1 && strcmp(argv[1], "-c") == 0;
1927 
1928 	_PrintMetaChunkTableHeader(printChunks);
1929 	_DumpMetaChunks("free complete", sFreeCompleteMetaChunks, printChunks);
1930 	_DumpMetaChunks("free short", sFreeShortMetaChunks, printChunks);
1931 	_DumpMetaChunks("partial small", sPartialMetaChunksSmall, printChunks);
1932 	_DumpMetaChunks("partial medium", sPartialMetaChunksMedium, printChunks);
1933 
1934 	return 0;
1935 }
1936 
1937 
1938 /*static*/ int
1939 MemoryManager::_DumpArea(int argc, char** argv)
1940 {
1941 	bool printChunks = false;
1942 
1943 	int argi = 1;
1944 	while (argi < argc) {
1945 		if (argv[argi][0] != '-')
1946 			break;
1947 		const char* arg = argv[argi++];
1948 		if (strcmp(arg, "-c") == 0) {
1949 			printChunks = true;
1950 		} else {
1951 			print_debugger_command_usage(argv[0]);
1952 			return 0;
1953 		}
1954 	}
1955 
1956 	if (argi + 1 != argc) {
1957 		print_debugger_command_usage(argv[0]);
1958 		return 0;
1959 	}
1960 
1961 	uint64 address;
1962 	if (!evaluate_debug_expression(argv[argi], &address, false))
1963 		return 0;
1964 
1965 	Area* area = _AreaForAddress((addr_t)address);
1966 
1967 	for (uint32 k = 0; k < SLAB_META_CHUNKS_PER_AREA; k++) {
1968 		MetaChunk* metaChunk = area->metaChunks + k;
1969 		_DumpMetaChunk(metaChunk, printChunks, k == 0);
1970 	}
1971 
1972 	return 0;
1973 }
1974 
1975 
1976 /*static*/ int
1977 MemoryManager::_DumpAreas(int argc, char** argv)
1978 {
1979 	kprintf("      base        area   meta      small   medium  large\n");
1980 
1981 	size_t totalTotalSmall = 0;
1982 	size_t totalUsedSmall = 0;
1983 	size_t totalTotalMedium = 0;
1984 	size_t totalUsedMedium = 0;
1985 	size_t totalUsedLarge = 0;
1986 	uint32 areaCount = 0;
1987 
1988 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
1989 			Area* area = it.Next();) {
1990 		areaCount++;
1991 
1992 		// sum up the free/used counts for the chunk sizes
1993 		int totalSmall = 0;
1994 		int usedSmall = 0;
1995 		int totalMedium = 0;
1996 		int usedMedium = 0;
1997 		int usedLarge = 0;
1998 
1999 		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
2000 			MetaChunk* metaChunk = area->metaChunks + i;
2001 			if (metaChunk->chunkSize == 0)
2002 				continue;
2003 
2004 			switch (metaChunk->chunkSize) {
2005 				case SLAB_CHUNK_SIZE_SMALL:
2006 					totalSmall += metaChunk->chunkCount;
2007 					usedSmall += metaChunk->usedChunkCount;
2008 					break;
2009 				case SLAB_CHUNK_SIZE_MEDIUM:
2010 					totalMedium += metaChunk->chunkCount;
2011 					usedMedium += metaChunk->usedChunkCount;
2012 					break;
2013 				case SLAB_CHUNK_SIZE_LARGE:
2014 					usedLarge += metaChunk->usedChunkCount;
2015 					break;
2016 			}
2017 		}
2018 
2019 		kprintf("%p  %p  %2u/%2u  %4d/%4d  %3d/%3d  %5d\n",
2020 			area, area->vmArea, area->usedMetaChunkCount,
2021 			SLAB_META_CHUNKS_PER_AREA, usedSmall, totalSmall, usedMedium,
2022 			totalMedium, usedLarge);
2023 
2024 		totalTotalSmall += totalSmall;
2025 		totalUsedSmall += usedSmall;
2026 		totalTotalMedium += totalMedium;
2027 		totalUsedMedium += usedMedium;
2028 		totalUsedLarge += usedLarge;
2029 	}
2030 
2031 	kprintf("%d free area%s:\n", sFreeAreaCount,
2032 		sFreeAreaCount == 1 ? "" : "s");
2033 	for (Area* area = sFreeAreas; area != NULL; area = area->next) {
2034 		areaCount++;
2035 		kprintf("%p  %p\n", area, area->vmArea);
2036 	}
2037 
2038 	kprintf("total usage:\n");
2039 	kprintf("  small:    %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedSmall,
2040 		totalTotalSmall);
2041 	kprintf("  medium:   %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedMedium,
2042 		totalTotalMedium);
2043 	kprintf("  large:    %" B_PRIuSIZE "\n", totalUsedLarge);
2044 	kprintf("  memory:   %" B_PRIuSIZE "/%" B_PRIuSIZE " KB\n",
2045 		(totalUsedSmall * SLAB_CHUNK_SIZE_SMALL
2046 			+ totalUsedMedium * SLAB_CHUNK_SIZE_MEDIUM
2047 			+ totalUsedLarge * SLAB_CHUNK_SIZE_LARGE) / 1024,
2048 		areaCount * SLAB_AREA_SIZE / 1024);
2049 	kprintf("  overhead: %" B_PRIuSIZE " KB\n",
2050 		areaCount * kAreaAdminSize / 1024);
2051 
2052 	return 0;
2053 }
2054 
2055 
2056 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
2057 
2058 void
2059 MemoryManager::_AddTrackingInfo(void* allocation, size_t size,
2060 	AbstractTraceEntryWithStackTrace* traceEntry)
2061 {
2062 	_TrackingInfoFor(allocation, size)->Init(traceEntry);
2063 }
2064 
2065 #endif // SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
2066 
2067 
2068 RANGE_MARKER_FUNCTION_END(SlabMemoryManager)
2069