xref: /haiku/src/system/kernel/slab/MemoryManager.cpp (revision a5bf12376daeded4049521eb17a6cc41192250d9)
/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Distributed under the terms of the MIT License.
 */


#include "MemoryManager.h"

#include <algorithm>

#include <debug.h>
#include <kernel.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>
#include <vm/VMTranslationMap.h>

#include "ObjectCache.h"
#include "slab_private.h"


//#define TRACE_MEMORY_MANAGER
#ifdef TRACE_MEMORY_MANAGER
#	define TRACE(x...)	dprintf(x)
#else
#	define TRACE(x...)	do {} while (false)
#endif


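// Memory layout as implemented below: the manager hands out SLAB_AREA_SIZE
// byte areas, aligned to SLAB_AREA_SIZE, so that ROUNDDOWN() on any address
// inside an area recovers the area's base (and thus its Area header). Each
// area is divided into SLAB_META_CHUNKS_PER_AREA meta chunks of
// SLAB_CHUNK_SIZE_LARGE bytes; the first meta chunk is shorter, since the
// administrative structures (kAreaAdminSize) occupy its front. A meta chunk
// in use is carved uniformly into small, medium, or large chunks, which are
// the units that Allocate() and AllocateRaw() hand out.
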
static const char* const kSlabAreaName = "slab area";

static void* sAreaTableBuffer[1024];

mutex MemoryManager::sLock;
rw_lock MemoryManager::sAreaTableLock;
kernel_args* MemoryManager::sKernelArgs;
MemoryManager::AreaTable MemoryManager::sAreaTable;
MemoryManager::Area* MemoryManager::sFreeAreas;
int MemoryManager::sFreeAreaCount;
MemoryManager::MetaChunkList MemoryManager::sFreeCompleteMetaChunks;
MemoryManager::MetaChunkList MemoryManager::sFreeShortMetaChunks;
MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksSmall;
MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksMedium;
MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryCanWait;
MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryDontWait;
bool MemoryManager::sMaintenanceNeeded;


// #pragma mark - kernel tracing


#if SLAB_MEMORY_MANAGER_TRACING


//namespace SlabMemoryManagerCacheTracing {
struct MemoryManager::Tracing {

class MemoryManagerTraceEntry : public AbstractTraceEntry {
public:
	MemoryManagerTraceEntry()
	{
	}
};


class Allocate : public MemoryManagerTraceEntry {
public:
	Allocate(ObjectCache* cache, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fCache(cache),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc: cache: %p, flags: %#" B_PRIx32,
			fCache, fFlags);
	}

private:
	ObjectCache*	fCache;
	uint32			fFlags;
};


class Free : public MemoryManagerTraceEntry {
public:
	Free(void* address, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free: address: %p, flags: %#" B_PRIx32,
			fAddress, fFlags);
	}

private:
	void*	fAddress;
	uint32	fFlags;
};


class AllocateRaw : public MemoryManagerTraceEntry {
public:
	AllocateRaw(size_t size, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fSize(size),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc raw: size: %" B_PRIuSIZE
			", flags: %#" B_PRIx32, fSize, fFlags);
	}

private:
	size_t	fSize;
	uint32	fFlags;
};


class FreeRawOrReturnCache : public MemoryManagerTraceEntry {
public:
	FreeRawOrReturnCache(void* address, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free raw/return: address: %p, flags: %#"
			B_PRIx32, fAddress, fFlags);
	}

private:
	void*	fAddress;
	uint32	fFlags;
};


class AllocateArea : public MemoryManagerTraceEntry {
public:
	AllocateArea(Area* area, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fArea(area),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc area: flags: %#" B_PRIx32
			" -> %p", fFlags, fArea);
	}

private:
	Area*	fArea;
	uint32	fFlags;
};


class AddArea : public MemoryManagerTraceEntry {
public:
	AddArea(Area* area)
		:
		MemoryManagerTraceEntry(),
		fArea(area)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager add area: %p", fArea);
	}

private:
	Area*	fArea;
};


class FreeArea : public MemoryManagerTraceEntry {
public:
	FreeArea(Area* area, bool areaRemoved, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fArea(area),
		fFlags(flags),
		fRemoved(areaRemoved)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free area: %p%s, flags: %#" B_PRIx32,
			fArea, fRemoved ? " (removed)" : "", fFlags);
	}

private:
	Area*	fArea;
	uint32	fFlags;
	bool	fRemoved;
};


class AllocateMetaChunk : public MemoryManagerTraceEntry {
public:
	AllocateMetaChunk(MetaChunk* metaChunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc meta chunk: %#" B_PRIxADDR,
			fMetaChunk);
	}

private:
	addr_t	fMetaChunk;
};


class FreeMetaChunk : public MemoryManagerTraceEntry {
public:
	FreeMetaChunk(MetaChunk* metaChunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free meta chunk: %#" B_PRIxADDR,
			fMetaChunk);
	}

private:
	addr_t	fMetaChunk;
};


class AllocateChunk : public MemoryManagerTraceEntry {
public:
	AllocateChunk(size_t chunkSize, MetaChunk* metaChunk, Chunk* chunk)
		:
		MemoryManagerTraceEntry(),
		fChunkSize(chunkSize),
		fMetaChunk(metaChunk->chunkBase),
		fChunk(chunk - metaChunk->chunks)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc chunk: size: %" B_PRIuSIZE
			" -> meta chunk: %#" B_PRIxADDR ", chunk: %" B_PRIu32, fChunkSize,
			fMetaChunk, fChunk);
	}

private:
	size_t	fChunkSize;
	addr_t	fMetaChunk;
	uint32	fChunk;
};


class AllocateChunks : public MemoryManagerTraceEntry {
public:
	AllocateChunks(size_t chunkSize, uint32 chunkCount, MetaChunk* metaChunk,
		Chunk* chunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase),
		fChunkSize(chunkSize),
		fChunkCount(chunkCount),
		fChunk(chunk - metaChunk->chunks)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc chunks: size: %" B_PRIuSIZE
			", count %" B_PRIu32 " -> meta chunk: %#" B_PRIxADDR ", chunk: %"
			B_PRIu32, fChunkSize, fChunkCount, fMetaChunk, fChunk);
	}

private:
	addr_t	fMetaChunk;
	size_t	fChunkSize;
	uint32	fChunkCount;
	uint32	fChunk;
};


class FreeChunk : public MemoryManagerTraceEntry {
public:
	FreeChunk(MetaChunk* metaChunk, Chunk* chunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase),
		fChunk(chunk - metaChunk->chunks)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free chunk: meta chunk: %#" B_PRIxADDR
			", chunk: %" B_PRIu32, fMetaChunk, fChunk);
	}

private:
	addr_t	fMetaChunk;
	uint32	fChunk;
};


class Map : public MemoryManagerTraceEntry {
public:
	Map(addr_t address, size_t size, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fSize(size),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager map: %#" B_PRIxADDR ", size: %"
			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
	}

private:
	addr_t	fAddress;
	size_t	fSize;
	uint32	fFlags;
};


class Unmap : public MemoryManagerTraceEntry {
public:
	Unmap(addr_t address, size_t size, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fSize(size),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager unmap: %#" B_PRIxADDR ", size: %"
			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
	}

private:
	addr_t	fAddress;
	size_t	fSize;
	uint32	fFlags;
};


//}	// namespace SlabMemoryManagerCacheTracing
};	// struct MemoryManager::Tracing


//#	define T(x)	new(std::nothrow) SlabMemoryManagerCacheTracing::x
#	define T(x)	new(std::nothrow) MemoryManager::Tracing::x

#else
#	define T(x)
#endif	// SLAB_MEMORY_MANAGER_TRACING


// #pragma mark - MemoryManager


/*static*/ void
MemoryManager::Init(kernel_args* args)
{
	mutex_init(&sLock, "slab memory manager");
	rw_lock_init(&sAreaTableLock, "slab memory manager area table");
	sKernelArgs = args;

	new(&sFreeCompleteMetaChunks) MetaChunkList;
	new(&sFreeShortMetaChunks) MetaChunkList;
	new(&sPartialMetaChunksSmall) MetaChunkList;
	new(&sPartialMetaChunksMedium) MetaChunkList;

	new(&sAreaTable) AreaTable;
	sAreaTable.Resize(sAreaTableBuffer, sizeof(sAreaTableBuffer), true);
		// A bit hacky: The table now owns the memory. Since we never resize or
		// free it, that's not a problem, though.

	sFreeAreas = NULL;
	sFreeAreaCount = 0;
	sMaintenanceNeeded = false;
}


/*static*/ void
MemoryManager::InitPostArea()
{
	sKernelArgs = NULL;

	// Convert all areas to actual areas. This loop might look a bit weird, but
	// is necessary since creating the actual area involves memory allocations,
	// which in turn can change the situation.
	bool done;
	do {
		done = true;

		for (AreaTable::Iterator it = sAreaTable.GetIterator();
				Area* area = it.Next();) {
			if (area->vmArea == NULL) {
				_ConvertEarlyArea(area);
				done = false;
				break;
			}
		}
	} while (!done);

	// unmap and free unused pages
	if (sFreeAreas != NULL) {
		// Just "leak" all but the first of the free areas -- the VM will
		// automatically free all unclaimed memory.
		sFreeAreas->next = NULL;
		sFreeAreaCount = 1;

		Area* area = sFreeAreas;
		_ConvertEarlyArea(area);
		_UnmapFreeChunksEarly(area);
	}

	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		_UnmapFreeChunksEarly(area);
	}

	sMaintenanceNeeded = true;
		// might not be necessary, but doesn't harm

	add_debugger_command_etc("slab_area", &_DumpArea,
		"Dump information on a given slab area",
		"[ -c ] <area>\n"
		"Dump information on a given slab area specified by its base "
			"address.\n"
		"If \"-c\" is given, the chunks of all meta chunks are printed as "
			"well.\n", 0);
	add_debugger_command_etc("slab_areas", &_DumpAreas,
		"List all slab areas",
		"\n"
		"Lists all slab areas.\n", 0);
	add_debugger_command_etc("slab_meta_chunk", &_DumpMetaChunk,
		"Dump information on a given slab meta chunk",
		"<meta chunk>\n"
		"Dump information on a given slab meta chunk specified by its base "
			"or object address.\n", 0);
	add_debugger_command_etc("slab_meta_chunks", &_DumpMetaChunks,
		"List all non-full slab meta chunks",
		"[ -c ]\n"
		"Lists all non-full slab meta chunks.\n"
		"If \"-c\" is given, the chunks of all meta chunks are printed as "
			"well.\n", 0);
	add_debugger_command_etc("slab_raw_allocations", &_DumpRawAllocations,
		"List all raw allocations in slab areas",
		"\n"
		"Lists all raw allocations in slab areas.\n", 0);
}


/*static*/ status_t
MemoryManager::Allocate(ObjectCache* cache, uint32 flags, void*& _pages)
{
	// TODO: Support CACHE_UNLOCKED_PAGES!

	T(Allocate(cache, flags));

	size_t chunkSize = cache->slab_size;

	TRACE("MemoryManager::Allocate(%p, %#" B_PRIx32 "): chunkSize: %"
		B_PRIuSIZE "\n", cache, flags, chunkSize);

	MutexLocker locker(sLock);

	// allocate a chunk
	MetaChunk* metaChunk;
	Chunk* chunk;
	status_t error = _AllocateChunks(chunkSize, 1, flags, metaChunk, chunk);
	if (error != B_OK)
		return error;

	// map the chunk
	Area* area = metaChunk->GetArea();
	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);

	locker.Unlock();
	error = _MapChunk(area->vmArea, chunkAddress, chunkSize, 0, flags);
	locker.Lock();
	if (error != B_OK) {
		// something failed -- free the chunk
		_FreeChunk(area, metaChunk, chunk, chunkAddress, true, flags);
		return error;
	}

	chunk->reference = (addr_t)cache;
	_pages = (void*)chunkAddress;

	TRACE("MemoryManager::Allocate() done: %p (meta chunk: %d, chunk %d)\n",
		_pages, int(metaChunk - area->metaChunks),
		int(chunk - metaChunk->chunks));
	return B_OK;
}


/*static*/ void
MemoryManager::Free(void* pages, uint32 flags)
{
	TRACE("MemoryManager::Free(%p, %#" B_PRIx32 ")\n", pages, flags);

	T(Free(pages, flags));

	// get the area and the meta chunk
	Area* area = (Area*)ROUNDDOWN((addr_t)pages, SLAB_AREA_SIZE);
	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	ASSERT((addr_t)pages >= metaChunk->chunkBase);
	ASSERT(((addr_t)pages % metaChunk->chunkSize) == 0);

	// get the chunk
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
	Chunk* chunk = &metaChunk->chunks[chunkIndex];

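	// An allocated chunk's next/reference field holds an ObjectCache pointer
	// or a raw-allocation end address, never a link into this meta chunk's
	// own chunk array; a free-list-looking value here would indicate a
	// double free.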
	ASSERT(chunk->next != NULL);
	ASSERT(chunk->next < metaChunk->chunks
		|| chunk->next
			>= metaChunk->chunks + SLAB_SMALL_CHUNKS_PER_META_CHUNK);

	// and free it
	MutexLocker locker(sLock);
	_FreeChunk(area, metaChunk, chunk, (addr_t)pages, false, flags);
}


/*static*/ status_t
MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
{
	T(AllocateRaw(size, flags));

	size = ROUNDUP(size, SLAB_CHUNK_SIZE_SMALL);

	TRACE("MemoryManager::AllocateRaw(%" B_PRIuSIZE ", %#" B_PRIx32 ")\n", size,
		  flags);

	if (size > SLAB_CHUNK_SIZE_LARGE || (flags & CACHE_ALIGN_ON_SIZE) != 0) {
		// Requested size greater than a large chunk or an aligned allocation.
		// Allocate as an area.
		if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0)
			return B_WOULD_BLOCK;

		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification
			= (flags & CACHE_ALIGN_ON_SIZE) != 0
				? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		area_id area = create_area_etc(VMAddressSpace::KernelID(),
			"slab large raw allocation", size, B_FULL_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
					? CREATE_AREA_DONT_WAIT : 0)
				| CREATE_AREA_DONT_CLEAR,
			&virtualRestrictions, &physicalRestrictions, &_pages);
		return area >= 0 ? B_OK : area;
	}

	// determine chunk size (small or medium)
	size_t chunkSize = SLAB_CHUNK_SIZE_SMALL;
	uint32 chunkCount = size / SLAB_CHUNK_SIZE_SMALL;

	if (size % SLAB_CHUNK_SIZE_MEDIUM == 0) {
		chunkSize = SLAB_CHUNK_SIZE_MEDIUM;
		chunkCount = size / SLAB_CHUNK_SIZE_MEDIUM;
	}
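	// A size that is a multiple of SLAB_CHUNK_SIZE_MEDIUM is thus served with
	// fewer, larger chunks; everything else falls back to small chunks, which
	// always divide the size evenly, since it was rounded up to a multiple of
	// SLAB_CHUNK_SIZE_SMALL above.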

	MutexLocker locker(sLock);

	// allocate the chunks
	MetaChunk* metaChunk;
	Chunk* chunk;
	status_t error = _AllocateChunks(chunkSize, chunkCount, flags, metaChunk,
		chunk);
	if (error != B_OK)
		return error;

	// map the chunks
	Area* area = metaChunk->GetArea();
	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);

	locker.Unlock();
	error = _MapChunk(area->vmArea, chunkAddress, size, 0, flags);
	locker.Lock();
	if (error != B_OK) {
		// something failed -- free the chunks
		for (uint32 i = 0; i < chunkCount; i++)
			_FreeChunk(area, metaChunk, chunk + i, chunkAddress, true, flags);
		return error;
	}

	chunk->reference = (addr_t)chunkAddress + size - 1;
	_pages = (void*)chunkAddress;

	TRACE("MemoryManager::AllocateRaw() done: %p (meta chunk: %d, chunk %d)\n",
		_pages, int(metaChunk - area->metaChunks),
		int(chunk - metaChunk->chunks));
	return B_OK;
}


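// Note on the reference encoding relied upon here and in Free(): an
// ObjectCache-backed chunk stores the (even, pointer-aligned) ObjectCache
// address in chunk->reference, while a raw allocation stores the address of
// its last byte, which is always odd, since base and size are multiples of
// SLAB_CHUNK_SIZE_SMALL. The low bit therefore distinguishes the two cases;
// the interim value 1 marks a chunk claimed but not yet assigned (see
// _GetChunks()).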
/*static*/ ObjectCache*
MemoryManager::FreeRawOrReturnCache(void* pages, uint32 flags)
{
	TRACE("MemoryManager::FreeRawOrReturnCache(%p, %#" B_PRIx32 ")\n", pages,
		flags);

	T(FreeRawOrReturnCache(pages, flags));

	// get the area
	addr_t areaBase = ROUNDDOWN((addr_t)pages, SLAB_AREA_SIZE);

	ReadLocker readLocker(sAreaTableLock);
	Area* area = sAreaTable.Lookup(areaBase);
	readLocker.Unlock();

	if (area == NULL) {
		// Probably a large allocation. Look up the VM area.
		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
		addressSpace->ReadLock();
		VMArea* area = addressSpace->LookupArea((addr_t)pages);
		addressSpace->ReadUnlock();

		if (area != NULL && (addr_t)pages == area->Base())
			delete_area(area->id);
		else
			panic("freeing unknown block %p from area %p", pages, area);

		return NULL;
	}

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	ASSERT((addr_t)pages >= metaChunk->chunkBase);
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
	Chunk* chunk = &metaChunk->chunks[chunkIndex];

	addr_t reference = chunk->reference;
	if ((reference & 1) == 0)
		return (ObjectCache*)reference;

	// Seems we have a raw chunk allocation.
	ASSERT((addr_t)pages == _ChunkAddress(metaChunk, chunk));
	ASSERT(reference > (addr_t)pages);
	ASSERT(reference <= areaBase + SLAB_AREA_SIZE - 1);
	size_t size = reference - (addr_t)pages + 1;
	ASSERT((size % SLAB_CHUNK_SIZE_SMALL) == 0);

	// unmap the chunks
	_UnmapChunk(area->vmArea, (addr_t)pages, size, flags);

	// and free them
	MutexLocker locker(sLock);
	uint32 chunkCount = size / metaChunk->chunkSize;
	for (uint32 i = 0; i < chunkCount; i++)
		_FreeChunk(area, metaChunk, chunk + i, (addr_t)pages, true, flags);

	return NULL;
}


/*static*/ size_t
MemoryManager::AcceptableChunkSize(size_t size)
{
	if (size <= SLAB_CHUNK_SIZE_SMALL)
		return SLAB_CHUNK_SIZE_SMALL;
	if (size <= SLAB_CHUNK_SIZE_MEDIUM)
		return SLAB_CHUNK_SIZE_MEDIUM;
	return SLAB_CHUNK_SIZE_LARGE;
}


/*static*/ ObjectCache*
MemoryManager::GetAllocationInfo(void* address, size_t& _size)
{
	// get the area
	addr_t areaBase = ROUNDDOWN((addr_t)address, SLAB_AREA_SIZE);

	ReadLocker readLocker(sAreaTableLock);
	Area* area = sAreaTable.Lookup(areaBase);
	readLocker.Unlock();

	if (area == NULL) {
		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
		addressSpace->ReadLock();
		VMArea* area = addressSpace->LookupArea((addr_t)address);
		if (area != NULL && (addr_t)address == area->Base())
			_size = area->Size();
		else
			_size = 0;
		addressSpace->ReadUnlock();

		return NULL;
	}

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	ASSERT((addr_t)address >= metaChunk->chunkBase);
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);

	addr_t reference = metaChunk->chunks[chunkIndex].reference;
	if ((reference & 1) == 0) {
		ObjectCache* cache = (ObjectCache*)reference;
		_size = cache->object_size;
		return cache;
	}

	_size = reference - (addr_t)address + 1;
	return NULL;
}


/*static*/ ObjectCache*
MemoryManager::CacheForAddress(void* address)
{
	// get the area
	addr_t areaBase = ROUNDDOWN((addr_t)address, SLAB_AREA_SIZE);

	ReadLocker readLocker(sAreaTableLock);
	Area* area = sAreaTable.Lookup(areaBase);
	readLocker.Unlock();

	if (area == NULL)
		return NULL;

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	ASSERT((addr_t)address >= metaChunk->chunkBase);
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);

	addr_t reference = metaChunk->chunks[chunkIndex].reference;
	return (reference & 1) == 0 ? (ObjectCache*)reference : NULL;
}


/*static*/ void
MemoryManager::PerformMaintenance()
{
	MutexLocker locker(sLock);

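	// _AllocateArea() and _FreeArea() temporarily drop sLock, so the free
	// area count can change underneath us; the loop below re-checks until
	// the reserve is back in the target range of one to two areas.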
	while (sMaintenanceNeeded) {
		sMaintenanceNeeded = false;

		// We want to keep one or two areas as a reserve. This way we have at
		// least one area to use in situations when we aren't allowed to
		// allocate one and also avoid ping-pong effects.
		if (sFreeAreaCount > 0 && sFreeAreaCount <= 2)
			return;

		if (sFreeAreaCount == 0) {
			// try to allocate one
			Area* area;
			if (_AllocateArea(0, area) != B_OK)
				return;

			_push(sFreeAreas, area);
			if (++sFreeAreaCount > 2)
				sMaintenanceNeeded = true;
		} else {
			// free until we only have two free ones
			while (sFreeAreaCount > 2) {
				Area* area = _pop(sFreeAreas);
				_FreeArea(area, true, 0);
			}

			if (sFreeAreaCount == 0)
				sMaintenanceNeeded = true;
		}
	}
}


/*static*/ status_t
MemoryManager::_AllocateChunks(size_t chunkSize, uint32 chunkCount,
	uint32 flags, MetaChunk*& _metaChunk, Chunk*& _chunk)
{
	MetaChunkList* metaChunkList = NULL;
	if (chunkSize == SLAB_CHUNK_SIZE_SMALL) {
		metaChunkList = &sPartialMetaChunksSmall;
	} else if (chunkSize == SLAB_CHUNK_SIZE_MEDIUM) {
		metaChunkList = &sPartialMetaChunksMedium;
	} else if (chunkSize != SLAB_CHUNK_SIZE_LARGE) {
		panic("MemoryManager::_AllocateChunks(): Unsupported chunk size: %"
			B_PRIuSIZE, chunkSize);
		return B_BAD_VALUE;
	}

	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk))
		return B_OK;

	if (sFreeAreas != NULL) {
		_AddArea(_pop(sFreeAreas));
		sFreeAreaCount--;
		_RequestMaintenance();

		_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
		return B_OK;
	}

	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
		// We can't create an area with this limitation and we must not wait
		// for someone else to do it.
		return B_WOULD_BLOCK;
	}

	// We need to allocate a new area. Wait if someone else is already trying
	// to do the same.
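	// Two wait entries are kept so that a CACHE_DONT_WAIT_FOR_MEMORY request
	// never blocks on an area allocation that may itself wait for memory:
	// such requests only ever wait on sAllocationEntryDontWait.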
	while (true) {
		AllocationEntry* allocationEntry = NULL;
		if (sAllocationEntryDontWait != NULL) {
			allocationEntry = sAllocationEntryDontWait;
		} else if (sAllocationEntryCanWait != NULL
				&& (flags & CACHE_DONT_WAIT_FOR_MEMORY) == 0) {
			allocationEntry = sAllocationEntryCanWait;
		} else
			break;

		ConditionVariableEntry entry;
		allocationEntry->condition.Add(&entry);

		mutex_unlock(&sLock);
		entry.Wait();
		mutex_lock(&sLock);

		if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
				_chunk)) {
			return B_OK;
		}
	}

	// prepare the allocation entry others can wait on
	AllocationEntry*& allocationEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? sAllocationEntryDontWait : sAllocationEntryCanWait;

	AllocationEntry myResizeEntry;
	allocationEntry = &myResizeEntry;
	allocationEntry->condition.Init(metaChunkList, "wait for slab area");
	allocationEntry->thread = find_thread(NULL);

	Area* area;
	status_t error = _AllocateArea(flags, area);

	allocationEntry->condition.NotifyAll();
	allocationEntry = NULL;

	if (error != B_OK)
		return error;

	// Try again to get a meta chunk. Something might have been freed in the
	// meantime. We can free the area in this case.
	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk)) {
		_FreeArea(area, true, flags);
		return B_OK;
	}

	_AddArea(area);
	_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
	return B_OK;
}


/*static*/ bool
MemoryManager::_GetChunks(MetaChunkList* metaChunkList, size_t chunkSize,
	uint32 chunkCount, MetaChunk*& _metaChunk, Chunk*& _chunk)
{
	// the common and less complicated special case
	if (chunkCount == 1)
		return _GetChunk(metaChunkList, chunkSize, _metaChunk, _chunk);

	ASSERT(metaChunkList != NULL);

	// Iterate through the partial meta chunk list and try to find a free
	// range that is large enough.
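	// [firstFreeChunk, lastFreeChunk] brackets a run of chunks known to be
	// free and contiguous (maintained conservatively by _GetChunk() and
	// _FreeChunk()); multi-chunk allocations are carved from this range only.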
	MetaChunk* metaChunk = NULL;
	for (MetaChunkList::Iterator it = metaChunkList->GetIterator();
			(metaChunk = it.Next()) != NULL;) {
		if (metaChunk->firstFreeChunk + chunkCount - 1
				<= metaChunk->lastFreeChunk) {
			break;
		}
	}

	if (metaChunk == NULL) {
		// try to get a free meta chunk
		if ((SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize) / chunkSize >= chunkCount)
			metaChunk = sFreeShortMetaChunks.RemoveHead();
		if (metaChunk == NULL)
			metaChunk = sFreeCompleteMetaChunks.RemoveHead();

		if (metaChunk == NULL)
			return false;

		metaChunkList->Add(metaChunk);
		metaChunk->GetArea()->usedMetaChunkCount++;
		_PrepareMetaChunk(metaChunk, chunkSize);

		T(AllocateMetaChunk(metaChunk));
	}

	// pull the chunks out of the free list
	Chunk* firstChunk = metaChunk->chunks + metaChunk->firstFreeChunk;
	Chunk* lastChunk = firstChunk + (chunkCount - 1);
	Chunk** chunkPointer = &metaChunk->freeChunks;
	uint32 remainingChunks = chunkCount;
	while (remainingChunks > 0) {
		ASSERT_PRINT(chunkPointer, "remaining: %" B_PRIu32 "/%" B_PRIu32
			", area: %p, meta chunk: %" B_PRIdSSIZE "\n", remainingChunks,
			chunkCount, metaChunk->GetArea(),
			metaChunk - metaChunk->GetArea()->metaChunks);
		Chunk* chunk = *chunkPointer;
		if (chunk >= firstChunk && chunk <= lastChunk) {
			*chunkPointer = chunk->next;
			chunk->reference = 1;
			remainingChunks--;
		} else
			chunkPointer = &chunk->next;
	}

	// allocate the chunks
	metaChunk->usedChunkCount += chunkCount;
	if (metaChunk->usedChunkCount == metaChunk->chunkCount) {
		// meta chunk is full now -- remove it from its list
		if (metaChunkList != NULL)
			metaChunkList->Remove(metaChunk);
	}

	// update the free range
	metaChunk->firstFreeChunk += chunkCount;

	_chunk = firstChunk;
	_metaChunk = metaChunk;

	T(AllocateChunks(chunkSize, chunkCount, metaChunk, firstChunk));

	return true;
}


/*static*/ bool
MemoryManager::_GetChunk(MetaChunkList* metaChunkList, size_t chunkSize,
	MetaChunk*& _metaChunk, Chunk*& _chunk)
{
	MetaChunk* metaChunk = metaChunkList != NULL
		? metaChunkList->Head() : NULL;
	if (metaChunk == NULL) {
		// no partial meta chunk -- maybe there's a free one
		if (chunkSize == SLAB_CHUNK_SIZE_LARGE) {
			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
		} else {
			metaChunk = sFreeShortMetaChunks.RemoveHead();
			if (metaChunk == NULL)
				metaChunk = sFreeCompleteMetaChunks.RemoveHead();
			if (metaChunk != NULL)
				metaChunkList->Add(metaChunk);
		}

		if (metaChunk == NULL)
			return false;

		metaChunk->GetArea()->usedMetaChunkCount++;
		_PrepareMetaChunk(metaChunk, chunkSize);

		T(AllocateMetaChunk(metaChunk));
	}

	// allocate the chunk
	if (++metaChunk->usedChunkCount == metaChunk->chunkCount) {
		// meta chunk is full now -- remove it from its list
		if (metaChunkList != NULL)
			metaChunkList->Remove(metaChunk);
	}

	_chunk = _pop(metaChunk->freeChunks);
	_metaChunk = metaChunk;

	// update the free range
	uint32 chunkIndex = _chunk - metaChunk->chunks;
	if (chunkIndex >= metaChunk->firstFreeChunk
			&& chunkIndex <= metaChunk->lastFreeChunk) {
		if (chunkIndex - metaChunk->firstFreeChunk
				<= metaChunk->lastFreeChunk - chunkIndex) {
			metaChunk->firstFreeChunk = chunkIndex + 1;
		} else
			metaChunk->lastFreeChunk = chunkIndex - 1;
	}

	T(AllocateChunk(chunkSize, metaChunk, _chunk));

	return true;
}


/*static*/ void
MemoryManager::_FreeChunk(Area* area, MetaChunk* metaChunk, Chunk* chunk,
	addr_t chunkAddress, bool alreadyUnmapped, uint32 flags)
{
	// unmap the chunk
	if (!alreadyUnmapped) {
		mutex_unlock(&sLock);
		_UnmapChunk(area->vmArea, chunkAddress, metaChunk->chunkSize, flags);
		mutex_lock(&sLock);
	}

	T(FreeChunk(metaChunk, chunk));

	_push(metaChunk->freeChunks, chunk);

	uint32 chunkIndex = chunk - metaChunk->chunks;

	// free the meta chunk, if it is unused now
	ASSERT(metaChunk->usedChunkCount > 0);
	if (--metaChunk->usedChunkCount == 0) {
		T(FreeMetaChunk(metaChunk));

		// remove from partial meta chunk list
		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
			sPartialMetaChunksSmall.Remove(metaChunk);
		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
			sPartialMetaChunksMedium.Remove(metaChunk);

		// mark empty
		metaChunk->chunkSize = 0;

		// add to free list
		if (metaChunk == area->metaChunks)
			sFreeShortMetaChunks.Add(metaChunk, false);
		else
			sFreeCompleteMetaChunks.Add(metaChunk, false);

		// free the area, if it is unused now
		ASSERT(area->usedMetaChunkCount > 0);
		if (--area->usedMetaChunkCount == 0)
			_FreeArea(area, false, flags);
	} else if (metaChunk->usedChunkCount == metaChunk->chunkCount - 1) {
		// the meta chunk was full before -- add it back to its partial chunk
		// list
		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
			sPartialMetaChunksSmall.Add(metaChunk, false);
		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
			sPartialMetaChunksMedium.Add(metaChunk, false);

		metaChunk->firstFreeChunk = chunkIndex;
		metaChunk->lastFreeChunk = chunkIndex;
	} else {
		// extend the free range, if the chunk adjoins
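		// The scans below walk past the freed chunk to absorb neighboring
		// chunks that are already free, so the range grows as far as it can
		// in that direction.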
		if (chunkIndex + 1 == metaChunk->firstFreeChunk) {
			uint32 firstFree = chunkIndex;
			for (; firstFree > 0; firstFree--) {
				Chunk* previousChunk = &metaChunk->chunks[firstFree - 1];
				if (!_IsChunkFree(metaChunk, previousChunk))
					break;
			}
			metaChunk->firstFreeChunk = firstFree;
		} else if (chunkIndex == (uint32)metaChunk->lastFreeChunk + 1) {
			uint32 lastFree = chunkIndex;
			for (; lastFree + 1 < metaChunk->chunkCount; lastFree++) {
				Chunk* nextChunk = &metaChunk->chunks[lastFree + 1];
				if (!_IsChunkFree(metaChunk, nextChunk))
					break;
			}
			metaChunk->lastFreeChunk = lastFree;
		}
	}
}


/*static*/ void
MemoryManager::_PrepareMetaChunk(MetaChunk* metaChunk, size_t chunkSize)
{
	Area* area = metaChunk->GetArea();

	if (metaChunk == area->metaChunks) {
		// the first meta chunk is shorter, since the area's administrative
		// structures occupy its front
		size_t unusableSize = ROUNDUP(kAreaAdminSize, chunkSize);
		metaChunk->chunkBase = (addr_t)area + unusableSize;
		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE - unusableSize;
	}

	metaChunk->chunkSize = chunkSize;
	metaChunk->chunkCount = metaChunk->totalSize / chunkSize;
	metaChunk->usedChunkCount = 0;

	metaChunk->freeChunks = NULL;
	for (int32 i = metaChunk->chunkCount - 1; i >= 0; i--)
		_push(metaChunk->freeChunks, metaChunk->chunks + i);

	metaChunk->firstFreeChunk = 0;
	metaChunk->lastFreeChunk = metaChunk->chunkCount - 1;
}


/*static*/ void
MemoryManager::_AddArea(Area* area)
{
	T(AddArea(area));

	// add the area to the hash table
	WriteLocker writeLocker(sAreaTableLock);
	sAreaTable.InsertUnchecked(area);
	writeLocker.Unlock();

	// add the area's meta chunks to the free lists
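	// metaChunks[0] is the short one (the Area header sits at its front),
	// hence the separate sFreeShortMetaChunks list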
	sFreeShortMetaChunks.Add(&area->metaChunks[0]);
	for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++)
		sFreeCompleteMetaChunks.Add(&area->metaChunks[i]);
}


/*static*/ status_t
MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
{
	TRACE("MemoryManager::_AllocateArea(%#" B_PRIx32 ")\n", flags);

	ASSERT((flags & CACHE_DONT_LOCK_KERNEL_SPACE) == 0);

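	// The manager lock is dropped while the area is created and mapped,
	// since the VM calls below can themselves allocate memory and block;
	// callers compensate by re-checking the chunk lists afterwards.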
	mutex_unlock(&sLock);

	size_t pagesNeededToMap = 0;
	Area* area;
	VMArea* vmArea = NULL;

	if (sKernelArgs == NULL) {
		// create an area
		uint32 areaCreationFlags = (flags & CACHE_PRIORITY_VIP) != 0
			? CREATE_AREA_PRIORITY_VIP : 0;
		area_id areaID = vm_create_null_area(B_SYSTEM_TEAM, kSlabAreaName,
			(void**)&area, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
			areaCreationFlags);
		if (areaID < 0) {
			mutex_lock(&sLock);
			return areaID;
		}

		// map the memory for the administrative structure
		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
		VMTranslationMap* translationMap = addressSpace->TranslationMap();

		pagesNeededToMap = translationMap->MaxPagesNeededToMap((addr_t)area,
			(addr_t)area + SLAB_AREA_SIZE - 1);

		vmArea = VMAreaHash::Lookup(areaID);
		status_t error = _MapChunk(vmArea, (addr_t)area, kAreaAdminSize,
			pagesNeededToMap, flags);
		if (error != B_OK) {
			delete_area(areaID);
			mutex_lock(&sLock);
			return error;
		}

		dprintf("slab memory manager: created area %p (%" B_PRId32 ")\n", area,
			areaID);
	} else {
		// no areas yet -- allocate raw memory
		area = (Area*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
			SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			SLAB_AREA_SIZE);
		if (area == NULL) {
			mutex_lock(&sLock);
			return B_NO_MEMORY;
		}

		TRACE("MemoryManager::_AllocateArea(): allocated early area %p\n",
			area);
	}

	// init the area structure
	area->vmArea = vmArea;
	area->reserved_memory_for_mapping = pagesNeededToMap * B_PAGE_SIZE;
	area->usedMetaChunkCount = 0;
	area->fullyMapped = vmArea == NULL;

	// init the meta chunks
	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
		MetaChunk* metaChunk = area->metaChunks + i;
		metaChunk->chunkSize = 0;
		metaChunk->chunkBase = (addr_t)area + i * SLAB_CHUNK_SIZE_LARGE;
		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE;
			// Note: chunkBase and totalSize aren't correct for the first
			// meta chunk. They will be set in _PrepareMetaChunk().
		metaChunk->chunkCount = 0;
		metaChunk->usedChunkCount = 0;
		metaChunk->freeChunks = NULL;
	}

	mutex_lock(&sLock);
	_area = area;

	T(AllocateArea(area, flags));

	return B_OK;
}


/*static*/ void
MemoryManager::_FreeArea(Area* area, bool areaRemoved, uint32 flags)
{
	TRACE("MemoryManager::_FreeArea(%p, %#" B_PRIx32 ")\n", area, flags);

	T(FreeArea(area, areaRemoved, flags));

	ASSERT(area->usedMetaChunkCount == 0);

	if (!areaRemoved) {
		// remove the area's meta chunks from the free lists
		ASSERT(area->metaChunks[0].usedChunkCount == 0);
		sFreeShortMetaChunks.Remove(&area->metaChunks[0]);

		for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			ASSERT(area->metaChunks[i].usedChunkCount == 0);
			sFreeCompleteMetaChunks.Remove(&area->metaChunks[i]);
		}

		// remove the area from the hash table
		WriteLocker writeLocker(sAreaTableLock);
		sAreaTable.RemoveUnchecked(area);
		writeLocker.Unlock();
	}

	// We want to keep one or two free areas as a reserve.
	if (sFreeAreaCount <= 1) {
		_push(sFreeAreas, area);
		sFreeAreaCount++;
		return;
	}

	if (area->vmArea == NULL || (flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
		// This is either early in the boot process or we aren't allowed to
		// delete the area now.
		_push(sFreeAreas, area);
		sFreeAreaCount++;
		_RequestMaintenance();
		return;
	}

	mutex_unlock(&sLock);

	dprintf("slab memory manager: deleting area %p (%" B_PRId32 ")\n", area,
		area->vmArea->id);

	size_t memoryToUnreserve = area->reserved_memory_for_mapping;
	delete_area(area->vmArea->id);
	vm_unreserve_memory(memoryToUnreserve);

	mutex_lock(&sLock);
}


/*static*/ status_t
MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
	size_t reserveAdditionalMemory, uint32 flags)
{
	TRACE("MemoryManager::_MapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
		")\n", vmArea, address, size);

	T(Map(address, size, flags));

	if (vmArea == NULL) {
		// everything is mapped anyway
		return B_OK;
	}

	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
	VMTranslationMap* translationMap = addressSpace->TranslationMap();

	// reserve memory for the chunk
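	// Two levels of reservation: vm_try_reserve_memory() charges the size
	// against the global memory accounting (waiting up to one second, unless
	// CACHE_DONT_WAIT_FOR_MEMORY forbids it), then the physical pages for the
	// chunk plus any page tables needed to map it are set aside below.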
	int priority = (flags & CACHE_PRIORITY_VIP) != 0
		? VM_PRIORITY_VIP : VM_PRIORITY_SYSTEM;
	size_t reservedMemory = size + reserveAdditionalMemory;
	status_t error = vm_try_reserve_memory(size, priority,
		(flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? 0 : 1000000);
	if (error != B_OK)
		return error;

	// reserve the pages we need now
	size_t reservedPages = size / B_PAGE_SIZE
		+ translationMap->MaxPagesNeededToMap(address, address + size - 1);
	vm_page_reservation reservation;
	if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0) {
		if (!vm_page_try_reserve_pages(&reservation, reservedPages, priority)) {
			vm_unreserve_memory(reservedMemory);
			return B_WOULD_BLOCK;
		}
	} else
		vm_page_reserve_pages(&reservation, reservedPages, priority);

	VMCache* cache = vm_area_get_locked_cache(vmArea);

	// map the pages
	translationMap->Lock();

	addr_t areaOffset = address - vmArea->Base();
	addr_t endAreaOffset = areaOffset + size;
	for (size_t offset = areaOffset; offset < endAreaOffset;
			offset += B_PAGE_SIZE) {
		vm_page* page = vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
		cache->InsertPage(page, offset);

		page->wired_count++;
		atomic_add(&gMappedPagesCount, 1);
		DEBUG_PAGE_ACCESS_END(page);

		translationMap->Map(vmArea->Base() + offset,
			page->physical_page_number * B_PAGE_SIZE,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			vmArea->MemoryType(), &reservation);
	}

	translationMap->Unlock();

	cache->ReleaseRefAndUnlock();

	vm_page_unreserve_pages(&reservation);

	return B_OK;
}


/*static*/ status_t
MemoryManager::_UnmapChunk(VMArea* vmArea, addr_t address, size_t size,
	uint32 flags)
{
	T(Unmap(address, size, flags));

	if (vmArea == NULL)
		return B_ERROR;

	TRACE("MemoryManager::_UnmapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
		")\n", vmArea, address, size);

	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
	VMTranslationMap* translationMap = addressSpace->TranslationMap();
	VMCache* cache = vm_area_get_locked_cache(vmArea);

	// unmap the pages
	translationMap->Lock();
	translationMap->Unmap(address, address + size - 1);
	atomic_add(&gMappedPagesCount, -(size / B_PAGE_SIZE));
	translationMap->Unlock();

	// free the pages
	addr_t areaPageOffset = (address - vmArea->Base()) / B_PAGE_SIZE;
	addr_t areaPageEndOffset = areaPageOffset + size / B_PAGE_SIZE;
	VMCachePagesTree::Iterator it = cache->pages.GetIterator(
		areaPageOffset, true, true);
	while (vm_page* page = it.Next()) {
		if (page->cache_offset >= areaPageEndOffset)
			break;

		DEBUG_PAGE_ACCESS_START(page);

		page->wired_count--;

		cache->RemovePage(page);
			// the iterator is remove-safe
		vm_page_free(cache, page);
	}

	cache->ReleaseRefAndUnlock();

	vm_unreserve_memory(size);

	return B_OK;
}


/*static*/ void
MemoryManager::_UnmapFreeChunksEarly(Area* area)
{
	if (!area->fullyMapped)
		return;

	TRACE("MemoryManager::_UnmapFreeChunksEarly(%p)\n", area);

	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
		MetaChunk* metaChunk = area->metaChunks + i;
		if (metaChunk->chunkSize == 0) {
			// meta chunk is free -- unmap it completely
			if (i == 0) {
				_UnmapChunk(area->vmArea, (addr_t)area + kAreaAdminSize,
					SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize, 0);
			} else {
				_UnmapChunk(area->vmArea,
					(addr_t)area + i * SLAB_CHUNK_SIZE_LARGE,
					SLAB_CHUNK_SIZE_LARGE, 0);
			}
		} else {
			// unmap free chunks
			for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
					chunk = chunk->next) {
				_UnmapChunk(area->vmArea, _ChunkAddress(metaChunk, chunk),
					metaChunk->chunkSize, 0);
			}

			// The first meta chunk might have space before its first chunk.
			if (i == 0) {
				addr_t unusedStart = (addr_t)area + kAreaAdminSize;
				if (unusedStart < metaChunk->chunkBase) {
					_UnmapChunk(area->vmArea, unusedStart,
						metaChunk->chunkBase - unusedStart, 0);
				}
			}
		}
	}

	area->fullyMapped = false;
}


/*static*/ void
MemoryManager::_ConvertEarlyArea(Area* area)
{
	void* address = area;
	area_id areaID = create_area(kSlabAreaName, &address, B_EXACT_ADDRESS,
		SLAB_AREA_SIZE, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (areaID < 0)
		panic("out of memory");

	area->vmArea = VMAreaHash::Lookup(areaID);
}


/*static*/ void
MemoryManager::_RequestMaintenance()
{
	if ((sFreeAreaCount > 0 && sFreeAreaCount <= 2) || sMaintenanceNeeded)
		return;

	sMaintenanceNeeded = true;
	request_memory_manager_maintenance();
}


/*static*/ int
MemoryManager::_DumpRawAllocations(int argc, char** argv)
{
	kprintf("area        meta chunk  chunk  base        size (KB)\n");

	size_t totalSize = 0;

	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			MetaChunk* metaChunk = area->metaChunks + i;
			if (metaChunk->chunkSize == 0)
				continue;
			for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
				Chunk* chunk = metaChunk->chunks + k;

				// skip free chunks
				if (_IsChunkFree(metaChunk, chunk))
					continue;

				addr_t reference = chunk->reference;
				if ((reference & 1) == 0 || reference == 1)
					continue;

				addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
				size_t size = reference - chunkAddress + 1;
				totalSize += size;

				kprintf("%p  %10" B_PRId32 "  %5" B_PRIu32 "  %p  %9"
					B_PRIuSIZE "\n", area, i, k, (void*)chunkAddress,
					size / 1024);
			}
		}
	}

	kprintf("total:                                     %9" B_PRIuSIZE "\n",
		totalSize / 1024);

	return 0;
}


/*static*/ void
MemoryManager::_PrintMetaChunkTableHeader(bool printChunks)
{
	if (printChunks)
		kprintf("chunk        base       cache  object size  cache name\n");
	else
		kprintf("chunk        base\n");
}


/*static*/ void
MemoryManager::_DumpMetaChunk(MetaChunk* metaChunk, bool printChunks,
	bool printHeader)
{
	if (printHeader)
		_PrintMetaChunkTableHeader(printChunks);

	const char* type = "empty";
	if (metaChunk->chunkSize != 0) {
		switch (metaChunk->chunkSize) {
			case SLAB_CHUNK_SIZE_SMALL:
				type = "small";
				break;
			case SLAB_CHUNK_SIZE_MEDIUM:
				type = "medium";
				break;
			case SLAB_CHUNK_SIZE_LARGE:
				type = "large";
				break;
		}
	}

	int metaChunkIndex = metaChunk - metaChunk->GetArea()->metaChunks;
	kprintf("%5d  %p  --- %6s meta chunk", metaChunkIndex,
		(void*)metaChunk->chunkBase, type);
	if (metaChunk->chunkSize != 0) {
		kprintf(": %4u/%4u used, %-4u-%4u free ------------\n",
			metaChunk->usedChunkCount, metaChunk->chunkCount,
			metaChunk->firstFreeChunk, metaChunk->lastFreeChunk);
	} else
		kprintf(" --------------------------------------------\n");

	if (metaChunk->chunkSize == 0 || !printChunks)
		return;

	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
		Chunk* chunk = metaChunk->chunks + i;

		// skip free chunks
		if (_IsChunkFree(metaChunk, chunk))
			continue;

		addr_t reference = chunk->reference;
		if ((reference & 1) == 0) {
			ObjectCache* cache = (ObjectCache*)reference;
			kprintf("%5" B_PRIu32 "  %p  %p  %11" B_PRIuSIZE "  %s\n", i,
				(void*)_ChunkAddress(metaChunk, chunk), cache,
				cache != NULL ? cache->object_size : 0,
				cache != NULL ? cache->name : "");
		} else if (reference != 1) {
			kprintf("%5" B_PRIu32 "  %p  raw allocation up to %p\n", i,
				(void*)_ChunkAddress(metaChunk, chunk), (void*)reference);
		}
	}
}


/*static*/ int
MemoryManager::_DumpMetaChunk(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	uint64 address;
	if (!evaluate_debug_expression(argv[1], &address, false))
		return 0;

	Area* area = (Area*)(addr_t)ROUNDDOWN(address, SLAB_AREA_SIZE);

	MetaChunk* metaChunk;
	if ((addr_t)address >= (addr_t)area->metaChunks
		&& (addr_t)address
			< (addr_t)(area->metaChunks + SLAB_META_CHUNKS_PER_AREA)) {
		metaChunk = (MetaChunk*)(addr_t)address;
	} else {
		metaChunk = area->metaChunks
			+ (address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE;
	}

	_DumpMetaChunk(metaChunk, true, true);

	return 0;
}


/*static*/ void
MemoryManager::_DumpMetaChunks(const char* name, MetaChunkList& metaChunkList,
	bool printChunks)
{
	kprintf("%s:\n", name);

	for (MetaChunkList::Iterator it = metaChunkList.GetIterator();
			MetaChunk* metaChunk = it.Next();) {
		_DumpMetaChunk(metaChunk, printChunks, false);
	}
}


/*static*/ int
MemoryManager::_DumpMetaChunks(int argc, char** argv)
{
	bool printChunks = argc > 1 && strcmp(argv[1], "-c") == 0;

	_PrintMetaChunkTableHeader(printChunks);
	_DumpMetaChunks("free complete", sFreeCompleteMetaChunks, printChunks);
	_DumpMetaChunks("free short", sFreeShortMetaChunks, printChunks);
	_DumpMetaChunks("partial small", sPartialMetaChunksSmall, printChunks);
	_DumpMetaChunks("partial medium", sPartialMetaChunksMedium, printChunks);

	return 0;
}


/*static*/ int
MemoryManager::_DumpArea(int argc, char** argv)
{
	bool printChunks = false;

	int argi = 1;
	while (argi < argc) {
		if (argv[argi][0] != '-')
			break;
		const char* arg = argv[argi++];
		if (strcmp(arg, "-c") == 0) {
			printChunks = true;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	if (argi + 1 != argc) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	uint64 address;
	if (!evaluate_debug_expression(argv[argi], &address, false))
		return 0;

	address = ROUNDDOWN(address, SLAB_AREA_SIZE);

	Area* area = (Area*)(addr_t)address;

	for (uint32 k = 0; k < SLAB_META_CHUNKS_PER_AREA; k++) {
		MetaChunk* metaChunk = area->metaChunks + k;
		_DumpMetaChunk(metaChunk, printChunks, k == 0);
	}

	return 0;
}


/*static*/ int
MemoryManager::_DumpAreas(int argc, char** argv)
{
	kprintf("      base        area   meta      small   medium  large\n");

	size_t totalTotalSmall = 0;
	size_t totalUsedSmall = 0;
	size_t totalTotalMedium = 0;
	size_t totalUsedMedium = 0;
	size_t totalUsedLarge = 0;
	uint32 areaCount = 0;

	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		areaCount++;

		// sum up the free/used counts for the chunk sizes
		int totalSmall = 0;
		int usedSmall = 0;
		int totalMedium = 0;
		int usedMedium = 0;
		int usedLarge = 0;

		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			MetaChunk* metaChunk = area->metaChunks + i;
			if (metaChunk->chunkSize == 0)
				continue;

			switch (metaChunk->chunkSize) {
				case SLAB_CHUNK_SIZE_SMALL:
					totalSmall += metaChunk->chunkCount;
					usedSmall += metaChunk->usedChunkCount;
					break;
				case SLAB_CHUNK_SIZE_MEDIUM:
					totalMedium += metaChunk->chunkCount;
					usedMedium += metaChunk->usedChunkCount;
					break;
				case SLAB_CHUNK_SIZE_LARGE:
					usedLarge += metaChunk->usedChunkCount;
					break;
			}
		}

		kprintf("%p  %p  %2u/%2u  %4d/%4d  %3d/%3d  %5d\n",
			area, area->vmArea, area->usedMetaChunkCount,
			SLAB_META_CHUNKS_PER_AREA, usedSmall, totalSmall, usedMedium,
			totalMedium, usedLarge);

		totalTotalSmall += totalSmall;
		totalUsedSmall += usedSmall;
		totalTotalMedium += totalMedium;
		totalUsedMedium += usedMedium;
		totalUsedLarge += usedLarge;
	}

	kprintf("%d free area%s:\n", sFreeAreaCount,
		sFreeAreaCount == 1 ? "" : "s");
	for (Area* area = sFreeAreas; area != NULL; area = area->next) {
		areaCount++;
		kprintf("%p  %p\n", area, area->vmArea);
	}

	kprintf("total usage:\n");
	kprintf("  small:    %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedSmall,
		totalTotalSmall);
	kprintf("  medium:   %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedMedium,
		totalTotalMedium);
	kprintf("  large:    %" B_PRIuSIZE "\n", totalUsedLarge);
	kprintf("  memory:   %" B_PRIuSIZE "/%" B_PRIuSIZE " KB\n",
		(totalUsedSmall * SLAB_CHUNK_SIZE_SMALL
			+ totalUsedMedium * SLAB_CHUNK_SIZE_MEDIUM
			+ totalUsedLarge * SLAB_CHUNK_SIZE_LARGE) / 1024,
		areaCount * SLAB_AREA_SIZE / 1024);
	kprintf("  overhead: %" B_PRIuSIZE " KB\n",
		areaCount * kAreaAdminSize / 1024);

	return 0;
}
1782