xref: /haiku/src/system/kernel/vm/VMKernelAddressSpace.cpp (revision 5b189b0e1e2f51f367bfcb126b2f00a3702f352d)
/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "VMKernelAddressSpace.h"

#include <stdlib.h>

#include <KernelExport.h>

#include <heap.h>
#include <slab/Slab.h>
#include <thread.h>
#include <vm/vm.h>
#include <vm/VMArea.h>


//#define TRACE_VM
#ifdef TRACE_VM
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


//#define PARANOIA_CHECKS
#ifdef PARANOIA_CHECKS
#	define PARANOIA_CHECK_STRUCTURES()	_CheckStructures()
#else
#	define PARANOIA_CHECK_STRUCTURES()	do {} while (false)
#endif


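// Returns the index of the most significant set bit of "value", i.e.
// floor(log2(value)), or -1 if value is 0 (binary logarithm, hence "ld").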
static int
ld(size_t value)
{
	int index = -1;
	while (value > 0) {
		value >>= 1;
		index++;
	}

	return index;
}


/*!	Verifies that an area with the given aligned base and size fits into
	the spot defined by base and limit and checks for overflows.
	\param base Minimum base address valid for the area.
	\param alignedBase The base address of the range to check.
	\param size The size of the area.
	\param limit The last (inclusive) address of the range to check.
*/
static inline bool
is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
{
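	// The middle term guards against overflow (wrap-around) of
	// alignedBase + (size - 1).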
	return (alignedBase >= base && alignedBase + (size - 1) > alignedBase
		&& alignedBase + (size - 1) <= limit);
}


// #pragma mark - VMKernelAddressSpace


VMKernelAddressSpace::VMKernelAddressSpace(team_id id, addr_t base, size_t size)
	:
	VMAddressSpace(id, base, size, "kernel address space"),
	fAreaObjectCache(NULL),
	fRangesObjectCache(NULL)
{
}


VMKernelAddressSpace::~VMKernelAddressSpace()
{
	panic("deleting the kernel aspace!\n");
}


status_t
VMKernelAddressSpace::InitObject()
{
	fAreaObjectCache = create_object_cache("kernel areas",
		sizeof(VMKernelArea), 0, NULL, NULL, NULL);
	if (fAreaObjectCache == NULL)
		return B_NO_MEMORY;

	fRangesObjectCache = create_object_cache("kernel address ranges",
		sizeof(Range), 0, NULL, NULL, NULL);
	if (fRangesObjectCache == NULL)
		return B_NO_MEMORY;

	// create the free lists
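	// One free list per power-of-two size class, from a single page up to the
	// size of the whole address space (cf. _FreeListInsertRange()).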
	size_t size = fEndAddress - fBase + 1;
	fFreeListCount = ld(size) - PAGE_SHIFT + 1;
	fFreeLists = new(std::nothrow) RangeFreeList[fFreeListCount];
	if (fFreeLists == NULL)
		return B_NO_MEMORY;

	Range* range = new(fRangesObjectCache, 0) Range(fBase, size,
		Range::RANGE_FREE);
	if (range == NULL)
		return B_NO_MEMORY;

	_InsertRange(range);

	TRACE("VMKernelAddressSpace::InitObject(): address range: %#" B_PRIxADDR
		" - %#" B_PRIxADDR ", free lists: %d\n", fBase, fEndAddress,
		fFreeListCount);

	return B_OK;
}


inline VMArea*
VMKernelAddressSpace::FirstArea() const
{
	Range* range = fRangeList.Head();
	while (range != NULL && range->type != Range::RANGE_AREA)
		range = fRangeList.GetNext(range);
	return range != NULL ? range->area : NULL;
}


inline VMArea*
VMKernelAddressSpace::NextArea(VMArea* _area) const
{
	Range* range = static_cast<VMKernelArea*>(_area)->Range();
	do {
		range = fRangeList.GetNext(range);
	} while (range != NULL && range->type != Range::RANGE_AREA);
	return range != NULL ? range->area : NULL;
}


VMArea*
VMKernelAddressSpace::CreateArea(const char* name, uint32 wiring,
	uint32 protection, uint32 allocationFlags)
{
	return VMKernelArea::Create(this, name, wiring, protection,
		fAreaObjectCache, allocationFlags);
}


void
VMKernelAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::DeleteArea(%p)\n", _area);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	object_cache_delete(fAreaObjectCache, area);
}


//! You must hold the address space's read lock.
VMArea*
VMKernelAddressSpace::LookupArea(addr_t address) const
{
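	// FindClosest(address, true) returns the range with the greatest base
	// that is <= address, i.e. the only range that could contain it.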
	Range* range = fRangeTree.FindClosest(address, true);
	if (range == NULL || range->type != Range::RANGE_AREA)
		return NULL;

	VMKernelArea* area = range->area;
	return area->ContainsAddress(address) ? area : NULL;
}


/*!	This inserts the area you pass into the address space.
	It will also set the "_address" argument to its base address when
	the call succeeds.
	You need to hold the VMAddressSpace write lock.
*/
status_t
VMKernelAddressSpace::InsertArea(VMArea* _area, size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 allocationFlags, void** _address)
{
	TRACE("VMKernelAddressSpace::InsertArea(%p, %" B_PRIu32 ", %#" B_PRIxSIZE
		", %p \"%s\")\n", addressRestrictions->address,
		addressRestrictions->address_specification, size, _area, _area->name);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);

	Range* range;
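	// Only when an exact address was requested may the area take over a
	// reserved range (third argument to _AllocateRange()).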
	status_t error = _AllocateRange(addressRestrictions, size,
		addressRestrictions->address_specification == B_EXACT_ADDRESS,
		allocationFlags, range);
	if (error != B_OK)
		return error;

	range->type = Range::RANGE_AREA;
	range->area = area;
	area->SetRange(range);
	area->SetBase(range->base);
	area->SetSize(range->size);

	if (_address != NULL)
		*_address = (void*)area->Base();
	fFreeSpace -= area->Size();

	PARANOIA_CHECK_STRUCTURES();

	return B_OK;
}


//! You must hold the address space's write lock.
void
VMKernelAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::RemoveArea(%p)\n", _area);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);

	_FreeRange(area->Range(), allocationFlags);

	fFreeSpace += area->Size();

	PARANOIA_CHECK_STRUCTURES();
}


bool
VMKernelAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
{
	Range* range = static_cast<VMKernelArea*>(area)->Range();

	if (newSize <= range->size)
		return true;

	Range* nextRange = fRangeList.GetNext(range);
	if (nextRange == NULL || nextRange->type == Range::RANGE_AREA)
		return false;

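	// A following reserved range may only be consumed if the reservation
	// originally started at or below the area's base (reserved.base keeps the
	// base the range was reserved with, cf. ReserveAddressRange()).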
	if (nextRange->type == Range::RANGE_RESERVED
		&& nextRange->reserved.base > range->base) {
		return false;
	}

	// TODO: If there is free space after a reserved range (or vice versa), it
	// could be used as well.
	return newSize - range->size <= nextRange->size;
}


status_t
VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ResizeArea(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	Range* nextRange = fRangeList.GetNext(range);

	if (newSize < range->size) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// a free range is following -- just enlarge it
			_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size += range->size - newSize;
			nextRange->base = range->base + newSize;
			_FreeListInsertRange(nextRange, nextRange->size);
		} else {
			// no free range following -- we need to allocate a new one and
			// insert it
			nextRange = new(fRangesObjectCache, allocationFlags) Range(
				range->base + newSize, range->size - newSize,
				Range::RANGE_FREE);
			if (nextRange == NULL)
				return B_NO_MEMORY;
			_InsertRange(nextRange);
		}
	} else {
		if (nextRange == NULL
			|| (nextRange->type == Range::RANGE_RESERVED
				&& nextRange->reserved.base > range->base)) {
			return B_BAD_VALUE;
		}
		// TODO: If there is free space after a reserved range (or vice versa),
		// it could be used as well.
		size_t sizeDiff = newSize - range->size;
		if (sizeDiff > nextRange->size)
			return B_BAD_VALUE;

		if (sizeDiff == nextRange->size) {
			// The next range is completely covered -- remove and delete it.
			_RemoveRange(nextRange);
			object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
		} else {
			// The next range is only partially covered -- shrink it.
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size -= sizeDiff;
			nextRange->base = range->base + newSize;
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListInsertRange(nextRange, nextRange->size);
		}
	}

	range->size = newSize;
	area->SetSize(newSize);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::ShrinkAreaHead(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ShrinkAreaHead(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	if (newSize > range->size)
		return B_BAD_VALUE;

	Range* previousRange = fRangeList.GetPrevious(range);

	size_t sizeDiff = range->size - newSize;
	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		// the previous range is free -- just enlarge it
		_FreeListRemoveRange(previousRange, previousRange->size);
		previousRange->size += sizeDiff;
		_FreeListInsertRange(previousRange, previousRange->size);
		range->base += sizeDiff;
		range->size = newSize;
	} else {
		// no free range before -- we need to allocate a new one and
		// insert it
		previousRange = new(fRangesObjectCache, allocationFlags) Range(
			range->base, sizeDiff, Range::RANGE_FREE);
		if (previousRange == NULL)
			return B_NO_MEMORY;
		range->base += sizeDiff;
		range->size = newSize;
		_InsertRange(previousRange);
	}

	area->SetBase(range->base);
	area->SetSize(range->size);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::ShrinkAreaTail(VMArea* area, size_t newSize,
	uint32 allocationFlags)
{
	return ResizeArea(area, newSize, allocationFlags);
}


status_t
VMKernelAddressSpace::ReserveAddressRange(size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 flags, uint32 allocationFlags, void** _address)
{
	TRACE("VMKernelAddressSpace::ReserveAddressRange(%p, %" B_PRIu32 ", %#"
		B_PRIxSIZE ", %#" B_PRIx32 ")\n", addressRestrictions->address,
		addressRestrictions->address_specification, size, flags);

	// Don't allow range reservations if the address space is about to be
	// deleted.
	if (fDeleting)
		return B_BAD_TEAM_ID;

	Range* range;
	status_t error = _AllocateRange(addressRestrictions, size, false,
		allocationFlags, range);
	if (error != B_OK)
		return error;

	range->type = Range::RANGE_RESERVED;
	range->reserved.base = range->base;
	range->reserved.flags = flags;

	if (_address != NULL)
		*_address = (void*)range->base;

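	// Reserving a range takes a reference to the address space (cf. the Put()
	// calls in UnreserveAddressRange() and UnreserveAllAddressRanges()).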
	Get();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::UnreserveAddressRange(%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ")\n", address, size);

	// Don't allow range unreservations if the address space is about to be
	// deleted. UnreserveAllAddressRanges() must be used instead.
	if (fDeleting)
		return B_BAD_TEAM_ID;

	// search range list and remove any matching reserved ranges
	addr_t endAddress = address + (size - 1);
	Range* range = fRangeTree.FindClosest(address, false);
	while (range != NULL && range->base + (range->size - 1) <= endAddress) {
		// Get the next range for the iteration -- we need to skip free ranges,
		// since _FreeRange() might join them with the current range and delete
		// them.
		Range* nextRange = fRangeList.GetNext(range);
		while (nextRange != NULL && nextRange->type == Range::RANGE_FREE)
			nextRange = fRangeList.GetNext(nextRange);

		if (range->type == Range::RANGE_RESERVED) {
			_FreeRange(range, allocationFlags);
			Put();
		}

		range = nextRange;
	}

	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


void
VMKernelAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
{
	Range* range = fRangeList.Head();
	while (range != NULL) {
		// Get the next range for the iteration -- we need to skip free ranges,
		// since _FreeRange() might join them with the current range and delete
		// them.
		Range* nextRange = fRangeList.GetNext(range);
		while (nextRange != NULL && nextRange->type == Range::RANGE_FREE)
			nextRange = fRangeList.GetNext(nextRange);

		if (range->type == Range::RANGE_RESERVED) {
			_FreeRange(range, allocationFlags);
			Put();
		}

		range = nextRange;
	}

	PARANOIA_CHECK_STRUCTURES();
}


void
VMKernelAddressSpace::Dump() const
{
	VMAddressSpace::Dump();

	kprintf("area_list:\n");

	for (RangeList::ConstIterator it = fRangeList.GetIterator();
			Range* range = it.Next();) {
		if (range->type != Range::RANGE_AREA)
			continue;

		VMKernelArea* area = range->area;
		kprintf(" area %" B_PRId32 ": ", area->id);
		kprintf("base_addr = %#" B_PRIxADDR " ", area->Base());
		kprintf("size = %#" B_PRIxSIZE " ", area->Size());
		kprintf("name = '%s' ", area->name);
		kprintf("protection = %#" B_PRIx32 "\n", area->protection);
	}
}


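// Free ranges are binned by size: list i holds ranges whose size is in
// [2^i, 2^(i+1)) pages, i.e. index = floor(log2(size / B_PAGE_SIZE)).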
inline void
VMKernelAddressSpace::_FreeListInsertRange(Range* range, size_t size)
{
	TRACE("    VMKernelAddressSpace::_FreeListInsertRange(%p (%#" B_PRIxADDR
		", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, range->base,
		range->size, range->type, size, ld(size) - PAGE_SHIFT);

	fFreeLists[ld(size) - PAGE_SHIFT].Add(range);
}


inline void
VMKernelAddressSpace::_FreeListRemoveRange(Range* range, size_t size)
{
	TRACE("    VMKernelAddressSpace::_FreeListRemoveRange(%p (%#" B_PRIxADDR
		", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, range->base,
		range->size, range->type, size, ld(size) - PAGE_SHIFT);

	fFreeLists[ld(size) - PAGE_SHIFT].Remove(range);
}


void
VMKernelAddressSpace::_InsertRange(Range* range)
{
	TRACE("    VMKernelAddressSpace::_InsertRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// insert at the correct position in the range list
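	// (FindClosest(base, true) yields our predecessor in address order -- the
	// range with the greatest base <= ours -- or NULL, in which case we
	// become the new list head.)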
	Range* insertBeforeRange = fRangeTree.FindClosest(range->base, true);
	fRangeList.Insert(
		insertBeforeRange != NULL
			? fRangeList.GetNext(insertBeforeRange) : fRangeList.Head(),
		range);

	// insert into tree
	fRangeTree.Insert(range);

	// insert in the free ranges list, if the range is free
	if (range->type == Range::RANGE_FREE)
		_FreeListInsertRange(range, range->size);
}


void
VMKernelAddressSpace::_RemoveRange(Range* range)
{
	TRACE("    VMKernelAddressSpace::_RemoveRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// remove from the range tree and the range list
	fRangeTree.Remove(range);
	fRangeList.Remove(range);

	// if it is a free range, also remove it from the free list
	if (range->type == Range::RANGE_FREE)
		_FreeListRemoveRange(range, range->size);
}


status_t
VMKernelAddressSpace::_AllocateRange(
	const virtual_address_restrictions* addressRestrictions,
	size_t size, bool allowReservedRange, uint32 allocationFlags,
	Range*& _range)
{
	TRACE("  VMKernelAddressSpace::_AllocateRange(address: %p, size: %#"
		B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", reserved allowed: %d)\n",
		addressRestrictions->address, size,
		addressRestrictions->address_specification, allowReservedRange);

	// prepare size, alignment and the base address for the range search
	addr_t address = (addr_t)addressRestrictions->address;
	size = ROUNDUP(size, B_PAGE_SIZE);
	size_t alignment = addressRestrictions->alignment != 0
		? addressRestrictions->alignment : B_PAGE_SIZE;

	switch (addressRestrictions->address_specification) {
		case B_EXACT_ADDRESS:
		{
			if (address % B_PAGE_SIZE != 0)
				return B_BAD_VALUE;
			break;
		}

		case B_BASE_ADDRESS:
			address = ROUNDUP(address, B_PAGE_SIZE);
			break;

		case B_ANY_KERNEL_BLOCK_ADDRESS:
			// align the memory to the next power of two of the size
			while (alignment < size)
				alignment <<= 1;

			// fall through...

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
			address = fBase;
			break;

		default:
			return B_BAD_VALUE;
	}

	// find a range
	Range* range = _FindFreeRange(address, size, alignment,
		addressRestrictions->address_specification, allowReservedRange,
		address);
	if (range == NULL) {
		return addressRestrictions->address_specification == B_EXACT_ADDRESS
			? B_BAD_VALUE : B_NO_MEMORY;
	}

	TRACE("  VMKernelAddressSpace::_AllocateRange() found range: %p (%#"
		B_PRIxADDR ", %#" B_PRIxSIZE ", %d)\n", range, range->base, range->size,
		range->type);

	// We have found a range. It might not be a perfect fit, in which case
	// we have to split the range.
	size_t rangeSize = range->size;

	if (address == range->base) {
		// allocation at the beginning of the range
		if (range->size > size) {
			// only partial -- split the range
			Range* leftOverRange = new(fRangesObjectCache, allocationFlags)
				Range(address + size, range->size - size, range);
			if (leftOverRange == NULL)
				return B_NO_MEMORY;

			range->size = size;
			_InsertRange(leftOverRange);
		}
	} else if (address + size == range->base + range->size) {
		// allocation at the end of the range -- split the range
		Range* leftOverRange = new(fRangesObjectCache, allocationFlags) Range(
			range->base, range->size - size, range);
		if (leftOverRange == NULL)
			return B_NO_MEMORY;

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange);
	} else {
		// allocation in the middle of the range -- split the range in three
		Range* leftOverRange1 = new(fRangesObjectCache, allocationFlags) Range(
			range->base, address - range->base, range);
		if (leftOverRange1 == NULL)
			return B_NO_MEMORY;
		Range* leftOverRange2 = new(fRangesObjectCache, allocationFlags) Range(
			address + size, range->size - size - leftOverRange1->size, range);
		if (leftOverRange2 == NULL) {
			object_cache_delete(fRangesObjectCache, leftOverRange1,
				allocationFlags);
			return B_NO_MEMORY;
		}

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange1);
		_InsertRange(leftOverRange2);
	}

	// If the range is a free range, remove it from the respective free list.
	if (range->type == Range::RANGE_FREE)
		_FreeListRemoveRange(range, rangeSize);

	IncrementChangeCount();

	TRACE("  VMKernelAddressSpace::_AllocateRange() -> %p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ")\n", range, range->base, range->size);

	_range = range;
	return B_OK;
}


VMKernelAddressSpace::Range*
VMKernelAddressSpace::_FindFreeRange(addr_t start, size_t size,
	size_t alignment, uint32 addressSpec, bool allowReservedRange,
	addr_t& _foundAddress)
{
	TRACE("  VMKernelAddressSpace::_FindFreeRange(start: %#" B_PRIxADDR
		", size: %#" B_PRIxSIZE ", alignment: %#" B_PRIxSIZE ", addressSpec: %#"
		B_PRIx32 ", reserved allowed: %d)\n", start, size, alignment,
		addressSpec, allowReservedRange);

	switch (addressSpec) {
		case B_BASE_ADDRESS:
		{
			// We have to iterate through the range list starting at the given
			// address. This is the most inefficient case.
			Range* range = fRangeTree.FindClosest(start, true);
			while (range != NULL) {
				if (range->type == Range::RANGE_FREE) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						_foundAddress = alignedBase;
						return range;
					}
				}
				range = fRangeList.GetNext(range);
			}

			// We didn't find a free spot in the requested range, so we'll
			// try again without any restrictions.
			start = fBase;
			addressSpec = B_ANY_ADDRESS;
			// fall through...
		}

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		{
			// We want to allocate from the first non-empty free list whose
			// ranges are all guaranteed to be large enough. Finding a free
			// range is O(1), unless there are constraints (minimum base
			// address, alignment).
			int freeListIndex = ld((size * 2 - 1) >> PAGE_SHIFT);
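			// ld((size * 2 - 1) >> PAGE_SHIFT) == ceil(log2(size in pages)),
			// i.e. the lowest list index whose ranges are all >= size.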

			for (int32 i = freeListIndex; i < fFreeListCount; i++) {
				RangeFreeList& freeList = fFreeLists[i];
				if (freeList.IsEmpty())
					continue;

				for (RangeFreeList::Iterator it = freeList.GetIterator();
						Range* range = it.Next();) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						_foundAddress = alignedBase;
						return range;
					}
				}
			}

			if (!allowReservedRange)
				return NULL;

			// We haven't found any free ranges, but we're supposed to look
			// for reserved ones, too. Iterate through the range list starting
			// at the given address.
			Range* range = fRangeTree.FindClosest(start, true);
			while (range != NULL) {
				if (range->type == Range::RANGE_RESERVED) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						// allocation from the back might be preferred
						// -- adjust the base accordingly
						if ((range->reserved.flags & RESERVED_AVOID_BASE)
								!= 0) {
							alignedBase = ROUNDDOWN(
								range->base + (range->size - size), alignment);
						}

						_foundAddress = alignedBase;
						return range;
					}
				}
				range = fRangeList.GetNext(range);
			}

			return NULL;
		}

		case B_EXACT_ADDRESS:
		{
			Range* range = fRangeTree.FindClosest(start, true);
			TRACE("    B_EXACT_ADDRESS: range: %p\n", range);
			if (range == NULL || range->type == Range::RANGE_AREA
				|| range->base + (range->size - 1) < start + (size - 1)) {
				// TODO: Support allocating if the area range covers multiple
				// free and reserved ranges!
				TRACE("    -> no suitable range\n");
				return NULL;
			}

			if (range->type != Range::RANGE_FREE && !allowReservedRange) {
				TRACE("    -> reserved range not allowed\n");
				return NULL;
			}

			_foundAddress = start;
			return range;
		}

		default:
			return NULL;
	}
}


void
VMKernelAddressSpace::_FreeRange(Range* range, uint32 allocationFlags)
{
	TRACE("  VMKernelAddressSpace::_FreeRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// Check whether one or both of the neighboring ranges are free already,
	// and join them, if so.
	Range* previousRange = fRangeList.GetPrevious(range);
	Range* nextRange = fRangeList.GetNext(range);

	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join them all -- keep the first one, delete the others
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			_RemoveRange(nextRange);
			previousRange->size += range->size + nextRange->size;
			object_cache_delete(fRangesObjectCache, range, allocationFlags);
			object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		} else {
			// join with the previous range only, delete the supplied one
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			previousRange->size += range->size;
			object_cache_delete(fRangesObjectCache, range, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		}
	} else {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join with the next range and delete it
			_RemoveRange(nextRange);
			range->size += nextRange->size;
			object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
		}

		// mark the range free and add it to the respective free list
		range->type = Range::RANGE_FREE;
		_FreeListInsertRange(range, range->size);
	}

	IncrementChangeCount();
}


#ifdef PARANOIA_CHECKS

void
VMKernelAddressSpace::_CheckStructures() const
{
	// general tree structure check
	fRangeTree.CheckTree();

	// check range list and tree
	size_t spaceSize = fEndAddress - fBase + 1;
	addr_t nextBase = fBase;
	Range* previousRange = NULL;
	int previousRangeType = Range::RANGE_AREA;
	uint64 freeRanges = 0;

	RangeList::ConstIterator listIt = fRangeList.GetIterator();
	RangeTree::ConstIterator treeIt = fRangeTree.GetIterator();
	while (true) {
		Range* range = listIt.Next();
		Range* treeRange = treeIt.Next();
		if (range != treeRange) {
			panic("VMKernelAddressSpace::_CheckStructures(): list/tree range "
				"mismatch: %p vs %p", range, treeRange);
		}
		if (range == NULL)
			break;

		if (range->base != nextBase) {
			panic("VMKernelAddressSpace::_CheckStructures(): range base %#"
				B_PRIxADDR ", expected: %#" B_PRIxADDR, range->base, nextBase);
		}

		if (range->size == 0) {
			panic("VMKernelAddressSpace::_CheckStructures(): empty range %p",
				range);
		}

		if (range->size % B_PAGE_SIZE != 0) {
			panic("VMKernelAddressSpace::_CheckStructures(): range %p (%#"
				B_PRIxADDR ", %#" B_PRIxSIZE ") not page aligned", range,
				range->base, range->size);
		}

		if (range->size > spaceSize - (range->base - fBase)) {
			panic("VMKernelAddressSpace::_CheckStructures(): range too large: "
				"(%#" B_PRIxADDR ", %#" B_PRIxSIZE "), address space end: %#"
				B_PRIxADDR, range->base, range->size, fEndAddress);
		}

		if (range->type == Range::RANGE_FREE) {
			freeRanges++;

			if (previousRangeType == Range::RANGE_FREE) {
				panic("VMKernelAddressSpace::_CheckStructures(): adjoining "
					"free ranges: %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE
					"), %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ")", previousRange,
					previousRange->base, previousRange->size, range,
					range->base, range->size);
			}
		}

		previousRange = range;
		nextBase = range->base + range->size;
		previousRangeType = range->type;
	}

	if (nextBase - 1 != fEndAddress) {
		panic("VMKernelAddressSpace::_CheckStructures(): space not fully "
			"covered by ranges: last: %#" B_PRIxADDR ", expected %#" B_PRIxADDR,
			nextBase - 1, fEndAddress);
	}

	// check free lists
	uint64 freeListRanges = 0;
	for (int i = 0; i < fFreeListCount; i++) {
		RangeFreeList& freeList = fFreeLists[i];
		if (freeList.IsEmpty())
			continue;

		for (RangeFreeList::Iterator it = freeList.GetIterator();
				Range* range = it.Next();) {
			if (range->type != Range::RANGE_FREE) {
				panic("VMKernelAddressSpace::_CheckStructures(): non-free "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"free list %d", range, range->base, range->size,
					range->type, i);
			}

			if (fRangeTree.Find(range->base) != range) {
				panic("VMKernelAddressSpace::_CheckStructures(): unknown "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"free list %d", range, range->base, range->size,
					range->type, i);
			}

			if (ld(range->size) - PAGE_SHIFT != i) {
				panic("VMKernelAddressSpace::_CheckStructures(): "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"wrong free list %d", range, range->base, range->size,
					range->type, i);
			}

			freeListRanges++;
		}
	}
}

#endif	// PARANOIA_CHECKS