// xref: /haiku/src/system/kernel/vm/VMKernelAddressSpace.cpp (revision 481f986b59e7782458dcc5fe98ad59a57480e5db)
/*
 * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "VMKernelAddressSpace.h"

#include <stdlib.h>

#include <KernelExport.h>

#include <heap.h>
#include <thread.h>
#include <vm/vm.h>
#include <vm/VMArea.h>


//#define TRACE_VM
#ifdef TRACE_VM
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


//#define PARANOIA_CHECKS
#ifdef PARANOIA_CHECKS
#	define PARANOIA_CHECK_STRUCTURES()	_CheckStructures()
#else
#	define PARANOIA_CHECK_STRUCTURES()	do {} while (false)
#endif


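/*!	Returns the index of the most significant set bit of \a value, i.e.
	floor(log2(value)), or -1 if \a value is 0. Used to compute free list
	indices from range sizes.
*/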
static int
ld(size_t value)
{
	int index = -1;
	while (value > 0) {
		value >>= 1;
		index++;
	}

	return index;
}


/*!	Verifies that an area with the given aligned base and size fits into
	the spot defined by base and limit and checks for overflows.
	\param base Minimum base address valid for the area.
	\param alignedBase The base address of the range to check.
	\param size The size of the area.
	\param limit The last (inclusive) address of the range to check.
*/
static inline bool
is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
{
	return (alignedBase >= base && alignedBase + (size - 1) > alignedBase
		&& alignedBase + (size - 1) <= limit);
}


// #pragma mark - VMKernelAddressSpace
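
/*!	The kernel address space is segmented into a contiguous sequence of Range
	objects of type RANGE_FREE, RANGE_RESERVED, or RANGE_AREA. All ranges are
	kept both in a list sorted by base address (fRangeList) and in a tree
	keyed by base address (fRangeTree) for fast lookups; free ranges are
	additionally kept in power-of-two size-bucketed free lists (fFreeLists)
	so that a sufficiently large free range can be found cheaply.
*/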


VMKernelAddressSpace::VMKernelAddressSpace(team_id id, addr_t base, size_t size)
	:
	VMAddressSpace(id, base, size, "kernel address space")
{
}


VMKernelAddressSpace::~VMKernelAddressSpace()
{
	panic("deleting the kernel aspace!\n");
}


status_t
VMKernelAddressSpace::InitObject()
{
	// create the free lists
	size_t size = fEndAddress - fBase + 1;
	fFreeListCount = ld(size) - PAGE_SHIFT + 1;
	fFreeLists = new(std::nothrow) RangeFreeList[fFreeListCount];
	if (fFreeLists == NULL)
		return B_NO_MEMORY;

	Range* range = new(std::nothrow) Range(fBase, size, Range::RANGE_FREE);
	if (range == NULL)
		return B_NO_MEMORY;

	_InsertRange(range);

	TRACE("VMKernelAddressSpace::InitObject(): address range: %#" B_PRIxADDR
		" - %#" B_PRIxADDR ", free lists: %d\n", fBase, fEndAddress,
		fFreeListCount);

	return B_OK;
}


inline VMArea*
VMKernelAddressSpace::FirstArea() const
{
	Range* range = fRangeList.Head();
	while (range != NULL && range->type != Range::RANGE_AREA)
		range = fRangeList.GetNext(range);
	return range != NULL ? range->area : NULL;
}


inline VMArea*
VMKernelAddressSpace::NextArea(VMArea* _area) const
{
	Range* range = static_cast<VMKernelArea*>(_area)->Range();
	do {
		range = fRangeList.GetNext(range);
	} while (range != NULL && range->type != Range::RANGE_AREA);
	return range != NULL ? range->area : NULL;
}


VMArea*
VMKernelAddressSpace::CreateArea(const char* name, uint32 wiring,
	uint32 protection, uint32 allocationFlags)
{
	return VMKernelArea::Create(this, name, wiring, protection,
		allocationFlags);
}


void
VMKernelAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::DeleteArea(%p)\n", _area);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	area->~VMKernelArea();
	free_etc(area, allocationFlags);
}


//! You must hold the address space's read lock.
VMArea*
VMKernelAddressSpace::LookupArea(addr_t address) const
{
	Range* range = fRangeTree.FindClosest(address, true);
	if (range == NULL || range->type != Range::RANGE_AREA)
		return NULL;

	VMKernelArea* area = range->area;
	return area->ContainsAddress(address) ? area : NULL;
}


/*!	Inserts the given area into the address space.
	On success, the "_address" argument is set to the area's base address.
	You need to hold the VMAddressSpace write lock.
*/
status_t
VMKernelAddressSpace::InsertArea(VMArea* _area, size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 allocationFlags, void** _address)
{
	TRACE("VMKernelAddressSpace::InsertArea(%p, %" B_PRIu32 ", %#" B_PRIxSIZE
		", %p \"%s\")\n", addressRestrictions->address,
		addressRestrictions->address_specification, size, _area, _area->name);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);

	Range* range;
	status_t error = _AllocateRange(addressRestrictions, size,
		addressRestrictions->address_specification == B_EXACT_ADDRESS,
		allocationFlags, range);
	if (error != B_OK)
		return error;

	range->type = Range::RANGE_AREA;
	range->area = area;
	area->SetRange(range);
	area->SetBase(range->base);
	area->SetSize(range->size);

	if (_address != NULL)
		*_address = (void*)area->Base();
	fFreeSpace -= area->Size();

	PARANOIA_CHECK_STRUCTURES();

	return B_OK;
}


//! You must hold the address space's write lock.
void
VMKernelAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::RemoveArea(%p)\n", _area);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);

	_FreeRange(area->Range(), allocationFlags);

	fFreeSpace += area->Size();

	PARANOIA_CHECK_STRUCTURES();
}


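/*!	Returns whether the area could be resized to \a newSize. Shrinking is
	always possible. Growing requires the range immediately following the
	area to be large enough and either free or a reserved range whose
	recorded reservation base lies at or below the area's base (i.e. the
	area was presumably carved out of that reservation).
*/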
bool
VMKernelAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
{
	Range* range = static_cast<VMKernelArea*>(area)->Range();

	if (newSize <= range->size)
		return true;

	Range* nextRange = fRangeList.GetNext(range);
	if (nextRange == NULL || nextRange->type == Range::RANGE_AREA)
		return false;

	if (nextRange->type == Range::RANGE_RESERVED
		&& nextRange->reserved.base > range->base) {
		return false;
	}

	// TODO: If there is free space after a reserved range (or vice versa), it
	// could be used as well.
	return newSize - range->size <= nextRange->size;
}


status_t
VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ResizeArea(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	Range* nextRange = fRangeList.GetNext(range);

	if (newSize < range->size) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// a free range is following -- just enlarge it
			_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size += range->size - newSize;
			nextRange->base = range->base + newSize;
			_FreeListInsertRange(nextRange, nextRange->size);
		} else {
			// no free range following -- we need to allocate a new one and
			// insert it
			nextRange = new(malloc_flags(allocationFlags)) Range(
				range->base + newSize, range->size - newSize,
				Range::RANGE_FREE);
			if (nextRange == NULL)
				return B_NO_MEMORY;
			_InsertRange(nextRange);
		}
	} else {
		if (nextRange == NULL
			|| (nextRange->type == Range::RANGE_RESERVED
				&& nextRange->reserved.base > range->base)) {
			return B_BAD_VALUE;
		}
		// TODO: If there is free space after a reserved range (or vice versa),
		// it could be used as well.
		size_t sizeDiff = newSize - range->size;
		if (sizeDiff > nextRange->size)
			return B_BAD_VALUE;

		if (sizeDiff == nextRange->size) {
			// The next range is completely covered -- remove and delete it.
			_RemoveRange(nextRange);
			free_etc(nextRange, allocationFlags);
		} else {
			// The next range is only partially covered -- shrink it.
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size -= sizeDiff;
			nextRange->base = range->base + newSize;
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListInsertRange(nextRange, nextRange->size);
		}
	}

	range->size = newSize;
	area->SetSize(newSize);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::ShrinkAreaHead(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ShrinkAreaHead(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	if (newSize > range->size)
		return B_BAD_VALUE;

	Range* previousRange = fRangeList.GetPrevious(range);

	size_t sizeDiff = range->size - newSize;
	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		// the previous range is free -- just enlarge it
		_FreeListRemoveRange(previousRange, previousRange->size);
		previousRange->size += sizeDiff;
		_FreeListInsertRange(previousRange, previousRange->size);
		range->base += sizeDiff;
		range->size = newSize;
	} else {
		// no free range before -- we need to allocate a new one and
		// insert it
		previousRange = new(malloc_flags(allocationFlags)) Range(range->base,
			sizeDiff, Range::RANGE_FREE);
		if (previousRange == NULL)
			return B_NO_MEMORY;
		range->base += sizeDiff;
		range->size = newSize;
		_InsertRange(previousRange);
	}

	area->SetBase(range->base);
	area->SetSize(range->size);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::ShrinkAreaTail(VMArea* area, size_t newSize,
	uint32 allocationFlags)
{
	return ResizeArea(area, newSize, allocationFlags);
}


status_t
VMKernelAddressSpace::ReserveAddressRange(size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 flags, uint32 allocationFlags, void** _address)
{
	TRACE("VMKernelAddressSpace::ReserveAddressRange(%p, %" B_PRIu32 ", %#"
		B_PRIxSIZE ", %#" B_PRIx32 ")\n", addressRestrictions->address,
		addressRestrictions->address_specification, size, flags);

	// Don't allow range reservations if the address space is about to be
	// deleted.
	if (fDeleting)
		return B_BAD_TEAM_ID;

	Range* range;
	status_t error = _AllocateRange(addressRestrictions, size, false,
		allocationFlags, range);
	if (error != B_OK)
		return error;

	range->type = Range::RANGE_RESERVED;
	range->reserved.base = range->base;
	range->reserved.flags = flags;

	if (_address != NULL)
		*_address = (void*)range->base;

	Get();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::UnreserveAddressRange(%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ")\n", address, size);

	// Don't allow range unreservations if the address space is about to be
	// deleted. UnreserveAllAddressRanges() must be used instead.
	if (fDeleting)
		return B_BAD_TEAM_ID;

	// search the range list and remove any matching reserved ranges
	addr_t endAddress = address + (size - 1);
	Range* range = fRangeTree.FindClosest(address, false);
	while (range != NULL && range->base + (range->size - 1) <= endAddress) {
		// Get the next range for the iteration -- we need to skip free ranges,
		// since _FreeRange() might join them with the current range and delete
		// them.
		Range* nextRange = fRangeList.GetNext(range);
		while (nextRange != NULL && nextRange->type == Range::RANGE_FREE)
			nextRange = fRangeList.GetNext(nextRange);

		if (range->type == Range::RANGE_RESERVED) {
			_FreeRange(range, allocationFlags);
			Put();
		}

		range = nextRange;
	}

	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


void
VMKernelAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
{
	Range* range = fRangeList.Head();
	while (range != NULL) {
		// Get the next range for the iteration -- we need to skip free ranges,
		// since _FreeRange() might join them with the current range and delete
		// them.
		Range* nextRange = fRangeList.GetNext(range);
		while (nextRange != NULL && nextRange->type == Range::RANGE_FREE)
			nextRange = fRangeList.GetNext(nextRange);

		if (range->type == Range::RANGE_RESERVED) {
			_FreeRange(range, allocationFlags);
			Put();
		}

		range = nextRange;
	}

	PARANOIA_CHECK_STRUCTURES();
}


void
VMKernelAddressSpace::Dump() const
{
	VMAddressSpace::Dump();

	kprintf("area_list:\n");

	for (RangeList::ConstIterator it = fRangeList.GetIterator();
			Range* range = it.Next();) {
		if (range->type != Range::RANGE_AREA)
			continue;

		VMKernelArea* area = range->area;
		kprintf(" area %" B_PRId32 ": ", area->id);
		kprintf("base_addr = %#" B_PRIxADDR " ", area->Base());
		kprintf("size = %#" B_PRIxSIZE " ", area->Size());
		kprintf("name = '%s' ", area->name);
		kprintf("protection = %#" B_PRIx32 "\n", area->protection);
	}
}


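/*!	Adds \a range to the free list matching its size class. Free list i holds
	free ranges whose size lies in [2^(PAGE_SHIFT + i), 2^(PAGE_SHIFT + i + 1)),
	i.e. the index is ld(size) - PAGE_SHIFT.
*/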
inline void
VMKernelAddressSpace::_FreeListInsertRange(Range* range, size_t size)
{
	TRACE("    VMKernelAddressSpace::_FreeListInsertRange(%p (%#" B_PRIxADDR
		", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, range->base,
		range->size, range->type, size, ld(size) - PAGE_SHIFT);

	fFreeLists[ld(size) - PAGE_SHIFT].Add(range);
}


inline void
VMKernelAddressSpace::_FreeListRemoveRange(Range* range, size_t size)
{
	TRACE("    VMKernelAddressSpace::_FreeListRemoveRange(%p (%#" B_PRIxADDR
		", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, range->base,
		range->size, range->type, size, ld(size) - PAGE_SHIFT);

	fFreeLists[ld(size) - PAGE_SHIFT].Remove(range);
}


void
VMKernelAddressSpace::_InsertRange(Range* range)
{
	TRACE("    VMKernelAddressSpace::_InsertRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// insert at the correct position in the range list
	Range* insertBeforeRange = fRangeTree.FindClosest(range->base, true);
	fRangeList.Insert(
		insertBeforeRange != NULL
			? fRangeList.GetNext(insertBeforeRange) : fRangeList.Head(),
		range);

	// insert into tree
	fRangeTree.Insert(range);

	// insert in the free ranges list, if the range is free
	if (range->type == Range::RANGE_FREE)
		_FreeListInsertRange(range, range->size);
}


void
VMKernelAddressSpace::_RemoveRange(Range* range)
{
	TRACE("    VMKernelAddressSpace::_RemoveRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// remove from tree and range list
	fRangeTree.Remove(range);
	fRangeList.Remove(range);

	// if it is a free range, also remove it from the free list
	if (range->type == Range::RANGE_FREE)
		_FreeListRemoveRange(range, range->size);
}


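/*!	Allocates a range of \a size bytes matching the given address
	restrictions, carving it out of an existing free range (or a reserved
	one, if \a allowReservedRange is \c true). The enclosing range is split
	as necessary and, if free, removed from its free list; its type is left
	unchanged, so the caller is expected to set it and fill in the
	type-specific fields. The resulting range is returned in \a _range.
*/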
status_t
VMKernelAddressSpace::_AllocateRange(
	const virtual_address_restrictions* addressRestrictions,
	size_t size, bool allowReservedRange, uint32 allocationFlags,
	Range*& _range)
{
	TRACE("  VMKernelAddressSpace::_AllocateRange(address: %p, size: %#"
		B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", reserved allowed: %d)\n",
		addressRestrictions->address, size,
		addressRestrictions->address_specification, allowReservedRange);

	// prepare size, alignment and the base address for the range search
	addr_t address = (addr_t)addressRestrictions->address;
	size = ROUNDUP(size, B_PAGE_SIZE);
	size_t alignment = addressRestrictions->alignment != 0
		? addressRestrictions->alignment : B_PAGE_SIZE;

	switch (addressRestrictions->address_specification) {
		case B_EXACT_ADDRESS:
		{
			if (address % B_PAGE_SIZE != 0)
				return B_BAD_VALUE;
			break;
		}

		case B_BASE_ADDRESS:
			address = ROUNDUP(address, B_PAGE_SIZE);
			break;

		case B_ANY_KERNEL_BLOCK_ADDRESS:
			// align the memory to the next power of two of the size
			while (alignment < size)
				alignment <<= 1;

			// fall through...

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
			address = fBase;
			// TODO: remove this again when vm86 mode is moved into the kernel
			// completely (currently needs a userland address space!)
			if (address == USER_BASE)
				address = USER_BASE_ANY;
			break;

		default:
			return B_BAD_VALUE;
	}

	// find a range
	Range* range = _FindFreeRange(address, size, alignment,
		addressRestrictions->address_specification, allowReservedRange,
		address);
	if (range == NULL) {
		return addressRestrictions->address_specification == B_EXACT_ADDRESS
			? B_BAD_VALUE : B_NO_MEMORY;
	}

	TRACE("  VMKernelAddressSpace::_AllocateRange() found range: %p (%#"
		B_PRIxADDR ", %#" B_PRIxSIZE ", %d)\n", range, range->base, range->size,
		range->type);

	// We have found a range. It might not be a perfect fit, in which case
	// we have to split the range.
	size_t rangeSize = range->size;

	if (address == range->base) {
		// allocation at the beginning of the range
		if (range->size > size) {
			// only a partial fit -- split the range
			Range* leftOverRange = new(malloc_flags(allocationFlags)) Range(
				address + size, range->size - size, range);
			if (leftOverRange == NULL)
				return B_NO_MEMORY;

			range->size = size;
			_InsertRange(leftOverRange);
		}
	} else if (address + size == range->base + range->size) {
		// allocation at the end of the range -- split the range
		Range* leftOverRange = new(malloc_flags(allocationFlags)) Range(
			range->base, range->size - size, range);
		if (leftOverRange == NULL)
			return B_NO_MEMORY;

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange);
	} else {
		// allocation in the middle of the range -- split the range in three
		Range* leftOverRange1 = new(malloc_flags(allocationFlags)) Range(
			range->base, address - range->base, range);
		if (leftOverRange1 == NULL)
			return B_NO_MEMORY;
		Range* leftOverRange2 = new(malloc_flags(allocationFlags)) Range(
			address + size, range->size - size - leftOverRange1->size, range);
		if (leftOverRange2 == NULL) {
			free_etc(leftOverRange1, allocationFlags);
			return B_NO_MEMORY;
		}

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange1);
		_InsertRange(leftOverRange2);
	}

	// If the range is a free range, remove it from the respective free list.
	if (range->type == Range::RANGE_FREE)
		_FreeListRemoveRange(range, rangeSize);

	IncrementChangeCount();

	TRACE("  VMKernelAddressSpace::_AllocateRange() -> %p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ")\n", range, range->base, range->size);

	_range = range;
	return B_OK;
}


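/*!	Finds a free (or, if \a allowReservedRange is \c true, reserved) range
	that can hold \a size bytes at an address compatible with \a start,
	\a alignment, and \a addressSpec. For the B_ANY_* specifications the
	per-size free lists are consulted first; B_BASE_ADDRESS and the reserved
	fallback walk the range list, while B_EXACT_ADDRESS only checks the range
	containing \a start. On success the chosen (aligned) base address is
	returned in \a _foundAddress.
*/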
VMKernelAddressSpace::Range*
VMKernelAddressSpace::_FindFreeRange(addr_t start, size_t size,
	size_t alignment, uint32 addressSpec, bool allowReservedRange,
	addr_t& _foundAddress)
{
	TRACE("  VMKernelAddressSpace::_FindFreeRange(start: %#" B_PRIxADDR
		", size: %#" B_PRIxSIZE ", alignment: %#" B_PRIxSIZE ", addressSpec: %#"
		B_PRIx32 ", reserved allowed: %d)\n", start, size, alignment,
		addressSpec, allowReservedRange);

	switch (addressSpec) {
		case B_BASE_ADDRESS:
		{
			// We have to iterate through the range list starting at the given
			// address. This is the most inefficient case.
			Range* range = fRangeTree.FindClosest(start, true);
			while (range != NULL) {
				if (range->type == Range::RANGE_FREE) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						_foundAddress = alignedBase;
						return range;
					}
				}
				range = fRangeList.GetNext(range);
			}

			// We didn't find a free spot in the requested range, so we'll
			// try again without any restrictions.
			start = fBase;
			addressSpec = B_ANY_ADDRESS;
			// fall through...
		}

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		{
			// We want to allocate from the first non-empty free list that is
			// guaranteed to contain the size. Finding a free range is O(1),
			// unless there are constraints (min base address, alignment).
			// (List i only holds ranges of at least 2^(PAGE_SHIFT + i) bytes,
			// so rounding the size up to the next power of two pages yields
			// the first list whose ranges are all large enough.)
			int freeListIndex = ld((size * 2 - 1) >> PAGE_SHIFT);

			for (int32 i = freeListIndex; i < fFreeListCount; i++) {
				RangeFreeList& freeList = fFreeLists[i];
				if (freeList.IsEmpty())
					continue;

				for (RangeFreeList::Iterator it = freeList.GetIterator();
						Range* range = it.Next();) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						_foundAddress = alignedBase;
						return range;
					}
				}
			}

			if (!allowReservedRange)
				return NULL;

			// We haven't found any free ranges, but we're supposed to look
			// for reserved ones, too. Iterate through the range list starting
			// at the given address.
			Range* range = fRangeTree.FindClosest(start, true);
			while (range != NULL) {
				if (range->type == Range::RANGE_RESERVED) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						// allocation from the back might be preferred
						// -- adjust the base accordingly
						if ((range->reserved.flags & RESERVED_AVOID_BASE)
								!= 0) {
							alignedBase = ROUNDDOWN(
								range->base + (range->size - size), alignment);
						}

						_foundAddress = alignedBase;
						return range;
					}
				}
				range = fRangeList.GetNext(range);
			}

			return NULL;
		}

		case B_EXACT_ADDRESS:
		{
			Range* range = fRangeTree.FindClosest(start, true);
			TRACE("    B_EXACT_ADDRESS: range: %p\n", range);
			if (range == NULL || range->type == Range::RANGE_AREA
				|| range->base + (range->size - 1) < start + (size - 1)) {
				// TODO: Support allocating if the area range covers multiple
				// free and reserved ranges!
				TRACE("    -> no suitable range\n");
				return NULL;
			}

			if (range->type != Range::RANGE_FREE && !allowReservedRange) {
				TRACE("    -> reserved range not allowed\n");
				return NULL;
			}

			_foundAddress = start;
			return range;
		}

		default:
			return NULL;
	}
}


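/*!	Turns \a range back into a free range. If the preceding and/or following
	range is already free, the ranges are coalesced into one and the now
	redundant Range objects are deleted; the free lists are updated
	accordingly.
*/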
void
VMKernelAddressSpace::_FreeRange(Range* range, uint32 allocationFlags)
{
	TRACE("  VMKernelAddressSpace::_FreeRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// Check whether one or both of the neighboring ranges are free already,
	// and join them, if so.
	Range* previousRange = fRangeList.GetPrevious(range);
	Range* nextRange = fRangeList.GetNext(range);

	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join them all -- keep the first one, delete the others
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			_RemoveRange(nextRange);
			previousRange->size += range->size + nextRange->size;
			free_etc(range, allocationFlags);
			free_etc(nextRange, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		} else {
			// join with the previous range only, delete the supplied one
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			previousRange->size += range->size;
			free_etc(range, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		}
	} else {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join with the next range and delete it
			_RemoveRange(nextRange);
			range->size += nextRange->size;
			free_etc(nextRange, allocationFlags);
		}

		// mark the range free and add it to the respective free list
		range->type = Range::RANGE_FREE;
		_FreeListInsertRange(range, range->size);
	}

	IncrementChangeCount();
}

#ifdef PARANOIA_CHECKS

void
VMKernelAddressSpace::_CheckStructures() const
{
	// general tree structure check
	fRangeTree.CheckTree();

	// check range list and tree
	size_t spaceSize = fEndAddress - fBase + 1;
	addr_t nextBase = fBase;
	Range* previousRange = NULL;
	int previousRangeType = Range::RANGE_AREA;
	uint64 freeRanges = 0;

	RangeList::ConstIterator listIt = fRangeList.GetIterator();
	RangeTree::ConstIterator treeIt = fRangeTree.GetIterator();
	while (true) {
		Range* range = listIt.Next();
		Range* treeRange = treeIt.Next();
		if (range != treeRange) {
			panic("VMKernelAddressSpace::_CheckStructures(): list/tree range "
				"mismatch: %p vs %p", range, treeRange);
		}
		if (range == NULL)
			break;

		if (range->base != nextBase) {
			panic("VMKernelAddressSpace::_CheckStructures(): range base %#"
				B_PRIxADDR ", expected: %#" B_PRIxADDR, range->base, nextBase);
		}

		if (range->size == 0) {
			panic("VMKernelAddressSpace::_CheckStructures(): empty range %p",
				range);
		}

		if (range->size % B_PAGE_SIZE != 0) {
			panic("VMKernelAddressSpace::_CheckStructures(): range %p (%#"
				B_PRIxADDR ", %#" B_PRIxSIZE ") not page aligned", range,
				range->base, range->size);
		}

		if (range->size > spaceSize - (range->base - fBase)) {
			panic("VMKernelAddressSpace::_CheckStructures(): range too large: "
				"(%#" B_PRIxADDR ", %#" B_PRIxSIZE "), address space end: %#"
				B_PRIxADDR, range->base, range->size, fEndAddress);
		}

		if (range->type == Range::RANGE_FREE) {
			freeRanges++;

			if (previousRangeType == Range::RANGE_FREE) {
				panic("VMKernelAddressSpace::_CheckStructures(): adjoining "
					"free ranges: %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE
					"), %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ")", previousRange,
					previousRange->base, previousRange->size, range,
					range->base, range->size);
			}
		}

		previousRange = range;
		nextBase = range->base + range->size;
		previousRangeType = range->type;
	}

	if (nextBase - 1 != fEndAddress) {
		panic("VMKernelAddressSpace::_CheckStructures(): space not fully "
			"covered by ranges: last: %#" B_PRIxADDR ", expected %#" B_PRIxADDR,
			nextBase - 1, fEndAddress);
	}

	// check free lists
	uint64 freeListRanges = 0;
	for (int i = 0; i < fFreeListCount; i++) {
		RangeFreeList& freeList = fFreeLists[i];
		if (freeList.IsEmpty())
			continue;

		for (RangeFreeList::Iterator it = freeList.GetIterator();
				Range* range = it.Next();) {
			if (range->type != Range::RANGE_FREE) {
				panic("VMKernelAddressSpace::_CheckStructures(): non-free "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"free list %d", range, range->base, range->size,
					range->type, i);
			}

			if (fRangeTree.Find(range->base) != range) {
				panic("VMKernelAddressSpace::_CheckStructures(): unknown "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"free list %d", range, range->base, range->size,
					range->type, i);
			}

			if (ld(range->size) - PAGE_SHIFT != i) {
				panic("VMKernelAddressSpace::_CheckStructures(): "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"wrong free list %d", range, range->base, range->size,
					range->type, i);
			}

			freeListRanges++;
		}
	}
}

#endif	// PARANOIA_CHECKS