/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "VMKernelAddressSpace.h"

#include <stdlib.h>

#include <KernelExport.h>

#include <heap.h>
#include <slab/Slab.h>
#include <thread.h>
#include <vm/vm.h>
#include <vm/VMArea.h>


//#define TRACE_VM
#ifdef TRACE_VM
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


//#define PARANOIA_CHECKS
#ifdef PARANOIA_CHECKS
#	define PARANOIA_CHECK_STRUCTURES()	_CheckStructures()
#else
#	define PARANOIA_CHECK_STRUCTURES()	do {} while (false)
#endif


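/*!	Returns the position of the most significant set bit in \a value,
	i.e. floor(log2(value)), or -1, if \a value is 0.
*/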
static int
ld(size_t value)
{
	int index = -1;
	while (value > 0) {
		value >>= 1;
		index++;
	}

	return index;
}


/*!	Verifies that an area with the given aligned base and size fits into
	the spot defined by base and limit and checks for overflows.
	\param base Minimum base address valid for the area.
	\param alignedBase The base address of the range to check.
	\param size The size of the area.
	\param limit The last (inclusive) address of the range to check.
*/
static inline bool
is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
{
	return (alignedBase >= base && alignedBase + (size - 1) > alignedBase
		&& alignedBase + (size - 1) <= limit);
}


// #pragma mark - VMKernelAddressSpace


VMKernelAddressSpace::VMKernelAddressSpace(team_id id, addr_t base, size_t size)
	:
	VMAddressSpace(id, base, size, "kernel address space"),
	fAreaObjectCache(NULL),
	fRangesObjectCache(NULL)
{
}


VMKernelAddressSpace::~VMKernelAddressSpace()
{
	panic("deleting the kernel aspace!\n");
}


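/*!	Creates the object caches for areas and ranges, sets up the free lists,
	and installs a single free range covering the whole address space.
*/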
status_t
VMKernelAddressSpace::InitObject()
{
	fAreaObjectCache = create_object_cache("kernel areas",
		sizeof(VMKernelArea), 0, NULL, NULL, NULL);
	if (fAreaObjectCache == NULL)
		return B_NO_MEMORY;

	fRangesObjectCache = create_object_cache("kernel address ranges",
		sizeof(Range), 0, NULL, NULL, NULL);
	if (fRangesObjectCache == NULL)
		return B_NO_MEMORY;

	// create the free lists
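	// Free list i holds the free ranges whose size s satisfies
	// ld(s) - PAGE_SHIFT == i, so ld(size) - PAGE_SHIFT + 1 lists cover
	// every possible range size.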
	size_t size = fEndAddress - fBase + 1;
	fFreeListCount = ld(size) - PAGE_SHIFT + 1;
	fFreeLists = new(std::nothrow) RangeFreeList[fFreeListCount];
	if (fFreeLists == NULL)
		return B_NO_MEMORY;

	Range* range = new(fRangesObjectCache, 0) Range(fBase, size,
		Range::RANGE_FREE);
	if (range == NULL)
		return B_NO_MEMORY;

	_InsertRange(range);

	TRACE("VMKernelAddressSpace::InitObject(): address range: %#" B_PRIxADDR
		" - %#" B_PRIxADDR ", free lists: %d\n", fBase, fEndAddress,
		fFreeListCount);

	return B_OK;
}


inline VMArea*
VMKernelAddressSpace::FirstArea() const
{
	Range* range = fRangeList.Head();
	while (range != NULL && range->type != Range::RANGE_AREA)
		range = fRangeList.GetNext(range);
	return range != NULL ? range->area : NULL;
}


inline VMArea*
VMKernelAddressSpace::NextArea(VMArea* _area) const
{
	Range* range = static_cast<VMKernelArea*>(_area)->Range();
	do {
		range = fRangeList.GetNext(range);
	} while (range != NULL && range->type != Range::RANGE_AREA);
	return range != NULL ? range->area : NULL;
}


VMArea*
VMKernelAddressSpace::CreateArea(const char* name, uint32 wiring,
	uint32 protection, uint32 allocationFlags)
{
	return VMKernelArea::Create(this, name, wiring, protection,
		fAreaObjectCache, allocationFlags);
}


void
VMKernelAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::DeleteArea(%p)\n", _area);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	object_cache_delete(fAreaObjectCache, area);
}


//! You must hold the address space's read lock.
VMArea*
VMKernelAddressSpace::LookupArea(addr_t address) const
{
	Range* range = fRangeTree.FindClosest(address, true);
	if (range == NULL || range->type != Range::RANGE_AREA)
		return NULL;

	VMKernelArea* area = range->area;
	return area->ContainsAddress(address) ? area : NULL;
}


//! You must hold the address space's read lock.
VMArea*
VMKernelAddressSpace::FindClosestArea(addr_t address, bool less) const
{
	Range* range = fRangeTree.FindClosest(address, less);
	while (range != NULL && range->type != Range::RANGE_AREA)
		range = fRangeTree.Next(range);

	return range != NULL ? range->area : NULL;
}


/*!	This inserts the area you pass into the address space.
	It will also set the "_address" argument to its base address when
	the call succeeds.
	You need to hold the VMAddressSpace write lock.
*/
status_t
VMKernelAddressSpace::InsertArea(VMArea* _area, size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 allocationFlags, void** _address)
{
	TRACE("VMKernelAddressSpace::InsertArea(%p, %" B_PRIu32 ", %#" B_PRIxSIZE
		", %p \"%s\")\n", addressRestrictions->address,
		addressRestrictions->address_specification, size, _area, _area->name);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);

	Range* range;
	status_t error = _AllocateRange(addressRestrictions, size,
		addressRestrictions->address_specification == B_EXACT_ADDRESS,
		allocationFlags, range);
	if (error != B_OK)
		return error;

	range->type = Range::RANGE_AREA;
	range->area = area;
	area->SetRange(range);
	area->SetBase(range->base);
	area->SetSize(range->size);

	if (_address != NULL)
		*_address = (void*)area->Base();
	fFreeSpace -= area->Size();

	PARANOIA_CHECK_STRUCTURES();

	return B_OK;
}


//! You must hold the address space's write lock.
void
VMKernelAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::RemoveArea(%p)\n", _area);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);

	_FreeRange(area->Range(), allocationFlags);

	fFreeSpace += area->Size();

	PARANOIA_CHECK_STRUCTURES();
}


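/*!	Returns whether the area could be resized to \a newSize. Shrinking is
	always possible; growing requires the range directly following the area
	to be large enough and either free or belonging to the reservation the
	area was allocated from.
*/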
bool
VMKernelAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
{
	Range* range = static_cast<VMKernelArea*>(area)->Range();

	if (newSize <= range->size)
		return true;

	Range* nextRange = fRangeList.GetNext(range);
	if (nextRange == NULL || nextRange->type == Range::RANGE_AREA)
		return false;

	if (nextRange->type == Range::RANGE_RESERVED
		&& nextRange->reserved.base > range->base) {
		return false;
	}

	// TODO: If there is free space after a reserved range (or vice versa), it
	// could be used as well.
	return newSize - range->size <= nextRange->size;
}


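/*!	Resizes the area at its tail: when shrinking, the freed space is merged
	into (or becomes) a following free range; when growing, the additional
	space is taken from the range that follows the area.
*/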
status_t
VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ResizeArea(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	Range* nextRange = fRangeList.GetNext(range);

	if (newSize < range->size) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// a free range is following -- just enlarge it
			_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size += range->size - newSize;
			nextRange->base = range->base + newSize;
			_FreeListInsertRange(nextRange, nextRange->size);
		} else {
			// no free range following -- we need to allocate a new one and
			// insert it
			nextRange = new(fRangesObjectCache, allocationFlags) Range(
				range->base + newSize, range->size - newSize,
				Range::RANGE_FREE);
			if (nextRange == NULL)
				return B_NO_MEMORY;
			_InsertRange(nextRange);
		}
	} else {
		if (nextRange == NULL
			|| (nextRange->type == Range::RANGE_RESERVED
				&& nextRange->reserved.base > range->base)) {
			return B_BAD_VALUE;
		}
		// TODO: If there is free space after a reserved range (or vice versa),
		// it could be used as well.
		size_t sizeDiff = newSize - range->size;
		if (sizeDiff > nextRange->size)
			return B_BAD_VALUE;

		if (sizeDiff == nextRange->size) {
			// The next range is completely covered -- remove and delete it.
			_RemoveRange(nextRange);
			object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
		} else {
			// The next range is only partially covered -- shrink it.
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size -= sizeDiff;
			nextRange->base = range->base + newSize;
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListInsertRange(nextRange, nextRange->size);
		}
	}

	range->size = newSize;
	area->SetSize(newSize);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


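/*!	Shrinks the area to \a newSize by moving its base address up. The space
	freed at the head is merged into the preceding free range or becomes a
	new free range of its own.
*/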
status_t
VMKernelAddressSpace::ShrinkAreaHead(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ShrinkAreaHead(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	if (newSize > range->size)
		return B_BAD_VALUE;

	Range* previousRange = fRangeList.GetPrevious(range);

	size_t sizeDiff = range->size - newSize;
	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		// the previous range is free -- just enlarge it
		_FreeListRemoveRange(previousRange, previousRange->size);
		previousRange->size += sizeDiff;
		_FreeListInsertRange(previousRange, previousRange->size);
		range->base += sizeDiff;
		range->size = newSize;
	} else {
		// no free range before -- we need to allocate a new one and
		// insert it
		previousRange = new(fRangesObjectCache, allocationFlags) Range(
			range->base, sizeDiff, Range::RANGE_FREE);
		if (previousRange == NULL)
			return B_NO_MEMORY;
		range->base += sizeDiff;
		range->size = newSize;
		_InsertRange(previousRange);
	}

	area->SetBase(range->base);
	area->SetSize(range->size);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::ShrinkAreaTail(VMArea* area, size_t newSize,
	uint32 allocationFlags)
{
	return ResizeArea(area, newSize, allocationFlags);
}


status_t
VMKernelAddressSpace::ReserveAddressRange(size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 flags, uint32 allocationFlags, void** _address)
{
	TRACE("VMKernelAddressSpace::ReserveAddressRange(%p, %" B_PRIu32 ", %#"
		B_PRIxSIZE ", %#" B_PRIx32 ")\n", addressRestrictions->address,
		addressRestrictions->address_specification, size, flags);

	// Don't allow range reservations if the address space is about to be
	// deleted.
	if (fDeleting)
		return B_BAD_TEAM_ID;

	Range* range;
	status_t error = _AllocateRange(addressRestrictions, size, false,
		allocationFlags, range);
	if (error != B_OK)
		return error;

	range->type = Range::RANGE_RESERVED;
	range->reserved.base = range->base;
	range->reserved.flags = flags;

	if (_address != NULL)
		*_address = (void*)range->base;

	Get();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::UnreserveAddressRange(%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ")\n", address, size);

	// Don't allow range unreservations if the address space is about to be
	// deleted. UnreserveAllAddressRanges() must be used.
	if (fDeleting)
		return B_BAD_TEAM_ID;

	// search range list and remove any matching reserved ranges
	addr_t endAddress = address + (size - 1);
	Range* range = fRangeTree.FindClosest(address, false);
	while (range != NULL && range->base + (range->size - 1) <= endAddress) {
		// Get the next range for the iteration -- we need to skip free ranges,
		// since _FreeRange() might join them with the current range and delete
		// them.
		Range* nextRange = fRangeList.GetNext(range);
		while (nextRange != NULL && nextRange->type == Range::RANGE_FREE)
			nextRange = fRangeList.GetNext(nextRange);

		if (range->type == Range::RANGE_RESERVED) {
			_FreeRange(range, allocationFlags);
			Put();
		}

		range = nextRange;
	}

	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


void
VMKernelAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
{
	Range* range = fRangeList.Head();
	while (range != NULL) {
		// Get the next range for the iteration -- we need to skip free ranges,
		// since _FreeRange() might join them with the current range and delete
		// them.
		Range* nextRange = fRangeList.GetNext(range);
		while (nextRange != NULL && nextRange->type == Range::RANGE_FREE)
			nextRange = fRangeList.GetNext(nextRange);

		if (range->type == Range::RANGE_RESERVED) {
			_FreeRange(range, allocationFlags);
			Put();
		}

		range = nextRange;
	}

	PARANOIA_CHECK_STRUCTURES();
}


void
VMKernelAddressSpace::Dump() const
{
	VMAddressSpace::Dump();

	kprintf("area_list:\n");

	for (RangeList::ConstIterator it = fRangeList.GetIterator();
			Range* range = it.Next();) {
		if (range->type != Range::RANGE_AREA)
			continue;

		VMKernelArea* area = range->area;
		kprintf(" area %" B_PRId32 ": ", area->id);
		kprintf("base_addr = %#" B_PRIxADDR " ", area->Base());
		kprintf("size = %#" B_PRIxSIZE " ", area->Size());
		kprintf("name = '%s' ", area->name);
		kprintf("protection = %#" B_PRIx32 "\n", area->protection);
	}
}


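/*!	Files \a range in the free list bucket for \a size; bucket i holds the
	free ranges whose size s satisfies ld(s) - PAGE_SHIFT == i.
*/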
inline void
VMKernelAddressSpace::_FreeListInsertRange(Range* range, size_t size)
{
	TRACE("    VMKernelAddressSpace::_FreeListInsertRange(%p (%#" B_PRIxADDR
		", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, range->base,
		range->size, range->type, size, ld(size) - PAGE_SHIFT);

	fFreeLists[ld(size) - PAGE_SHIFT].Add(range);
}


inline void
VMKernelAddressSpace::_FreeListRemoveRange(Range* range, size_t size)
{
	TRACE("    VMKernelAddressSpace::_FreeListRemoveRange(%p (%#" B_PRIxADDR
		", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, range->base,
		range->size, range->type, size, ld(size) - PAGE_SHIFT);

	fFreeLists[ld(size) - PAGE_SHIFT].Remove(range);
}


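/*!	Inserts \a range into the range list (kept sorted by base address) and
	the lookup tree; free ranges are additionally filed in the matching
	free list.
*/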
void
VMKernelAddressSpace::_InsertRange(Range* range)
{
	TRACE("    VMKernelAddressSpace::_InsertRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// insert at the correct position in the range list
	Range* insertBeforeRange = fRangeTree.FindClosest(range->base, true);
	fRangeList.Insert(
		insertBeforeRange != NULL
			? fRangeList.GetNext(insertBeforeRange) : fRangeList.Head(),
		range);

	// insert into tree
	fRangeTree.Insert(range);

	// insert in the free ranges list, if the range is free
	if (range->type == Range::RANGE_FREE)
		_FreeListInsertRange(range, range->size);
}


void
VMKernelAddressSpace::_RemoveRange(Range* range)
{
	TRACE("    VMKernelAddressSpace::_RemoveRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// remove from tree and range list
	fRangeTree.Remove(range);
	fRangeList.Remove(range);

	// if it is a free range, also remove it from the free list
	if (range->type == Range::RANGE_FREE)
		_FreeListRemoveRange(range, range->size);
}


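/*!	Carves a range of \a size bytes satisfying \a addressRestrictions out of
	an existing free range (or, if \a allowReservedRange, a reserved range).
	The chosen range is split as needed and returned in \a _range; its type
	is left untouched, so the caller has to set it.
*/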
status_t
VMKernelAddressSpace::_AllocateRange(
	const virtual_address_restrictions* addressRestrictions,
	size_t size, bool allowReservedRange, uint32 allocationFlags,
	Range*& _range)
{
	TRACE("  VMKernelAddressSpace::_AllocateRange(address: %p, size: %#"
		B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", reserved allowed: %d)\n",
		addressRestrictions->address, size,
		addressRestrictions->address_specification, allowReservedRange);

	// prepare size, alignment and the base address for the range search
	addr_t address = (addr_t)addressRestrictions->address;
	size = ROUNDUP(size, B_PAGE_SIZE);
	size_t alignment = addressRestrictions->alignment != 0
		? addressRestrictions->alignment : B_PAGE_SIZE;

	switch (addressRestrictions->address_specification) {
		case B_EXACT_ADDRESS:
		{
			if (address % B_PAGE_SIZE != 0)
				return B_BAD_VALUE;
			break;
		}

		case B_BASE_ADDRESS:
			address = ROUNDUP(address, B_PAGE_SIZE);
			break;

		case B_ANY_KERNEL_BLOCK_ADDRESS:
			// align the memory to the next power of two of the size
			while (alignment < size)
				alignment <<= 1;

			// fall through...

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
			address = fBase;
			break;

		default:
			return B_BAD_VALUE;
	}

	// find a range
	Range* range = _FindFreeRange(address, size, alignment,
		addressRestrictions->address_specification, allowReservedRange,
		address);
	if (range == NULL) {
		return addressRestrictions->address_specification == B_EXACT_ADDRESS
			? B_BAD_VALUE : B_NO_MEMORY;
	}

	TRACE("  VMKernelAddressSpace::_AllocateRange() found range:(%p (%#"
		B_PRIxADDR ", %#" B_PRIxSIZE ", %d)\n", range, range->base, range->size,
		range->type);

	// We have found a range. It might not be a perfect fit, in which case
	// we have to split the range.
	size_t rangeSize = range->size;

	if (address == range->base) {
		// allocation at the beginning of the range
		if (range->size > size) {
			// only partial -- split the range
			Range* leftOverRange = new(fRangesObjectCache, allocationFlags)
				Range(address + size, range->size - size, range);
			if (leftOverRange == NULL)
				return B_NO_MEMORY;

			range->size = size;
			_InsertRange(leftOverRange);
		}
	} else if (address + size == range->base + range->size) {
		// allocation at the end of the range -- split the range
		Range* leftOverRange = new(fRangesObjectCache, allocationFlags) Range(
			range->base, range->size - size, range);
		if (leftOverRange == NULL)
			return B_NO_MEMORY;

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange);
	} else {
		// allocation in the middle of the range -- split the range in three
		Range* leftOverRange1 = new(fRangesObjectCache, allocationFlags) Range(
			range->base, address - range->base, range);
		if (leftOverRange1 == NULL)
			return B_NO_MEMORY;
		Range* leftOverRange2 = new(fRangesObjectCache, allocationFlags) Range(
			address + size, range->size - size - leftOverRange1->size, range);
		if (leftOverRange2 == NULL) {
			object_cache_delete(fRangesObjectCache, leftOverRange1,
				allocationFlags);
			return B_NO_MEMORY;
		}

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange1);
		_InsertRange(leftOverRange2);
	}

	// If the range is a free range, remove it from the respective free list.
	if (range->type == Range::RANGE_FREE)
		_FreeListRemoveRange(range, rangeSize);

	IncrementChangeCount();

	TRACE("  VMKernelAddressSpace::_AllocateRange() -> %p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ")\n", range, range->base, range->size);

	_range = range;
	return B_OK;
}


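/*!	Finds a spot for \a size bytes in a free range (or, if
	\a allowReservedRange, a reserved range) honoring \a start, \a alignment,
	and \a addressSpec. On success the containing range is returned and the
	aligned allocation address is written to \a _foundAddress.
*/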
VMKernelAddressSpace::Range*
VMKernelAddressSpace::_FindFreeRange(addr_t start, size_t size,
	size_t alignment, uint32 addressSpec, bool allowReservedRange,
	addr_t& _foundAddress)
{
	TRACE("  VMKernelAddressSpace::_FindFreeRange(start: %#" B_PRIxADDR
		", size: %#" B_PRIxSIZE ", alignment: %#" B_PRIxSIZE ", addressSpec: %#"
		B_PRIx32 ", reserved allowed: %d)\n", start, size, alignment,
		addressSpec, allowReservedRange);

	switch (addressSpec) {
		case B_BASE_ADDRESS:
		{
			// We have to iterate through the range list starting at the given
			// address. This is the most inefficient case.
			Range* range = fRangeTree.FindClosest(start, true);
			while (range != NULL) {
				if (range->type == Range::RANGE_FREE) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						_foundAddress = alignedBase;
						return range;
					}
				}
				range = fRangeList.GetNext(range);
			}

			// We didn't find a free spot in the requested range, so we'll
			// try again without any restrictions.
			start = fBase;
			addressSpec = B_ANY_ADDRESS;
			// fall through...
		}

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		{
			// We want to allocate from the first non-empty free list that is
			// guaranteed to contain the size. Finding a free range is O(1),
			// unless there are constraints (min base address, alignment).
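			// For example, assuming 4 KiB pages (PAGE_SHIFT == 12), a 12 KiB
			// request yields ld((0x3000 * 2 - 1) >> 12) == ld(5) == 2, i.e.
			// the list of ranges of at least 16 KiB -- the smallest bucket
			// whose entries are all guaranteed to fit the request.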
			int freeListIndex = ld((size * 2 - 1) >> PAGE_SHIFT);

			for (int32 i = freeListIndex; i < fFreeListCount; i++) {
				RangeFreeList& freeList = fFreeLists[i];
				if (freeList.IsEmpty())
					continue;

				for (RangeFreeList::Iterator it = freeList.GetIterator();
						Range* range = it.Next();) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						_foundAddress = alignedBase;
						return range;
					}
				}
			}

			if (!allowReservedRange)
				return NULL;

			// We haven't found any free ranges, but we're supposed to look
			// for reserved ones, too. Iterate through the range list starting
			// at the given address.
			Range* range = fRangeTree.FindClosest(start, true);
			while (range != NULL) {
				if (range->type == Range::RANGE_RESERVED) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						// allocation from the back might be preferred
						// -- adjust the base accordingly
						if ((range->reserved.flags & RESERVED_AVOID_BASE)
								!= 0) {
							alignedBase = ROUNDDOWN(
								range->base + (range->size - size), alignment);
						}

						_foundAddress = alignedBase;
						return range;
					}
				}
				range = fRangeList.GetNext(range);
			}

			return NULL;
		}

		case B_EXACT_ADDRESS:
		{
			Range* range = fRangeTree.FindClosest(start, true);
			TRACE("    B_EXACT_ADDRESS: range: %p\n", range);
			if (range == NULL || range->type == Range::RANGE_AREA
				|| range->base + (range->size - 1) < start + (size - 1)) {
				// TODO: Support allocating if the area range covers multiple
				// free and reserved ranges!
				TRACE("    -> no suitable range\n");
				return NULL;
			}

			if (range->type != Range::RANGE_FREE && !allowReservedRange) {
				TRACE("    -> reserved range not allowed\n");
				return NULL;
			}

			_foundAddress = start;
			return range;
		}

		default:
			return NULL;
	}
}


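/*!	Returns \a range to the free pool: it is merged with adjacent free
	ranges where possible and filed in the appropriate free list.
*/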
void
VMKernelAddressSpace::_FreeRange(Range* range, uint32 allocationFlags)
{
	TRACE("  VMKernelAddressSpace::_FreeRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// Check whether one or both of the neighboring ranges are free already,
	// and join them, if so.
	Range* previousRange = fRangeList.GetPrevious(range);
	Range* nextRange = fRangeList.GetNext(range);

	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join them all -- keep the first one, delete the others
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			_RemoveRange(nextRange);
			previousRange->size += range->size + nextRange->size;
			object_cache_delete(fRangesObjectCache, range, allocationFlags);
			object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		} else {
			// join with the previous range only, delete the supplied one
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			previousRange->size += range->size;
			object_cache_delete(fRangesObjectCache, range, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		}
	} else {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join with the next range and delete it
			_RemoveRange(nextRange);
			range->size += nextRange->size;
			object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
		}

		// mark the range free and add it to the respective free list
		range->type = Range::RANGE_FREE;
		_FreeListInsertRange(range, range->size);
	}

	IncrementChangeCount();
}


#ifdef PARANOIA_CHECKS

void
VMKernelAddressSpace::_CheckStructures() const
{
	// general tree structure check
	fRangeTree.CheckTree();

	// check range list and tree
	size_t spaceSize = fEndAddress - fBase + 1;
	addr_t nextBase = fBase;
	Range* previousRange = NULL;
	int previousRangeType = Range::RANGE_AREA;
	uint64 freeRanges = 0;

	RangeList::ConstIterator listIt = fRangeList.GetIterator();
	RangeTree::ConstIterator treeIt = fRangeTree.GetIterator();
	while (true) {
		Range* range = listIt.Next();
		Range* treeRange = treeIt.Next();
		if (range != treeRange) {
			panic("VMKernelAddressSpace::_CheckStructures(): list/tree range "
				"mismatch: %p vs %p", range, treeRange);
		}
		if (range == NULL)
			break;

		if (range->base != nextBase) {
			panic("VMKernelAddressSpace::_CheckStructures(): range base %#"
				B_PRIxADDR ", expected: %#" B_PRIxADDR, range->base, nextBase);
		}

		if (range->size == 0) {
			panic("VMKernelAddressSpace::_CheckStructures(): empty range %p",
				range);
		}

		if (range->size % B_PAGE_SIZE != 0) {
			panic("VMKernelAddressSpace::_CheckStructures(): range %p (%#"
				B_PRIxADDR ", %#" B_PRIxSIZE ") not page aligned", range,
				range->base, range->size);
		}

		if (range->size > spaceSize - (range->base - fBase)) {
			panic("VMKernelAddressSpace::_CheckStructures(): range too large: "
				"(%#" B_PRIxADDR ", %#" B_PRIxSIZE "), address space end: %#"
				B_PRIxADDR, range->base, range->size, fEndAddress);
		}

		if (range->type == Range::RANGE_FREE) {
			freeRanges++;

			if (previousRangeType == Range::RANGE_FREE) {
				panic("VMKernelAddressSpace::_CheckStructures(): adjoining "
					"free ranges: %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE
					"), %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ")", previousRange,
					previousRange->base, previousRange->size, range,
					range->base, range->size);
			}
		}

		previousRange = range;
		nextBase = range->base + range->size;
		previousRangeType = range->type;
	}

	if (nextBase - 1 != fEndAddress) {
		panic("VMKernelAddressSpace::_CheckStructures(): space not fully "
			"covered by ranges: last: %#" B_PRIxADDR ", expected %#" B_PRIxADDR,
			nextBase - 1, fEndAddress);
	}

	// check free lists
	uint64 freeListRanges = 0;
	for (int i = 0; i < fFreeListCount; i++) {
		RangeFreeList& freeList = fFreeLists[i];
		if (freeList.IsEmpty())
			continue;

		for (RangeFreeList::Iterator it = freeList.GetIterator();
				Range* range = it.Next();) {
			if (range->type != Range::RANGE_FREE) {
				panic("VMKernelAddressSpace::_CheckStructures(): non-free "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"free list %d", range, range->base, range->size,
					range->type, i);
			}

			if (fRangeTree.Find(range->base) != range) {
				panic("VMKernelAddressSpace::_CheckStructures(): unknown "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"free list %d", range, range->base, range->size,
					range->type, i);
			}

			if (ld(range->size) - PAGE_SHIFT != i) {
				panic("VMKernelAddressSpace::_CheckStructures(): "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"wrong free list %d", range, range->base, range->size,
					range->type, i);
			}

			freeListRanges++;
		}
	}
}

#endif	// PARANOIA_CHECKS
953