xref: /haiku/src/system/kernel/vm/VMKernelAddressSpace.cpp (revision ce08f03c61c1b71103e59648b2139d8f1604b783)
/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "VMKernelAddressSpace.h"

#include <stdlib.h>

#include <KernelExport.h>

#include <heap.h>
#include <slab/Slab.h>
#include <thread.h>
#include <vm/vm.h>
#include <vm/VMArea.h>


//#define TRACE_VM
#ifdef TRACE_VM
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


//#define PARANOIA_CHECKS
#ifdef PARANOIA_CHECKS
#	define PARANOIA_CHECK_STRUCTURES()	_CheckStructures()
#else
#	define PARANOIA_CHECK_STRUCTURES()	do {} while (false)
#endif


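// Returns the position of the most significant set bit of \a value,
// i.e. floor(log2(value)), or -1 if \a value is 0.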
static int
ld(size_t value)
{
	int index = -1;
	while (value > 0) {
		value >>= 1;
		index++;
	}

	return index;
}


/*!	Verifies that an area with the given aligned base and size fits into
	the spot defined by base and limit and checks for overflows.
	\param base Minimum base address valid for the area.
	\param alignedBase The base address of the range to check.
	\param size The size of the area.
	\param limit The last (inclusive) address of the range to check.
*/
static inline bool
is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
{
	return (alignedBase >= base && alignedBase + (size - 1) > alignedBase
		&& alignedBase + (size - 1) <= limit);
}


// #pragma mark - VMKernelAddressSpace


VMKernelAddressSpace::VMKernelAddressSpace(team_id id, addr_t base, size_t size)
	:
	VMAddressSpace(id, base, size, "kernel address space"),
	fAreaObjectCache(NULL),
	fRangesObjectCache(NULL)
{
}


VMKernelAddressSpace::~VMKernelAddressSpace()
{
	panic("deleting the kernel aspace!\n");
}


status_t
VMKernelAddressSpace::InitObject()
{
	fAreaObjectCache = create_object_cache("kernel areas",
		sizeof(VMKernelArea), 0, NULL, NULL, NULL);
	if (fAreaObjectCache == NULL)
		return B_NO_MEMORY;

	fRangesObjectCache = create_object_cache("kernel address ranges",
		sizeof(Range), 0, NULL, NULL, NULL);
	if (fRangesObjectCache == NULL)
		return B_NO_MEMORY;

	// create the free lists
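	// Free list i holds the free ranges whose size is in
	// [2^(PAGE_SHIFT + i), 2^(PAGE_SHIFT + i + 1)) bytes, i.e. one list per
	// power-of-two size class from a single page up to the whole space.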
	size_t size = fEndAddress - fBase + 1;
	fFreeListCount = ld(size) - PAGE_SHIFT + 1;
	fFreeLists = new(std::nothrow) RangeFreeList[fFreeListCount];
	if (fFreeLists == NULL)
		return B_NO_MEMORY;

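	// initially the entire address space is a single free range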
	Range* range = new(fRangesObjectCache, 0) Range(fBase, size,
		Range::RANGE_FREE);
	if (range == NULL)
		return B_NO_MEMORY;

	_InsertRange(range);

	TRACE("VMKernelAddressSpace::InitObject(): address range: %#" B_PRIxADDR
		" - %#" B_PRIxADDR ", free lists: %d\n", fBase, fEndAddress,
		fFreeListCount);

	return B_OK;
}


inline VMArea*
VMKernelAddressSpace::FirstArea() const
{
	Range* range = fRangeList.Head();
	while (range != NULL && range->type != Range::RANGE_AREA)
		range = fRangeList.GetNext(range);
	return range != NULL ? range->area : NULL;
}


inline VMArea*
VMKernelAddressSpace::NextArea(VMArea* _area) const
{
	Range* range = static_cast<VMKernelArea*>(_area)->Range();
	do {
		range = fRangeList.GetNext(range);
	} while (range != NULL && range->type != Range::RANGE_AREA);
	return range != NULL ? range->area : NULL;
}


VMArea*
VMKernelAddressSpace::CreateArea(const char* name, uint32 wiring,
	uint32 protection, uint32 allocationFlags)
{
	return VMKernelArea::Create(this, name, wiring, protection,
		fAreaObjectCache, allocationFlags);
}


void
VMKernelAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::DeleteArea(%p)\n", _area);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	object_cache_delete(fAreaObjectCache, area);
}


//! You must hold the address space's read lock.
VMArea*
VMKernelAddressSpace::LookupArea(addr_t address) const
{
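	// If any range contains the address, it is the one with the greatest
	// base address that is <= the address.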
	Range* range = fRangeTree.FindClosest(address, true);
	if (range == NULL || range->type != Range::RANGE_AREA)
		return NULL;

	VMKernelArea* area = range->area;
	return area->ContainsAddress(address) ? area : NULL;
}


//! You must hold the address space's read lock.
VMArea*
VMKernelAddressSpace::FindClosestArea(addr_t address, bool less) const
{
	Range* range = fRangeTree.FindClosest(address, less);
	while (range != NULL && range->type != Range::RANGE_AREA)
		range = fRangeTree.Next(range);

	return range != NULL ? range->area : NULL;
}


/*!	This inserts the area you pass into the address space.
	It will also set the "_address" argument to its base address when
	the call succeeds.
	You need to hold the VMAddressSpace write lock.
*/
status_t
VMKernelAddressSpace::InsertArea(VMArea* _area, size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 allocationFlags, void** _address)
{
	TRACE("VMKernelAddressSpace::InsertArea(%p, %" B_PRIu32 ", %#" B_PRIxSIZE
		", %p \"%s\")\n", addressRestrictions->address,
		addressRestrictions->address_specification, size, _area, _area->name);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);

	Range* range;
	status_t error = _AllocateRange(addressRestrictions, size,
		addressRestrictions->address_specification == B_EXACT_ADDRESS,
		allocationFlags, range);
	if (error != B_OK)
		return error;

	range->type = Range::RANGE_AREA;
	range->area = area;
	area->SetRange(range);
	area->SetBase(range->base);
	area->SetSize(range->size);

	if (_address != NULL)
		*_address = (void*)area->Base();
	fFreeSpace -= area->Size();

	PARANOIA_CHECK_STRUCTURES();

	return B_OK;
}


//! You must hold the address space's write lock.
void
VMKernelAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::RemoveArea(%p)\n", _area);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);

	_FreeRange(area->Range(), allocationFlags);

	fFreeSpace += area->Size();

	PARANOIA_CHECK_STRUCTURES();
}


bool
VMKernelAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
{
	Range* range = static_cast<VMKernelArea*>(area)->Range();

	if (newSize <= range->size)
		return true;

	Range* nextRange = fRangeList.GetNext(range);
	if (nextRange == NULL || nextRange->type == Range::RANGE_AREA)
		return false;

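	// reserved.base preserves the base of the original reservation (see
	// ReserveAddressRange()); an area apparently may only grow into reserved
	// space that belongs to the reservation it was itself carved out of.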
	if (nextRange->type == Range::RANGE_RESERVED
		&& nextRange->reserved.base > range->base) {
		return false;
	}

	// TODO: If there is free space after a reserved range (or vice versa), it
	// could be used as well.
	return newSize - range->size <= nextRange->size;
}


status_t
VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ResizeArea(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	Range* nextRange = fRangeList.GetNext(range);

	if (newSize < range->size) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// a free range is following -- just enlarge it
			_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size += range->size - newSize;
			nextRange->base = range->base + newSize;
			_FreeListInsertRange(nextRange, nextRange->size);
		} else {
			// no free range following -- we need to allocate a new one and
			// insert it
			nextRange = new(fRangesObjectCache, allocationFlags) Range(
				range->base + newSize, range->size - newSize,
				Range::RANGE_FREE);
			if (nextRange == NULL)
				return B_NO_MEMORY;
			_InsertRange(nextRange);
		}
	} else {
		if (nextRange == NULL
			|| (nextRange->type == Range::RANGE_RESERVED
				&& nextRange->reserved.base > range->base)) {
			return B_BAD_VALUE;
		}
		// TODO: If there is free space after a reserved range (or vice versa),
		// it could be used as well.
		size_t sizeDiff = newSize - range->size;
		if (sizeDiff > nextRange->size)
			return B_BAD_VALUE;

		if (sizeDiff == nextRange->size) {
			// The next range is completely covered -- remove and delete it.
			_RemoveRange(nextRange);
			object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
		} else {
			// The next range is only partially covered -- shrink it.
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size -= sizeDiff;
			nextRange->base = range->base + newSize;
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListInsertRange(nextRange, nextRange->size);
		}
	}

	range->size = newSize;
	area->SetSize(newSize);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::ShrinkAreaHead(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ShrinkAreaHead(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	if (newSize > range->size)
		return B_BAD_VALUE;

	Range* previousRange = fRangeList.GetPrevious(range);

	size_t sizeDiff = range->size - newSize;
	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		// the previous range is free -- just enlarge it
		_FreeListRemoveRange(previousRange, previousRange->size);
		previousRange->size += sizeDiff;
		_FreeListInsertRange(previousRange, previousRange->size);
		range->base += sizeDiff;
		range->size = newSize;
	} else {
		// no free range before -- we need to allocate a new one and
		// insert it
		previousRange = new(fRangesObjectCache, allocationFlags) Range(
			range->base, sizeDiff, Range::RANGE_FREE);
		if (previousRange == NULL)
			return B_NO_MEMORY;
		range->base += sizeDiff;
		range->size = newSize;
		_InsertRange(previousRange);
	}

	area->SetBase(range->base);
	area->SetSize(range->size);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::ShrinkAreaTail(VMArea* area, size_t newSize,
	uint32 allocationFlags)
{
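	// shrinking at the tail is simply a resize to the smaller size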
	return ResizeArea(area, newSize, allocationFlags);
}


status_t
VMKernelAddressSpace::ReserveAddressRange(size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 flags, uint32 allocationFlags, void** _address)
{
	TRACE("VMKernelAddressSpace::ReserveAddressRange(%p, %" B_PRIu32 ", %#"
		B_PRIxSIZE ", %#" B_PRIx32 ")\n", addressRestrictions->address,
		addressRestrictions->address_specification, size, flags);

	// Don't allow range reservations if the address space is about to be
	// deleted.
	if (fDeleting)
		return B_BAD_TEAM_ID;

	Range* range;
	status_t error = _AllocateRange(addressRestrictions, size, false,
		allocationFlags, range);
	if (error != B_OK)
		return error;

	range->type = Range::RANGE_RESERVED;
	range->reserved.base = range->base;
	range->reserved.flags = flags;

	if (_address != NULL)
		*_address = (void*)range->base;

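	// The reservation takes a reference to the address space; the matching
	// Put() happens in the Unreserve*() methods.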
	Get();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::UnreserveAddressRange(%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ")\n", address, size);

	// Don't allow range unreservations if the address space is about to be
	// deleted. UnreserveAllAddressRanges() must be used instead.
	if (fDeleting)
		return B_BAD_TEAM_ID;

	// search the range list and remove any matching reserved ranges
	addr_t endAddress = address + (size - 1);
	Range* range = fRangeTree.FindClosest(address, false);
	while (range != NULL && range->base + (range->size - 1) <= endAddress) {
		// Get the next range for the iteration -- we need to skip free ranges,
		// since _FreeRange() might join them with the current range and delete
		// them.
		Range* nextRange = fRangeList.GetNext(range);
		while (nextRange != NULL && nextRange->type == Range::RANGE_FREE)
			nextRange = fRangeList.GetNext(nextRange);

		if (range->type == Range::RANGE_RESERVED) {
			_FreeRange(range, allocationFlags);
			Put();
		}

		range = nextRange;
	}

	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


void
VMKernelAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
{
	Range* range = fRangeList.Head();
	while (range != NULL) {
		// Get the next range for the iteration -- we need to skip free ranges,
		// since _FreeRange() might join them with the current range and delete
		// them.
		Range* nextRange = fRangeList.GetNext(range);
		while (nextRange != NULL && nextRange->type == Range::RANGE_FREE)
			nextRange = fRangeList.GetNext(nextRange);

		if (range->type == Range::RANGE_RESERVED) {
			_FreeRange(range, allocationFlags);
			Put();
		}

		range = nextRange;
	}

	PARANOIA_CHECK_STRUCTURES();
}


void
VMKernelAddressSpace::Dump() const
{
	VMAddressSpace::Dump();

	kprintf("range list:\n");

	for (RangeList::ConstIterator it = fRangeList.GetIterator();
			Range* range = it.Next();) {

		switch (range->type) {
			case Range::RANGE_AREA:
			{
				VMKernelArea* area = range->area;
				kprintf(" area %" B_PRId32 ": ", area->id);
				kprintf("base_addr = %#" B_PRIxADDR " ", area->Base());
				kprintf("size = %#" B_PRIxSIZE " ", area->Size());
				kprintf("name = '%s' ", area->name);
				kprintf("protection = %#" B_PRIx32 "\n", area->protection);
				break;
			}

			case Range::RANGE_RESERVED:
				kprintf(" reserved: base_addr = %#" B_PRIxADDR
					" reserved_base = %#" B_PRIxADDR " size = %#"
					B_PRIxSIZE " flags = %#" B_PRIx32 "\n", range->base,
					range->reserved.base, range->size, range->reserved.flags);
				break;

			case Range::RANGE_FREE:
				kprintf(" free: base_addr = %#" B_PRIxADDR " size = %#"
					B_PRIxSIZE "\n", range->base, range->size);
				break;
		}
	}
}


inline void
VMKernelAddressSpace::_FreeListInsertRange(Range* range, size_t size)
{
	TRACE("    VMKernelAddressSpace::_FreeListInsertRange(%p (%#" B_PRIxADDR
		", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, range->base,
		range->size, range->type, size, ld(size) - PAGE_SHIFT);

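	// ld(size) - PAGE_SHIFT is the index of the power-of-two size class the
	// range falls into (cf. InitObject())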
	fFreeLists[ld(size) - PAGE_SHIFT].Add(range);
}


inline void
VMKernelAddressSpace::_FreeListRemoveRange(Range* range, size_t size)
{
	TRACE("    VMKernelAddressSpace::_FreeListRemoveRange(%p (%#" B_PRIxADDR
		", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, range->base,
		range->size, range->type, size, ld(size) - PAGE_SHIFT);

	fFreeLists[ld(size) - PAGE_SHIFT].Remove(range);
}


void
VMKernelAddressSpace::_InsertRange(Range* range)
{
	TRACE("    VMKernelAddressSpace::_InsertRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// insert at the correct position in the range list
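	// (FindClosest(base, true) returns the range with the greatest base
	// <= ours, i.e. the new range's predecessor in address order, if any)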
	Range* insertBeforeRange = fRangeTree.FindClosest(range->base, true);
	fRangeList.InsertBefore(
		insertBeforeRange != NULL
			? fRangeList.GetNext(insertBeforeRange) : fRangeList.Head(),
		range);

	// insert into tree
	fRangeTree.Insert(range);

	// insert in the free ranges list, if the range is free
	if (range->type == Range::RANGE_FREE)
		_FreeListInsertRange(range, range->size);
}


void
VMKernelAddressSpace::_RemoveRange(Range* range)
{
	TRACE("    VMKernelAddressSpace::_RemoveRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// remove from tree and range list
	fRangeTree.Remove(range);
	fRangeList.Remove(range);

	// if it is a free range, also remove it from the free list
	if (range->type == Range::RANGE_FREE)
		_FreeListRemoveRange(range, range->size);
}


status_t
VMKernelAddressSpace::_AllocateRange(
	const virtual_address_restrictions* addressRestrictions,
	size_t size, bool allowReservedRange, uint32 allocationFlags,
	Range*& _range)
{
	TRACE("  VMKernelAddressSpace::_AllocateRange(address: %p, size: %#"
		B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", reserved allowed: %d)\n",
		addressRestrictions->address, size,
		addressRestrictions->address_specification, allowReservedRange);

	// prepare size, alignment and the base address for the range search
	addr_t address = (addr_t)addressRestrictions->address;
	size = ROUNDUP(size, B_PAGE_SIZE);
	size_t alignment = addressRestrictions->alignment != 0
		? addressRestrictions->alignment : B_PAGE_SIZE;

	switch (addressRestrictions->address_specification) {
		case B_EXACT_ADDRESS:
		{
			if (address % B_PAGE_SIZE != 0)
				return B_BAD_VALUE;
			break;
		}

		case B_BASE_ADDRESS:
			address = ROUNDUP(address, B_PAGE_SIZE);
			break;

		case B_ANY_KERNEL_BLOCK_ADDRESS:
			// align the memory to the next power of two of the size
			while (alignment < size)
				alignment <<= 1;

			// fall through...

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
			address = fBase;
			break;

		default:
			return B_BAD_VALUE;
	}

	// find a range
	Range* range = _FindFreeRange(address, size, alignment,
		addressRestrictions->address_specification, allowReservedRange,
		address);
	if (range == NULL) {
		return addressRestrictions->address_specification == B_EXACT_ADDRESS
			? B_BAD_VALUE : B_NO_MEMORY;
	}

	TRACE("  VMKernelAddressSpace::_AllocateRange() found range: %p (%#"
		B_PRIxADDR ", %#" B_PRIxSIZE ", %d)\n", range, range->base, range->size,
		range->type);

	// We have found a range. It might not be a perfect fit, in which case
	// we have to split the range.
	size_t rangeSize = range->size;

	if (address == range->base) {
		// allocation at the beginning of the range
		if (range->size > size) {
			// only partial -- split the range
			Range* leftOverRange = new(fRangesObjectCache, allocationFlags)
				Range(address + size, range->size - size, range);
			if (leftOverRange == NULL)
				return B_NO_MEMORY;

			range->size = size;
			_InsertRange(leftOverRange);
		}
	} else if (address + size == range->base + range->size) {
		// allocation at the end of the range -- split the range
		Range* leftOverRange = new(fRangesObjectCache, allocationFlags) Range(
			range->base, range->size - size, range);
		if (leftOverRange == NULL)
			return B_NO_MEMORY;

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange);
	} else {
		// allocation in the middle of the range -- split the range in three
		Range* leftOverRange1 = new(fRangesObjectCache, allocationFlags) Range(
			range->base, address - range->base, range);
		if (leftOverRange1 == NULL)
			return B_NO_MEMORY;
		Range* leftOverRange2 = new(fRangesObjectCache, allocationFlags) Range(
			address + size, range->size - size - leftOverRange1->size, range);
		if (leftOverRange2 == NULL) {
			object_cache_delete(fRangesObjectCache, leftOverRange1,
				allocationFlags);
			return B_NO_MEMORY;
		}

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange1);
		_InsertRange(leftOverRange2);
	}

	// If the range is a free range, remove it from the respective free list.
	if (range->type == Range::RANGE_FREE)
		_FreeListRemoveRange(range, rangeSize);

	IncrementChangeCount();

	TRACE("  VMKernelAddressSpace::_AllocateRange() -> %p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ")\n", range, range->base, range->size);

	_range = range;
	return B_OK;
}


VMKernelAddressSpace::Range*
VMKernelAddressSpace::_FindFreeRange(addr_t start, size_t size,
	size_t alignment, uint32 addressSpec, bool allowReservedRange,
	addr_t& _foundAddress)
{
	TRACE("  VMKernelAddressSpace::_FindFreeRange(start: %#" B_PRIxADDR
		", size: %#" B_PRIxSIZE ", alignment: %#" B_PRIxSIZE ", addressSpec: %#"
		B_PRIx32 ", reserved allowed: %d)\n", start, size, alignment,
		addressSpec, allowReservedRange);

	switch (addressSpec) {
		case B_BASE_ADDRESS:
		{
			// We have to iterate through the range list starting at the given
			// address. This is the most inefficient case.
			Range* range = fRangeTree.FindClosest(start, true);
			while (range != NULL) {
				if (range->type == Range::RANGE_FREE) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						_foundAddress = alignedBase;
						return range;
					}
				}
				range = fRangeList.GetNext(range);
			}

			// We didn't find a free spot in the requested range, so we'll
			// try again without any restrictions.
			start = fBase;
			addressSpec = B_ANY_ADDRESS;
			// fall through...
		}

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		{
			// We want to allocate from the first non-empty free list that is
			// guaranteed to contain the size. Finding a free range is O(1),
			// unless there are constraints (min base address, alignment).
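			// ld((size * 2 - 1) >> PAGE_SHIFT) is ceil(log2(size in pages)),
			// so every range in the lists from freeListIndex upwards is
			// large enough.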
			int freeListIndex = ld((size * 2 - 1) >> PAGE_SHIFT);

			for (int32 i = freeListIndex; i < fFreeListCount; i++) {
				RangeFreeList& freeList = fFreeLists[i];
				if (freeList.IsEmpty())
					continue;

				for (RangeFreeList::Iterator it = freeList.GetIterator();
						Range* range = it.Next();) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						_foundAddress = alignedBase;
						return range;
					}
				}
			}

			if (!allowReservedRange)
				return NULL;

			// We haven't found any free ranges, but we're supposed to look
			// for reserved ones, too. Iterate through the range list starting
			// at the given address.
			Range* range = fRangeTree.FindClosest(start, true);
			while (range != NULL) {
				if (range->type == Range::RANGE_RESERVED) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						// allocation from the back might be preferred
						// -- adjust the base accordingly
						if ((range->reserved.flags & RESERVED_AVOID_BASE)
								!= 0) {
							alignedBase = ROUNDDOWN(
								range->base + (range->size - size), alignment);
						}

						_foundAddress = alignedBase;
						return range;
					}
				}
				range = fRangeList.GetNext(range);
			}

			return NULL;
		}

		case B_EXACT_ADDRESS:
		{
			Range* range = fRangeTree.FindClosest(start, true);
			TRACE("    B_EXACT_ADDRESS: range: %p\n", range);
			if (range == NULL || range->type == Range::RANGE_AREA
				|| range->base + (range->size - 1) < start + (size - 1)) {
				// TODO: Support allocating if the area range covers multiple
				// free and reserved ranges!
				TRACE("    -> no suitable range\n");
				return NULL;
			}

			if (range->type != Range::RANGE_FREE && !allowReservedRange) {
				TRACE("    -> reserved range not allowed\n");
				return NULL;
			}

			_foundAddress = start;
			return range;
		}

		default:
			return NULL;
	}
}


void
VMKernelAddressSpace::_FreeRange(Range* range, uint32 allocationFlags)
{
	TRACE("  VMKernelAddressSpace::_FreeRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// Check whether one or both of the neighboring ranges are free already,
	// and join them, if so.
	Range* previousRange = fRangeList.GetPrevious(range);
	Range* nextRange = fRangeList.GetNext(range);

	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join them all -- keep the first one, delete the others
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			_RemoveRange(nextRange);
			previousRange->size += range->size + nextRange->size;
			object_cache_delete(fRangesObjectCache, range, allocationFlags);
			object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		} else {
			// join with the previous range only, delete the supplied one
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			previousRange->size += range->size;
			object_cache_delete(fRangesObjectCache, range, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		}
	} else {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join with the next range and delete it
			_RemoveRange(nextRange);
			range->size += nextRange->size;
			object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
		}

		// mark the range free and add it to the respective free list
		range->type = Range::RANGE_FREE;
		_FreeListInsertRange(range, range->size);
	}

	IncrementChangeCount();
}


#ifdef PARANOIA_CHECKS

void
VMKernelAddressSpace::_CheckStructures() const
{
	// general tree structure check
	fRangeTree.CheckTree();

	// check range list and tree
	size_t spaceSize = fEndAddress - fBase + 1;
	addr_t nextBase = fBase;
	Range* previousRange = NULL;
	int previousRangeType = Range::RANGE_AREA;
	uint64 freeRanges = 0;

	RangeList::ConstIterator listIt = fRangeList.GetIterator();
	RangeTree::ConstIterator treeIt = fRangeTree.GetIterator();
	while (true) {
		Range* range = listIt.Next();
		Range* treeRange = treeIt.Next();
		if (range != treeRange) {
			panic("VMKernelAddressSpace::_CheckStructures(): list/tree range "
				"mismatch: %p vs %p", range, treeRange);
		}
		if (range == NULL)
			break;

		if (range->base != nextBase) {
			panic("VMKernelAddressSpace::_CheckStructures(): range base %#"
				B_PRIxADDR ", expected: %#" B_PRIxADDR, range->base, nextBase);
		}

		if (range->size == 0) {
			panic("VMKernelAddressSpace::_CheckStructures(): empty range %p",
				range);
		}

		if (range->size % B_PAGE_SIZE != 0) {
			panic("VMKernelAddressSpace::_CheckStructures(): range %p (%#"
				B_PRIxADDR ", %#" B_PRIxSIZE ") not page aligned", range,
				range->base, range->size);
		}

		if (range->size > spaceSize - (range->base - fBase)) {
			panic("VMKernelAddressSpace::_CheckStructures(): range too large: "
				"(%#" B_PRIxADDR ", %#" B_PRIxSIZE "), address space end: %#"
				B_PRIxADDR, range->base, range->size, fEndAddress);
		}

		if (range->type == Range::RANGE_FREE) {
			freeRanges++;

			if (previousRangeType == Range::RANGE_FREE) {
				panic("VMKernelAddressSpace::_CheckStructures(): adjoining "
					"free ranges: %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE
					"), %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ")", previousRange,
					previousRange->base, previousRange->size, range,
					range->base, range->size);
			}
		}

		previousRange = range;
		nextBase = range->base + range->size;
		previousRangeType = range->type;
	}

	if (nextBase - 1 != fEndAddress) {
		panic("VMKernelAddressSpace::_CheckStructures(): space not fully "
			"covered by ranges: last: %#" B_PRIxADDR ", expected %#" B_PRIxADDR,
			nextBase - 1, fEndAddress);
	}

	// check free lists
	uint64 freeListRanges = 0;
	for (int i = 0; i < fFreeListCount; i++) {
		RangeFreeList& freeList = fFreeLists[i];
		if (freeList.IsEmpty())
			continue;

		for (RangeFreeList::Iterator it = freeList.GetIterator();
				Range* range = it.Next();) {
			if (range->type != Range::RANGE_FREE) {
				panic("VMKernelAddressSpace::_CheckStructures(): non-free "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"free list %d", range, range->base, range->size,
					range->type, i);
			}

			if (fRangeTree.Find(range->base) != range) {
				panic("VMKernelAddressSpace::_CheckStructures(): unknown "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"free list %d", range, range->base, range->size,
					range->type, i);
			}

			if (ld(range->size) - PAGE_SHIFT != i) {
				panic("VMKernelAddressSpace::_CheckStructures(): "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"wrong free list %d", range, range->base, range->size,
					range->type, i);
			}

			freeListRanges++;
		}
	}
}

#endif	// PARANOIA_CHECKS
969