xref: /haiku/src/system/kernel/vm/VMUserAddressSpace.cpp (revision 1deede7388b04dbeec5af85cae7164735ea9e70d)
1 /*
2  * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include "VMUserAddressSpace.h"
12 
13 #include <stdlib.h>
14 
15 #include <algorithm>
16 
17 #include <KernelExport.h>
18 
19 #include <heap.h>
20 #include <thread.h>
21 #include <util/atomic.h>
22 #include <util/Random.h>
23 #include <vm/vm.h>
24 #include <vm/VMArea.h>
25 
26 
// Uncomment TRACE_VM to get verbose per-call tracing of this file via
// dprintf(); with it disabled, TRACE((...)) compiles to an empty statement.
//#define TRACE_VM
#ifdef TRACE_VM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
33 
34 
#ifdef B_HAIKU_64_BIT
// Upper bounds on the ASLR entropy applied by _RandomizeAddress():
// kMaxInitialRandomize caps the randomization range when the "initial"
// flag is set (used for B_RANDOMIZED_BASE_ADDRESS placement in
// _InsertAreaSlot()), kMaxRandomize caps all other randomized placements.
const addr_t VMUserAddressSpace::kMaxRandomize			=  0x8000000000ul;
const addr_t VMUserAddressSpace::kMaxInitialRandomize	= 0x20000000000ul;
#else
const addr_t VMUserAddressSpace::kMaxRandomize			=  0x800000ul;
const addr_t VMUserAddressSpace::kMaxInitialRandomize	= 0x2000000ul;
#endif
42 
43 
44 /*!	Verifies that an area with the given aligned base and size fits into
45 	the spot defined by base and limit and checks for overflows.
46 */
47 static inline bool
48 is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
49 {
50 	return (alignedBase >= base && alignedBase + (size - 1) > alignedBase
51 		&& alignedBase + (size - 1) <= limit);
52 }
53 
54 
55 static inline bool
56 is_base_address_spec(uint32 addressSpec)
57 {
58 	return addressSpec == B_BASE_ADDRESS
59 		|| addressSpec == B_RANDOMIZED_BASE_ADDRESS;
60 }
61 
62 
static inline addr_t
align_address(addr_t address, size_t alignment)
{
	// round the address up to the next multiple of alignment
	return ROUNDUP(address, alignment);
}
68 
69 
70 static inline addr_t
71 align_address(addr_t address, size_t alignment, uint32 addressSpec,
72 	addr_t baseAddress)
73 {
74 	if (is_base_address_spec(addressSpec))
75 		address = std::max(address, baseAddress);
76 	return align_address(address, alignment);
77 }
78 
79 
80 // #pragma mark - VMUserAddressSpace
81 
82 
//!	Creates the address space object for team \a id covering
//!	[\a base, \a base + \a size - 1].
VMUserAddressSpace::VMUserAddressSpace(team_id id, addr_t base, size_t size)
	:
	VMAddressSpace(id, base, size, "address space"),
	fNextInsertHint(0)
{
}
89 
90 
VMUserAddressSpace::~VMUserAddressSpace()
{
	// nothing to do here
}
94 
95 
96 inline VMArea*
97 VMUserAddressSpace::FirstArea() const
98 {
99 	VMUserArea* area = fAreas.LeftMost();
100 	while (area != NULL && area->id == RESERVED_AREA_ID)
101 		area = fAreas.Next(area);
102 	return area;
103 }
104 
105 
106 inline VMArea*
107 VMUserAddressSpace::NextArea(VMArea* _area) const
108 {
109 	VMUserArea* area = static_cast<VMUserArea*>(_area);
110 	area = fAreas.Next(area);
111 	while (area != NULL && area->id == RESERVED_AREA_ID)
112 		area = fAreas.Next(area);
113 	return area;
114 }
115 
116 
//!	Allocates a new, not yet inserted area object for this address space.
VMArea*
VMUserAddressSpace::CreateArea(const char* name, uint32 wiring,
	uint32 protection, uint32 allocationFlags)
{
	return VMUserArea::Create(this, name, wiring, protection, allocationFlags);
}
123 
124 
void
VMUserAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);
	// destroy in place and release the raw storage with matching flags
	// (counterpart to the allocation done in VMUserArea::Create())
	area->~VMUserArea();
	free_etc(area, allocationFlags);
}
132 
133 
134 //! You must hold the address space's read lock.
135 VMArea*
136 VMUserAddressSpace::LookupArea(addr_t address) const
137 {
138 	VMUserArea* area = fAreas.FindClosest(address, true);
139 	if (area == NULL || area->id == RESERVED_AREA_ID)
140 		return NULL;
141 
142 	return area->ContainsAddress(address) ? area : NULL;
143 }
144 
145 
//! You must hold the address space's read lock.
VMArea*
VMUserAddressSpace::FindClosestArea(addr_t address, bool less) const
{
	VMUserArea* area = fAreas.FindClosest(address, less);
	// skip reserved pseudo areas; note the skipping always walks towards
	// higher addresses, even when "less" was requested, so the result may
	// then lie above "address"
	while (area != NULL && area->id == RESERVED_AREA_ID)
		area = fAreas.Next(area);
	return area;
}
155 
156 
/*!	This inserts the area you pass into the address space.
	It will also set the "_address" argument to its base address when
	the call succeeds.
	You need to hold the VMAddressSpace write lock.
*/
status_t
VMUserAddressSpace::InsertArea(VMArea* _area, size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 allocationFlags, void** _address)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	addr_t searchBase, searchEnd;
	status_t status;

	// translate the address specification into the [searchBase, searchEnd]
	// range that _InsertAreaSlot() may place the area in
	switch (addressRestrictions->address_specification) {
		case B_EXACT_ADDRESS:
			// the caller demands exactly this base address
			searchBase = (addr_t)addressRestrictions->address;
			searchEnd = (addr_t)addressRestrictions->address + (size - 1);
			break;

		case B_BASE_ADDRESS:
		case B_RANDOMIZED_BASE_ADDRESS:
			// anywhere at or above the given base address
			searchBase = std::max(fBase, (addr_t)addressRestrictions->address);
			searchEnd = fEndAddress;
			break;

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		case B_RANDOMIZED_ANY_ADDRESS:
			// anywhere in the user-allocatable part of the space
			searchBase = std::max(fBase, (addr_t)USER_BASE_ANY);
			searchEnd = fEndAddress;
			break;

		default:
			return B_BAD_VALUE;
	}

	status = _InsertAreaSlot(searchBase, size, searchEnd,
		addressRestrictions->address_specification,
		addressRestrictions->alignment, area, allocationFlags);
	if (status == B_OK) {
		if (_address != NULL)
			*_address = (void*)area->Base();
		// NOTE(review): fFreeSpace is decremented for reserved pseudo areas
		// too, while RemoveArea() only re-adds it for real areas -- confirm
		// this asymmetry is intended.
		fFreeSpace -= area->Size();
	}

	return status;
}
207 
208 
//! You must hold the address space's write lock.
void
VMUserAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	fAreas.Remove(area);

	if (area->id != RESERVED_AREA_ID) {
		// only real areas affect the change counter and the free-space
		// statistics; reserved pseudo areas do not
		IncrementChangeCount();
		fFreeSpace += area->Size();
	}
}
222 
223 
//!	Returns whether \a area could grow to \a newSize without moving.
bool
VMUserAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
{
	// the area following in address order (may be a reserved range)
	VMUserArea* next = fAreas.Next(static_cast<VMUserArea*>(area));
	addr_t newEnd = area->Base() + (newSize - 1);

	if (next == NULL)
		return fEndAddress >= newEnd;

	if (next->Base() > newEnd)
		return true;

	// If the area was created inside a reserved area, it can
	// also be resized in that area
	// (a reserved area's cache_offset holds its original base address,
	// set in ReserveAddressRange(); it tells whether this area was carved
	// out of that reservation)
	// TODO: if there is free space after the reserved area, it could
	// be used as well...
	return next->id == RESERVED_AREA_ID
		&& (uint64)next->cache_offset <= (uint64)area->Base()
		&& next->Base() + (next->Size() - 1) >= newEnd;
}
244 
245 
//!	Resizes \a area to \a newSize, eating into a following reservation
//!	if the area was created inside one. Caller must hold the write lock.
status_t
VMUserAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	addr_t newEnd = area->Base() + (newSize - 1);
	VMUserArea* next = fAreas.Next(area);
	if (next != NULL && next->Base() <= newEnd) {
		// growing overlaps the next area; that is only legal under the same
		// conditions CanResizeArea() checks (the next area is the
		// reservation this area was carved out of and still covers newEnd)
		if (next->id != RESERVED_AREA_ID
			|| (uint64)next->cache_offset > (uint64)area->Base()
			|| next->Base() + (next->Size() - 1) < newEnd) {
			panic("resize situation for area %p has changed although we "
				"should have the address space lock", area);
			return B_ERROR;
		}

		// resize reserved area
		addr_t offset = area->Base() + newSize - next->Base();
		if (next->Size() <= offset) {
			// the grown area swallows the reservation entirely
			// NOTE(review): unlike UnreserveAddressRange(), no Put() is done
			// for the removed reservation here -- verify the address space
			// reference accounting.
			RemoveArea(next, allocationFlags);
			next->~VMUserArea();
			free_etc(next, allocationFlags);
		} else {
			// cut the beginning of the reservation
			status_t error = ShrinkAreaHead(next, next->Size() - offset,
				allocationFlags);
			if (error != B_OK)
				return error;
		}
	}

	area->SetSize(newSize);
	return B_OK;
}
280 
281 
282 status_t
283 VMUserAddressSpace::ShrinkAreaHead(VMArea* area, size_t size,
284 	uint32 allocationFlags)
285 {
286 	size_t oldSize = area->Size();
287 	if (size == oldSize)
288 		return B_OK;
289 
290 	area->SetBase(area->Base() + oldSize - size);
291 	area->SetSize(size);
292 
293 	return B_OK;
294 }
295 
296 
297 status_t
298 VMUserAddressSpace::ShrinkAreaTail(VMArea* area, size_t size,
299 	uint32 allocationFlags)
300 {
301 	size_t oldSize = area->Size();
302 	if (size == oldSize)
303 		return B_OK;
304 
305 	area->SetSize(size);
306 
307 	return B_OK;
308 }
309 
310 
//!	Reserves a range of the address space by inserting a pseudo area
//!	(created via VMUserArea::CreateReserved()). Caller must hold the
//!	write lock.
status_t
VMUserAddressSpace::ReserveAddressRange(size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 flags, uint32 allocationFlags, void** _address)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// okay, someone is trying to delete this address space now, so we
		// can't insert the area, let's back out
		return B_BAD_TEAM_ID;
	}

	VMUserArea* area = VMUserArea::CreateReserved(this, flags, allocationFlags);
	if (area == NULL)
		return B_NO_MEMORY;

	status_t status = InsertArea(area, size, addressRestrictions,
		allocationFlags, _address);
	if (status != B_OK) {
		area->~VMUserArea();
		free_etc(area, allocationFlags);
		return status;
	}

	area->cache_offset = area->Base();
		// we cache the original base address here
		// (CanResizeArea()/ResizeArea() use it to tell whether an area was
		// created inside this reservation)

	// hold a reference to the address space for as long as the reservation
	// exists; balanced by Put() in UnreserveAddressRange() /
	// UnreserveAllAddressRanges() / _InsertAreaIntoReservedRegion()
	Get();
	return B_OK;
}
341 
342 
//!	Removes all reserved pseudo areas lying completely within
//!	[\a address, \a address + \a size - 1]. Real areas are left untouched.
status_t
VMUserAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
	uint32 allocationFlags)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// okay, someone is trying to delete this address space now, so we can't
		// remove the area, so back out
		return B_BAD_TEAM_ID;
	}

	// the area must be completely part of the reserved range
	VMUserArea* area = fAreas.FindClosest(address, false);
	if (area == NULL)
		return B_OK;

	addr_t endAddress = address + size - 1;
	// walk forward while areas still end within the given range
	for (VMUserAreaTree::Iterator it = fAreas.GetIterator(area);
		(area = it.Next()) != NULL
			&& area->Base() + area->Size() - 1 <= endAddress;) {

		if (area->id == RESERVED_AREA_ID) {
			// remove reserved range
			RemoveArea(area, allocationFlags);
			// balances the Get() done in ReserveAddressRange()
			Put();
			area->~VMUserArea();
			free_etc(area, allocationFlags);
		}
	}

	return B_OK;
}
375 
376 
//!	Removes and frees every reserved pseudo area in this address space.
void
VMUserAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
{
	// NOTE(review): areas are removed from the tree while the iterator is
	// live; this relies on the tree iterator tolerating removal of the
	// element it just returned -- confirm with the VMUserAreaTree
	// implementation.
	for (VMUserAreaTree::Iterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		if (area->id == RESERVED_AREA_ID) {
			RemoveArea(area, allocationFlags);
			// balances the Get() done in ReserveAddressRange()
			Put();
			area->~VMUserArea();
			free_etc(area, allocationFlags);
		}
	}
}
390 
391 
392 void
393 VMUserAddressSpace::Dump() const
394 {
395 	VMAddressSpace::Dump();
396 	kprintf("area_list:\n");
397 
398 	for (VMUserAreaTree::ConstIterator it = fAreas.GetIterator();
399 			VMUserArea* area = it.Next();) {
400 		kprintf(" area 0x%" B_PRIx32 ": ", area->id);
401 		kprintf("base_addr = 0x%lx ", area->Base());
402 		kprintf("size = 0x%lx ", area->Size());
403 		kprintf("name = '%s' ",
404 			area->id != RESERVED_AREA_ID ? area->name : "reserved");
405 		kprintf("protection = 0x%" B_PRIx32 "\n", area->protection);
406 	}
407 }
408 
409 
410 inline bool
411 VMUserAddressSpace::_IsRandomized(uint32 addressSpec) const
412 {
413 	return fRandomizingEnabled
414 		&& (addressSpec == B_RANDOMIZED_ANY_ADDRESS
415 			|| addressSpec == B_RANDOMIZED_BASE_ADDRESS);
416 }
417 
418 
//!	Picks a random, aligned address in [\a start, \a end]. \a start must
//!	already be aligned. The amount of entropy is capped by kMaxRandomize,
//!	or kMaxInitialRandomize when \a initial is set.
addr_t
VMUserAddressSpace::_RandomizeAddress(addr_t start, addr_t end,
	size_t alignment, bool initial)
{
	ASSERT((start & addr_t(alignment - 1)) == 0);
	ASSERT(start <= end);

	if (start == end)
		return start;

	// limit the entropy window
	addr_t range = end - start + 1;
	if (initial)
		range = std::min(range, kMaxInitialRandomize);
	else
		range = std::min(range, kMaxRandomize);

	// choose a random offset into the window; clearing the low bits keeps
	// "start + random" aligned (alignment is assumed to be a power of two,
	// as the mask arithmetic requires)
	addr_t random = secure_get_random<addr_t>();
	random %= range;
	random &= ~addr_t(alignment - 1);

	return start + random;
}
441 
442 
/*!	Finds a reserved area that covers the region spanned by \a start and
	\a size, inserts the \a area into that region and makes sure that
	there are reserved regions for the remaining parts.
*/
status_t
VMUserAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size,
	VMUserArea* area, uint32 allocationFlags)
{
	// candidate: the area with the greatest base <= start; it must cover
	// the entire requested range
	VMUserArea* reserved = fAreas.FindClosest(start, true);
	if (reserved == NULL
		|| !reserved->ContainsAddress(start)
		|| !reserved->ContainsAddress(start + size - 1)) {
		return B_ENTRY_NOT_FOUND;
	}

	// This area covers the requested range
	if (reserved->id != RESERVED_AREA_ID) {
		// but it's not reserved space, it's a real area
		return B_BAD_VALUE;
	}

	// Now we have to transfer the requested part of the reserved
	// range to the new area - and remove, resize or split the old
	// reserved area.

	if (start == reserved->Base()) {
		// the area starts at the beginning of the reserved range

		if (size == reserved->Size()) {
			// the new area fully covers the reserved range
			fAreas.Remove(reserved);
			// balances the Get() done in ReserveAddressRange()
			Put();
			reserved->~VMUserArea();
			free_etc(reserved, allocationFlags);
		} else {
			// resize the reserved range behind the area
			reserved->SetBase(reserved->Base() + size);
			reserved->SetSize(reserved->Size() - size);
		}
	} else if (start + size == reserved->Base() + reserved->Size()) {
		// the area is at the end of the reserved range
		// resize the reserved range before the area
		reserved->SetSize(start - reserved->Base());
	} else {
		// the area splits the reserved range into two separate ones
		// we need a new reserved area to cover this space
		VMUserArea* newReserved = VMUserArea::CreateReserved(this,
			reserved->protection, allocationFlags);
		if (newReserved == NULL)
			return B_NO_MEMORY;

		// the second reservation holds an address space reference of its own
		Get();

		// resize regions
		newReserved->SetBase(start + size);
		newReserved->SetSize(
			reserved->Base() + reserved->Size() - start - size);
		// keep the original reservation base for CanResizeArea()/ResizeArea()
		newReserved->cache_offset = reserved->cache_offset;

		reserved->SetSize(start - reserved->Base());

		fAreas.Insert(newReserved);
	}

	area->SetBase(start);
	area->SetSize(size);
	fAreas.Insert(area);
	IncrementChangeCount();

	return B_OK;
}
514 
515 
/*!	Must be called with this address space's write lock held */
status_t
VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
	uint32 addressSpec, size_t alignment, VMUserArea* area,
	uint32 allocationFlags)
{
	TRACE(("VMUserAddressSpace::_InsertAreaSlot: address space %p, start "
		"0x%lx, size %ld, end 0x%lx, addressSpec %" B_PRIu32 ", area %p\n",
		this, start, size, end, addressSpec, area));

	// do some sanity checking
	if (start < fBase || size == 0 || end > fEndAddress
		|| start + (size - 1) > end)
		return B_BAD_ADDRESS;

	if (addressSpec == B_EXACT_ADDRESS && area->id != RESERVED_AREA_ID) {
		// search for a reserved area
		status_t status = _InsertAreaIntoReservedRegion(start, size, area,
			allocationFlags);
		if (status == B_OK || status == B_BAD_VALUE)
			return status;

		// There was no reserved area, and the slot doesn't seem to be used
		// already
		// TODO: this could be further optimized.
	}

	if (alignment == 0)
		alignment = B_PAGE_SIZE;
	if (addressSpec == B_ANY_KERNEL_BLOCK_ADDRESS) {
		// align the memory to the next power of two of the size
		while (alignment < size)
			alignment <<= 1;
	}

	start = align_address(start, alignment);

	// the insert hint only applies to unconstrained placements
	bool useHint
		= addressSpec != B_EXACT_ADDRESS && !is_base_address_spec(addressSpec);

	// originalStart != 0 means we moved the search start (randomization or
	// hint) and may retry from the unmoved start via "second_chance" below
	addr_t originalStart = 0;
	if (fRandomizingEnabled && addressSpec == B_RANDOMIZED_BASE_ADDRESS) {
		originalStart = start;
		start = _RandomizeAddress(start, end - size + 1, alignment, true);
	} else if (useHint
		&& start <= fNextInsertHint && fNextInsertHint <= end - size + 1) {
		originalStart = start;
		start = fNextInsertHint;
	}

	// walk up to the spot where we should start searching
second_chance:
	// "next" is the first area beginning after start + size (if any),
	// "last" the area preceding it
	VMUserArea* next = fAreas.FindClosest(start + size, false);
	VMUserArea* last = next != NULL
			? fAreas.Previous(next) : fAreas.FindClosest(start + size, true);

	// find the right spot depending on the address specification - the area
	// will be inserted directly after "last" ("next" is not referenced anymore)

	bool foundSpot = false;
	switch (addressSpec) {
		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		case B_RANDOMIZED_ANY_ADDRESS:
		case B_BASE_ADDRESS:
		case B_RANDOMIZED_BASE_ADDRESS:
		{
			VMUserAreaTree::Iterator it = fAreas.GetIterator(
				next != NULL ? next : fAreas.LeftMost());

			// find a hole big enough for a new area
			if (last == NULL) {
				// see if we can build it at the beginning of the virtual map
				addr_t alignedBase = align_address(start, alignment);
				addr_t nextBase = next == NULL
					? end : std::min(next->Base() - 1, end);
				if (is_valid_spot(start, alignedBase, size, nextBase)) {
					// rangeEnd is the highest base that still fits the hole
					addr_t rangeEnd = std::min(nextBase - size + 1, end);
					if (_IsRandomized(addressSpec)) {
						alignedBase = _RandomizeAddress(alignedBase, rangeEnd,
							alignment);
					}

					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			// keep walking
			while (next != NULL && next->Base() + next->Size() - 1 <= end) {
				// try the gap between "last" and "next"
				addr_t alignedBase = align_address(last->Base() + last->Size(),
					alignment, addressSpec, start);
				addr_t nextBase = std::min(end, next->Base() - 1);

				if (is_valid_spot(last->Base() + (last->Size() - 1),
						alignedBase, size, nextBase)) {
					addr_t rangeEnd = std::min(nextBase - size + 1, end);
					if (_IsRandomized(addressSpec)) {
						alignedBase = _RandomizeAddress(alignedBase,
							rangeEnd, alignment);
					}

					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			if (foundSpot)
				break;

			// finally, try the gap between the last area and "end"
			addr_t alignedBase = align_address(last->Base() + last->Size(),
				alignment, addressSpec, start);

			if (next == NULL && is_valid_spot(last->Base() + (last->Size() - 1),
					alignedBase, size, end)) {
				if (_IsRandomized(addressSpec)) {
					alignedBase = _RandomizeAddress(alignedBase, end - size + 1,
						alignment);
				}

				// got a spot
				foundSpot = true;
				area->SetBase(alignedBase);
				break;
			} else if (is_base_address_spec(addressSpec)) {
				// we didn't find a free spot in the requested range, so we'll
				// try again without any restrictions
				if (!_IsRandomized(addressSpec)) {
					start = USER_BASE_ANY;
					addressSpec = B_ANY_ADDRESS;
				} else if (start == originalStart) {
					start = USER_BASE_ANY;
					addressSpec = B_RANDOMIZED_ANY_ADDRESS;
				} else {
					// first retry from the unrandomized base before giving
					// up the base restriction entirely
					start = originalStart;
					addressSpec = B_RANDOMIZED_BASE_ADDRESS;
				}

				goto second_chance;
			} else if (useHint
					&& originalStart != 0 && start != originalStart) {
				// the hinted search failed; retry from the original start
				start = originalStart;
				goto second_chance;
			} else if (area->id != RESERVED_AREA_ID) {
				// We didn't find a free spot - if there are any reserved areas,
				// we can now test those for free space
				// TODO: it would make sense to start with the biggest of them
				it = fAreas.GetIterator();
				next = it.Next();
				for (last = NULL; next != NULL; next = it.Next()) {
					if (next->id != RESERVED_AREA_ID) {
						last = next;
						continue;
					} else if (next->Base() + size - 1 > end)
						break;

					// TODO: take free space after the reserved area into
					// account!
					addr_t alignedBase = align_address(next->Base(), alignment);
					if (next->Base() == alignedBase && next->Size() == size) {
						// The reserved area is entirely covered, and thus,
						// removed
						fAreas.Remove(next);

						foundSpot = true;
						area->SetBase(alignedBase);
						next->~VMUserArea();
						free_etc(next, allocationFlags);
						break;
					}

					if ((next->protection & RESERVED_AVOID_BASE) == 0
						&& alignedBase == next->Base()
						&& next->Size() >= size) {
						addr_t rangeEnd = std::min(
							next->Base() + next->Size() - size, end);
						if (_IsRandomized(addressSpec)) {
							alignedBase = _RandomizeAddress(next->Base(),
								rangeEnd, alignment);
						}
						addr_t offset = alignedBase - next->Base();

						// The new area will be placed at the beginning of the
						// reserved area and the reserved area will be offset
						// and resized
						foundSpot = true;
						next->SetBase(next->Base() + offset + size);
						next->SetSize(next->Size() - offset - size);
						area->SetBase(alignedBase);
						break;
					}

					if (is_valid_spot(next->Base(), alignedBase, size,
							std::min(next->Base() + next->Size() - 1, end))) {
						// The new area will be placed at the end of the
						// reserved area, and the reserved area will be resized
						// to make space

						if (_IsRandomized(addressSpec)) {
							// pick a randomized base near the reservation's
							// end, bounded below by its (aligned) base
							addr_t alignedNextBase = align_address(next->Base(),
								alignment);

							addr_t startRange = next->Base() + next->Size();
							startRange -= size + kMaxRandomize;
							startRange = ROUNDDOWN(startRange, alignment);
							startRange = std::max(startRange, alignedNextBase);

							addr_t rangeEnd
								= std::min(next->Base() + next->Size() - size,
									end);
							alignedBase = _RandomizeAddress(startRange,
								rangeEnd, alignment);
						} else {
							alignedBase = ROUNDDOWN(
								next->Base() + next->Size() - size, alignment);
						}

						foundSpot = true;
						next->SetSize(alignedBase - next->Base());
						area->SetBase(alignedBase);
						break;
					}

					last = next;
				}
			}

			break;
		}

		case B_EXACT_ADDRESS:
			// see if we can create it exactly here
			if ((last == NULL || last->Base() + (last->Size() - 1) < start)
				&& (next == NULL || next->Base() > start + (size - 1))) {
				foundSpot = true;
				area->SetBase(start);
				break;
			}
			break;
		default:
			return B_BAD_VALUE;
	}

	if (!foundSpot)
		return addressSpec == B_EXACT_ADDRESS ? B_BAD_VALUE : B_NO_MEMORY;

	// remember where to continue searching next time
	if (useHint)
		fNextInsertHint = area->Base() + size;

	area->SetSize(size);
	fAreas.Insert(area);
	IncrementChangeCount();
	return B_OK;
}
779