xref: /haiku/src/system/kernel/vm/VMUserAddressSpace.cpp (revision 15fb7d88e971c4d6c787c6a3a5c159afb1ebf77b)
1 /*
2  * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include "VMUserAddressSpace.h"
12 
13 #include <stdlib.h>
14 
15 #include <algorithm>
16 
17 #include <KernelExport.h>
18 
19 #include <heap.h>
20 #include <thread.h>
21 #include <util/atomic.h>
22 #include <util/Random.h>
23 #include <vm/vm.h>
24 #include <vm/VMArea.h>
25 
26 
27 //#define TRACE_VM
28 #ifdef TRACE_VM
29 #	define TRACE(x) dprintf x
30 #else
31 #	define TRACE(x) ;
32 #endif
33 
34 
35 #ifdef B_HAIKU_64_BIT
36 const addr_t VMUserAddressSpace::kMaxRandomize			=  0x8000000000ul;
37 const addr_t VMUserAddressSpace::kMaxInitialRandomize	= 0x20000000000ul;
38 #else
39 const addr_t VMUserAddressSpace::kMaxRandomize			=  0x800000ul;
40 const addr_t VMUserAddressSpace::kMaxInitialRandomize	= 0x2000000ul;
41 #endif
42 
43 
44 /*!	Verifies that an area with the given aligned base and size fits into
45 	the spot defined by base and limit and checks for overflows.
46 */
47 static inline bool
48 is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
49 {
50 	return (alignedBase >= base && alignedBase + (size - 1) > alignedBase
51 		&& alignedBase + (size - 1) <= limit);
52 }
53 
54 
55 static inline bool
56 is_base_address_spec(uint32 addressSpec)
57 {
58 	return addressSpec == B_BASE_ADDRESS
59 		|| addressSpec == B_RANDOMIZED_BASE_ADDRESS;
60 }
61 
62 
//!	Rounds \a address up to the next multiple of \a alignment.
static inline addr_t
align_address(addr_t address, size_t alignment)
{
	return ROUNDUP(address, alignment);
}
68 
69 
70 static inline addr_t
71 align_address(addr_t address, size_t alignment, uint32 addressSpec,
72 	addr_t baseAddress)
73 {
74 	if (is_base_address_spec(addressSpec))
75 		address = std::max(address, baseAddress);
76 	return align_address(address, alignment);
77 }
78 
79 
80 // #pragma mark - VMUserAddressSpace
81 
82 
/*!	Creates a user address space for team \a id spanning \a size bytes
	starting at \a base. The area lookup hint starts out unset.
*/
VMUserAddressSpace::VMUserAddressSpace(team_id id, addr_t base, size_t size)
	:
	VMAddressSpace(id, base, size, "address space"),
	fAreaHint(NULL)
{
}
89 
90 
//!	Nothing to tear down beyond what the base class destructor handles.
VMUserAddressSpace::~VMUserAddressSpace()
{
}
94 
95 
96 inline VMArea*
97 VMUserAddressSpace::FirstArea() const
98 {
99 	VMUserArea* area = fAreas.Head();
100 	while (area != NULL && area->id == RESERVED_AREA_ID)
101 		area = fAreas.GetNext(area);
102 	return area;
103 }
104 
105 
106 inline VMArea*
107 VMUserAddressSpace::NextArea(VMArea* _area) const
108 {
109 	VMUserArea* area = static_cast<VMUserArea*>(_area);
110 	area = fAreas.GetNext(area);
111 	while (area != NULL && area->id == RESERVED_AREA_ID)
112 		area = fAreas.GetNext(area);
113 	return area;
114 }
115 
116 
//!	Allocates and constructs a new VMUserArea for this address space.
VMArea*
VMUserAddressSpace::CreateArea(const char* name, uint32 wiring,
	uint32 protection, uint32 allocationFlags)
{
	return VMUserArea::Create(this, name, wiring, protection, allocationFlags);
}
123 
124 
//!	Destroys an area created by CreateArea(). Areas are placement-
//!	constructed in raw memory, so the destructor is invoked explicitly
//!	before the storage is released with free_etc().
void
VMUserAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);
	area->~VMUserArea();
	free_etc(area, allocationFlags);
}
132 
133 
134 //! You must hold the address space's read lock.
135 VMArea*
136 VMUserAddressSpace::LookupArea(addr_t address) const
137 {
138 	// check the area hint first
139 	VMArea* areaHint = atomic_pointer_get(&fAreaHint);
140 	if (areaHint != NULL && areaHint->ContainsAddress(address))
141 		return areaHint;
142 
143 	for (VMUserAreaList::ConstIterator it = fAreas.GetIterator();
144 			VMUserArea* area = it.Next();) {
145 		if (area->id == RESERVED_AREA_ID)
146 			continue;
147 
148 		if (area->ContainsAddress(address)) {
149 			atomic_pointer_set(&fAreaHint, area);
150 			return area;
151 		}
152 	}
153 
154 	return NULL;
155 }
156 
157 
/*!	This inserts the area you pass into the address space.
	It will also set the "_address" argument to its base address when
	the call succeeds.
	You need to hold the VMAddressSpace write lock.
*/
status_t
VMUserAddressSpace::InsertArea(VMArea* _area, size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 allocationFlags, void** _address)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	addr_t searchBase, searchEnd;
	status_t status;

	// Translate the address specification into the [searchBase, searchEnd]
	// range that _InsertAreaSlot() will consider.
	switch (addressRestrictions->address_specification) {
		case B_EXACT_ADDRESS:
			// Only the requested range itself is acceptable.
			searchBase = (addr_t)addressRestrictions->address;
			searchEnd = (addr_t)addressRestrictions->address + (size - 1);
			break;

		case B_BASE_ADDRESS:
		case B_RANDOMIZED_BASE_ADDRESS:
			// Anywhere at or above the requested base, clamped to the
			// address space.
			searchBase = std::max(fBase, (addr_t)addressRestrictions->address);
			searchEnd = fEndAddress;
			break;

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		case B_RANDOMIZED_ANY_ADDRESS:
			// Anywhere in the general-purpose part of the address space.
			searchBase = std::max(fBase, (addr_t)USER_BASE_ANY);
			searchEnd = fEndAddress;
			break;

		default:
			return B_BAD_VALUE;
	}

	status = _InsertAreaSlot(searchBase, size, searchEnd,
		addressRestrictions->address_specification,
		addressRestrictions->alignment, area, allocationFlags);
	if (status == B_OK) {
		if (_address != NULL)
			*_address = (void*)area->Base();
		// Account for the space consumed by the inserted area.
		fFreeSpace -= area->Size();
	}

	return status;
}
208 
209 
//! You must hold the address space's write lock.
void
VMUserAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	fAreas.Remove(area);

	// Only real areas affect the change count, free-space accounting and
	// the lookup hint; reserved placeholders are skipped by lookups anyway.
	// NOTE(review): InsertArea() decrements fFreeSpace for reserved ranges
	// too, but they are not re-credited here -- verify this asymmetry is
	// intended.
	if (area->id != RESERVED_AREA_ID) {
		IncrementChangeCount();
		fFreeSpace += area->Size();

		// Invalidate the hint if it points at the removed area; the write
		// lock keeps concurrent LookupArea() readers out.
		if (area == fAreaHint)
			fAreaHint = NULL;
	}
}
226 
227 
228 bool
229 VMUserAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
230 {
231 	VMUserArea* next = fAreas.GetNext(static_cast<VMUserArea*>(area));
232 	addr_t newEnd = area->Base() + (newSize - 1);
233 
234 	if (next == NULL)
235 		return fEndAddress >= newEnd;
236 
237 	if (next->Base() > newEnd)
238 		return true;
239 
240 	// If the area was created inside a reserved area, it can
241 	// also be resized in that area
242 	// TODO: if there is free space after the reserved area, it could
243 	// be used as well...
244 	return next->id == RESERVED_AREA_ID
245 		&& (uint64)next->cache_offset <= (uint64)area->Base()
246 		&& next->Base() + (next->Size() - 1) >= newEnd;
247 }
248 
249 
/*!	Resizes \a _area to \a newSize. If the area grows into a trailing
	reserved range it was originally carved from, that reserved range is
	shrunk or removed to make room. The caller must have validated the
	resize via CanResizeArea() while continuously holding the write lock.
*/
status_t
VMUserAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	addr_t newEnd = area->Base() + (newSize - 1);
	VMUserArea* next = fAreas.GetNext(area);
	if (next != NULL && next->Base() <= newEnd) {
		if (next->id != RESERVED_AREA_ID
			|| (uint64)next->cache_offset > (uint64)area->Base()
			|| next->Base() + (next->Size() - 1) < newEnd) {
			// The successor is a real area, or a reserved range that cannot
			// absorb the growth -- CanResizeArea() should have caught this.
			panic("resize situation for area %p has changed although we "
				"should have the address space lock", area);
			return B_ERROR;
		}

		// resize reserved area
		addr_t offset = area->Base() + newSize - next->Base();
		if (next->Size() <= offset) {
			// The grown area swallows the reserved range completely.
			// NOTE(review): unlike _InsertAreaIntoReservedRegion(), no Put()
			// is issued for the fully consumed reservation -- verify whether
			// an address space reference is leaked here.
			RemoveArea(next, allocationFlags);
			next->~VMUserArea();
			free_etc(next, allocationFlags);
		} else {
			// Chop the overlapping head off the reserved range.
			status_t error = ShrinkAreaHead(next, next->Size() - offset,
				allocationFlags);
			if (error != B_OK)
				return error;
		}
	}

	area->SetSize(newSize);
	return B_OK;
}
284 
285 
286 status_t
287 VMUserAddressSpace::ShrinkAreaHead(VMArea* area, size_t size,
288 	uint32 allocationFlags)
289 {
290 	size_t oldSize = area->Size();
291 	if (size == oldSize)
292 		return B_OK;
293 
294 	area->SetBase(area->Base() + oldSize - size);
295 	area->SetSize(size);
296 
297 	return B_OK;
298 }
299 
300 
301 status_t
302 VMUserAddressSpace::ShrinkAreaTail(VMArea* area, size_t size,
303 	uint32 allocationFlags)
304 {
305 	size_t oldSize = area->Size();
306 	if (size == oldSize)
307 		return B_OK;
308 
309 	area->SetSize(size);
310 
311 	return B_OK;
312 }
313 
314 
/*!	Reserves an address range of \a size bytes. The reservation is
	represented by a placeholder area with id == RESERVED_AREA_ID that is
	skipped by lookups; \a flags is stashed in its protection field (see
	RESERVED_AVOID_BASE handling in _InsertAreaSlot()).
*/
status_t
VMUserAddressSpace::ReserveAddressRange(size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 flags, uint32 allocationFlags, void** _address)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// okay, someone is trying to delete this address space now, so we
		// can't insert the area, let's back out
		return B_BAD_TEAM_ID;
	}

	VMUserArea* area = VMUserArea::CreateReserved(this, flags, allocationFlags);
	if (area == NULL)
		return B_NO_MEMORY;

	status_t status = InsertArea(area, size, addressRestrictions,
		allocationFlags, _address);
	if (status != B_OK) {
		area->~VMUserArea();
		free_etc(area, allocationFlags);
		return status;
	}

	area->cache_offset = area->Base();
		// we cache the original base address here

	// The reservation holds a reference to the address space, dropped
	// again when the range is unreserved.
	Get();
	return B_OK;
}
345 
346 
/*!	Removes all reserved ranges that lie completely within
	[\a address, \a address + \a size - 1], dropping the address space
	reference each reservation held.
*/
status_t
VMUserAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
	uint32 allocationFlags)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// okay, someone is trying to delete this address space now, so we can't
		// insert the area, so back out
		return B_BAD_TEAM_ID;
	}

	// search area list and remove any matching reserved ranges
	addr_t endAddress = address + (size - 1);
	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		// the area must be completely part of the reserved range
		// (the list is base-sorted, so the first area ending past
		// endAddress terminates the scan)
		if (area->Base() + (area->Size() - 1) > endAddress)
			break;
		if (area->id == RESERVED_AREA_ID && area->Base() >= (addr_t)address) {
			// remove reserved range
			// (it.Next() was already called for `area`, so removal here
			// presumably leaves the iterator valid -- verify against the
			// DoublyLinkedList iterator semantics)
			RemoveArea(area, allocationFlags);
			Put();
			area->~VMUserArea();
			free_etc(area, allocationFlags);
		}
	}

	return B_OK;
}
376 
377 
/*!	Removes every reserved range in the address space, dropping the
	address space reference each reservation held. Used during teardown.
*/
void
VMUserAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
{
	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		if (area->id == RESERVED_AREA_ID) {
			// it.Next() was already called for `area`, so removing it here
			// presumably leaves the iterator valid -- verify against the
			// DoublyLinkedList iterator semantics.
			RemoveArea(area, allocationFlags);
			Put();
			area->~VMUserArea();
			free_etc(area, allocationFlags);
		}
	}
}
391 
392 
393 void
394 VMUserAddressSpace::Dump() const
395 {
396 	VMAddressSpace::Dump();
397 	kprintf("area_hint: %p\n", fAreaHint);
398 
399 	kprintf("area_list:\n");
400 
401 	for (VMUserAreaList::ConstIterator it = fAreas.GetIterator();
402 			VMUserArea* area = it.Next();) {
403 		kprintf(" area 0x%" B_PRIx32 ": ", area->id);
404 		kprintf("base_addr = 0x%lx ", area->Base());
405 		kprintf("size = 0x%lx ", area->Size());
406 		kprintf("name = '%s' ", area->name);
407 		kprintf("protection = 0x%" B_PRIx32 "\n", area->protection);
408 	}
409 }
410 
411 
412 inline bool
413 VMUserAddressSpace::_IsRandomized(uint32 addressSpec) const
414 {
415 	return fRandomizingEnabled
416 		&& (addressSpec == B_RANDOMIZED_ANY_ADDRESS
417 			|| addressSpec == B_RANDOMIZED_BASE_ADDRESS);
418 }
419 
420 
421 addr_t
422 VMUserAddressSpace::_RandomizeAddress(addr_t start, addr_t end,
423 	size_t alignment, bool initial)
424 {
425 	ASSERT((start & addr_t(alignment - 1)) == 0);
426 	ASSERT(start <= end);
427 
428 	if (start == end)
429 		return start;
430 
431 	addr_t range = end - start + 1;
432 	if (initial)
433 		range = std::min(range, kMaxInitialRandomize);
434 	else
435 		range = std::min(range, kMaxRandomize);
436 
437 	addr_t random = secure_get_random<addr_t>();
438 	random %= range;
439 	random &= ~addr_t(alignment - 1);
440 
441 	return start + random;
442 }
443 
444 
/*!	Finds a reserved area that covers the region spanned by \a start and
	\a size, inserts the \a area into that region and makes sure that
	there are reserved regions for the remaining parts.
*/
status_t
VMUserAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size,
	VMUserArea* area, uint32 allocationFlags)
{
	VMUserArea* next;

	// Find the area (reserved or not) that fully covers the requested range.
	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			(next = it.Next()) != NULL;) {
		if (next->Base() <= start
			&& next->Base() + (next->Size() - 1) >= start + (size - 1)) {
			// This area covers the requested range
			if (next->id != RESERVED_AREA_ID) {
				// but it's not reserved space, it's a real area
				return B_BAD_VALUE;
			}

			break;
		}
	}

	if (next == NULL)
		return B_ENTRY_NOT_FOUND;

	// Now we have to transfer the requested part of the reserved
	// range to the new area - and remove, resize or split the old
	// reserved area.

	if (start == next->Base()) {
		// the area starts at the beginning of the reserved range
		fAreas.Insert(next, area);

		if (size == next->Size()) {
			// the new area fully covers the reserved range; drop the
			// placeholder and the address space reference it held
			fAreas.Remove(next);
			Put();
			next->~VMUserArea();
			free_etc(next, allocationFlags);
		} else {
			// resize the reserved range behind the area
			next->SetBase(next->Base() + size);
			next->SetSize(next->Size() - size);
		}
	} else if (start + size == next->Base() + next->Size()) {
		// the area is at the end of the reserved range
		fAreas.Insert(fAreas.GetNext(next), area);

		// resize the reserved range before the area
		next->SetSize(start - next->Base());
	} else {
		// the area splits the reserved range into two separate ones
		// we need a new reserved area to cover this space
		VMUserArea* reserved = VMUserArea::CreateReserved(this,
			next->protection, allocationFlags);
		if (reserved == NULL)
			return B_NO_MEMORY;

		// the new placeholder holds its own address space reference
		Get();
		fAreas.Insert(fAreas.GetNext(next), reserved);
		fAreas.Insert(reserved, area);

		// resize regions
		reserved->SetSize(next->Base() + next->Size() - start - size);
		next->SetSize(start - next->Base());
		reserved->SetBase(start + size);
		// both halves keep the originally reserved base address
		reserved->cache_offset = next->cache_offset;
	}

	area->SetBase(start);
	area->SetSize(size);
	IncrementChangeCount();

	return B_OK;
}
522 
523 
/*!	Must be called with this address space's write lock held.
	Finds a slot of \a size bytes within [\a start, \a end] that satisfies
	\a addressSpec and \a alignment, sets the base/size of \a area
	accordingly and links it into the (base-sorted) area list. Base-address
	specs fall back to an unconstrained search ("second chance") when the
	requested range is exhausted; as a last resort, non-reserved areas may
	be carved out of existing reserved ranges.
*/
status_t
VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
	uint32 addressSpec, size_t alignment, VMUserArea* area,
	uint32 allocationFlags)
{
	VMUserArea* last = NULL;
	VMUserArea* next;
	bool foundSpot = false;
	addr_t originalStart = 0;

	TRACE(("VMUserAddressSpace::_InsertAreaSlot: address space %p, start "
		"0x%lx, size %ld, end 0x%lx, addressSpec %" B_PRIu32 ", area %p\n",
		this, start, size, end, addressSpec, area));

	// do some sanity checking
	if (start < fBase || size == 0 || end > fEndAddress
		|| start + (size - 1) > end)
		return B_BAD_ADDRESS;

	if (addressSpec == B_EXACT_ADDRESS && area->id != RESERVED_AREA_ID) {
		// search for a reserved area
		status_t status = _InsertAreaIntoReservedRegion(start, size, area,
			allocationFlags);
		if (status == B_OK || status == B_BAD_VALUE)
			return status;

		// There was no reserved area, and the slot doesn't seem to be used
		// already
		// TODO: this could be further optimized.
	}

	if (alignment == 0)
		alignment = B_PAGE_SIZE;
	if (addressSpec == B_ANY_KERNEL_BLOCK_ADDRESS) {
		// align the memory to the next power of two of the size
		while (alignment < size)
			alignment <<= 1;
	}

	start = align_address(start, alignment);

	// For randomized base placement, remember the requested start so the
	// second-chance pass can retry from it before giving up the range.
	if (fRandomizingEnabled && addressSpec == B_RANDOMIZED_BASE_ADDRESS) {
		originalStart = start;
		start = _RandomizeAddress(start, end - size + 1, alignment, true);
	}

	// walk up to the spot where we should start searching
second_chance:
	VMUserAreaList::Iterator it = fAreas.GetIterator();
	while ((next = it.Next()) != NULL) {
		if (next->Base() > start + (size - 1)) {
			// we have a winner
			break;
		}

		last = next;
	}

	// find the right spot depending on the address specification - the area
	// will be inserted directly after "last" ("next" is not referenced anymore)

	switch (addressSpec) {
		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		case B_RANDOMIZED_ANY_ADDRESS:
		case B_BASE_ADDRESS:
		case B_RANDOMIZED_BASE_ADDRESS:
		{
			// find a hole big enough for a new area
			if (last == NULL) {
				// see if we can build it at the beginning of the virtual map
				addr_t alignedBase = align_address(start, alignment);
				addr_t nextBase = next == NULL
					? end : std::min(next->Base() - 1, end);
				if (is_valid_spot(start, alignedBase, size, nextBase)) {
					addr_t rangeEnd = std::min(nextBase - size + 1, end);
					if (_IsRandomized(addressSpec)) {
						alignedBase = _RandomizeAddress(alignedBase, rangeEnd,
							alignment);
					}

					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			// keep walking
			while (next != NULL && next->Base() + next->Size() - 1 <= end) {
				// Try the gap between "last" and "next".
				addr_t alignedBase = align_address(last->Base() + last->Size(),
					alignment, addressSpec, start);
				addr_t nextBase = std::min(end, next->Base() - 1);

				if (is_valid_spot(last->Base() + (last->Size() - 1),
						alignedBase, size, nextBase)) {
					addr_t rangeEnd = std::min(nextBase - size + 1, end);
					if (_IsRandomized(addressSpec)) {
						alignedBase = _RandomizeAddress(alignedBase,
							rangeEnd, alignment);
					}

					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			if (foundSpot)
				break;

			// Try the space after the last area up to "end".
			addr_t alignedBase = align_address(last->Base() + last->Size(),
				alignment, addressSpec, start);

			if (next == NULL && is_valid_spot(last->Base() + (last->Size() - 1),
					alignedBase, size, end)) {
				if (_IsRandomized(addressSpec)) {
					alignedBase = _RandomizeAddress(alignedBase, end - size + 1,
						alignment);
				}

				// got a spot
				foundSpot = true;
				area->SetBase(alignedBase);
				break;
			} else if (is_base_address_spec(addressSpec)) {
				// we didn't find a free spot in the requested range, so we'll
				// try again without any restrictions
				if (!_IsRandomized(addressSpec)) {
					start = USER_BASE_ANY;
					addressSpec = B_ANY_ADDRESS;
				} else if (start == originalStart) {
					// the randomized start already failed; fall back to an
					// unconstrained randomized search
					start = USER_BASE_ANY;
					addressSpec = B_RANDOMIZED_ANY_ADDRESS;
				} else {
					// retry once from the originally requested base
					start = originalStart;
					addressSpec = B_RANDOMIZED_BASE_ADDRESS;
				}

				last = NULL;
				goto second_chance;
			} else if (area->id != RESERVED_AREA_ID) {
				// We didn't find a free spot - if there are any reserved areas,
				// we can now test those for free space
				// TODO: it would make sense to start with the biggest of them
				it.Rewind();
				next = it.Next();
				for (last = NULL; next != NULL; next = it.Next()) {
					if (next->id != RESERVED_AREA_ID) {
						last = next;
						continue;
					} else if (next->Base() + size - 1 > end)
						break;

					// TODO: take free space after the reserved area into
					// account!
					addr_t alignedBase = align_address(next->Base(), alignment);
					if (next->Base() == alignedBase && next->Size() == size) {
						// The reserved area is entirely covered, and thus,
						// removed
						fAreas.Remove(next);

						foundSpot = true;
						area->SetBase(alignedBase);
						next->~VMUserArea();
						free_etc(next, allocationFlags);
						break;
					}

					if ((next->protection & RESERVED_AVOID_BASE) == 0
						&& alignedBase == next->Base()
						&& next->Size() >= size) {
						addr_t rangeEnd = std::min(
							next->Base() + next->Size() - size, end);
						if (_IsRandomized(addressSpec)) {
							alignedBase = _RandomizeAddress(next->Base(),
								rangeEnd, alignment);
						}
						addr_t offset = alignedBase - next->Base();

						// The new area will be placed at the beginning of the
						// reserved area and the reserved area will be offset
						// and resized
						foundSpot = true;
						next->SetBase(next->Base() + offset + size);
						next->SetSize(next->Size() - offset - size);
						area->SetBase(alignedBase);
						break;
					}

					if (is_valid_spot(next->Base(), alignedBase, size,
							std::min(next->Base() + next->Size() - 1, end))) {
						// The new area will be placed at the end of the
						// reserved area, and the reserved area will be resized
						// to make space

						if (_IsRandomized(addressSpec)) {
							// Randomize within at most kMaxRandomize bytes of
							// the reserved range's tail.
							addr_t alignedNextBase = align_address(next->Base(),
								alignment);

							addr_t startRange = next->Base() + next->Size();
							startRange -= size + kMaxRandomize;
							startRange = ROUNDDOWN(startRange, alignment);
							startRange = std::max(startRange, alignedNextBase);

							addr_t rangeEnd
								= std::min(next->Base() + next->Size() - size,
									end);
							alignedBase = _RandomizeAddress(startRange,
								rangeEnd, alignment);
						} else {
							alignedBase = ROUNDDOWN(
								next->Base() + next->Size() - size, alignment);
						}

						foundSpot = true;
						next->SetSize(alignedBase - next->Base());
						area->SetBase(alignedBase);
						last = next;
						break;
					}

					last = next;
				}
			}

			break;
		}

		case B_EXACT_ADDRESS:
			// see if we can create it exactly here
			if ((last == NULL || last->Base() + (last->Size() - 1) < start)
				&& (next == NULL || next->Base() > start + (size - 1))) {
				foundSpot = true;
				area->SetBase(start);
				break;
			}
			break;
		default:
			return B_BAD_VALUE;
	}

	if (!foundSpot)
		return addressSpec == B_EXACT_ADDRESS ? B_BAD_VALUE : B_NO_MEMORY;

	// Link the area in directly after "last" (or at the list head).
	area->SetSize(size);
	if (last)
		fAreas.Insert(fAreas.GetNext(last), area);
	else
		fAreas.Insert(fAreas.Head(), area);

	IncrementChangeCount();
	return B_OK;
}
785