xref: /haiku/src/system/kernel/vm/VMUserAddressSpace.cpp (revision 16c83730262f1e4f0fc69d80744bb36dcfbbe3af)
1 /*
2  * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include "VMUserAddressSpace.h"
12 
13 #include <stdlib.h>
14 
15 #include <KernelExport.h>
16 
17 #include <heap.h>
18 #include <thread.h>
19 #include <util/atomic.h>
20 #include <util/Random.h>
21 #include <vm/vm.h>
22 #include <vm/VMArea.h>
23 
24 
25 //#define TRACE_VM
26 #ifdef TRACE_VM
27 #	define TRACE(x) dprintf x
28 #else
29 #	define TRACE(x) ;
30 #endif
31 
32 
#ifdef B_HAIKU_64_BIT
// Upper bounds for the randomization (ASLR) offsets used by
// _RandomizeAddress(): kMaxRandomize caps the entropy added to an ordinary
// placement, kMaxInitialRandomize the larger slide used for the initial
// (base) placement. Larger values on 64 bit, where address space is plentiful.
const addr_t VMUserAddressSpace::kMaxRandomize			=  0x8000000000ul;
const addr_t VMUserAddressSpace::kMaxInitialRandomize	= 0x20000000000ul;
#else
const addr_t VMUserAddressSpace::kMaxRandomize			=  0x800000ul;
const addr_t VMUserAddressSpace::kMaxInitialRandomize	= 0x2000000ul;
#endif
40 
41 
42 /*!	Verifies that an area with the given aligned base and size fits into
43 	the spot defined by base and limit and checks for overflows.
44 */
45 static inline bool
46 is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
47 {
48 	return (alignedBase >= base && alignedBase + (size - 1) > alignedBase
49 		&& alignedBase + (size - 1) <= limit);
50 }
51 
52 
53 static inline bool
54 is_randomized(uint32 addressSpec)
55 {
56 	return addressSpec == B_RANDOMIZED_ANY_ADDRESS
57 		|| addressSpec == B_RANDOMIZED_BASE_ADDRESS;
58 }
59 
60 
/*!	Creates a user address space for team \a id covering
	[\a base, \a base + \a size). The lookup hint used by LookupArea()
	starts out unset.
*/
VMUserAddressSpace::VMUserAddressSpace(team_id id, addr_t base, size_t size)
	:
	VMAddressSpace(id, base, size, "address space"),
	fAreaHint(NULL)
{
}
67 
68 
//!	Nothing to do here; the base class destructor handles common teardown.
VMUserAddressSpace::~VMUserAddressSpace()
{
}
72 
73 
74 inline VMArea*
75 VMUserAddressSpace::FirstArea() const
76 {
77 	VMUserArea* area = fAreas.Head();
78 	while (area != NULL && area->id == RESERVED_AREA_ID)
79 		area = fAreas.GetNext(area);
80 	return area;
81 }
82 
83 
84 inline VMArea*
85 VMUserAddressSpace::NextArea(VMArea* _area) const
86 {
87 	VMUserArea* area = static_cast<VMUserArea*>(_area);
88 	area = fAreas.GetNext(area);
89 	while (area != NULL && area->id == RESERVED_AREA_ID)
90 		area = fAreas.GetNext(area);
91 	return area;
92 }
93 
94 
/*!	Allocates and constructs a new (not yet inserted) user area.
	Returns \c NULL on allocation failure.
*/
VMArea*
VMUserAddressSpace::CreateArea(const char* name, uint32 wiring,
	uint32 protection, uint32 allocationFlags)
{
	return VMUserArea::Create(this, name, wiring, protection, allocationFlags);
}
101 
102 
/*!	Destroys \a _area and returns its memory to the allocator.
	The area must already have been removed from the address space.
*/
void
VMUserAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);
	// explicit destructor call + free_etc() mirror the placement-style
	// allocation done by VMUserArea::Create()
	area->~VMUserArea();
	free_etc(area, allocationFlags);
}
110 
111 
112 //! You must hold the address space's read lock.
113 VMArea*
114 VMUserAddressSpace::LookupArea(addr_t address) const
115 {
116 	// check the area hint first
117 	VMArea* areaHint = atomic_pointer_get(&fAreaHint);
118 	if (areaHint != NULL && areaHint->ContainsAddress(address))
119 		return areaHint;
120 
121 	for (VMUserAreaList::ConstIterator it = fAreas.GetIterator();
122 			VMUserArea* area = it.Next();) {
123 		if (area->id == RESERVED_AREA_ID)
124 			continue;
125 
126 		if (area->ContainsAddress(address)) {
127 			atomic_pointer_set(&fAreaHint, area);
128 			return area;
129 		}
130 	}
131 
132 	return NULL;
133 }
134 
135 
/*!	This inserts the area you pass into the address space.
	It will also set the "_address" argument to its base address when
	the call succeeds.
	You need to hold the VMAddressSpace write lock.
*/
status_t
VMUserAddressSpace::InsertArea(VMArea* _area, size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 allocationFlags, void** _address)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	addr_t searchBase, searchEnd;
	status_t status;

	// translate the address specification into a [searchBase, searchEnd]
	// range for the slot search
	switch (addressRestrictions->address_specification) {
		case B_EXACT_ADDRESS:
			searchBase = (addr_t)addressRestrictions->address;
			searchEnd = (addr_t)addressRestrictions->address + (size - 1);
			break;

		case B_BASE_ADDRESS:
		case B_RANDOMIZED_BASE_ADDRESS:
			// the given address is only a lower bound for the search
			searchBase = (addr_t)addressRestrictions->address;
			searchEnd = fEndAddress;
			break;

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		case B_RANDOMIZED_ANY_ADDRESS:
			searchBase = fBase;
			searchEnd = fEndAddress;
			break;

		default:
			return B_BAD_VALUE;
	}

	// TODO: remove this again when vm86 mode is moved into the kernel
	// completely (currently needs a userland address space!)
	if (addressRestrictions->address_specification != B_EXACT_ADDRESS)
		searchBase = max_c(searchBase, USER_BASE_ANY);

	status = _InsertAreaSlot(searchBase, size, searchEnd,
		addressRestrictions->address_specification,
		addressRestrictions->alignment, area, allocationFlags);
	if (status == B_OK) {
		if (_address != NULL)
			*_address = (void*)area->Base();
		fFreeSpace -= area->Size();
			// NOTE(review): this also runs for reserved areas, while
			// RemoveArea() restores free space only for real areas —
			// verify this asymmetry is intended
	}

	return status;
}
191 
192 
193 //! You must hold the address space's write lock.
194 void
195 VMUserAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
196 {
197 	VMUserArea* area = static_cast<VMUserArea*>(_area);
198 
199 	fAreas.Remove(area);
200 
201 	if (area->id != RESERVED_AREA_ID) {
202 		IncrementChangeCount();
203 		fFreeSpace += area->Size();
204 
205 		if (area == fAreaHint)
206 			fAreaHint = NULL;
207 	}
208 }
209 
210 
211 bool
212 VMUserAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
213 {
214 	VMUserArea* next = fAreas.GetNext(static_cast<VMUserArea*>(area));
215 	addr_t newEnd = area->Base() + (newSize - 1);
216 
217 	if (next == NULL)
218 		return fEndAddress >= newEnd;
219 
220 	if (next->Base() > newEnd)
221 		return true;
222 
223 	// If the area was created inside a reserved area, it can
224 	// also be resized in that area
225 	// TODO: if there is free space after the reserved area, it could
226 	// be used as well...
227 	return next->id == RESERVED_AREA_ID
228 		&& (uint64)next->cache_offset <= (uint64)area->Base()
229 		&& next->Base() + (next->Size() - 1) >= newEnd;
230 }
231 
232 
/*!	Resizes \a _area to \a newSize. If the area was carved out of a
	reserved range, the following reserved area is shrunk — or removed
	entirely — to make room. The checks here only guard against the
	situation having changed since CanResizeArea() succeeded.
	You must hold the address space's write lock.
*/
status_t
VMUserAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	addr_t newEnd = area->Base() + (newSize - 1);
	VMUserArea* next = fAreas.GetNext(area);
	if (next != NULL && next->Base() <= newEnd) {
		// growing into the successor is only legal if it is a reserved
		// range whose original base (cache_offset) is at or before this
		// area and which still covers the new end
		if (next->id != RESERVED_AREA_ID
			|| (uint64)next->cache_offset > (uint64)area->Base()
			|| next->Base() + (next->Size() - 1) < newEnd) {
			panic("resize situation for area %p has changed although we "
				"should have the address space lock", area);
			return B_ERROR;
		}

		// resize reserved area
		addr_t offset = area->Base() + newSize - next->Base();
		if (next->Size() <= offset) {
			// the grown area swallows the whole reservation
			// NOTE(review): unlike UnreserveAddressRange(), no Put() is
			// done here when the reservation vanishes — verify the
			// reference accounting
			RemoveArea(next, allocationFlags);
			next->~VMUserArea();
			free_etc(next, allocationFlags);
		} else {
			// cut the consumed part off the front of the reservation
			status_t error = ShrinkAreaHead(next, next->Size() - offset,
				allocationFlags);
			if (error != B_OK)
				return error;
		}
	}

	area->SetSize(newSize);
	return B_OK;
}
267 
268 
269 status_t
270 VMUserAddressSpace::ShrinkAreaHead(VMArea* area, size_t size,
271 	uint32 allocationFlags)
272 {
273 	size_t oldSize = area->Size();
274 	if (size == oldSize)
275 		return B_OK;
276 
277 	area->SetBase(area->Base() + oldSize - size);
278 	area->SetSize(size);
279 
280 	return B_OK;
281 }
282 
283 
284 status_t
285 VMUserAddressSpace::ShrinkAreaTail(VMArea* area, size_t size,
286 	uint32 allocationFlags)
287 {
288 	size_t oldSize = area->Size();
289 	if (size == oldSize)
290 		return B_OK;
291 
292 	area->SetSize(size);
293 
294 	return B_OK;
295 }
296 
297 
/*!	Reserves an address range of \a size bytes, placed according to
	\a addressRestrictions. A reservation is represented by a placeholder
	area (id RESERVED_AREA_ID) and holds a reference to this address space
	until it is unreserved.
*/
status_t
VMUserAddressSpace::ReserveAddressRange(size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 flags, uint32 allocationFlags, void** _address)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// okay, someone is trying to delete this address space now, so we
		// can't insert the area, let's back out
		return B_BAD_TEAM_ID;
	}

	VMUserArea* area = VMUserArea::CreateReserved(this, flags, allocationFlags);
	if (area == NULL)
		return B_NO_MEMORY;

	status_t status = InsertArea(area, size, addressRestrictions,
		allocationFlags, _address);
	if (status != B_OK) {
		// destroy the never-inserted placeholder
		area->~VMUserArea();
		free_etc(area, allocationFlags);
		return status;
	}

	area->cache_offset = area->Base();
		// we cache the original base address here

	// the reservation keeps the address space alive (balanced by Put() in
	// UnreserveAddressRange()/UnreserveAllAddressRanges())
	Get();
	return B_OK;
}
328 
329 
/*!	Removes all reserved ranges that lie completely within
	[\a address, \a address + \a size), dropping the address space
	reference each of them holds. Real areas in the range are untouched.
*/
status_t
VMUserAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
	uint32 allocationFlags)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// okay, someone is trying to delete this address space now, so we can't
		// insert the area, so back out
		return B_BAD_TEAM_ID;
	}

	// search area list and remove any matching reserved ranges
	addr_t endAddress = address + (size - 1);
	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		// the area must be completely part of the reserved range; since the
		// list is sorted by base, once an area extends past endAddress no
		// later area can lie inside the range either
		if (area->Base() + (area->Size() - 1) > endAddress)
			break;
		if (area->id == RESERVED_AREA_ID && area->Base() >= (addr_t)address) {
			// remove reserved range
			RemoveArea(area, allocationFlags);
			Put();
				// balances the Get() in ReserveAddressRange()
			area->~VMUserArea();
			free_etc(area, allocationFlags);
		}
	}

	return B_OK;
}
359 
360 
361 void
362 VMUserAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
363 {
364 	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
365 			VMUserArea* area = it.Next();) {
366 		if (area->id == RESERVED_AREA_ID) {
367 			RemoveArea(area, allocationFlags);
368 			Put();
369 			area->~VMUserArea();
370 			free_etc(area, allocationFlags);
371 		}
372 	}
373 }
374 
375 
376 void
377 VMUserAddressSpace::Dump() const
378 {
379 	VMAddressSpace::Dump();
380 	kprintf("area_hint: %p\n", fAreaHint);
381 
382 	kprintf("area_list:\n");
383 
384 	for (VMUserAreaList::ConstIterator it = fAreas.GetIterator();
385 			VMUserArea* area = it.Next();) {
386 		kprintf(" area 0x%" B_PRIx32 ": ", area->id);
387 		kprintf("base_addr = 0x%lx ", area->Base());
388 		kprintf("size = 0x%lx ", area->Size());
389 		kprintf("name = '%s' ", area->name);
390 		kprintf("protection = 0x%" B_PRIx32 "\n", area->protection);
391 	}
392 }
393 
394 
/*!	Returns a randomly chosen, alignment-preserving address in
	[\a start, \a end]. \a start must already be aligned to \a alignment
	(a power of two). The randomization range is capped at
	kMaxInitialRandomize when \a initial is set and kMaxRandomize
	otherwise, bounding how far placements can drift.
*/
addr_t
VMUserAddressSpace::_RandomizeAddress(addr_t start, addr_t end,
	size_t alignment, bool initial)
{
	ASSERT((start & addr_t(alignment - 1)) == 0);
	ASSERT(start <= end);

	if (start == end)
		return start;

	// number of candidate bytes, capped to bound the random offset
	addr_t range = end - start + 1;
	if (initial)
		range = min_c(range, kMaxInitialRandomize);
	else
		range = min_c(range, kMaxRandomize);

	// pick a random offset and round it down to the alignment; the result
	// stays aligned because start is aligned
	addr_t random = secure_get_random<addr_t>();
	random %= range;
	random &= ~addr_t(alignment - 1);

	return start + random;
}
417 
418 
/*!	Finds a reserved area that covers the region spanned by \a start and
	\a size, inserts the \a area into that region and makes sure that
	there are reserved regions for the remaining parts.
	Returns \c B_ENTRY_NOT_FOUND if no area covers the range,
	\c B_BAD_VALUE if the covering area is a real (non-reserved) one.
*/
status_t
VMUserAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size,
	VMUserArea* area, uint32 allocationFlags)
{
	VMUserArea* next;

	// find an area fully containing [start, start + size)
	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			(next = it.Next()) != NULL;) {
		if (next->Base() <= start
			&& next->Base() + (next->Size() - 1) >= start + (size - 1)) {
			// This area covers the requested range
			if (next->id != RESERVED_AREA_ID) {
				// but it's not reserved space, it's a real area
				return B_BAD_VALUE;
			}

			break;
		}
	}

	if (next == NULL)
		return B_ENTRY_NOT_FOUND;

	// Now we have to transfer the requested part of the reserved
	// range to the new area - and remove, resize or split the old
	// reserved area.

	if (start == next->Base()) {
		// the area starts at the beginning of the reserved range
		fAreas.Insert(next, area);

		if (size == next->Size()) {
			// the new area fully covers the reserved range
			fAreas.Remove(next);
			Put();
				// drop the reference the reservation held
			next->~VMUserArea();
			free_etc(next, allocationFlags);
		} else {
			// resize the reserved range behind the area
			next->SetBase(next->Base() + size);
			next->SetSize(next->Size() - size);
		}
	} else if (start + size == next->Base() + next->Size()) {
		// the area is at the end of the reserved range
		fAreas.Insert(fAreas.GetNext(next), area);

		// resize the reserved range before the area
		next->SetSize(start - next->Base());
	} else {
		// the area splits the reserved range into two separate ones
		// we need a new reserved area to cover this space
		VMUserArea* reserved = VMUserArea::CreateReserved(this,
			next->protection, allocationFlags);
		if (reserved == NULL)
			return B_NO_MEMORY;

		// the second reservation fragment holds its own reference
		Get();
		fAreas.Insert(fAreas.GetNext(next), reserved);
		fAreas.Insert(reserved, area);

		// resize regions
		reserved->SetSize(next->Base() + next->Size() - start - size);
		next->SetSize(start - next->Base());
		reserved->SetBase(start + size);
		// both fragments keep the original reservation base
		reserved->cache_offset = next->cache_offset;
	}

	area->SetBase(start);
	area->SetSize(size);
	IncrementChangeCount();

	return B_OK;
}
496 
497 
/*!	Must be called with this address space's write lock held.
	Finds a free slot of \a size bytes in [\a start, \a end] honoring
	\a addressSpec and \a alignment, sets the base of \a area accordingly
	and inserts it into the sorted area list. B_BASE_ADDRESS-style specs
	fall back to an unrestricted search ("second chance") on failure, and
	non-reserved insertions may consume space from reserved ranges.
*/
status_t
VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
	uint32 addressSpec, size_t alignment, VMUserArea* area,
	uint32 allocationFlags)
{
	VMUserArea* last = NULL;
	VMUserArea* next;
	bool foundSpot = false;
	addr_t originalStart = 0;

	TRACE(("VMUserAddressSpace::_InsertAreaSlot: address space %p, start "
		"0x%lx, size %ld, end 0x%lx, addressSpec %" B_PRIu32 ", area %p\n",
		this, start, size, end, addressSpec, area));

	// do some sanity checking
	if (start < fBase || size == 0 || end > fEndAddress
		|| start + (size - 1) > end)
		return B_BAD_ADDRESS;

	if (addressSpec == B_EXACT_ADDRESS && area->id != RESERVED_AREA_ID) {
		// search for a reserved area
		status_t status = _InsertAreaIntoReservedRegion(start, size, area,
			allocationFlags);
		if (status == B_OK || status == B_BAD_VALUE)
			return status;

		// There was no reserved area, and the slot doesn't seem to be used
		// already
		// TODO: this could be further optimized.
	}

	if (alignment == 0)
		alignment = B_PAGE_SIZE;
	if (addressSpec == B_ANY_KERNEL_BLOCK_ADDRESS) {
		// align the memory to the next power of two of the size
		while (alignment < size)
			alignment <<= 1;
	}

	start = ROUNDUP(start, alignment);

	if (addressSpec == B_RANDOMIZED_BASE_ADDRESS) {
		// remember the caller's base so a failed randomized search can be
		// retried from it (see the second_chance fallback below)
		originalStart = start;
		start = _RandomizeAddress(start, end - size + 1, alignment, true);
	}

	// walk up to the spot where we should start searching
second_chance:
	VMUserAreaList::Iterator it = fAreas.GetIterator();
	while ((next = it.Next()) != NULL) {
		if (next->Base() > start + (size - 1)) {
			// we have a winner
			break;
		}

		last = next;
	}

	// find the right spot depending on the address specification - the area
	// will be inserted directly after "last" ("next" is not referenced anymore)

	switch (addressSpec) {
		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		case B_RANDOMIZED_ANY_ADDRESS:
		case B_BASE_ADDRESS:
		case B_RANDOMIZED_BASE_ADDRESS:
		{
			// find a hole big enough for a new area
			if (last == NULL) {
				// see if we can build it at the beginning of the virtual map
				addr_t alignedBase = ROUNDUP(start, alignment);
				addr_t nextBase = next == NULL ? end : min_c(next->Base() - 1, end);
				if (is_valid_spot(start, alignedBase, size, nextBase)) {

					addr_t rangeEnd = min_c(nextBase - size + 1, end);
					if (is_randomized(addressSpec)) {
						alignedBase = _RandomizeAddress(alignedBase, rangeEnd,
							alignment);
					}

					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			// keep walking, trying the gap between "last" and "next"
			while (next != NULL && next->Base() + next->Size() - 1 <= end) {
				addr_t alignedBase = ROUNDUP(last->Base() + last->Size(),
					alignment);
				addr_t nextBase = min_c(end, next->Base() - 1);
				if (is_valid_spot(last->Base() + (last->Size() - 1),
						alignedBase, size, nextBase)) {

					addr_t rangeEnd = min_c(nextBase - size + 1, end);
					if (is_randomized(addressSpec)) {
						alignedBase = _RandomizeAddress(alignedBase,
							rangeEnd, alignment);
					}

					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			if (foundSpot)
				break;

			// finally try the gap after the last area, up to "end"
			addr_t alignedBase = ROUNDUP(last->Base() + last->Size(),
				alignment);
			if (next == NULL && is_valid_spot(last->Base() + (last->Size() - 1),
					alignedBase, size, end)) {

				if (is_randomized(addressSpec)) {
					alignedBase = _RandomizeAddress(alignedBase, end - size + 1,
						alignment);
				}

				// got a spot
				foundSpot = true;
				area->SetBase(alignedBase);
				break;
			} else if (addressSpec == B_BASE_ADDRESS
				|| addressSpec == B_RANDOMIZED_BASE_ADDRESS) {

				// we didn't find a free spot in the requested range, so we'll
				// try again without any restrictions
				if (!is_randomized(addressSpec)) {
					start = USER_BASE_ANY;
					addressSpec = B_ANY_ADDRESS;
				} else if (start == originalStart) {
					start = USER_BASE_ANY;
					addressSpec = B_RANDOMIZED_ANY_ADDRESS;
				} else {
					// retry once from the caller's (unrandomized) base
					start = originalStart;
					addressSpec = B_RANDOMIZED_BASE_ADDRESS;
				}

				last = NULL;
				goto second_chance;
			} else if (area->id != RESERVED_AREA_ID) {
				// We didn't find a free spot - if there are any reserved areas,
				// we can now test those for free space
				// TODO: it would make sense to start with the biggest of them
				it.Rewind();
				next = it.Next();
				for (last = NULL; next != NULL; next = it.Next()) {
					if (next->id != RESERVED_AREA_ID) {
						last = next;
						continue;
					} else if (next->Base() + size - 1 > end)
						break;

					// TODO: take free space after the reserved area into
					// account!
					addr_t alignedBase = ROUNDUP(next->Base(), alignment);
					if (next->Base() == alignedBase && next->Size() == size) {
						// The reserved area is entirely covered, and thus,
						// removed
						fAreas.Remove(next);

						foundSpot = true;
						area->SetBase(alignedBase);
						next->~VMUserArea();
						free_etc(next, allocationFlags);
						break;
					}

					if ((next->protection & RESERVED_AVOID_BASE) == 0
						&& alignedBase == next->Base()
						&& next->Size() >= size) {

						addr_t rangeEnd = min_c(
							next->Base() + next->Size() - size, end);
						if (is_randomized(addressSpec)) {
							alignedBase = _RandomizeAddress(next->Base(),
								rangeEnd, alignment);
						}
						addr_t offset = alignedBase - next->Base();

						// The new area will be placed at the beginning of the
						// reserved area and the reserved area will be offset
						// and resized
						foundSpot = true;
						next->SetBase(next->Base() + offset + size);
						next->SetSize(next->Size() - offset - size);
						area->SetBase(alignedBase);
						break;
					}

					if (is_valid_spot(next->Base(), alignedBase, size,
							min_c(next->Base() + next->Size() - 1, end))) {
						// The new area will be placed at the end of the
						// reserved area, and the reserved area will be resized
						// to make space

						if (is_randomized(addressSpec)) {
							// randomize within the reservation's tail,
							// bounded by kMaxRandomize
							addr_t alignedNextBase = ROUNDUP(next->Base(),
								alignment);

							addr_t startRange = next->Base() + next->Size();
							startRange -= size + kMaxRandomize;
							startRange = ROUNDDOWN(startRange, alignment);

							startRange = max_c(startRange, alignedNextBase);

							addr_t rangeEnd
								= min_c(next->Base() + next->Size() - size,
									end);
							alignedBase = _RandomizeAddress(startRange,
								rangeEnd, alignment);
						} else {
							alignedBase = ROUNDDOWN(
								next->Base() + next->Size() - size, alignment);
						}

						foundSpot = true;
						next->SetSize(alignedBase - next->Base());
						area->SetBase(alignedBase);
						last = next;
						break;
					}

					last = next;
				}
			}

			break;
		}

		case B_EXACT_ADDRESS:
			// see if we can create it exactly here
			if ((last == NULL || last->Base() + (last->Size() - 1) < start)
				&& (next == NULL || next->Base() > start + (size - 1))) {
				foundSpot = true;
				area->SetBase(start);
				break;
			}
			break;
		default:
			return B_BAD_VALUE;
	}

	if (!foundSpot)
		return addressSpec == B_EXACT_ADDRESS ? B_BAD_VALUE : B_NO_MEMORY;

	// insert into the sorted list, directly after "last"
	area->SetSize(size);
	if (last)
		fAreas.Insert(fAreas.GetNext(last), area);
	else
		fAreas.Insert(fAreas.Head(), area);

	IncrementChangeCount();
	return B_OK;
}
763