xref: /haiku/src/system/kernel/vm/VMUserAddressSpace.cpp (revision 7cea5bf07ffaec7e25508f3b81a2e5bd989e1b34)
1 /*
2  * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include "VMUserAddressSpace.h"
12 
13 #include <stdlib.h>
14 
15 #include <algorithm>
16 
17 #include <KernelExport.h>
18 
19 #include <heap.h>
20 #include <thread.h>
21 #include <util/atomic.h>
22 #include <util/Random.h>
23 #include <vm/vm.h>
24 #include <vm/VMArea.h>
25 
26 
27 //#define TRACE_VM
28 #ifdef TRACE_VM
29 #	define TRACE(x) dprintf x
30 #else
31 #	define TRACE(x) ;
32 #endif
33 
34 
#ifdef B_HAIKU_64_BIT
// Caps on the span used by _RandomizeAddress(): the random offset added to a
// placement never exceeds these. The larger "initial" cap applies when
// _RandomizeAddress() is called with initial == true (the first, base-address
// randomization in _InsertAreaSlot()); the smaller cap applies to all
// subsequent randomized placements.
const addr_t VMUserAddressSpace::kMaxRandomize			=  0x8000000000ul;
const addr_t VMUserAddressSpace::kMaxInitialRandomize	= 0x20000000000ul;
#else
const addr_t VMUserAddressSpace::kMaxRandomize			=  0x800000ul;
const addr_t VMUserAddressSpace::kMaxInitialRandomize	= 0x2000000ul;
#endif
42 
43 
44 /*!	Verifies that an area with the given aligned base and size fits into
45 	the spot defined by base and limit and checks for overflows.
46 */
47 static inline bool
48 is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
49 {
50 	return (alignedBase >= base && alignedBase + (size - 1) > alignedBase
51 		&& alignedBase + (size - 1) <= limit);
52 }
53 
54 
55 static inline bool
56 is_base_address_spec(uint32 addressSpec)
57 {
58 	return addressSpec == B_BASE_ADDRESS
59 		|| addressSpec == B_RANDOMIZED_BASE_ADDRESS;
60 }
61 
62 
// Rounds \a address up to the next multiple of \a alignment.
// NOTE(review): callers in this file treat alignments as powers of two
// (cf. the mask arithmetic in _RandomizeAddress()) — confirm ROUNDUP()'s
// exact contract in the kernel headers.
static inline addr_t
align_address(addr_t address, size_t alignment)
{
	return ROUNDUP(address, alignment);
}
68 
69 
70 static inline addr_t
71 align_address(addr_t address, size_t alignment, uint32 addressSpec,
72 	addr_t baseAddress)
73 {
74 	if (is_base_address_spec(addressSpec))
75 		address = std::max(address, baseAddress);
76 	return align_address(address, alignment);
77 }
78 
79 
80 // #pragma mark - VMUserAddressSpace
81 
82 
/*!	Creates the userland address space for team \a id, covering \a size bytes
	starting at \a base. The area tree starts empty and the insertion hint
	unset (0).
*/
VMUserAddressSpace::VMUserAddressSpace(team_id id, addr_t base, size_t size)
	:
	VMAddressSpace(id, base, size, "address space"),
	fNextInsertHint(0)
{
}
89 
90 
// Nothing to do here beyond the base class destructor; area cleanup happens
// elsewhere (cf. DeleteArea()/UnreserveAllAddressRanges()).
VMUserAddressSpace::~VMUserAddressSpace()
{
}
94 
95 
96 inline VMArea*
97 VMUserAddressSpace::FirstArea() const
98 {
99 	VMUserArea* area = fAreas.LeftMost();
100 	while (area != NULL && area->id == RESERVED_AREA_ID)
101 		area = fAreas.Next(area);
102 	return area;
103 }
104 
105 
106 inline VMArea*
107 VMUserAddressSpace::NextArea(VMArea* _area) const
108 {
109 	VMUserArea* area = static_cast<VMUserArea*>(_area);
110 	area = fAreas.Next(area);
111 	while (area != NULL && area->id == RESERVED_AREA_ID)
112 		area = fAreas.Next(area);
113 	return area;
114 }
115 
116 
/*!	Instantiates a new VMUserArea for this address space. This only allocates
	the object; the caller is expected to place it via InsertArea().
*/
VMArea*
VMUserAddressSpace::CreateArea(const char* name, uint32 wiring,
	uint32 protection, uint32 allocationFlags)
{
	return VMUserArea::Create(this, name, wiring, protection, allocationFlags);
}
123 
124 
/*!	Destroys and frees \a _area. The explicit destructor call plus
	free_etc() mirrors how the area was allocated (not via plain new, so
	delete cannot be used here).
*/
void
VMUserAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);
	area->~VMUserArea();
	free_etc(area, allocationFlags);
}
132 
133 
134 //! You must hold the address space's read lock.
135 VMArea*
136 VMUserAddressSpace::LookupArea(addr_t address) const
137 {
138 	VMUserArea* area = fAreas.FindClosest(address, true);
139 	if (area == NULL || area->id == RESERVED_AREA_ID)
140 		return NULL;
141 
142 	return area->ContainsAddress(address) ? area : NULL;
143 }
144 
145 
146 //! You must hold the address space's read lock.
147 VMArea*
148 VMUserAddressSpace::FindClosestArea(addr_t address, bool less) const
149 {
150 	VMUserArea* area = fAreas.FindClosest(address, less);
151 	while (area != NULL && area->id == RESERVED_AREA_ID)
152 		area = fAreas.Next(area);
153 	return area;
154 }
155 
156 
157 /*!	This inserts the area you pass into the address space.
158 	It will also set the "_address" argument to its base address when
159 	the call succeeds.
160 	You need to hold the VMAddressSpace write lock.
161 */
162 status_t
163 VMUserAddressSpace::InsertArea(VMArea* _area, size_t size,
164 	const virtual_address_restrictions* addressRestrictions,
165 	uint32 allocationFlags, void** _address)
166 {
167 	VMUserArea* area = static_cast<VMUserArea*>(_area);
168 
169 	addr_t searchBase, searchEnd;
170 	status_t status;
171 
172 	switch (addressRestrictions->address_specification) {
173 		case B_EXACT_ADDRESS:
174 			searchBase = (addr_t)addressRestrictions->address;
175 			searchEnd = (addr_t)addressRestrictions->address + (size - 1);
176 			break;
177 
178 		case B_BASE_ADDRESS:
179 		case B_RANDOMIZED_BASE_ADDRESS:
180 			searchBase = std::max(fBase, (addr_t)addressRestrictions->address);
181 			searchEnd = fEndAddress;
182 			break;
183 
184 		case B_ANY_ADDRESS:
185 		case B_ANY_KERNEL_ADDRESS:
186 		case B_ANY_KERNEL_BLOCK_ADDRESS:
187 		case B_RANDOMIZED_ANY_ADDRESS:
188 			searchBase = std::max(fBase, (addr_t)USER_BASE_ANY);
189 			searchEnd = fEndAddress;
190 			break;
191 
192 		default:
193 			return B_BAD_VALUE;
194 	}
195 
196 	status = _InsertAreaSlot(searchBase, size, searchEnd,
197 		addressRestrictions->address_specification,
198 		addressRestrictions->alignment, area, allocationFlags);
199 	if (status == B_OK) {
200 		if (_address != NULL)
201 			*_address = (void*)area->Base();
202 		fFreeSpace -= area->Size();
203 	}
204 
205 	return status;
206 }
207 
208 
209 //! You must hold the address space's write lock.
210 void
211 VMUserAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
212 {
213 	VMUserArea* area = static_cast<VMUserArea*>(_area);
214 
215 	fAreas.Remove(area);
216 
217 	if (area->id != RESERVED_AREA_ID) {
218 		IncrementChangeCount();
219 		fFreeSpace += area->Size();
220 	}
221 }
222 
223 
224 bool
225 VMUserAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
226 {
227 	VMUserArea* next = fAreas.Next(static_cast<VMUserArea*>(area));
228 	addr_t newEnd = area->Base() + (newSize - 1);
229 
230 	if (next == NULL)
231 		return fEndAddress >= newEnd;
232 
233 	if (next->Base() > newEnd)
234 		return true;
235 
236 	// If the area was created inside a reserved area, it can
237 	// also be resized in that area
238 	// TODO: if there is free space after the reserved area, it could
239 	// be used as well...
240 	return next->id == RESERVED_AREA_ID
241 		&& (uint64)next->cache_offset <= (uint64)area->Base()
242 		&& next->Base() + (next->Size() - 1) >= newEnd;
243 }
244 
245 
/*!	Resizes \a _area to \a newSize, consuming space from a directly following
	reserved range if the area grows into one. The caller is expected to hold
	the address space write lock (cf. the panic message below).
*/
status_t
VMUserAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	addr_t newEnd = area->Base() + (newSize - 1);
	VMUserArea* next = fAreas.Next(area);
	if (next != NULL && next->Base() <= newEnd) {
		// The enlarged area would overlap its successor. That is only legal
		// if the successor is the reserved range the area was created in
		// (same conditions CanResizeArea() checks); anything else means the
		// situation changed under us.
		if (next->id != RESERVED_AREA_ID
			|| (uint64)next->cache_offset > (uint64)area->Base()
			|| next->Base() + (next->Size() - 1) < newEnd) {
			panic("resize situation for area %p has changed although we "
				"should have the address space lock", area);
			return B_ERROR;
		}

		// resize reserved area
		addr_t offset = area->Base() + newSize - next->Base();
			// number of bytes of the reservation the grown area consumes
		if (next->Size() <= offset) {
			// the reservation is consumed entirely — delete it
			RemoveArea(next, allocationFlags);
			next->~VMUserArea();
			free_etc(next, allocationFlags);
		} else {
			// chop the consumed part off the front of the reservation
			status_t error = ShrinkAreaHead(next, next->Size() - offset,
				allocationFlags);
			if (error != B_OK)
				return error;
		}
	}

	area->SetSize(newSize);
	return B_OK;
}
280 
281 
282 status_t
283 VMUserAddressSpace::ShrinkAreaHead(VMArea* area, size_t size,
284 	uint32 allocationFlags)
285 {
286 	size_t oldSize = area->Size();
287 	if (size == oldSize)
288 		return B_OK;
289 
290 	area->SetBase(area->Base() + oldSize - size);
291 	area->SetSize(size);
292 
293 	return B_OK;
294 }
295 
296 
297 status_t
298 VMUserAddressSpace::ShrinkAreaTail(VMArea* area, size_t size,
299 	uint32 allocationFlags)
300 {
301 	size_t oldSize = area->Size();
302 	if (size == oldSize)
303 		return B_OK;
304 
305 	area->SetSize(size);
306 
307 	return B_OK;
308 }
309 
310 
/*!	Reserves an address range of \a size bytes by inserting a placeholder
	area (id RESERVED_AREA_ID). Real areas won't be placed inside it unless
	explicitly requested. On success the reservation's base is returned in
	\a _address and a reference to the address space is acquired.
*/
status_t
VMUserAddressSpace::ReserveAddressRange(size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 flags, uint32 allocationFlags, void** _address)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// okay, someone is trying to delete this address space now, so we
		// can't insert the area, let's back out
		return B_BAD_TEAM_ID;
	}

	VMUserArea* area = VMUserArea::CreateReserved(this, flags, allocationFlags);
	if (area == NULL)
		return B_NO_MEMORY;

	status_t status = InsertArea(area, size, addressRestrictions,
		allocationFlags, _address);
	if (status != B_OK) {
		// placement failed — destroy the placeholder again
		area->~VMUserArea();
		free_etc(area, allocationFlags);
		return status;
	}

	area->cache_offset = area->Base();
		// we cache the original base address here

	// Hold an address space reference for the lifetime of the reservation;
	// the Unreserve*() functions Put() it again.
	Get();
	return B_OK;
}
341 
342 
/*!	Releases all reservation placeholders that lie entirely within
	[\a address, \a address + \a size - 1]. Real areas within the range are
	left untouched. Succeeds (B_OK) even if nothing was reserved there.
*/
status_t
VMUserAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
	uint32 allocationFlags)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// okay, someone is trying to delete this address space now, so we can't
		// insert the area, so back out
		return B_BAD_TEAM_ID;
	}

	// the area must be completely part of the reserved range
	VMUserArea* area = fAreas.FindClosest(address, false);
	if (area == NULL)
		return B_OK;

	// Walk the areas starting at/after \a address, stopping at the first one
	// that extends beyond the range's end.
	addr_t endAddress = address + size - 1;
	for (VMUserAreaTree::Iterator it = fAreas.GetIterator(area);
		(area = it.Next()) != NULL
			&& area->Base() + area->Size() - 1 <= endAddress;) {

		if (area->id == RESERVED_AREA_ID) {
			// remove reserved range
			RemoveArea(area, allocationFlags);
			Put();
				// balances the Get() in ReserveAddressRange()
			area->~VMUserArea();
			free_etc(area, allocationFlags);
		}
	}

	return B_OK;
}
375 
376 
/*!	Deletes every reservation placeholder in this address space, dropping
	the address space reference each one held. Real areas are untouched.
*/
void
VMUserAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
{
	for (VMUserAreaTree::Iterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		if (area->id == RESERVED_AREA_ID) {
			RemoveArea(area, allocationFlags);
			Put();
				// balances the Get() in ReserveAddressRange()
			area->~VMUserArea();
			free_etc(area, allocationFlags);
		}
	}
}
390 
391 
392 void
393 VMUserAddressSpace::Dump() const
394 {
395 	VMAddressSpace::Dump();
396 	kprintf("area_list:\n");
397 
398 	for (VMUserAreaTree::ConstIterator it = fAreas.GetIterator();
399 			VMUserArea* area = it.Next();) {
400 		kprintf(" area 0x%" B_PRIx32 ": ", area->id);
401 		kprintf("base_addr = 0x%lx ", area->Base());
402 		kprintf("size = 0x%lx ", area->Size());
403 		kprintf("name = '%s' ", area->name);
404 		kprintf("protection = 0x%" B_PRIx32 "\n", area->protection);
405 	}
406 }
407 
408 
409 inline bool
410 VMUserAddressSpace::_IsRandomized(uint32 addressSpec) const
411 {
412 	return fRandomizingEnabled
413 		&& (addressSpec == B_RANDOMIZED_ANY_ADDRESS
414 			|| addressSpec == B_RANDOMIZED_BASE_ADDRESS);
415 }
416 
417 
418 addr_t
419 VMUserAddressSpace::_RandomizeAddress(addr_t start, addr_t end,
420 	size_t alignment, bool initial)
421 {
422 	ASSERT((start & addr_t(alignment - 1)) == 0);
423 	ASSERT(start <= end);
424 
425 	if (start == end)
426 		return start;
427 
428 	addr_t range = end - start + 1;
429 	if (initial)
430 		range = std::min(range, kMaxInitialRandomize);
431 	else
432 		range = std::min(range, kMaxRandomize);
433 
434 	addr_t random = secure_get_random<addr_t>();
435 	random %= range;
436 	random &= ~addr_t(alignment - 1);
437 
438 	return start + random;
439 }
440 
441 
/*!	Finds a reserved area that covers the region spanned by \a start and
	\a size, inserts the \a area into that region and makes sure that
	there are reserved regions for the remaining parts.
	Returns B_ENTRY_NOT_FOUND if no reservation covers the range, and
	B_BAD_VALUE if the covering area is a real area.
*/
status_t
VMUserAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size,
	VMUserArea* area, uint32 allocationFlags)
{
	VMUserArea* reserved = fAreas.FindClosest(start, true);
	if (reserved == NULL
		|| !reserved->ContainsAddress(start)
		|| !reserved->ContainsAddress(start + size - 1)) {
		return B_ENTRY_NOT_FOUND;
	}

	// This area covers the requested range
	if (reserved->id != RESERVED_AREA_ID) {
		// but it's not reserved space, it's a real area
		return B_BAD_VALUE;
	}

	// Now we have to transfer the requested part of the reserved
	// range to the new area - and remove, resize or split the old
	// reserved area.

	if (start == reserved->Base()) {
		// the area starts at the beginning of the reserved range

		if (size == reserved->Size()) {
			// the new area fully covers the reserved range
			fAreas.Remove(reserved);
			Put();
				// drop the address space reference held by the reservation
				// (acquired in ReserveAddressRange())
			reserved->~VMUserArea();
			free_etc(reserved, allocationFlags);
		} else {
			// resize the reserved range behind the area
			reserved->SetBase(reserved->Base() + size);
			reserved->SetSize(reserved->Size() - size);
		}
	} else if (start + size == reserved->Base() + reserved->Size()) {
		// the area is at the end of the reserved range
		// resize the reserved range before the area
		reserved->SetSize(start - reserved->Base());
	} else {
		// the area splits the reserved range into two separate ones
		// we need a new reserved area to cover this space
		VMUserArea* newReserved = VMUserArea::CreateReserved(this,
			reserved->protection, allocationFlags);
		if (newReserved == NULL)
			return B_NO_MEMORY;

		Get();
			// the second reservation holds its own address space reference

		// resize regions
		newReserved->SetBase(start + size);
		newReserved->SetSize(
			reserved->Base() + reserved->Size() - start - size);
		newReserved->cache_offset = reserved->cache_offset;
			// keep the original reservation base (cf. ReserveAddressRange())

		reserved->SetSize(start - reserved->Base());

		fAreas.Insert(newReserved);
	}

	area->SetBase(start);
	area->SetSize(size);
	fAreas.Insert(area);
	IncrementChangeCount();

	return B_OK;
}
513 
514 
/*!	Must be called with this address space's write lock held */
/*!	Finds a free slot of \a size bytes in [\a start, \a end] matching
	\a addressSpec and \a alignment, sets the base/size of \a area
	accordingly and inserts it into the area tree.
	Returns B_BAD_ADDRESS for an invalid range, B_BAD_VALUE for an occupied
	exact address (or unknown spec), and B_NO_MEMORY if no spot was found.
*/
status_t
VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
	uint32 addressSpec, size_t alignment, VMUserArea* area,
	uint32 allocationFlags)
{
	TRACE(("VMUserAddressSpace::_InsertAreaSlot: address space %p, start "
		"0x%lx, size %ld, end 0x%lx, addressSpec %" B_PRIu32 ", area %p\n",
		this, start, size, end, addressSpec, area));

	// do some sanity checking
	if (start < fBase || size == 0 || end > fEndAddress
		|| start + (size - 1) > end)
		return B_BAD_ADDRESS;

	if (addressSpec == B_EXACT_ADDRESS && area->id != RESERVED_AREA_ID) {
		// search for a reserved area
		status_t status = _InsertAreaIntoReservedRegion(start, size, area,
			allocationFlags);
		if (status == B_OK || status == B_BAD_VALUE)
			return status;

		// There was no reserved area, and the slot doesn't seem to be used
		// already
		// TODO: this could be further optimized.
	}

	if (alignment == 0)
		alignment = B_PAGE_SIZE;
	if (addressSpec == B_ANY_KERNEL_BLOCK_ADDRESS) {
		// align the memory to the next power of two of the size
		while (alignment < size)
			alignment <<= 1;
	}

	start = align_address(start, alignment);

	// The insertion hint is only usable for specs that allow free placement;
	// exact and base-address requests must honor the caller's address.
	bool useHint
		= addressSpec != B_EXACT_ADDRESS && !is_base_address_spec(addressSpec);

	// A non-zero originalStart records that the search start was moved
	// (randomization or hint), so a failed search can retry from the
	// caller's start — see the "second chance" handling below.
	addr_t originalStart = 0;
	if (fRandomizingEnabled && addressSpec == B_RANDOMIZED_BASE_ADDRESS) {
		originalStart = start;
		start = _RandomizeAddress(start, end - size + 1, alignment, true);
	} else if (useHint
		&& start <= fNextInsertHint && fNextInsertHint <= end - size + 1) {
		originalStart = start;
		start = fNextInsertHint;
	}

	// walk up to the spot where we should start searching
second_chance:
	VMUserArea* next = fAreas.FindClosest(start + size, false);
	VMUserArea* last = next != NULL
			? fAreas.Previous(next) : fAreas.FindClosest(start + size, true);

	// find the right spot depending on the address specification - the area
	// will be inserted directly after "last" ("next" is not referenced anymore)

	bool foundSpot = false;
	switch (addressSpec) {
		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		case B_RANDOMIZED_ANY_ADDRESS:
		case B_BASE_ADDRESS:
		case B_RANDOMIZED_BASE_ADDRESS:
		{
			VMUserAreaTree::Iterator it = fAreas.GetIterator(
				next != NULL ? next : fAreas.LeftMost());

			// find a hole big enough for a new area
			if (last == NULL) {
				// see if we can build it at the beginning of the virtual map
				addr_t alignedBase = align_address(start, alignment);
				addr_t nextBase = next == NULL
					? end : std::min(next->Base() - 1, end);
				if (is_valid_spot(start, alignedBase, size, nextBase)) {
					addr_t rangeEnd = std::min(nextBase - size + 1, end);
					if (_IsRandomized(addressSpec)) {
						alignedBase = _RandomizeAddress(alignedBase, rangeEnd,
							alignment);
					}

					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			// keep walking
			// (try each gap between "last" and "next" in turn)
			while (next != NULL && next->Base() + next->Size() - 1 <= end) {
				addr_t alignedBase = align_address(last->Base() + last->Size(),
					alignment, addressSpec, start);
				addr_t nextBase = std::min(end, next->Base() - 1);

				if (is_valid_spot(last->Base() + (last->Size() - 1),
						alignedBase, size, nextBase)) {
					addr_t rangeEnd = std::min(nextBase - size + 1, end);
					if (_IsRandomized(addressSpec)) {
						alignedBase = _RandomizeAddress(alignedBase,
							rangeEnd, alignment);
					}

					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			if (foundSpot)
				break;

			// no gap between areas was big enough; try the space after the
			// last area
			addr_t alignedBase = align_address(last->Base() + last->Size(),
				alignment, addressSpec, start);

			if (next == NULL && is_valid_spot(last->Base() + (last->Size() - 1),
					alignedBase, size, end)) {
				if (_IsRandomized(addressSpec)) {
					alignedBase = _RandomizeAddress(alignedBase, end - size + 1,
						alignment);
				}

				// got a spot
				foundSpot = true;
				area->SetBase(alignedBase);
				break;
			} else if (is_base_address_spec(addressSpec)) {
				// we didn't find a free spot in the requested range, so we'll
				// try again without any restrictions
				if (!_IsRandomized(addressSpec)) {
					start = USER_BASE_ANY;
					addressSpec = B_ANY_ADDRESS;
				} else if (start == originalStart) {
					start = USER_BASE_ANY;
					addressSpec = B_RANDOMIZED_ANY_ADDRESS;
				} else {
					start = originalStart;
					addressSpec = B_RANDOMIZED_BASE_ADDRESS;
				}

				goto second_chance;
			} else if (useHint
					&& originalStart != 0 && start != originalStart) {
				// the hint-based search failed; retry once from the
				// caller's original start
				start = originalStart;
				goto second_chance;
			} else if (area->id != RESERVED_AREA_ID) {
				// We didn't find a free spot - if there are any reserved areas,
				// we can now test those for free space
				// TODO: it would make sense to start with the biggest of them
				it = fAreas.GetIterator();
				next = it.Next();
				for (last = NULL; next != NULL; next = it.Next()) {
					if (next->id != RESERVED_AREA_ID) {
						last = next;
						continue;
					} else if (next->Base() + size - 1 > end)
						break;

					// TODO: take free space after the reserved area into
					// account!
					addr_t alignedBase = align_address(next->Base(), alignment);
					if (next->Base() == alignedBase && next->Size() == size) {
						// The reserved area is entirely covered, and thus,
						// removed
						fAreas.Remove(next);

						foundSpot = true;
						area->SetBase(alignedBase);
						next->~VMUserArea();
						free_etc(next, allocationFlags);
						break;
					}

					if ((next->protection & RESERVED_AVOID_BASE) == 0
						&& alignedBase == next->Base()
						&& next->Size() >= size) {
						addr_t rangeEnd = std::min(
							next->Base() + next->Size() - size, end);
						if (_IsRandomized(addressSpec)) {
							alignedBase = _RandomizeAddress(next->Base(),
								rangeEnd, alignment);
						}
						addr_t offset = alignedBase - next->Base();

						// The new area will be placed at the beginning of the
						// reserved area and the reserved area will be offset
						// and resized
						foundSpot = true;
						next->SetBase(next->Base() + offset + size);
						next->SetSize(next->Size() - offset - size);
						area->SetBase(alignedBase);
						break;
					}

					if (is_valid_spot(next->Base(), alignedBase, size,
							std::min(next->Base() + next->Size() - 1, end))) {
						// The new area will be placed at the end of the
						// reserved area, and the reserved area will be resized
						// to make space

						if (_IsRandomized(addressSpec)) {
							// randomize within the tail of the reservation,
							// staying aligned and within kMaxRandomize of
							// the highest possible placement
							addr_t alignedNextBase = align_address(next->Base(),
								alignment);

							addr_t startRange = next->Base() + next->Size();
							startRange -= size + kMaxRandomize;
							startRange = ROUNDDOWN(startRange, alignment);
							startRange = std::max(startRange, alignedNextBase);

							addr_t rangeEnd
								= std::min(next->Base() + next->Size() - size,
									end);
							alignedBase = _RandomizeAddress(startRange,
								rangeEnd, alignment);
						} else {
							alignedBase = ROUNDDOWN(
								next->Base() + next->Size() - size, alignment);
						}

						foundSpot = true;
						next->SetSize(alignedBase - next->Base());
						area->SetBase(alignedBase);
						break;
					}

					last = next;
				}
			}

			break;
		}

		case B_EXACT_ADDRESS:
			// see if we can create it exactly here
			if ((last == NULL || last->Base() + (last->Size() - 1) < start)
				&& (next == NULL || next->Base() > start + (size - 1))) {
				foundSpot = true;
				area->SetBase(start);
				break;
			}
			break;
		default:
			return B_BAD_VALUE;
	}

	if (!foundSpot)
		return addressSpec == B_EXACT_ADDRESS ? B_BAD_VALUE : B_NO_MEMORY;

	// remember where the next freely-placed area should start searching
	if (useHint)
		fNextInsertHint = area->Base() + size;

	area->SetSize(size);
	fAreas.Insert(area);
	IncrementChangeCount();
	return B_OK;
}
778