xref: /haiku/src/system/kernel/vm/VMUserAddressSpace.cpp (revision e0ef64750f3169cd634bb2f7a001e22488b05231)
1 /*
2  * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include "VMUserAddressSpace.h"
12 
13 #include <stdlib.h>
14 
15 #include <KernelExport.h>
16 
17 #include <heap.h>
18 #include <thread.h>
19 #include <util/atomic.h>
20 #include <vm/vm.h>
21 #include <vm/VMArea.h>
22 
23 
24 //#define TRACE_VM
25 #ifdef TRACE_VM
26 #	define TRACE(x) dprintf x
27 #else
28 #	define TRACE(x) ;
29 #endif
30 
31 
32 /*!	Verifies that an area with the given aligned base and size fits into
33 	the spot defined by base and limit and checks for overflows.
34 */
35 static inline bool
36 is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
37 {
38 	return (alignedBase >= base && alignedBase + (size - 1) > alignedBase
39 		&& alignedBase + (size - 1) <= limit);
40 }
41 
42 
// Creates the userland address space for team \a id, spanning \a size bytes
// starting at \a base. The lookup hint used by LookupArea() starts out unset.
VMUserAddressSpace::VMUserAddressSpace(team_id id, addr_t base, size_t size)
	:
	VMAddressSpace(id, base, size, "address space"),
	fAreaHint(NULL)
{
}
49 
50 
VMUserAddressSpace::~VMUserAddressSpace()
{
	// nothing to do beyond the base class destructor
}
54 
55 
56 inline VMArea*
57 VMUserAddressSpace::FirstArea() const
58 {
59 	VMUserArea* area = fAreas.Head();
60 	while (area != NULL && area->id == RESERVED_AREA_ID)
61 		area = fAreas.GetNext(area);
62 	return area;
63 }
64 
65 
66 inline VMArea*
67 VMUserAddressSpace::NextArea(VMArea* _area) const
68 {
69 	VMUserArea* area = static_cast<VMUserArea*>(_area);
70 	area = fAreas.GetNext(area);
71 	while (area != NULL && area->id == RESERVED_AREA_ID)
72 		area = fAreas.GetNext(area);
73 	return area;
74 }
75 
76 
// Allocates a new, not yet inserted VMUserArea for this address space;
// returns NULL on allocation failure (see VMUserArea::Create()).
VMArea*
VMUserAddressSpace::CreateArea(const char* name, uint32 wiring,
	uint32 protection, uint32 allocationFlags)
{
	return VMUserArea::Create(this, name, wiring, protection, allocationFlags);
}
83 
84 
// Destroys and frees an area created by CreateArea().
// NOTE(review): assumes the area is no longer linked into fAreas -- confirm
// with callers.
void
VMUserAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);
	// areas are not allocated with plain new, so destroy explicitly and
	// release the memory with the matching free_etc()
	area->~VMUserArea();
	free_etc(area, allocationFlags);
}
92 
93 
94 //! You must hold the address space's read lock.
95 VMArea*
96 VMUserAddressSpace::LookupArea(addr_t address) const
97 {
98 	// check the area hint first
99 	VMArea* areaHint = atomic_pointer_get(&fAreaHint);
100 	if (areaHint != NULL && areaHint->ContainsAddress(address))
101 		return areaHint;
102 
103 	for (VMUserAreaList::ConstIterator it = fAreas.GetIterator();
104 			VMUserArea* area = it.Next();) {
105 		if (area->id == RESERVED_AREA_ID)
106 			continue;
107 
108 		if (area->ContainsAddress(address)) {
109 			atomic_pointer_set(&fAreaHint, area);
110 			return area;
111 		}
112 	}
113 
114 	return NULL;
115 }
116 
117 
118 /*!	This inserts the area you pass into the address space.
119 	It will also set the "_address" argument to its base address when
120 	the call succeeds.
121 	You need to hold the VMAddressSpace write lock.
122 */
123 status_t
124 VMUserAddressSpace::InsertArea(VMArea* _area, size_t size,
125 	const virtual_address_restrictions* addressRestrictions,
126 	uint32 allocationFlags, void** _address)
127 {
128 	VMUserArea* area = static_cast<VMUserArea*>(_area);
129 
130 	addr_t searchBase, searchEnd;
131 	status_t status;
132 
133 	switch (addressRestrictions->address_specification) {
134 		case B_EXACT_ADDRESS:
135 			searchBase = (addr_t)addressRestrictions->address;
136 			searchEnd = (addr_t)addressRestrictions->address + (size - 1);
137 			break;
138 
139 		case B_BASE_ADDRESS:
140 			searchBase = (addr_t)addressRestrictions->address;
141 			searchEnd = fEndAddress;
142 			break;
143 
144 		case B_ANY_ADDRESS:
145 		case B_ANY_KERNEL_ADDRESS:
146 		case B_ANY_KERNEL_BLOCK_ADDRESS:
147 			searchBase = fBase;
148 			// TODO: remove this again when vm86 mode is moved into the kernel
149 			// completely (currently needs a userland address space!)
150 			if (searchBase == USER_BASE)
151 				searchBase = USER_BASE_ANY;
152 			searchEnd = fEndAddress;
153 			break;
154 
155 		default:
156 			return B_BAD_VALUE;
157 	}
158 
159 	status = _InsertAreaSlot(searchBase, size, searchEnd,
160 		addressRestrictions->address_specification,
161 		addressRestrictions->alignment, area, allocationFlags);
162 	if (status == B_OK) {
163 		if (_address != NULL)
164 			*_address = (void*)area->Base();
165 		fFreeSpace -= area->Size();
166 	}
167 
168 	return status;
169 }
170 
171 
172 //! You must hold the address space's write lock.
173 void
174 VMUserAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
175 {
176 	VMUserArea* area = static_cast<VMUserArea*>(_area);
177 
178 	fAreas.Remove(area);
179 
180 	if (area->id != RESERVED_AREA_ID) {
181 		IncrementChangeCount();
182 		fFreeSpace += area->Size();
183 
184 		if (area == fAreaHint)
185 			fAreaHint = NULL;
186 	}
187 }
188 
189 
190 bool
191 VMUserAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
192 {
193 	VMUserArea* next = fAreas.GetNext(static_cast<VMUserArea*>(area));
194 	addr_t newEnd = area->Base() + (newSize - 1);
195 
196 	if (next == NULL)
197 		return fEndAddress >= newEnd;
198 
199 	if (next->Base() > newEnd)
200 		return true;
201 
202 	// If the area was created inside a reserved area, it can
203 	// also be resized in that area
204 	// TODO: if there is free space after the reserved area, it could
205 	// be used as well...
206 	return next->id == RESERVED_AREA_ID && next->cache_offset <= area->Base()
207 		&& next->Base() + (next->Size() - 1) >= newEnd;
208 }
209 
210 
// Resizes \a _area to \a newSize in place, consuming part or all of a
// following reserved range if the area was created inside one.
status_t
VMUserAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	addr_t newEnd = area->Base() + (newSize - 1);
	VMUserArea* next = fAreas.GetNext(area);
	if (next != NULL && next->Base() <= newEnd) {
		// The growing area runs into its successor; that is only legal if
		// the successor is the reserved range this area was carved out of
		// (the same conditions CanResizeArea() checks).
		if (next->id != RESERVED_AREA_ID
			|| next->cache_offset > area->Base()
			|| next->Base() + (next->Size() - 1) < newEnd) {
			panic("resize situation for area %p has changed although we "
				"should have the address space lock", area);
			return B_ERROR;
		}

		// resize reserved area
		addr_t offset = area->Base() + newSize - next->Base();
			// number of bytes of the reserved range the grown area overlaps
		if (next->Size() <= offset) {
			// the reserved range is consumed completely
			// NOTE(review): unlike _InsertAreaIntoReservedRegion(), no Put()
			// is done here for the removed reserved range -- possibly leaks
			// an address space reference; verify against ReserveAddressRange()
			RemoveArea(next, allocationFlags);
			next->~VMUserArea();
			free_etc(next, allocationFlags);
		} else {
			// chop the overlapped part off the front of the reserved range
			status_t error = ShrinkAreaHead(next, next->Size() - offset,
				allocationFlags);
			if (error != B_OK)
				return error;
		}
	}

	area->SetSize(newSize);
	return B_OK;
}
245 
246 
247 status_t
248 VMUserAddressSpace::ShrinkAreaHead(VMArea* area, size_t size,
249 	uint32 allocationFlags)
250 {
251 	size_t oldSize = area->Size();
252 	if (size == oldSize)
253 		return B_OK;
254 
255 	area->SetBase(area->Base() + oldSize - size);
256 	area->SetSize(size);
257 
258 	return B_OK;
259 }
260 
261 
262 status_t
263 VMUserAddressSpace::ShrinkAreaTail(VMArea* area, size_t size,
264 	uint32 allocationFlags)
265 {
266 	size_t oldSize = area->Size();
267 	if (size == oldSize)
268 		return B_OK;
269 
270 	area->SetSize(size);
271 
272 	return B_OK;
273 }
274 
275 
// Reserves an address range of \a size bytes by inserting a pseudo area
// (id RESERVED_AREA_ID). Each reserved range holds one reference to the
// address space (Get()), released again when the range is unreserved.
status_t
VMUserAddressSpace::ReserveAddressRange(size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 flags, uint32 allocationFlags, void** _address)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// okay, someone is trying to delete this address space now, so we
		// can't insert the area, let's back out
		return B_BAD_TEAM_ID;
	}

	// "flags" ends up in the pseudo area's protection field (see the
	// RESERVED_AVOID_BASE check in _InsertAreaSlot())
	VMUserArea* area = VMUserArea::CreateReserved(this, flags, allocationFlags);
	if (area == NULL)
		return B_NO_MEMORY;

	status_t status = InsertArea(area, size, addressRestrictions,
		allocationFlags, _address);
	if (status != B_OK) {
		area->~VMUserArea();
		free_etc(area, allocationFlags);
		return status;
	}

	area->cache_offset = area->Base();
		// we cache the original base address here
		// (ResizeArea()/CanResizeArea() use it to recognize areas created
		// inside this reserved range)

	Get();
	return B_OK;
}
306 
307 
// Removes all reserved pseudo areas that lie completely within
// [address, address + size); real areas in the range are left alone.
status_t
VMUserAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
	uint32 allocationFlags)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// okay, someone is trying to delete this address space now, so we can't
		// insert the area, so back out
		return B_BAD_TEAM_ID;
	}

	// search area list and remove any matching reserved ranges
	addr_t endAddress = address + (size - 1);
	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		// the area must be completely part of the reserved range;
		// since the list is sorted by base address, we can stop at the
		// first area that reaches beyond the range's end
		if (area->Base() + (area->Size() - 1) > endAddress)
			break;
		if (area->id == RESERVED_AREA_ID && area->Base() >= (addr_t)address) {
			// remove reserved range -- safe while iterating, as the iterator
			// has already advanced past "area"
			RemoveArea(area, allocationFlags);
			// drop the reference taken in ReserveAddressRange()
			Put();
			area->~VMUserArea();
			free_etc(area, allocationFlags);
		}
	}

	return B_OK;
}
337 
338 
// Deletes every reserved pseudo area in this address space; real areas are
// left untouched.
void
VMUserAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
{
	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		if (area->id == RESERVED_AREA_ID) {
			// safe while iterating: the iterator already points past "area"
			RemoveArea(area, allocationFlags);
			// drop the reference taken in ReserveAddressRange()
			Put();
			area->~VMUserArea();
			free_etc(area, allocationFlags);
		}
	}
}
352 
353 
354 void
355 VMUserAddressSpace::Dump() const
356 {
357 	VMAddressSpace::Dump();
358 	kprintf("area_hint: %p\n", fAreaHint);
359 
360 	kprintf("area_list:\n");
361 
362 	for (VMUserAreaList::ConstIterator it = fAreas.GetIterator();
363 			VMUserArea* area = it.Next();) {
364 		kprintf(" area 0x%lx: ", area->id);
365 		kprintf("base_addr = 0x%lx ", area->Base());
366 		kprintf("size = 0x%lx ", area->Size());
367 		kprintf("name = '%s' ", area->name);
368 		kprintf("protection = 0x%lx\n", area->protection);
369 	}
370 }
371 
372 
/*!	Finds a reserved area that covers the region spanned by \a start and
	\a size, inserts the \a area into that region and makes sure that
	there are reserved regions for the remaining parts.
*/
status_t
VMUserAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size,
	VMUserArea* area, uint32 allocationFlags)
{
	VMUserArea* next;

	// find the area that fully contains [start, start + size)
	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			(next = it.Next()) != NULL;) {
		if (next->Base() <= start
			&& next->Base() + (next->Size() - 1) >= start + (size - 1)) {
			// This area covers the requested range
			if (next->id != RESERVED_AREA_ID) {
				// but it's not reserved space, it's a real area
				return B_BAD_VALUE;
			}

			break;
		}
	}

	if (next == NULL)
		return B_ENTRY_NOT_FOUND;

	// Now we have to transfer the requested part of the reserved
	// range to the new area - and remove, resize or split the old
	// reserved area.

	if (start == next->Base()) {
		// the area starts at the beginning of the reserved range
		fAreas.Insert(next, area);

		if (size == next->Size()) {
			// the new area fully covers the reserved range
			fAreas.Remove(next);
			Put();
				// drop the address space reference held by the reserved range
			next->~VMUserArea();
			free_etc(next, allocationFlags);
		} else {
			// resize the reserved range behind the area
			next->SetBase(next->Base() + size);
			next->SetSize(next->Size() - size);
		}
	} else if (start + size == next->Base() + next->Size()) {
		// the area is at the end of the reserved range
		fAreas.Insert(fAreas.GetNext(next), area);

		// resize the reserved range before the area
		next->SetSize(start - next->Base());
	} else {
		// the area splits the reserved range into two separate ones
		// we need a new reserved area to cover this space
		VMUserArea* reserved = VMUserArea::CreateReserved(this,
			next->protection, allocationFlags);
		if (reserved == NULL)
			return B_NO_MEMORY;

		// the second reserved range holds its own address space reference
		Get();
		fAreas.Insert(fAreas.GetNext(next), reserved);
		fAreas.Insert(reserved, area);

		// resize regions
		reserved->SetSize(next->Base() + next->Size() - start - size);
		next->SetSize(start - next->Base());
		reserved->SetBase(start + size);
		reserved->cache_offset = next->cache_offset;
			// both halves keep the original base cached by
			// ReserveAddressRange()
	}

	area->SetBase(start);
	area->SetSize(size);
	IncrementChangeCount();

	return B_OK;
}
450 
451 
/*!	Finds a free slot of \a size bytes within [\a start, \a end] according to
	\a addressSpec, sets the area's base address accordingly and links the
	area into the base-sorted area list.
	Must be called with this address space's write lock held */
status_t
VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
	uint32 addressSpec, size_t alignment, VMUserArea* area,
	uint32 allocationFlags)
{
	VMUserArea* last = NULL;
	VMUserArea* next;
	bool foundSpot = false;

	TRACE(("VMUserAddressSpace::_InsertAreaSlot: address space %p, start "
		"0x%lx, size %ld, end 0x%lx, addressSpec %ld, area %p\n", this, start,
		size, end, addressSpec, area));

	// do some sanity checking
	if (start < fBase || size == 0 || end > fEndAddress
		|| start + (size - 1) > end)
		return B_BAD_ADDRESS;

	if (addressSpec == B_EXACT_ADDRESS && area->id != RESERVED_AREA_ID) {
		// search for a reserved area covering the exact spot first
		status_t status = _InsertAreaIntoReservedRegion(start, size, area,
			allocationFlags);
		if (status == B_OK || status == B_BAD_VALUE)
			return status;

		// There was no reserved area, and the slot doesn't seem to be used
		// already
		// TODO: this could be further optimized.
	}

	if (alignment == 0)
		alignment = B_PAGE_SIZE;
	if (addressSpec == B_ANY_KERNEL_BLOCK_ADDRESS) {
		// align the memory to the next power of two of the size
		while (alignment < size)
			alignment <<= 1;
	}

	start = ROUNDUP(start, alignment);

	// walk up to the spot where we should start searching
second_chance:
	VMUserAreaList::Iterator it = fAreas.GetIterator();
	while ((next = it.Next()) != NULL) {
		if (next->Base() > start + (size - 1)) {
			// we have a winner
			break;
		}

		last = next;
	}

	// find the right spot depending on the address specification - the area
	// will be inserted directly after "last" ("next" is not referenced anymore)

	switch (addressSpec) {
		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		{
			// find a hole big enough for a new area
			if (last == NULL) {
				// see if we can build it at the beginning of the virtual map
				addr_t alignedBase = ROUNDUP(fBase, alignment);
				if (is_valid_spot(fBase, alignedBase, size,
						next == NULL ? end : next->Base())) {
					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			// keep walking, trying the gap between "last" and "next"
			while (next != NULL) {
				addr_t alignedBase = ROUNDUP(last->Base() + last->Size(),
					alignment);
				if (is_valid_spot(last->Base() + (last->Size() - 1),
						alignedBase, size, next->Base())) {
					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			if (foundSpot)
				break;

			// try the space after the last area up to "end"
			// NOTE(review): if the area list was empty and the initial spot
			// check above failed, "last" is still NULL here and gets
			// dereferenced -- verify this cannot happen in practice
			addr_t alignedBase = ROUNDUP(last->Base() + last->Size(),
				alignment);
			if (is_valid_spot(last->Base() + (last->Size() - 1), alignedBase,
					size, end)) {
				// got a spot
				foundSpot = true;
				area->SetBase(alignedBase);
				break;
			} else if (area->id != RESERVED_AREA_ID) {
				// We didn't find a free spot - if there are any reserved areas,
				// we can now test those for free space
				// TODO: it would make sense to start with the biggest of them
				it.Rewind();
				next = it.Next();
				for (last = NULL; next != NULL; next = it.Next()) {
					if (next->id != RESERVED_AREA_ID) {
						last = next;
						continue;
					}

					// TODO: take free space after the reserved area into
					// account!
					addr_t alignedBase = ROUNDUP(next->Base(), alignment);
					if (next->Base() == alignedBase && next->Size() == size) {
						// The reserved area is entirely covered, and thus,
						// removed
						fAreas.Remove(next);

						foundSpot = true;
						area->SetBase(alignedBase);
						next->~VMUserArea();
						free_etc(next, allocationFlags);
						break;
					}

					if ((next->protection & RESERVED_AVOID_BASE) == 0
						&&  alignedBase == next->Base()
						&& next->Size() >= size) {
						// The new area will be placed at the beginning of the
						// reserved area and the reserved area will be offset
						// and resized
						foundSpot = true;
						next->SetBase(next->Base() + size);
						next->SetSize(next->Size() - size);
						area->SetBase(alignedBase);
						break;
					}

					if (is_valid_spot(next->Base(), alignedBase, size,
							next->Base() + (next->Size() - 1))) {
						// The new area will be placed at the end of the
						// reserved area, and the reserved area will be resized
						// to make space
						alignedBase = ROUNDDOWN(
							next->Base() + next->Size() - size, alignment);

						foundSpot = true;
						next->SetSize(alignedBase - next->Base());
						area->SetBase(alignedBase);
						last = next;
						break;
					}

					last = next;
				}
			}
			break;
		}

		case B_BASE_ADDRESS:
		{
			// find a hole big enough for a new area beginning with "start"
			if (last == NULL) {
				// see if we can build it at the beginning of the specified
				// start
				if (next == NULL || next->Base() > start + (size - 1)) {
					foundSpot = true;
					area->SetBase(start);
					break;
				}

				last = next;
				next = it.Next();
			}

			// keep walking
			while (next != NULL) {
				if (next->Base() - (last->Base() + last->Size()) >= size) {
					// we found a spot (it'll be filled up below)
					break;
				}

				last = next;
				next = it.Next();
			}

			addr_t lastEnd = last->Base() + (last->Size() - 1);
			if (next != NULL || end - lastEnd >= size) {
				// got a spot
				foundSpot = true;
				if (lastEnd < start)
					area->SetBase(start);
				else
					area->SetBase(lastEnd + 1);
				break;
			}

			// we didn't find a free spot in the requested range, so we'll
			// try again without any restrictions
			start = fBase;
			addressSpec = B_ANY_ADDRESS;
			last = NULL;
			goto second_chance;
		}

		case B_EXACT_ADDRESS:
			// see if we can create it exactly here
			if ((last == NULL || last->Base() + (last->Size() - 1) < start)
				&& (next == NULL || next->Base() > start + (size - 1))) {
				foundSpot = true;
				area->SetBase(start);
				break;
			}
			break;
		default:
			return B_BAD_VALUE;
	}

	if (!foundSpot)
		return addressSpec == B_EXACT_ADDRESS ? B_BAD_VALUE : B_NO_MEMORY;

	// link the area in directly after "last" (or at the list head), keeping
	// the list sorted by base address
	area->SetSize(size);
	if (last)
		fAreas.Insert(fAreas.GetNext(last), area);
	else
		fAreas.Insert(fAreas.Head(), area);

	IncrementChangeCount();
	return B_OK;
}
686