xref: /haiku/src/system/kernel/vm/VMUserAddressSpace.cpp (revision 97901ec593ec4dd50ac115c1c35a6d72f6e489a5)
/*
 * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "VMUserAddressSpace.h"

#include <stdlib.h>

#include <KernelExport.h>

#include <heap.h>
#include <thread.h>
#include <util/atomic.h>
#include <vm/vm.h>
#include <vm/VMArea.h>


//#define TRACE_VM
#ifdef TRACE_VM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


/*!	Verifies that an area with the given aligned base and size fits into
	the spot defined by base and limit and checks for overflows.
*/
static inline bool
is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
{
	return (alignedBase >= base && alignedBase + (size - 1) > alignedBase
		&& alignedBase + (size - 1) <= limit);
}


VMUserAddressSpace::VMUserAddressSpace(team_id id, addr_t base, size_t size)
	:
	VMAddressSpace(id, base, size, "address space"),
	fAreaHint(NULL)
{
}


VMUserAddressSpace::~VMUserAddressSpace()
{
}

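//! Returns the first area in this address space, skipping reserved ranges.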
inline VMArea*
VMUserAddressSpace::FirstArea() const
{
	VMUserArea* area = fAreas.Head();
	while (area != NULL && area->id == RESERVED_AREA_ID)
		area = fAreas.GetNext(area);
	return area;
}

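//! Returns the area following \a _area, skipping reserved ranges.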
inline VMArea*
VMUserAddressSpace::NextArea(VMArea* _area) const
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);
	area = fAreas.GetNext(area);
	while (area != NULL && area->id == RESERVED_AREA_ID)
		area = fAreas.GetNext(area);
	return area;
}


VMArea*
VMUserAddressSpace::CreateArea(const char* name, uint32 wiring,
	uint32 protection, uint32 allocationFlags)
{
	return VMUserArea::Create(this, name, wiring, protection, allocationFlags);
}

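//! Destroys \a _area and frees its memory using the given allocation flags.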
void
VMUserAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);
	area->~VMUserArea();
	free_etc(area, allocationFlags);
}


//! You must hold the address space's read lock.
VMArea*
VMUserAddressSpace::LookupArea(addr_t address) const
{
	// check the area hint first
	VMArea* areaHint = atomic_pointer_get(&fAreaHint);
	if (areaHint != NULL && areaHint->ContainsAddress(address))
		return areaHint;

	for (VMUserAreaList::ConstIterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		if (area->id == RESERVED_AREA_ID)
			continue;

		if (area->ContainsAddress(address)) {
			atomic_pointer_set(&fAreaHint, area);
			return area;
		}
	}

	return NULL;
}

/*!	Inserts the given area into the address space.
	On success, the "_address" argument is set to the area's base address.
	The caller must hold the address space's write lock.
*/
status_t
VMUserAddressSpace::InsertArea(void** _address, uint32 addressSpec,
	size_t size, VMArea* _area, uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	addr_t searchBase, searchEnd;
	status_t status;

	switch (addressSpec) {
		case B_EXACT_ADDRESS:
			searchBase = (addr_t)*_address;
			searchEnd = (addr_t)*_address + (size - 1);
			break;

		case B_BASE_ADDRESS:
			searchBase = (addr_t)*_address;
			searchEnd = fEndAddress;
			break;

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
			searchBase = fBase;
			// TODO: remove this again when vm86 mode is moved into the kernel
			// completely (currently needs a userland address space!)
			if (searchBase == USER_BASE)
				searchBase = USER_BASE_ANY;
			searchEnd = fEndAddress;
			break;

		default:
			return B_BAD_VALUE;
	}

	status = _InsertAreaSlot(searchBase, size, searchEnd, addressSpec, area,
		allocationFlags);
	if (status == B_OK) {
		*_address = (void*)area->Base();
		fFreeSpace -= area->Size();
	}

	return status;
}


//! You must hold the address space's write lock.
void
VMUserAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	fAreas.Remove(area);

	if (area->id != RESERVED_AREA_ID) {
		IncrementChangeCount();
		fFreeSpace += area->Size();

		if (area == fAreaHint)
			fAreaHint = NULL;
	}
}

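/*!	Returns whether \a area could be resized to \a newSize in place, i.e.
	whether it would still fit in front of the next area or inside the
	reserved range it was created in.
*/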
bool
VMUserAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
{
	VMUserArea* next = fAreas.GetNext(static_cast<VMUserArea*>(area));
	addr_t newEnd = area->Base() + (newSize - 1);
	if (next == NULL) {
		if (fEndAddress >= newEnd)
			return true;
	} else {
		if (next->Base() > newEnd)
			return true;
	}

	// If the area was created inside a reserved area, it can
	// also be resized in that area
	// TODO: if there is free space after the reserved area, it could
	// be used as well...
	if (next != NULL && next->id == RESERVED_AREA_ID
		&& next->cache_offset <= area->Base()
		&& next->Base() + (next->Size() - 1) >= newEnd) {
		return true;
	}

	return false;
}


status_t
VMUserAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	addr_t newEnd = area->Base() + (newSize - 1);
	VMUserArea* next = fAreas.GetNext(area);
	if (next != NULL && next->Base() <= newEnd) {
		if (next->id != RESERVED_AREA_ID
			|| next->cache_offset > area->Base()
			|| next->Base() + (next->Size() - 1) < newEnd) {
			panic("resize situation for area %p has changed although we "
				"should have the address space lock", area);
			return B_ERROR;
		}

		// resize reserved area
		addr_t offset = area->Base() + newSize - next->Base();
		if (next->Size() <= offset) {
			RemoveArea(next, allocationFlags);
			next->~VMUserArea();
			free_etc(next, allocationFlags);
		} else {
			status_t error = ShrinkAreaHead(next, next->Size() - offset,
				allocationFlags);
			if (error != B_OK)
				return error;
		}
	}

	area->SetSize(newSize);
	return B_OK;
}


status_t
VMUserAddressSpace::ShrinkAreaHead(VMArea* area, size_t size,
	uint32 allocationFlags)
{
	size_t oldSize = area->Size();
	if (size == oldSize)
		return B_OK;

	area->SetBase(area->Base() + oldSize - size);
	area->SetSize(size);

	return B_OK;
}


status_t
VMUserAddressSpace::ShrinkAreaTail(VMArea* area, size_t size,
	uint32 allocationFlags)
{
	size_t oldSize = area->Size();
	if (size == oldSize)
		return B_OK;

	area->SetSize(size);

	return B_OK;
}

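/*!	Reserves the given address range by inserting a placeholder area with
	the id RESERVED_AREA_ID. The original base address is stored in the
	placeholder's \c cache_offset field, which CanResizeArea() and
	ResizeArea() consult when letting an area created inside the
	reservation grow into it.
*/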
status_t
VMUserAddressSpace::ReserveAddressRange(void** _address, uint32 addressSpec,
	size_t size, uint32 flags, uint32 allocationFlags)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// okay, someone is trying to delete this address space now, so we
		// can't insert the area, let's back out
		return B_BAD_TEAM_ID;
	}

	VMUserArea* area = VMUserArea::CreateReserved(this, flags, allocationFlags);
	if (area == NULL)
		return B_NO_MEMORY;

	status_t status = InsertArea(_address, addressSpec, size, area,
		allocationFlags);
	if (status != B_OK) {
		area->~VMUserArea();
		free_etc(area, allocationFlags);
		return status;
	}

	area->cache_offset = area->Base();
		// we cache the original base address here

	Get();
	return B_OK;
}

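//! Removes all reserved ranges that lie completely within the given range.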
status_t
VMUserAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
	uint32 allocationFlags)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// someone is trying to delete this address space now, so we can't
		// remove the reserved ranges; back out
		return B_BAD_TEAM_ID;
	}

	// search area list and remove any matching reserved ranges
	addr_t endAddress = address + (size - 1);
	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		// the area must be completely part of the reserved range
		if (area->Base() + (area->Size() - 1) > endAddress)
			break;
		if (area->id == RESERVED_AREA_ID && area->Base() >= (addr_t)address) {
			// remove reserved range
			RemoveArea(area, allocationFlags);
			Put();
			area->~VMUserArea();
			free_etc(area, allocationFlags);
		}
	}

	return B_OK;
}

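//! Removes and frees every reserved range in this address space.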
void
VMUserAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
{
	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		if (area->id == RESERVED_AREA_ID) {
			RemoveArea(area, allocationFlags);
			Put();
			area->~VMUserArea();
			free_etc(area, allocationFlags);
		}
	}
}

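//! Dumps this address space's area list to the kernel debugger output.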
void
VMUserAddressSpace::Dump() const
{
	VMAddressSpace::Dump();
	kprintf("area_hint: %p\n", fAreaHint);

	kprintf("area_list:\n");

	for (VMUserAreaList::ConstIterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		kprintf(" area 0x%lx: ", area->id);
		kprintf("base_addr = 0x%lx ", area->Base());
		kprintf("size = 0x%lx ", area->Size());
		kprintf("name = '%s' ", area->name);
		kprintf("protection = 0x%lx\n", area->protection);
	}
}

/*!	Finds a reserved area that covers the region spanned by \a start and
	\a size, inserts the \a area into that region and makes sure that
	there are reserved regions for the remaining parts.
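	For example (hypothetical numbers), inserting a one-page area into the
	middle of a three-page reserved range leaves two one-page reserved
	ranges, one on either side of the new area.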
*/
status_t
VMUserAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size,
	VMUserArea* area, uint32 allocationFlags)
{
	VMUserArea* next;

	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			(next = it.Next()) != NULL;) {
		if (next->Base() <= start
			&& next->Base() + (next->Size() - 1) >= start + (size - 1)) {
			// This area covers the requested range
			if (next->id != RESERVED_AREA_ID) {
				// but it's not reserved space, it's a real area
				return B_BAD_VALUE;
			}

			break;
		}
	}

	if (next == NULL)
		return B_ENTRY_NOT_FOUND;

	// Now we have to transfer the requested part of the reserved
	// range to the new area - and remove, resize or split the old
	// reserved area.

	if (start == next->Base()) {
		// the area starts at the beginning of the reserved range
		fAreas.Insert(next, area);

		if (size == next->Size()) {
			// the new area fully covers the reserved range
			fAreas.Remove(next);
			Put();
			next->~VMUserArea();
			free_etc(next, allocationFlags);
		} else {
			// resize the reserved range behind the area
			next->SetBase(next->Base() + size);
			next->SetSize(next->Size() - size);
		}
	} else if (start + size == next->Base() + next->Size()) {
		// the area is at the end of the reserved range
		fAreas.Insert(fAreas.GetNext(next), area);

		// resize the reserved range before the area
		next->SetSize(start - next->Base());
	} else {
		// the area splits the reserved range into two separate ones
		// we need a new reserved area to cover this space
		VMUserArea* reserved = VMUserArea::CreateReserved(this,
			next->protection, allocationFlags);
		if (reserved == NULL)
			return B_NO_MEMORY;

		Get();
		fAreas.Insert(fAreas.GetNext(next), reserved);
		fAreas.Insert(reserved, area);

		// resize regions
		reserved->SetSize(next->Base() + next->Size() - start - size);
		next->SetSize(start - next->Base());
		reserved->SetBase(start + size);
		reserved->cache_offset = next->cache_offset;
	}

	area->SetBase(start);
	area->SetSize(size);
	IncrementChangeCount();

	return B_OK;
}


/*!	Must be called with this address space's write lock held */
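// The search below walks the (base-sorted) area list up to the requested
// start address and then, depending on "addressSpec", looks for the first
// gap large enough for "size" bytes; reserved ranges may be consumed or
// shrunk to make room for the new area.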
status_t
VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
	uint32 addressSpec, VMUserArea* area, uint32 allocationFlags)
{
	VMUserArea* last = NULL;
	VMUserArea* next;
	bool foundSpot = false;

	TRACE(("VMUserAddressSpace::_InsertAreaSlot: address space %p, start "
		"0x%lx, size %ld, end 0x%lx, addressSpec %ld, area %p\n", this, start,
		size, end, addressSpec, area));

	// do some sanity checking
	if (start < fBase || size == 0 || end > fEndAddress
		|| start + (size - 1) > end)
		return B_BAD_ADDRESS;

	if (addressSpec == B_EXACT_ADDRESS && area->id != RESERVED_AREA_ID) {
		// search for a reserved area
		status_t status = _InsertAreaIntoReservedRegion(start, size, area,
			allocationFlags);
		if (status == B_OK || status == B_BAD_VALUE)
			return status;

		// There was no reserved area, and the slot doesn't seem to be used
		// already
		// TODO: this could be further optimized.
	}

	size_t alignment = B_PAGE_SIZE;
	if (addressSpec == B_ANY_KERNEL_BLOCK_ADDRESS) {
		// align the memory to the next power of two of the size
		while (alignment < size)
			alignment <<= 1;
	}

	start = ROUNDUP(start, alignment);

	// walk up to the spot where we should start searching
second_chance:
	VMUserAreaList::Iterator it = fAreas.GetIterator();
	while ((next = it.Next()) != NULL) {
		if (next->Base() > start + (size - 1)) {
			// we have a winner
			break;
		}

		last = next;
	}

	// find the right spot depending on the address specification - the area
	// will be inserted directly after "last" ("next" is not referenced anymore)

	switch (addressSpec) {
		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		{
			// find a hole big enough for a new area
			if (last == NULL) {
				// see if we can build it at the beginning of the virtual map
				addr_t alignedBase = ROUNDUP(fBase, alignment);
				if (is_valid_spot(fBase, alignedBase, size,
						next == NULL ? end : next->Base())) {
					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			// keep walking
			while (next != NULL) {
				addr_t alignedBase = ROUNDUP(last->Base() + last->Size(),
					alignment);
				if (is_valid_spot(last->Base() + (last->Size() - 1),
						alignedBase, size, next->Base())) {
					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			if (foundSpot)
				break;

			addr_t alignedBase = ROUNDUP(last->Base() + last->Size(),
				alignment);
			if (is_valid_spot(last->Base() + (last->Size() - 1), alignedBase,
					size, end)) {
				// got a spot
				foundSpot = true;
				area->SetBase(alignedBase);
				break;
			} else if (area->id != RESERVED_AREA_ID) {
				// We didn't find a free spot - if there are any reserved areas,
				// we can now test those for free space
				// TODO: it would make sense to start with the biggest of them
				it.Rewind();
				next = it.Next();
				for (last = NULL; next != NULL; next = it.Next()) {
					if (next->id != RESERVED_AREA_ID) {
						last = next;
						continue;
					}

					// TODO: take free space after the reserved area into
					// account!
					addr_t alignedBase = ROUNDUP(next->Base(), alignment);
					if (next->Base() == alignedBase && next->Size() == size) {
						// The reserved area is entirely covered, and thus,
						// removed
						fAreas.Remove(next);

						foundSpot = true;
						area->SetBase(alignedBase);
						next->~VMUserArea();
						free_etc(next, allocationFlags);
						break;
					}

					if ((next->protection & RESERVED_AVOID_BASE) == 0
						&& alignedBase == next->Base()
						&& next->Size() >= size) {
						// The new area will be placed at the beginning of the
						// reserved area and the reserved area will be offset
						// and resized
						foundSpot = true;
						next->SetBase(next->Base() + size);
						next->SetSize(next->Size() - size);
						area->SetBase(alignedBase);
						break;
					}

					if (is_valid_spot(next->Base(), alignedBase, size,
							next->Base() + (next->Size() - 1))) {
						// The new area will be placed at the end of the
						// reserved area, and the reserved area will be resized
						// to make space
						alignedBase = ROUNDDOWN(
							next->Base() + next->Size() - size, alignment);

						foundSpot = true;
						next->SetSize(alignedBase - next->Base());
						area->SetBase(alignedBase);
						last = next;
						break;
					}

					last = next;
				}
			}
			break;
		}

		case B_BASE_ADDRESS:
		{
			// find a hole big enough for a new area beginning with "start"
			if (last == NULL) {
				// see if we can build it at the beginning of the specified
				// start
				if (next == NULL || next->Base() > start + (size - 1)) {
					foundSpot = true;
					area->SetBase(start);
					break;
				}

				last = next;
				next = it.Next();
			}

			// keep walking
			while (next != NULL) {
				if (next->Base() - (last->Base() + last->Size()) >= size) {
					// we found a spot (it'll be filled up below)
					break;
				}

				last = next;
				next = it.Next();
			}

			addr_t lastEnd = last->Base() + (last->Size() - 1);
			if (next != NULL || end - lastEnd >= size) {
				// got a spot
				foundSpot = true;
				if (lastEnd < start)
					area->SetBase(start);
				else
					area->SetBase(lastEnd + 1);
				break;
			}

			// we didn't find a free spot in the requested range, so we'll
			// try again without any restrictions
			start = fBase;
			addressSpec = B_ANY_ADDRESS;
			last = NULL;
			goto second_chance;
		}

		case B_EXACT_ADDRESS:
			// see if we can create it exactly here
			if ((last == NULL || last->Base() + (last->Size() - 1) < start)
				&& (next == NULL || next->Base() > start + (size - 1))) {
				foundSpot = true;
				area->SetBase(start);
				break;
			}
			break;
		default:
			return B_BAD_VALUE;
	}

	if (!foundSpot)
		return addressSpec == B_EXACT_ADDRESS ? B_BAD_VALUE : B_NO_MEMORY;

	area->SetSize(size);
	if (last)
		fAreas.Insert(fAreas.GetNext(last), area);
	else
		fAreas.Insert(fAreas.Head(), area);

	IncrementChangeCount();
	return B_OK;
}