xref: /haiku/src/system/kernel/arch/x86/arch_vm.cpp (revision f8da8f3477d3c18142e59d17d05a545982faa5a8)
/*
 * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008, Jérôme Duval.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <new>

#include <KernelExport.h>

#include <boot/kernel_args.h>
#include <smp.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>

#include <arch/vm.h>
#include <arch/int.h>
#include <arch/cpu.h>

#include <arch/x86/bios.h>


//#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

// 0: disabled, 1: some, 2: more
#define TRACE_MTRR_ARCH_VM 1

#if TRACE_MTRR_ARCH_VM >= 1
#	define TRACE_MTRR(x...) dprintf(x)
#else
#	define TRACE_MTRR(x...)
#endif

#if TRACE_MTRR_ARCH_VM >= 2
#	define TRACE_MTRR2(x...) dprintf(x)
#else
#	define TRACE_MTRR2(x...)
#endif


void *gDmaAddress;


struct memory_type_range : DoublyLinkedListLinkImpl<memory_type_range> {
	uint64						base;
	uint64						size;
	uint32						type;
	area_id						area;
};


struct memory_type_range_point
		: DoublyLinkedListLinkImpl<memory_type_range_point> {
	uint64				address;
	memory_type_range*	range;

	bool IsStart() const	{ return range->base == address; }

	bool operator<(const memory_type_range_point& other) const
	{
		return address < other.address;
	}
};


struct update_mtrr_info {
	uint64	ignoreUncacheableSize;
	uint64	shortestUncacheableSize;
};


typedef DoublyLinkedList<memory_type_range> MemoryTypeRangeList;

static mutex sMemoryTypeLock = MUTEX_INITIALIZER("memory type ranges");
static MemoryTypeRangeList sMemoryTypeRanges;
static int32 sMemoryTypeRangeCount = 0;

static const uint32 kMaxMemoryTypeRegisters	= 32;
static x86_mtrr_info sMemoryTypeRegisters[kMaxMemoryTypeRegisters];
static uint32 sMemoryTypeRegisterCount;
static uint32 sMemoryTypeRegistersUsed;

static memory_type_range* sTemporaryRanges = NULL;
static memory_type_range_point* sTemporaryRangePoints = NULL;
static int32 sTemporaryRangeCount = 0;
static int32 sTemporaryRangePointCount = 0;


static void
set_mtrrs()
{
	x86_set_mtrrs(IA32_MTR_WRITE_BACK, sMemoryTypeRegisters,
		sMemoryTypeRegistersUsed);

#if TRACE_MTRR_ARCH_VM
	TRACE_MTRR("set MTRRs to:\n");
	for (uint32 i = 0; i < sMemoryTypeRegistersUsed; i++) {
		const x86_mtrr_info& info = sMemoryTypeRegisters[i];
		TRACE_MTRR("  mtrr: %2" B_PRIu32 ": base: %#10" B_PRIx64 ", size: %#10"
			B_PRIx64 ", type: %u\n", i, info.base, info.size,
			info.type);
	}
#endif
}


static bool
add_used_mtrr(uint64 base, uint64 size, uint32 type)
{
	if (sMemoryTypeRegistersUsed == sMemoryTypeRegisterCount)
		return false;

	x86_mtrr_info& mtrr = sMemoryTypeRegisters[sMemoryTypeRegistersUsed++];
	mtrr.base = base;
	mtrr.size = size;
	mtrr.type = type;

	return true;
}


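/*!	Covers the range [base, base + size) with naturally aligned power-of-two
	MTRRs, peeling a chunk off the front whenever \a base is misaligned at the
	current interval and off the back whenever \a size has the corresponding
	bit set. For example (with the usual 4 KB B_PAGE_SIZE) the range
	0x9000 - 0x10000 is covered by three MTRRs: {0x9000, 0x1000},
	{0xa000, 0x2000} and {0xc000, 0x4000}.
*/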
static bool
add_mtrrs_for_range(uint64 base, uint64 size, uint32 type)
{
	for (uint64 interval = B_PAGE_SIZE; size > 0; interval <<= 1) {
		if ((base & interval) != 0) {
			if (!add_used_mtrr(base, interval, type))
				return false;
			base += interval;
			size -= interval;
		}

		if ((size & interval) != 0) {
			if (!add_used_mtrr(base + size - interval, interval, type))
				return false;
			size -= interval;
		}
	}

	return true;
}


static memory_type_range*
find_range(area_id areaID)
{
	for (MemoryTypeRangeList::Iterator it = sMemoryTypeRanges.GetIterator();
			memory_type_range* range = it.Next();) {
		if (range->area == areaID)
			return range;
	}

	return NULL;
}


static void
optimize_memory_ranges(MemoryTypeRangeList& ranges, uint32 type,
	bool removeRanges)
{
	uint64 previousEnd = 0;
	uint64 nextStart = 0;
	MemoryTypeRangeList::Iterator it = ranges.GetIterator();
	memory_type_range* range = it.Next();
	while (range != NULL) {
		if (range->type != type) {
			previousEnd = range->base + range->size;
			nextStart = 0;
			range = it.Next();
			continue;
		}

		// find the start of the next range we cannot join this one with
		if (nextStart == 0) {
			MemoryTypeRangeList::Iterator nextIt = it;
			while (memory_type_range* nextRange = nextIt.Next()) {
				if (nextRange->type != range->type) {
					nextStart = nextRange->base;
					break;
				}
			}

			if (nextStart == 0) {
				// no upper limit -- set an artificial one, so we don't need to
				// special case below
				nextStart = (uint64)1 << 32;
			}
		}

		// Align the range's base and end to the greatest power of two
		// possible. As long as we can align both without intersecting any
		// differently typed range, we can extend the range without making it
		// more complicated. Once one side hits a limit, we need to be careful:
		// we can still continue aligning the other side if the range crosses
		// the power of two boundary.
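		// For example, with previousEnd == 0 and nextStart == 0x10000, a range
		// 0x3000 - 0x6000 is grown step by step to 0x2000 - 0x6000,
		// 0x0 - 0x8000 and finally 0x0 - 0x10000, turning two MTRRs into a
		// single naturally aligned one.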
		uint64 rangeBase = range->base;
		uint64 rangeEnd = rangeBase + range->size;
		uint64 interval = B_PAGE_SIZE * 2;
		while (true) {
			uint64 alignedBase = rangeBase & ~(interval - 1);
			uint64 alignedEnd = (rangeEnd + interval - 1) & ~(interval - 1);

			if (alignedBase < previousEnd)
				alignedBase += interval;

			if (alignedEnd > nextStart)
				alignedEnd -= interval;

			if (alignedBase >= alignedEnd)
				break;

			rangeBase = std::min(rangeBase, alignedBase);
			rangeEnd = std::max(rangeEnd, alignedEnd);

			interval <<= 1;
		}

		range->base = rangeBase;
		range->size = rangeEnd - rangeBase;

		if (removeRanges)
			it.Remove();

		previousEnd = rangeEnd;

		// Skip the subsequent ranges we have swallowed and possibly cut one
		// we now partially intersect with.
		while ((range = it.Next()) != NULL) {
			if (range->base >= rangeEnd)
				break;

			if (range->base + range->size > rangeEnd) {
				// we partially intersect -- cut the range
				range->size = range->base + range->size - rangeEnd;
				range->base = rangeEnd;
				break;
			}

			// we have swallowed this range completely
			range->size = 0;
			it.Remove();
		}
	}
}


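/*!	Grows the static scratch arrays used by update_mtrrs(), if necessary. Each
	registered range contributes two sweep points (its start and its end),
	which is why the caller asks for room for sMemoryTypeRangeCount * 2
	entries.
*/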
static bool
ensure_temporary_ranges_space(int32 count)
{
	if (sTemporaryRangeCount >= count && sTemporaryRangePointCount >= count)
		return true;

	// round count to power of 2
	int32 unalignedCount = count;
	count = 8;
	while (count < unalignedCount)
		count <<= 1;

	// resize ranges array
	if (sTemporaryRangeCount < count) {
		memory_type_range* ranges = new(std::nothrow) memory_type_range[count];
		if (ranges == NULL)
			return false;

		delete[] sTemporaryRanges;

		sTemporaryRanges = ranges;
		sTemporaryRangeCount = count;
	}

	// resize points array
	if (sTemporaryRangePointCount < count) {
		memory_type_range_point* points
			= new(std::nothrow) memory_type_range_point[count];
		if (points == NULL)
			return false;

		delete[] sTemporaryRangePoints;

		sTemporaryRangePoints = points;
		sTemporaryRangePointCount = count;
	}

	return true;
}


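/*!	Recomputes and programs the MTRRs from sMemoryTypeRanges: the registered
	ranges are swept to resolve overlaps, the resulting effective ranges are
	extended and merged to be as MTRR-friendly as possible, and one or more
	variable MTRRs are allocated per non-write-back range. Returns B_BUSY if
	the available MTRRs do not suffice; the caller may then retry with small
	uncacheable ranges ignored via \a updateInfo.
*/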
status_t
update_mtrrs(update_mtrr_info& updateInfo)
{
	// resize the temporary points/ranges arrays, if necessary
	if (!ensure_temporary_ranges_space(sMemoryTypeRangeCount * 2))
		return B_NO_MEMORY;

	// get the range points and sort them
	memory_type_range_point* rangePoints = sTemporaryRangePoints;
	int32 pointCount = 0;
	for (MemoryTypeRangeList::Iterator it = sMemoryTypeRanges.GetIterator();
			memory_type_range* range = it.Next();) {
		if (range->type == IA32_MTR_UNCACHED) {
			// Ignore uncacheable ranges below a certain size, if requested.
			// Since we always enforce uncacheability via the PTE attributes,
			// this is no problem (though not recommended for performance
			// reasons).
			if (range->size <= updateInfo.ignoreUncacheableSize)
				continue;
			if (range->size < updateInfo.shortestUncacheableSize)
				updateInfo.shortestUncacheableSize = range->size;
		}

		rangePoints[pointCount].address = range->base;
		rangePoints[pointCount++].range = range;
		rangePoints[pointCount].address = range->base + range->size;
		rangePoints[pointCount++].range = range;
	}

	std::sort(rangePoints, rangePoints + pointCount);

#if TRACE_MTRR_ARCH_VM >= 2
	TRACE_MTRR2("memory type range points:\n");
	for (int32 i = 0; i < pointCount; i++) {
		TRACE_MTRR2("%12" B_PRIx64 " (%p)\n", rangePoints[i].address,
			rangePoints[i].range);
	}
#endif

	// Compute the effective ranges. When ranges overlap, we go with the
	// stricter requirement. The types are not necessarily totally ordered, so
	// the order we use below is not always correct. To keep it simple we
	// consider it the responsibility of the callers not to define overlapping
	// memory ranges with incomparable types.
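	// For instance, a write-back range 0x0 - 0x10000 overlapped by an uncached
	// range 0x4000 - 0x8000 yields three effective ranges: write-back
	// 0x0 - 0x4000, uncached 0x4000 - 0x8000 and write-back 0x8000 - 0x10000.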

	memory_type_range* ranges = sTemporaryRanges;
	typedef DoublyLinkedList<memory_type_range_point> PointList;
	PointList pendingPoints;
	memory_type_range* activeRange = NULL;
	int32 rangeCount = 0;

	for (int32 i = 0; i < pointCount; i++) {
		memory_type_range_point* point = &rangePoints[i];
		bool terminateRange = false;
		if (point->IsStart()) {
			// a range start point
			pendingPoints.Add(point);
			if (activeRange != NULL && activeRange->type > point->range->type)
				terminateRange = true;
		} else {
			// a range end point -- remove the pending start point
			for (PointList::Iterator it = pendingPoints.GetIterator();
					memory_type_range_point* pendingPoint = it.Next();) {
				if (pendingPoint->range == point->range) {
					it.Remove();
					break;
				}
			}

			if (point->range == activeRange)
				terminateRange = true;
		}

		if (terminateRange) {
			ranges[rangeCount].size = point->address - ranges[rangeCount].base;
			rangeCount++;
			activeRange = NULL;
		}

		if (activeRange != NULL || pendingPoints.IsEmpty())
			continue;

		// we need to start a new range -- find the strictest pending range
		for (PointList::Iterator it = pendingPoints.GetIterator();
				memory_type_range_point* pendingPoint = it.Next();) {
			memory_type_range* pendingRange = pendingPoint->range;
			if (activeRange == NULL || activeRange->type > pendingRange->type)
				activeRange = pendingRange;
		}

		memory_type_range* previousRange = rangeCount > 0
			? &ranges[rangeCount - 1] : NULL;
		if (previousRange == NULL || previousRange->type != activeRange->type
				|| previousRange->base + previousRange->size
					< activeRange->base) {
			// we can't join with the previous range -- add a new one
			ranges[rangeCount].base = point->address;
			ranges[rangeCount].type = activeRange->type;
		} else
			rangeCount--;
	}

#if TRACE_MTRR_ARCH_VM >= 2
	TRACE_MTRR2("effective memory type ranges:\n");
	for (int32 i = 0; i < rangeCount; i++) {
		TRACE_MTRR2("%12" B_PRIx64 " - %12" B_PRIx64 ": %" B_PRIu32 "\n",
			ranges[i].base, ranges[i].base + ranges[i].size, ranges[i].type);
	}
#endif

	// Extend ranges to be more MTRR-friendly. A range is MTRR-friendly when it
	// has a power-of-two size and a base address aligned to the size. For
	// ranges without this property we need more than one MTRR. We improve
	// MTRR-friendliness by aligning a range's base and end address to the
	// greatest power of two (base rounded down, end up) such that the extended
	// range does not intersect with any other differently typed range. We join
	// equally typed ranges, if possible. There are two exceptions to the
	// intersection requirement: Uncached ranges may intersect with any other
	// range; the resulting type will still be uncached. Hence we can ignore
	// uncached ranges when extending the other ranges. Write-through ranges may
	// intersect with write-back ranges; the resulting type will be
	// write-through. Hence we can ignore write-through ranges when extending
	// write-back ranges.

	MemoryTypeRangeList rangeList;
	for (int32 i = 0; i < rangeCount; i++)
		rangeList.Add(&ranges[i]);

	static const uint32 kMemoryTypes[] = {
		IA32_MTR_UNCACHED,
		IA32_MTR_WRITE_COMBINING,
		IA32_MTR_WRITE_PROTECTED,
		IA32_MTR_WRITE_THROUGH,
		IA32_MTR_WRITE_BACK
	};
	static const int32 kMemoryTypeCount = sizeof(kMemoryTypes)
		/ sizeof(*kMemoryTypes);

	for (int32 i = 0; i < kMemoryTypeCount; i++) {
		uint32 type = kMemoryTypes[i];

		// Remove uncached and write-through ranges after processing them. This
		// lets us leverage their intersection property (with any other range
		// and with write-back ranges, respectively).
		bool removeRanges = type == IA32_MTR_UNCACHED
			|| type == IA32_MTR_WRITE_THROUGH;

		optimize_memory_ranges(rangeList, type, removeRanges);
	}

#if TRACE_MTRR_ARCH_VM >= 2
	TRACE_MTRR2("optimized memory type ranges:\n");
	for (int32 i = 0; i < rangeCount; i++) {
		if (ranges[i].size > 0) {
			TRACE_MTRR2("%12" B_PRIx64 " - %12" B_PRIx64 ": %" B_PRIu32 "\n",
				ranges[i].base, ranges[i].base + ranges[i].size,
				ranges[i].type);
		}
	}
#endif

	// compute the mtrrs from the ranges
	sMemoryTypeRegistersUsed = 0;
	for (int32 i = 0; i < kMemoryTypeCount; i++) {
		uint32 type = kMemoryTypes[i];

		// skip write-back ranges -- that'll be the default type anyway
		if (type == IA32_MTR_WRITE_BACK)
			continue;

		for (int32 i = 0; i < rangeCount; i++) {
			if (ranges[i].size == 0 || ranges[i].type != type)
				continue;

			if (!add_mtrrs_for_range(ranges[i].base, ranges[i].size, type))
				return B_BUSY;
		}
	}

	set_mtrrs();

	return B_OK;
}


status_t
update_mtrrs()
{
	// Until we know how many MTRRs we have, pretend everything is OK.
	if (sMemoryTypeRegisterCount == 0)
		return B_OK;

	update_mtrr_info updateInfo;
	updateInfo.ignoreUncacheableSize = 0;

	while (true) {
		TRACE_MTRR2("update_mtrrs(): Trying with ignoreUncacheableSize %#"
			B_PRIx64 ".\n", updateInfo.ignoreUncacheableSize);

		updateInfo.shortestUncacheableSize = ~(uint64)0;
		status_t error = update_mtrrs(updateInfo);
		if (error != B_BUSY) {
			if (error == B_OK && updateInfo.ignoreUncacheableSize > 0) {
				TRACE_MTRR("update_mtrrs(): Succeeded setting MTRRs after "
					"ignoring uncacheable ranges up to size %#" B_PRIx64 ".\n",
					updateInfo.ignoreUncacheableSize);
			}
			return error;
		}

		// Not enough MTRRs. Retry with fewer uncacheable ranges.
		if (updateInfo.shortestUncacheableSize == ~(uint64)0) {
			// Ugh, even without any uncacheable ranges the available MTRRs do
			// not suffice.
			panic("update_mtrrs(): Out of MTRRs!");
			return B_BUSY;
		}

		ASSERT(updateInfo.ignoreUncacheableSize
			< updateInfo.shortestUncacheableSize);

		updateInfo.ignoreUncacheableSize = updateInfo.shortestUncacheableSize;
	}
}


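/*!	Registers (or updates) the memory type range associated with an area and
	reprograms the MTRRs accordingly. This is the backend of
	arch_vm_set_memory_type(); a \a type of 0 (no B_MTR_* constant set) is a
	no-op. \a areaID may be -1 for ranges that do not belong to an area, as
	used for the physical memory ranges in arch_vm_init_post_modules().
*/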
static status_t
add_memory_type_range(area_id areaID, uint64 base, uint64 size, uint32 type)
{
	// translate the type
	if (type == 0)
		return B_OK;

	switch (type) {
		case B_MTR_UC:
			type = IA32_MTR_UNCACHED;
			break;
		case B_MTR_WC:
			type = IA32_MTR_WRITE_COMBINING;
			break;
		case B_MTR_WT:
			type = IA32_MTR_WRITE_THROUGH;
			break;
		case B_MTR_WP:
			type = IA32_MTR_WRITE_PROTECTED;
			break;
		case B_MTR_WB:
			type = IA32_MTR_WRITE_BACK;
			break;
		default:
			return B_BAD_VALUE;
	}

	TRACE_MTRR("add_memory_type_range(%" B_PRId32 ", %#" B_PRIx64 ", %#"
		B_PRIx64 ", %" B_PRIu32 ")\n", areaID, base, size, type);

	MutexLocker locker(sMemoryTypeLock);

	memory_type_range* range = areaID >= 0 ? find_range(areaID) : NULL;
	int32 oldRangeType = -1;
	if (range != NULL) {
		if (range->base != base || range->size != size)
			return B_BAD_VALUE;
		if (range->type == type)
			return B_OK;

		oldRangeType = range->type;
		range->type = type;
	} else {
		range = new(std::nothrow) memory_type_range;
		if (range == NULL)
			return B_NO_MEMORY;

		range->area = areaID;
		range->base = base;
		range->size = size;
		range->type = type;
		sMemoryTypeRanges.Add(range);
		sMemoryTypeRangeCount++;
	}

	status_t error = update_mtrrs();
	if (error != B_OK) {
		// revert the addition of the range/change of its type
		if (oldRangeType < 0) {
			sMemoryTypeRanges.Remove(range);
			sMemoryTypeRangeCount--;
			delete range;
		} else
			range->type = oldRangeType;

		update_mtrrs();
		return error;
	}

	return B_OK;
}


static void
remove_memory_type_range(area_id areaID)
{
	MutexLocker locker(sMemoryTypeLock);

	memory_type_range* range = find_range(areaID);
	if (range != NULL) {
		TRACE_MTRR("remove_memory_type_range(%" B_PRId32 ", %#" B_PRIx64 ", %#"
			B_PRIx64 ", %" B_PRIu32 ")\n", range->area, range->base,
			range->size, range->type);

		sMemoryTypeRanges.Remove(range);
		sMemoryTypeRangeCount--;
		delete range;

		update_mtrrs();
	} else {
		dprintf("remove_memory_type_range(): no range known for area %" B_PRId32
			"\n", areaID);
	}
}


//	#pragma mark -


status_t
arch_vm_init(kernel_args *args)
{
	TRACE(("arch_vm_init: entry\n"));
	return 0;
}


/*!	Marks the DMA region as in-use and maps it into the kernel address space */
status_t
arch_vm_init_post_area(kernel_args *args)
{
	area_id id;

	TRACE(("arch_vm_init_post_area: entry\n"));

	// account for DMA area and mark the pages unusable
	vm_mark_page_range_inuse(0x0, 0xa0000 / B_PAGE_SIZE);

	// map 0 - 0xa0000 directly
	id = map_physical_memory("dma_region", 0x0, 0xa0000,
		B_ANY_KERNEL_ADDRESS | B_MTR_WB,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, &gDmaAddress);
	if (id < 0) {
		panic("arch_vm_init_post_area: unable to map dma region\n");
		return B_NO_MEMORY;
	}

#ifndef __x86_64__
	return bios_init();
#else
	return B_OK;
#endif
}


/*!	Gets rid of all yet unmapped (and therefore now unused) page tables */
status_t
arch_vm_init_end(kernel_args *args)
{
	TRACE(("arch_vm_init_end: entry\n"));

	// throw away anything in the kernel_args.pgtable[] that's not yet mapped
	vm_free_unused_boot_loader_range(KERNEL_LOAD_BASE,
		args->arch_args.virtual_end - KERNEL_LOAD_BASE);

	return B_OK;
}


status_t
arch_vm_init_post_modules(kernel_args *args)
{
	// the x86 CPU modules are now accessible

	sMemoryTypeRegisterCount = x86_count_mtrrs();
	if (sMemoryTypeRegisterCount == 0)
		return B_OK;

	// not very likely, but play safe here
	if (sMemoryTypeRegisterCount > kMaxMemoryTypeRegisters)
		sMemoryTypeRegisterCount = kMaxMemoryTypeRegisters;

	// set the physical memory ranges to write-back mode
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		add_memory_type_range(-1, args->physical_memory_range[i].start,
			args->physical_memory_range[i].size, B_MTR_WB);
	}

	return B_OK;
}


void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
	// This function is only invoked when a userland thread is in the process
	// of dying. It switches to the kernel team and does whatever cleanup is
	// necessary (in case it is the team's main thread, it will delete the
	// team).
	// It is, however, not necessary to change the page directory: userland
	// teams' page directories include all kernel mappings as well. Furthermore,
	// our arch-specific translation map data objects are ref-counted, so they
	// won't go away as long as they are still used on any CPU.
}


bool
arch_vm_supports_protection(uint32 protection)
{
	// x86 always has the same read/write properties for userland and the
	// kernel.
	// That's why we do not support user-read/kernel-write access. While the
	// other way around is not supported either, we don't care in this case
	// and give the kernel full access.
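	// E.g. B_READ_AREA | B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA (userland
	// read-only, kernel writable) is rejected.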
	if ((protection & (B_READ_AREA | B_WRITE_AREA)) == B_READ_AREA
		&& (protection & B_KERNEL_WRITE_AREA) != 0) {
		return false;
	}

	// Userland and the kernel share the same NX-bit setting.
	// That's why we do not allow any area that userland can access but not
	// execute while the kernel can execute it.
	if ((protection & (B_READ_AREA | B_WRITE_AREA)) != 0
		&& (protection & B_EXECUTE_AREA) == 0
		&& (protection & B_KERNEL_EXECUTE_AREA) != 0) {
		return false;
	}

	return true;
}


void
arch_vm_unset_memory_type(struct VMArea *area)
{
	if (area->MemoryType() == 0)
		return;

	remove_memory_type_range(area->id);
}


status_t
arch_vm_set_memory_type(struct VMArea *area, phys_addr_t physicalBase,
	uint32 type)
{
	return add_memory_type_range(area->id, physicalBase, area->Size(), type);
}
760