/*
 * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008, Jérôme Duval.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <new>

#include <KernelExport.h>

#include <smp.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>

#include <arch/vm.h>
#include <arch/int.h>
#include <arch/cpu.h>

#include <arch/x86/bios.h>


//#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

// 0: disabled, 1: some, 2: more
#define TRACE_MTRR_ARCH_VM 1

#if TRACE_MTRR_ARCH_VM >= 1
#	define TRACE_MTRR(x...) dprintf(x)
#else
#	define TRACE_MTRR(x...)
#endif

#if TRACE_MTRR_ARCH_VM >= 2
#	define TRACE_MTRR2(x...) dprintf(x)
#else
#	define TRACE_MTRR2(x...)
#endif


void *gDmaAddress;


struct memory_type_range : DoublyLinkedListLinkImpl<memory_type_range> {
	uint64						base;
	uint64						size;
	uint32						type;
	area_id						area;
};


struct memory_type_range_point
		: DoublyLinkedListLinkImpl<memory_type_range_point> {
	uint64				address;
	memory_type_range*	range;

	bool IsStart() const	{ return range->base == address; }

	bool operator<(const memory_type_range_point& other) const
	{
		return address < other.address;
	}
};


struct update_mtrr_info {
	uint64	ignoreUncacheableSize;
	uint64	shortestUncacheableSize;
};


typedef DoublyLinkedList<memory_type_range> MemoryTypeRangeList;

static mutex sMemoryTypeLock = MUTEX_INITIALIZER("memory type ranges");
static MemoryTypeRangeList sMemoryTypeRanges;
static int32 sMemoryTypeRangeCount = 0;

static const uint32 kMaxMemoryTypeRegisters	= 32;
static x86_mtrr_info sMemoryTypeRegisters[kMaxMemoryTypeRegisters];
static uint32 sMemoryTypeRegisterCount;
static uint32 sMemoryTypeRegistersUsed;

static memory_type_range* sTemporaryRanges = NULL;
static memory_type_range_point* sTemporaryRangePoints = NULL;
static int32 sTemporaryRangeCount = 0;
static int32 sTemporaryRangePointCount = 0;


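/*!	Programs the hardware MTRRs with the register values collected in
	sMemoryTypeRegisters, using write-back as the default memory type.
*/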
static void
set_mtrrs()
{
	x86_set_mtrrs(IA32_MTR_WRITE_BACK, sMemoryTypeRegisters,
		sMemoryTypeRegistersUsed);

#if TRACE_MTRR_ARCH_VM
	TRACE_MTRR("set MTRRs to:\n");
	for (uint32 i = 0; i < sMemoryTypeRegistersUsed; i++) {
		const x86_mtrr_info& info = sMemoryTypeRegisters[i];
		TRACE_MTRR("  mtrr: %2" B_PRIu32 ": base: %#10" B_PRIx64 ", size: %#10"
			B_PRIx64 ", type: %u\n", i, info.base, info.size,
			info.type);
	}
#endif
}


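/*!	Appends a single MTRR entry to sMemoryTypeRegisters. Returns \c false when
	all available registers are already in use.
*/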
static bool
add_used_mtrr(uint64 base, uint64 size, uint32 type)
{
	if (sMemoryTypeRegistersUsed == sMemoryTypeRegisterCount)
		return false;

	x86_mtrr_info& mtrr = sMemoryTypeRegisters[sMemoryTypeRegistersUsed++];
	mtrr.base = base;
	mtrr.size = size;
	mtrr.type = type;

	return true;
}


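/*!	Covers the given range with MTRR entries. Since an MTRR can only describe
	a power-of-two sized, naturally aligned block, the range is decomposed by
	peeling such blocks off its start and end for each bit set in the base
	respectively the size. Returns \c false if the available registers do not
	suffice.
*/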
static bool
add_mtrrs_for_range(uint64 base, uint64 size, uint32 type)
{
	for (uint64 interval = B_PAGE_SIZE; size > 0; interval <<= 1) {
		if ((base & interval) != 0) {
			if (!add_used_mtrr(base, interval, type))
				return false;
			base += interval;
			size -= interval;
		}

		if ((size & interval) != 0) {
			if (!add_used_mtrr(base + size - interval, interval, type))
				return false;
			size -= interval;
		}
	}

	return true;
}


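/*!	Returns the memory_type_range registered for the given area, or \c NULL if
	there is none. Called with sMemoryTypeLock held.
*/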
static memory_type_range*
find_range(area_id areaID)
{
	for (MemoryTypeRangeList::Iterator it = sMemoryTypeRanges.GetIterator();
			memory_type_range* range = it.Next();) {
		if (range->area == areaID)
			return range;
	}

	return NULL;
}


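/*!	Extends and joins the ranges of the given type in \a ranges to make them
	friendlier to MTRR coverage. A range is grown to power-of-two aligned
	boundaries as long as that doesn't make it intersect a differently typed
	neighbor; ranges it swallows are dropped from the list and a partially
	overlapped successor is cut accordingly. If \a removeRanges is \c true,
	the processed ranges themselves are removed from the list as well.
*/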
static void
optimize_memory_ranges(MemoryTypeRangeList& ranges, uint32 type,
	bool removeRanges)
{
	uint64 previousEnd = 0;
	uint64 nextStart = 0;
	MemoryTypeRangeList::Iterator it = ranges.GetIterator();
	memory_type_range* range = it.Next();
	while (range != NULL) {
		if (range->type != type) {
			previousEnd = range->base + range->size;
			nextStart = 0;
			range = it.Next();
			continue;
		}

		// find the start of the next range we cannot join this one with
		if (nextStart == 0) {
			MemoryTypeRangeList::Iterator nextIt = it;
			while (memory_type_range* nextRange = nextIt.Next()) {
				if (nextRange->type != range->type) {
					nextStart = nextRange->base;
					break;
				}
			}

			if (nextStart == 0) {
				// no upper limit -- set an artificial one, so we don't need to
				// special case below
				nextStart = (uint64)1 << 32;
			}
		}

		// Align the range's base and end to the greatest power of two possible.
		// As long as we can align both without intersecting any differently
		// typed range, we can extend the range without making it more
		// complicated. Once one side hits a limit we need to be careful. We
		// can still continue aligning the other side, if the range crosses the
		// power of two boundary.
		uint64 rangeBase = range->base;
		uint64 rangeEnd = rangeBase + range->size;
		uint64 interval = B_PAGE_SIZE * 2;
		while (true) {
			uint64 alignedBase = rangeBase & ~(interval - 1);
			uint64 alignedEnd = (rangeEnd + interval - 1) & ~(interval - 1);

			if (alignedBase < previousEnd)
				alignedBase += interval;

			if (alignedEnd > nextStart)
				alignedEnd -= interval;

			if (alignedBase >= alignedEnd)
				break;

			rangeBase = std::min(rangeBase, alignedBase);
			rangeEnd = std::max(rangeEnd, alignedEnd);

			interval <<= 1;
		}

		range->base = rangeBase;
		range->size = rangeEnd - rangeBase;

		if (removeRanges)
			it.Remove();

		previousEnd = rangeEnd;

		// Skip the subsequent ranges we have swallowed and possibly cut the
		// one we now partially intersect with.
		while ((range = it.Next()) != NULL) {
			if (range->base >= rangeEnd)
				break;

			if (range->base + range->size > rangeEnd) {
				// we partially intersect -- cut the range
				range->size = range->base + range->size - rangeEnd;
				range->base = rangeEnd;
				break;
			}

			// we have swallowed this range completely
			range->size = 0;
			it.Remove();
		}
	}
}


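/*!	Makes sure the temporary ranges and range point arrays hold at least
	\a count elements each, reallocating them (rounded up to a power of two,
	at least 8) if necessary.
*/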
static bool
ensure_temporary_ranges_space(int32 count)
{
	if (sTemporaryRangeCount >= count && sTemporaryRangePointCount >= count)
		return true;

	// round count to power of 2
	int32 unalignedCount = count;
	count = 8;
	while (count < unalignedCount)
		count <<= 1;

	// resize ranges array
	if (sTemporaryRangeCount < count) {
		memory_type_range* ranges = new(std::nothrow) memory_type_range[count];
		if (ranges == NULL)
			return false;

		delete[] sTemporaryRanges;

		sTemporaryRanges = ranges;
		sTemporaryRangeCount = count;
	}

	// resize points array
	if (sTemporaryRangePointCount < count) {
		memory_type_range_point* points
			= new(std::nothrow) memory_type_range_point[count];
		if (points == NULL)
			return false;

		delete[] sTemporaryRangePoints;

		sTemporaryRangePoints = points;
		sTemporaryRangePointCount = count;
	}

	return true;
}


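/*!	Recomputes and programs the MTRRs from sMemoryTypeRanges. The registered
	ranges are converted into a sorted list of start/end points, from which
	non-overlapping effective ranges with the strictest applicable type are
	derived. These are then optimized for MTRR friendliness and translated
	into register values. Uncacheable ranges not larger than
	\a updateInfo.ignoreUncacheableSize are skipped entirely; the size of the
	shortest uncacheable range that was not ignored is stored in
	\a updateInfo.shortestUncacheableSize. Returns \c B_BUSY if more MTRRs
	would be required than are available.
*/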
status_t
update_mtrrs(update_mtrr_info& updateInfo)
{
	// resize the temporary points/ranges arrays, if necessary
	if (!ensure_temporary_ranges_space(sMemoryTypeRangeCount * 2))
		return B_NO_MEMORY;

	// get the range points and sort them
	memory_type_range_point* rangePoints = sTemporaryRangePoints;
	int32 pointCount = 0;
	for (MemoryTypeRangeList::Iterator it = sMemoryTypeRanges.GetIterator();
			memory_type_range* range = it.Next();) {
		if (range->type == IA32_MTR_UNCACHED) {
			// Ignore uncacheable ranges below a certain size, if requested.
			// Since we always enforce uncacheability via the PTE attributes,
			// this is no problem (though not recommended for performance
			// reasons).
			if (range->size <= updateInfo.ignoreUncacheableSize)
				continue;
			if (range->size < updateInfo.shortestUncacheableSize)
				updateInfo.shortestUncacheableSize = range->size;
		}

		rangePoints[pointCount].address = range->base;
		rangePoints[pointCount++].range = range;
		rangePoints[pointCount].address = range->base + range->size;
		rangePoints[pointCount++].range = range;
	}

	std::sort(rangePoints, rangePoints + pointCount);

#if TRACE_MTRR_ARCH_VM >= 2
	TRACE_MTRR2("memory type range points:\n");
	for (int32 i = 0; i < pointCount; i++) {
		TRACE_MTRR2("%12" B_PRIx64 " (%p)\n", rangePoints[i].address,
			rangePoints[i].range);
	}
#endif

	// Compute the effective ranges. When ranges overlap, we go with the
	// stricter requirement. The types are not necessarily totally ordered, so
	// the order we use below is not always correct. To keep it simple we
	// consider it the responsibility of the callers not to define overlapping
	// memory ranges with incomparable types.

	memory_type_range* ranges = sTemporaryRanges;
	typedef DoublyLinkedList<memory_type_range_point> PointList;
	PointList pendingPoints;
	memory_type_range* activeRange = NULL;
	int32 rangeCount = 0;

	for (int32 i = 0; i < pointCount; i++) {
		memory_type_range_point* point = &rangePoints[i];
		bool terminateRange = false;
		if (point->IsStart()) {
			// a range start point
			pendingPoints.Add(point);
			if (activeRange != NULL && activeRange->type > point->range->type)
				terminateRange = true;
		} else {
			// a range end point -- remove the pending start point
			for (PointList::Iterator it = pendingPoints.GetIterator();
					memory_type_range_point* pendingPoint = it.Next();) {
				if (pendingPoint->range == point->range) {
					it.Remove();
					break;
				}
			}

			if (point->range == activeRange)
				terminateRange = true;
		}

		if (terminateRange) {
			ranges[rangeCount].size = point->address - ranges[rangeCount].base;
			rangeCount++;
			activeRange = NULL;
		}

		if (activeRange != NULL || pendingPoints.IsEmpty())
			continue;

		// we need to start a new range -- find the strictest pending range
		for (PointList::Iterator it = pendingPoints.GetIterator();
				memory_type_range_point* pendingPoint = it.Next();) {
			memory_type_range* pendingRange = pendingPoint->range;
			if (activeRange == NULL || activeRange->type > pendingRange->type)
				activeRange = pendingRange;
		}

		memory_type_range* previousRange = rangeCount > 0
			? &ranges[rangeCount - 1] : NULL;
		if (previousRange == NULL || previousRange->type != activeRange->type
				|| previousRange->base + previousRange->size
					< activeRange->base) {
			// we can't join with the previous range -- add a new one
			ranges[rangeCount].base = point->address;
			ranges[rangeCount].type = activeRange->type;
		} else
			rangeCount--;
	}

#if TRACE_MTRR_ARCH_VM >= 2
	TRACE_MTRR2("effective memory type ranges:\n");
	for (int32 i = 0; i < rangeCount; i++) {
		TRACE_MTRR2("%12" B_PRIx64 " - %12" B_PRIx64 ": %" B_PRIu32 "\n",
			ranges[i].base, ranges[i].base + ranges[i].size, ranges[i].type);
	}
#endif

	// Extend ranges to be more MTRR-friendly. A range is MTRR-friendly when it
	// has a power-of-two size and a base address aligned to the size. For
	// ranges without this property we need more than one MTRR. We improve
	// MTRR-friendliness by aligning a range's base and end address to the
	// greatest power of two (base rounded down, end up) such that the extended
	// range does not intersect with any other differently typed range. We join
	// equally typed ranges, if possible. There are two exceptions to the
	// intersection requirement: Uncached ranges may intersect with any other
	// range; the resulting type will still be uncached. Hence we can ignore
	// uncached ranges when extending the other ranges. Write-through ranges may
	// intersect with write-back ranges; the resulting type will be
	// write-through. Hence we can ignore write-through ranges when extending
	// write-back ranges.

	MemoryTypeRangeList rangeList;
	for (int32 i = 0; i < rangeCount; i++)
		rangeList.Add(&ranges[i]);

	static const uint32 kMemoryTypes[] = {
		IA32_MTR_UNCACHED,
		IA32_MTR_WRITE_COMBINING,
		IA32_MTR_WRITE_PROTECTED,
		IA32_MTR_WRITE_THROUGH,
		IA32_MTR_WRITE_BACK
	};
	static const int32 kMemoryTypeCount = sizeof(kMemoryTypes)
		/ sizeof(*kMemoryTypes);

	for (int32 i = 0; i < kMemoryTypeCount; i++) {
		uint32 type = kMemoryTypes[i];

		// Remove uncached and write-through ranges after processing them. This
		// lets us leverage their intersection property with any other range
		// and with write-back ranges, respectively.
		bool removeRanges = type == IA32_MTR_UNCACHED
			|| type == IA32_MTR_WRITE_THROUGH;

		optimize_memory_ranges(rangeList, type, removeRanges);
	}

#if TRACE_MTRR_ARCH_VM >= 2
	TRACE_MTRR2("optimized memory type ranges:\n");
	for (int32 i = 0; i < rangeCount; i++) {
		if (ranges[i].size > 0) {
			TRACE_MTRR2("%12" B_PRIx64 " - %12" B_PRIx64 ": %" B_PRIu32 "\n",
				ranges[i].base, ranges[i].base + ranges[i].size,
				ranges[i].type);
		}
	}
#endif

	// compute the mtrrs from the ranges
	sMemoryTypeRegistersUsed = 0;
	for (int32 i = 0; i < kMemoryTypeCount; i++) {
		uint32 type = kMemoryTypes[i];

		// skip write-back ranges -- that'll be the default type anyway
		if (type == IA32_MTR_WRITE_BACK)
			continue;

		for (int32 j = 0; j < rangeCount; j++) {
			if (ranges[j].size == 0 || ranges[j].type != type)
				continue;

			if (!add_mtrrs_for_range(ranges[j].base, ranges[j].size, type))
				return B_BUSY;
		}
	}

	set_mtrrs();

	return B_OK;
}


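/*!	Recomputes and programs the MTRRs from the registered ranges. If the
	available MTRRs do not suffice, the attempt is retried while ignoring
	uncacheable ranges of increasing size (which are covered by the PTE
	attributes anyway), until the remaining ranges fit.
*/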
status_t
update_mtrrs()
{
	// Until we know how many MTRRs we have, pretend everything is OK.
	if (sMemoryTypeRegisterCount == 0)
		return B_OK;

	update_mtrr_info updateInfo;
	updateInfo.ignoreUncacheableSize = 0;

	while (true) {
		TRACE_MTRR2("update_mtrrs(): Trying with ignoreUncacheableSize %#"
			B_PRIx64 ".\n", updateInfo.ignoreUncacheableSize);

		updateInfo.shortestUncacheableSize = ~(uint64)0;
		status_t error = update_mtrrs(updateInfo);
		if (error != B_BUSY) {
			if (error == B_OK && updateInfo.ignoreUncacheableSize > 0) {
				TRACE_MTRR("update_mtrrs(): Succeeded setting MTRRs after "
					"ignoring uncacheable ranges up to size %#" B_PRIx64 ".\n",
					updateInfo.ignoreUncacheableSize);
			}
			return error;
		}

		// Not enough MTRRs. Retry with fewer uncacheable ranges.
		if (updateInfo.shortestUncacheableSize == ~(uint64)0) {
			// Ugh, even without any uncacheable ranges the available MTRRs do
			// not suffice.
			panic("update_mtrrs(): Out of MTRRs!");
			return B_BUSY;
		}

		ASSERT(updateInfo.ignoreUncacheableSize
			< updateInfo.shortestUncacheableSize);

		updateInfo.ignoreUncacheableSize = updateInfo.shortestUncacheableSize;
	}
}


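/*!	Registers a memory type range for the given area (or updates the type of
	an already registered one) and recomputes the MTRRs. On failure the
	addition respectively the type change is reverted.
*/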
static status_t
add_memory_type_range(area_id areaID, uint64 base, uint64 size, uint32 type)
{
	// translate the type
	if (type == 0)
		return B_OK;

	switch (type) {
		case B_MTR_UC:
			type = IA32_MTR_UNCACHED;
			break;
		case B_MTR_WC:
			type = IA32_MTR_WRITE_COMBINING;
			break;
		case B_MTR_WT:
			type = IA32_MTR_WRITE_THROUGH;
			break;
		case B_MTR_WP:
			type = IA32_MTR_WRITE_PROTECTED;
			break;
		case B_MTR_WB:
			type = IA32_MTR_WRITE_BACK;
			break;
		default:
			return B_BAD_VALUE;
	}

	TRACE_MTRR("add_memory_type_range(%" B_PRId32 ", %#" B_PRIx64 ", %#"
		B_PRIx64 ", %" B_PRIu32 ")\n", areaID, base, size, type);

	MutexLocker locker(sMemoryTypeLock);

	memory_type_range* range = areaID >= 0 ? find_range(areaID) : NULL;
	int32 oldRangeType = -1;
	if (range != NULL) {
		if (range->base != base || range->size != size)
			return B_BAD_VALUE;
		if (range->type == type)
			return B_OK;

		oldRangeType = range->type;
		range->type = type;
	} else {
		range = new(std::nothrow) memory_type_range;
		if (range == NULL)
			return B_NO_MEMORY;

		range->area = areaID;
		range->base = base;
		range->size = size;
		range->type = type;
		sMemoryTypeRanges.Add(range);
		sMemoryTypeRangeCount++;
	}

	status_t error = update_mtrrs();
	if (error != B_OK) {
		// revert the addition of the range/change of its type
		if (oldRangeType < 0) {
			sMemoryTypeRanges.Remove(range);
			sMemoryTypeRangeCount--;
			delete range;
		} else
			range->type = oldRangeType;

		update_mtrrs();
		return error;
	}

	return B_OK;
}


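/*!	Removes the memory type range registered for the given area, if any, and
	recomputes the MTRRs.
*/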
static void
remove_memory_type_range(area_id areaID)
{
	MutexLocker locker(sMemoryTypeLock);

	memory_type_range* range = find_range(areaID);
	if (range != NULL) {
		TRACE_MTRR("remove_memory_type_range(%" B_PRId32 ", %#" B_PRIx64 ", %#"
			B_PRIx64 ", %" B_PRIu32 ")\n", range->area, range->base,
			range->size, range->type);

		sMemoryTypeRanges.Remove(range);
		sMemoryTypeRangeCount--;
		delete range;

		update_mtrrs();
	} else {
		dprintf("remove_memory_type_range(): no range known for area %" B_PRId32
			"\n", areaID);
	}
}


//	#pragma mark -


status_t
arch_vm_init(kernel_args *args)
{
	TRACE(("arch_vm_init: entry\n"));
	return 0;
}


/*!	Marks the DMA region as in-use and maps it into the kernel space. */
status_t
arch_vm_init_post_area(kernel_args *args)
{
	area_id id;

	TRACE(("arch_vm_init_post_area: entry\n"));

	// account for DMA area and mark the pages unusable
	vm_mark_page_range_inuse(0x0, 0xa0000 / B_PAGE_SIZE);

	// map 0 - 0xa0000 directly
	id = map_physical_memory("dma_region", 0x0, 0xa0000,
		B_ANY_KERNEL_ADDRESS | B_MTR_WB,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, &gDmaAddress);
	if (id < 0) {
		panic("arch_vm_init_post_area: unable to map dma region\n");
		return B_NO_MEMORY;
	}

	return bios_init();
}


/*!	Gets rid of all not yet mapped (and therefore now unused) page tables. */
status_t
arch_vm_init_end(kernel_args *args)
{
	TRACE(("arch_vm_init_end: entry\n"));

	// throw away anything in the kernel_args.pgtable[] that's not yet mapped
	vm_free_unused_boot_loader_range(KERNEL_BASE,
		args->arch_args.virtual_end - KERNEL_BASE);

	return B_OK;
}


status_t
arch_vm_init_post_modules(kernel_args *args)
{
	// the x86 CPU modules are now accessible

	sMemoryTypeRegisterCount = x86_count_mtrrs();
	if (sMemoryTypeRegisterCount == 0)
		return B_OK;

	// not very likely, but play it safe here
	if (sMemoryTypeRegisterCount > kMaxMemoryTypeRegisters)
		sMemoryTypeRegisterCount = kMaxMemoryTypeRegisters;

	// set the physical memory ranges to write-back mode
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		add_memory_type_range(-1, args->physical_memory_range[i].start,
			args->physical_memory_range[i].size, B_MTR_WB);
	}

	return B_OK;
}


void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
	// This function is only invoked when a userland thread is in the process
	// of dying. It switches to the kernel team and does whatever cleanup is
	// necessary (in case it is the team's main thread, it will delete the
	// team).
	// It is, however, not necessary to change the page directory. Userland
	// teams' page directories include all kernel mappings as well. Furthermore
	// our arch-specific translation map data objects are ref-counted, so they
	// won't go away as long as they are still used on any CPU.
}


bool
arch_vm_supports_protection(uint32 protection)
{
	// x86 always has the same read/write properties for userland and the
	// kernel.
	// That's why we do not support user-read/kernel-write access. While the
	// other way around is not supported either, we don't care in this case
	// and give the kernel full access.
	if ((protection & (B_READ_AREA | B_WRITE_AREA)) == B_READ_AREA
		&& (protection & B_KERNEL_WRITE_AREA) != 0) {
		return false;
	}

	return true;
}


void
arch_vm_unset_memory_type(struct VMArea *area)
{
	if (area->MemoryType() == 0)
		return;

	remove_memory_type_range(area->id);
}


status_t
arch_vm_set_memory_type(struct VMArea *area, phys_addr_t physicalBase,
	uint32 type)
{
	return add_memory_type_range(area->id, physicalBase, area->Size(), type);
}
746