xref: /haiku/src/system/kernel/arch/x86/arch_vm.cpp (revision 95d8739f18538fc82bf80d7308db7f6720cc36da)
1 /*
2  * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2008, Jérôme Duval.
4  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 #include <stdlib.h>
13 #include <string.h>
14 
15 #include <algorithm>
16 #include <new>
17 
18 #include <KernelExport.h>
19 
20 #include <boot/kernel_args.h>
21 #include <smp.h>
22 #include <util/AutoLock.h>
23 #include <vm/vm.h>
24 #include <vm/vm_page.h>
25 #include <vm/vm_priv.h>
26 #include <vm/VMAddressSpace.h>
27 #include <vm/VMArea.h>
28 
29 #include <arch/vm.h>
30 #include <arch/int.h>
31 #include <arch/cpu.h>
32 
33 #include <arch/x86/bios.h>
34 
35 
36 //#define TRACE_ARCH_VM
37 #ifdef TRACE_ARCH_VM
38 #	define TRACE(x) dprintf x
39 #else
40 #	define TRACE(x) ;
41 #endif
42 
43 // 0: disabled, 1: some, 2: more
44 #define TRACE_MTRR_ARCH_VM 1
45 
46 #if TRACE_MTRR_ARCH_VM >= 1
47 #	define TRACE_MTRR(x...) dprintf(x)
48 #else
49 #	define TRACE_MTRR(x...)
50 #endif
51 
52 #if TRACE_MTRR_ARCH_VM >= 2
53 #	define TRACE_MTRR2(x...) dprintf(x)
54 #else
55 #	define TRACE_MTRR2(x...)
56 #endif
57 
58 
59 void *gDmaAddress;
60 
61 
62 namespace {
63 
64 struct memory_type_range : DoublyLinkedListLinkImpl<memory_type_range> {
65 	uint64						base;
66 	uint64						size;
67 	uint32						type;
68 	area_id						area;
69 };
70 
71 
72 struct memory_type_range_point
73 		: DoublyLinkedListLinkImpl<memory_type_range_point> {
74 	uint64				address;
75 	memory_type_range*	range;
76 
77 	bool IsStart() const	{ return range->base == address; }
78 
79 	bool operator<(const memory_type_range_point& other) const
80 	{
81 		return address < other.address;
82 	}
83 };
84 
85 
86 struct update_mtrr_info {
87 	uint64	ignoreUncacheableSize;
88 	uint64	shortestUncacheableSize;
89 };
90 
91 
92 typedef DoublyLinkedList<memory_type_range> MemoryTypeRangeList;
93 
94 } // namespace
95 
96 
97 static mutex sMemoryTypeLock = MUTEX_INITIALIZER("memory type ranges");
98 static MemoryTypeRangeList sMemoryTypeRanges;
99 static int32 sMemoryTypeRangeCount = 0;
100 
101 static const uint32 kMaxMemoryTypeRegisters	= 32;
102 static x86_mtrr_info sMemoryTypeRegisters[kMaxMemoryTypeRegisters];
103 static uint32 sMemoryTypeRegisterCount;
104 static uint32 sMemoryTypeRegistersUsed;
105 
106 static memory_type_range* sTemporaryRanges = NULL;
107 static memory_type_range_point* sTemporaryRangePoints = NULL;
108 static int32 sTemporaryRangeCount = 0;
109 static int32 sTemporaryRangePointCount = 0;
110 
111 
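/*!	Programs the hardware MTRRs from the previously computed
	sMemoryTypeRegisters array, using write-back as the default memory type.
*/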
112 static void
113 set_mtrrs()
114 {
115 	x86_set_mtrrs(IA32_MTR_WRITE_BACK, sMemoryTypeRegisters,
116 		sMemoryTypeRegistersUsed);
117 
118 #if TRACE_MTRR_ARCH_VM
119 	TRACE_MTRR("set MTRRs to:\n");
120 	for (uint32 i = 0; i < sMemoryTypeRegistersUsed; i++) {
121 		const x86_mtrr_info& info = sMemoryTypeRegisters[i];
122 		TRACE_MTRR("  mtrr: %2" B_PRIu32 ": base: %#10" B_PRIx64  ", size: %#10"
123 			B_PRIx64 ", type: %u\n", i, info.base, info.size,
124 			info.type);
125 	}
126 #endif
127 }
128 
129 
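/*!	Translates one of the B_*_MEMORY constants into the corresponding IA32 MTRR
	type and appends an entry to sMemoryTypeRegisters. Returns false if the
	type is unknown or all available MTRRs are already in use.
*/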
130 static bool
131 add_used_mtrr(uint64 base, uint64 size, uint32 type)
132 {
133 	switch (type) {
134 		case B_UNCACHED_MEMORY:
135 			type = IA32_MTR_UNCACHED;
136 			break;
137 		case B_WRITE_COMBINING_MEMORY:
138 			type = IA32_MTR_WRITE_COMBINING;
139 			break;
140 		case B_WRITE_THROUGH_MEMORY:
141 			type = IA32_MTR_WRITE_THROUGH;
142 			break;
143 		case B_WRITE_PROTECTED_MEMORY:
144 			type = IA32_MTR_WRITE_PROTECTED;
145 			break;
146 		case B_WRITE_BACK_MEMORY:
147 			type = IA32_MTR_WRITE_BACK;
148 			break;
149 		default:
150 			return false;
151 	}
152 
153 	if (sMemoryTypeRegistersUsed == sMemoryTypeRegisterCount)
154 		return false;
155 
156 	x86_mtrr_info& mtrr = sMemoryTypeRegisters[sMemoryTypeRegistersUsed++];
157 	mtrr.base = base;
158 	mtrr.size = size;
159 	mtrr.type = type;
160 
161 	return true;
162 }
163 
164 
165 static bool
166 add_mtrrs_for_range(uint64 base, uint64 size, uint32 type)
167 {
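	// Greedily peel power-of-two sized, size-aligned chunks off both ends of
	// the range until nothing is left; each chunk becomes one MTRR. For
	// example, base 0x3000 / size 0x6000 is covered by the three MTRRs
	// (0x3000, 0x1000), (0x8000, 0x1000), and (0x4000, 0x4000).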
168 	for (uint64 interval = B_PAGE_SIZE; size > 0; interval <<= 1) {
169 		if ((base & interval) != 0) {
170 			if (!add_used_mtrr(base, interval, type))
171 				return false;
172 			base += interval;
173 			size -= interval;
174 		}
175 
176 		if ((size & interval) != 0) {
177 			if (!add_used_mtrr(base + size - interval, interval, type))
178 				return false;
179 			size -= interval;
180 		}
181 	}
182 
183 	return true;
184 }
185 
186 
187 static memory_type_range*
188 find_range(area_id areaID)
189 {
190 	for (MemoryTypeRangeList::Iterator it = sMemoryTypeRanges.GetIterator();
191 			memory_type_range* range = it.Next();) {
192 		if (range->area == areaID)
193 			return range;
194 	}
195 
196 	return NULL;
197 }
198 
199 
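/*!	Extends and joins all ranges of the given \a type in \a ranges, aligning
	their bases and ends to the largest power of two possible without growing
	into a differently typed neighbor. Ranges that are swallowed completely by
	an extended range are removed; if \a removeRanges is true, the processed
	ranges themselves are removed from the list as well.
*/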
200 static void
201 optimize_memory_ranges(MemoryTypeRangeList& ranges, uint32 type,
202 	bool removeRanges)
203 {
204 	uint64 previousEnd = 0;
205 	uint64 nextStart = 0;
206 	MemoryTypeRangeList::Iterator it = ranges.GetIterator();
207 	memory_type_range* range = it.Next();
208 	while (range != NULL) {
209 		if (range->type != type) {
210 			previousEnd = range->base + range->size;
211 			nextStart = 0;
212 			range = it.Next();
213 			continue;
214 		}
215 
216 		// find the start of the next range we cannot join this one with
217 		if (nextStart == 0) {
218 			MemoryTypeRangeList::Iterator nextIt = it;
219 			while (memory_type_range* nextRange = nextIt.Next()) {
220 				if (nextRange->type != range->type) {
221 					nextStart = nextRange->base;
222 					break;
223 				}
224 			}
225 
226 			if (nextStart == 0) {
227 				// no upper limit -- set an artificial one, so we don't need to
228 				// special case below
229 				nextStart = (uint64)1 << 32;
230 			}
231 		}
232 
233 		// Align the range's base and end to the greatest power of two possible.
234 		// As long as we can align both without intersecting any differently
235 		// typed range, we can extend the range without making it more
236 		// complicated. Once one side hits a limit we need to be careful. We
237 		// can still continue aligning the other side if the range crosses the
238 		// power-of-two boundary.
239 		uint64 rangeBase = range->base;
240 		uint64 rangeEnd = rangeBase + range->size;
241 		uint64 interval = B_PAGE_SIZE * 2;
242 		while (true) {
243 			uint64 alignedBase = rangeBase & ~(interval - 1);
244 			uint64 alignedEnd = (rangeEnd + interval - 1) & ~(interval - 1);
245 
246 			if (alignedBase < previousEnd)
247 				alignedBase += interval;
248 
249 			if (alignedEnd > nextStart)
250 				alignedEnd -= interval;
251 
252 			if (alignedBase >= alignedEnd)
253 				break;
254 
255 			rangeBase = std::min(rangeBase, alignedBase);
256 			rangeEnd = std::max(rangeEnd, alignedEnd);
257 
258 			interval <<= 1;
259 		}
260 
261 		range->base = rangeBase;
262 		range->size = rangeEnd - rangeBase;
263 
264 		if (removeRanges)
265 			it.Remove();
266 
267 		previousEnd = rangeEnd;
268 
269 		// Skip the subsequent ranges we have swallowed and possibly cut one
270 		// we now partially intersect with.
271 		while ((range = it.Next()) != NULL) {
272 			if (range->base >= rangeEnd)
273 				break;
274 
275 			if (range->base + range->size > rangeEnd) {
276 				// we partially intersect -- cut the range
277 				range->size = range->base + range->size - rangeEnd;
278 				range->base = rangeEnd;
279 				break;
280 			}
281 
282 			// we have swallowed this range completely
283 			range->size = 0;
284 			it.Remove();
285 		}
286 	}
287 }
288 
289 
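/*!	Makes sure the sTemporaryRanges and sTemporaryRangePoints arrays can hold
	at least \a count elements each, growing them to the next power of two if
	necessary. Returns false when out of memory.
*/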
290 static bool
291 ensure_temporary_ranges_space(int32 count)
292 {
293 	if (sTemporaryRangeCount >= count && sTemporaryRangePointCount >= count)
294 		return true;
295 
296 	// round count to power of 2
297 	int32 unalignedCount = count;
298 	count = 8;
299 	while (count < unalignedCount)
300 		count <<= 1;
301 
302 	// resize ranges array
303 	if (sTemporaryRangeCount < count) {
304 		memory_type_range* ranges = new(std::nothrow) memory_type_range[count];
305 		if (ranges == NULL)
306 			return false;
307 
308 		delete[] sTemporaryRanges;
309 
310 		sTemporaryRanges = ranges;
311 		sTemporaryRangeCount = count;
312 	}
313 
314 	// resize points array
315 	if (sTemporaryRangePointCount < count) {
316 		memory_type_range_point* points
317 			= new(std::nothrow) memory_type_range_point[count];
318 		if (points == NULL)
319 			return false;
320 
321 		delete[] sTemporaryRangePoints;
322 
323 		sTemporaryRangePoints = points;
324 		sTemporaryRangePointCount = count;
325 	}
326 
327 	return true;
328 }
329 
330 
331 static status_t
332 update_mtrrs(update_mtrr_info& updateInfo)
333 {
334 	// resize the temporary points/ranges arrays, if necessary
335 	if (!ensure_temporary_ranges_space(sMemoryTypeRangeCount * 2))
336 		return B_NO_MEMORY;
337 
338 	// get the range points and sort them
339 	memory_type_range_point* rangePoints = sTemporaryRangePoints;
340 	int32 pointCount = 0;
341 	for (MemoryTypeRangeList::Iterator it = sMemoryTypeRanges.GetIterator();
342 			memory_type_range* range = it.Next();) {
343 		if (range->type == B_UNCACHED_MEMORY) {
344 			// Ignore uncacheable ranges below a certain size, if requested.
345 			// Since we always enforce uncacheability via the PTE attributes,
346 			// this is no problem (though not recommended for performance
347 			// reasons).
348 			if (range->size <= updateInfo.ignoreUncacheableSize)
349 				continue;
350 			if (range->size < updateInfo.shortestUncacheableSize)
351 				updateInfo.shortestUncacheableSize = range->size;
352 		}
353 
354 		rangePoints[pointCount].address = range->base;
355 		rangePoints[pointCount++].range = range;
356 		rangePoints[pointCount].address = range->base + range->size;
357 		rangePoints[pointCount++].range = range;
358 	}
359 
360 	std::sort(rangePoints, rangePoints + pointCount);
361 
362 #if TRACE_MTRR_ARCH_VM >= 2
363 	TRACE_MTRR2("memory type range points:\n");
364 	for (int32 i = 0; i < pointCount; i++) {
365 		TRACE_MTRR2("%12" B_PRIx64 " (%p)\n", rangePoints[i].address,
366 			rangePoints[i].range);
367 	}
368 #endif
369 
370 	// Compute the effective ranges. When ranges overlap, we go with the
371 	// stricter requirement. The types are not necessarily totally ordered, so
372 	// the order we use below is not always correct. To keep it simple we
373 	// consider it the responsibility of the callers not to define overlapping
374 	// memory ranges with incomparable types.
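	// The loop below sweeps over the sorted start/end points while tracking
	// the ranges that cover the current address (pendingPoints) and the
	// strictest one among them (activeRange). Whenever the strictest type
	// changes, the current effective range is terminated and a new one is
	// started (or joined with the previous one, if type and bounds allow).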
375 
376 	memory_type_range* ranges = sTemporaryRanges;
377 	typedef DoublyLinkedList<memory_type_range_point> PointList;
378 	PointList pendingPoints;
379 	memory_type_range* activeRange = NULL;
380 	int32 rangeCount = 0;
381 
382 	for (int32 i = 0; i < pointCount; i++) {
383 		memory_type_range_point* point = &rangePoints[i];
384 		bool terminateRange = false;
385 		if (point->IsStart()) {
386 			// a range start point
387 			pendingPoints.Add(point);
388 			if (activeRange != NULL && activeRange->type > point->range->type)
389 				terminateRange = true;
390 		} else {
391 			// a range end point -- remove the pending start point
392 			for (PointList::Iterator it = pendingPoints.GetIterator();
393 					memory_type_range_point* pendingPoint = it.Next();) {
394 				if (pendingPoint->range == point->range) {
395 					it.Remove();
396 					break;
397 				}
398 			}
399 
400 			if (point->range == activeRange)
401 				terminateRange = true;
402 		}
403 
404 		if (terminateRange) {
405 			ranges[rangeCount].size = point->address - ranges[rangeCount].base;
406 			rangeCount++;
407 			activeRange = NULL;
408 		}
409 
410 		if (activeRange != NULL || pendingPoints.IsEmpty())
411 			continue;
412 
413 		// we need to start a new range -- find the strictest pending range
414 		for (PointList::Iterator it = pendingPoints.GetIterator();
415 				memory_type_range_point* pendingPoint = it.Next();) {
416 			memory_type_range* pendingRange = pendingPoint->range;
417 			if (activeRange == NULL || activeRange->type > pendingRange->type)
418 				activeRange = pendingRange;
419 		}
420 
421 		memory_type_range* previousRange = rangeCount > 0
422 			? &ranges[rangeCount - 1] : NULL;
423 		if (previousRange == NULL || previousRange->type != activeRange->type
424 				|| previousRange->base + previousRange->size
425 					< activeRange->base) {
426 			// we can't join with the previous range -- add a new one
427 			ranges[rangeCount].base = point->address;
428 			ranges[rangeCount].type = activeRange->type;
429 		} else
430 			rangeCount--;
431 	}
432 
433 #if TRACE_MTRR_ARCH_VM >= 2
434 	TRACE_MTRR2("effective memory type ranges:\n");
435 	for (int32 i = 0; i < rangeCount; i++) {
436 		TRACE_MTRR2("%12" B_PRIx64 " - %12" B_PRIx64 ": %" B_PRIu32 "\n",
437 			ranges[i].base, ranges[i].base + ranges[i].size, ranges[i].type);
438 	}
439 #endif
440 
441 	// Extend ranges to be more MTRR-friendly. A range is MTRR-friendly when it
442 	// has a power-of-two size and a base address aligned to the size. For
443 	// ranges without this property we need more than one MTRR. We improve
444 	// MTRR-friendliness by aligning a range's base and end address to the
445 	// greatest power of two (base rounded down, end up) such that the extended
446 	// range does not intersect with any other differently typed range. We join
447 	// equally typed ranges, if possible. There are two exceptions to the
448 	// intersection requirement: Uncached ranges may intersect with any other
449 	// range; the resulting type will still be uncached. Hence we can ignore
450 	// uncached ranges when extending the other ranges. Write-through ranges may
451 	// intersect with write-back ranges; the resulting type will be
452 	// write-through. Hence we can ignore write-through ranges when extending
453 	// write-back ranges.
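	// For example, when a write-back RAM range is rounded up so that it now
	// overlaps a small uncacheable range, the effective type in the overlap
	// stays uncacheable, since the uncacheable MTRR takes precedence over the
	// write-back one.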
454 
455 	MemoryTypeRangeList rangeList;
456 	for (int32 i = 0; i < rangeCount; i++)
457 		rangeList.Add(&ranges[i]);
458 
459 	static const uint32 kMemoryTypes[] = {
460 		B_UNCACHED_MEMORY,
461 		B_WRITE_COMBINING_MEMORY,
462 		B_WRITE_PROTECTED_MEMORY,
463 		B_WRITE_THROUGH_MEMORY,
464 		B_WRITE_BACK_MEMORY
465 	};
466 	static const int32 kMemoryTypeCount = B_COUNT_OF(kMemoryTypes);
467 
468 	for (int32 i = 0; i < kMemoryTypeCount; i++) {
469 		uint32 type = kMemoryTypes[i];
470 
471 	// Remove uncached and write-through ranges after processing them. This
472 	// lets us leverage their property of being allowed to intersect with any
473 	// other range (respectively with write-back ranges).
474 		bool removeRanges = type == B_UNCACHED_MEMORY || type == B_WRITE_THROUGH_MEMORY;
475 
476 		optimize_memory_ranges(rangeList, type, removeRanges);
477 	}
478 
479 #if TRACE_MTRR_ARCH_VM >= 2
480 	TRACE_MTRR2("optimized memory type ranges:\n");
481 	for (int32 i = 0; i < rangeCount; i++) {
482 		if (ranges[i].size > 0) {
483 			TRACE_MTRR2("%12" B_PRIx64 " - %12" B_PRIx64 ": %" B_PRIu32 "\n",
484 				ranges[i].base, ranges[i].base + ranges[i].size,
485 				ranges[i].type);
486 		}
487 	}
488 #endif
489 
490 	// compute the mtrrs from the ranges
491 	sMemoryTypeRegistersUsed = 0;
492 	for (int32 i = 0; i < kMemoryTypeCount; i++) {
493 		uint32 type = kMemoryTypes[i];
494 
495 		// skip write-back ranges -- that'll be the default type anyway
496 		if (type == B_WRITE_BACK_MEMORY)
497 			continue;
498 
499 		for (int32 i = 0; i < rangeCount; i++) {
500 			if (ranges[i].size == 0 || ranges[i].type != type)
501 				continue;
502 
503 			if (!add_mtrrs_for_range(ranges[i].base, ranges[i].size, type))
504 				return B_BUSY;
505 		}
506 	}
507 
508 	set_mtrrs();
509 
510 	return B_OK;
511 }
512 
513 
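/*!	Recomputes and programs the MTRRs from sMemoryTypeRanges. If the available
	MTRRs do not suffice, the update is retried while ignoring ever larger
	uncacheable ranges (uncacheability is still enforced via the page
	attributes), and panics only if even that does not help.
*/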
514 static status_t
515 update_mtrrs()
516 {
517 	// Until we know how many MTRRs we have, pretend everything is OK.
518 	if (sMemoryTypeRegisterCount == 0)
519 		return B_OK;
520 
521 	update_mtrr_info updateInfo;
522 	updateInfo.ignoreUncacheableSize = 0;
523 
524 	while (true) {
525 		TRACE_MTRR2("update_mtrrs(): Trying with ignoreUncacheableSize %#"
526 			B_PRIx64 ".\n", updateInfo.ignoreUncacheableSize);
527 
528 		updateInfo.shortestUncacheableSize = ~(uint64)0;
529 		status_t error = update_mtrrs(updateInfo);
530 		if (error != B_BUSY) {
531 			if (error == B_OK && updateInfo.ignoreUncacheableSize > 0) {
532 				TRACE_MTRR("update_mtrrs(): Succeeded setting MTRRs after "
533 					"ignoring uncacheable ranges up to size %#" B_PRIx64 ".\n",
534 					updateInfo.ignoreUncacheableSize);
535 			}
536 			return error;
537 		}
538 
539 		// Not enough MTRRs. Retry with fewer uncacheable ranges.
540 		if (updateInfo.shortestUncacheableSize == ~(uint64)0) {
541 			// Ugh, even without any uncacheable ranges the available MTRRs do
542 			// not suffice.
543 			panic("update_mtrrs(): Out of MTRRs!");
544 			return B_BUSY;
545 		}
546 
547 		ASSERT(updateInfo.ignoreUncacheableSize
548 			< updateInfo.shortestUncacheableSize);
549 
550 		updateInfo.ignoreUncacheableSize = updateInfo.shortestUncacheableSize;
551 	}
552 }
553 
554 
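/*!	Registers memory type \a type for the physical range [\a base, \a base +
	\a size) used by area \a areaID and reprograms the MTRRs. Overlaps with
	existing ranges of a different type fail with B_BUSY, unless the caller
	passes \a effectiveType, in which case the existing type is adopted and
	returned there. Typically reached through arch_vm_set_memory_type(), e.g.
	when a driver maps a frame buffer with map_physical_memory() and a
	write-combining memory type.
*/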
555 static status_t
556 add_memory_type_range(area_id areaID, uint64 base, uint64 size, uint32 type,
557 	uint32 *effectiveType)
558 {
559 	if (type == 0)
560 		return B_OK;
561 
562 	TRACE_MTRR2("add_memory_type_range(%" B_PRId32 ", %#" B_PRIx64 ", %#"
563 		B_PRIx64 ", %" B_PRIu32 ")\n", areaID, base, size, type);
564 
565 	MutexLocker locker(sMemoryTypeLock);
566 
567 	for (MemoryTypeRangeList::Iterator it = sMemoryTypeRanges.GetIterator();
568 			memory_type_range* range = it.Next(); ) {
569 
570 		if (range->area == areaID || range->type == type
571 				|| base + size <= range->base
572 				|| base >= range->base + range->size) {
573 			continue;
574 		}
575 
576 		if (range->area == -1 && !x86_use_pat()) {
577 			// Physical memory range in MTRRs; permit overlapping.
578 			continue;
579 		}
580 
581 		if (effectiveType != NULL) {
582 			type = *effectiveType = range->type;
583 			effectiveType = NULL;
584 
585 			dprintf("assuming memory type %" B_PRIx32 " for overlapping %#"
586 				B_PRIx64 ", %#" B_PRIx64 " area %" B_PRId32 " from existing %#"
587 				B_PRIx64 ", %#" B_PRIx64 " area %" B_PRId32 "\n", type,
588 				base, size, areaID, range->base, range->size, range->area);
589 			continue;
590 		}
591 
592 		(KDEBUG ? panic : dprintf)("incompatible overlapping memory %#" B_PRIx64
593 			", %#" B_PRIx64 " type %" B_PRIx32 " area %" B_PRId32
594 			" with existing %#" B_PRIx64 ", %#" B_PRIx64 " type %" B_PRIx32
595 			" area %" B_PRId32 "\n", base, size, type, areaID, range->base,
596 			range->size, range->type, range->area);
597 		return B_BUSY;
598 	}
599 
600 	memory_type_range* range = areaID >= 0 ? find_range(areaID) : NULL;
601 	int32 oldRangeType = -1;
602 	if (range != NULL) {
603 		if (range->base != base || range->size != size)
604 			return B_BAD_VALUE;
605 		if (range->type == type)
606 			return B_OK;
607 
608 		oldRangeType = range->type;
609 		range->type = type;
610 	} else {
611 		range = new(std::nothrow) memory_type_range;
612 		if (range == NULL)
613 			return B_NO_MEMORY;
614 
615 		range->area = areaID;
616 		range->base = base;
617 		range->size = size;
618 		range->type = type;
619 		sMemoryTypeRanges.Add(range);
620 		sMemoryTypeRangeCount++;
621 	}
622 
623 	status_t error = update_mtrrs();
624 	if (error != B_OK) {
625 		// revert the addition of the range/change of its type
626 		if (oldRangeType < 0) {
627 			sMemoryTypeRanges.Remove(range);
628 			sMemoryTypeRangeCount--;
629 			delete range;
630 		} else
631 			range->type = oldRangeType;
632 
633 		update_mtrrs();
634 		return error;
635 	}
636 
637 	return B_OK;
638 }
639 
640 
641 static void
642 remove_memory_type_range(area_id areaID)
643 {
644 	MutexLocker locker(sMemoryTypeLock);
645 
646 	memory_type_range* range = find_range(areaID);
647 	if (range != NULL) {
648 		TRACE_MTRR2("remove_memory_type_range(%" B_PRId32 ", %#" B_PRIx64 ", %#"
649 			B_PRIx64 ", %" B_PRIu32 ")\n", range->area, range->base,
650 			range->size, range->type);
651 
652 		sMemoryTypeRanges.Remove(range);
653 		sMemoryTypeRangeCount--;
654 		delete range;
655 
656 		update_mtrrs();
657 	} else {
658 		dprintf("remove_memory_type_range(): no range known for area %" B_PRId32
659 			"\n", areaID);
660 	}
661 }
662 
663 
664 static const char *
665 memory_type_to_string(uint32 type)
666 {
667 	switch (type) {
668 		case B_UNCACHED_MEMORY:
669 			return "uncacheable";
670 		case B_WRITE_COMBINING_MEMORY:
671 			return "write combining";
672 		case B_WRITE_THROUGH_MEMORY:
673 			return "write-through";
674 		case B_WRITE_PROTECTED_MEMORY:
675 			return "write-protected";
676 		case B_WRITE_BACK_MEMORY:
677 			return "write-back";
678 		default:
679 			return "unknown";
680 	}
681 }
682 
683 
684 static int
685 dump_memory_type_ranges(int argc, char **argv)
686 {
687 	kprintf(
688 		"start            end              size             area     type\n");
689 
690 	for (MemoryTypeRangeList::Iterator it = sMemoryTypeRanges.GetIterator();
691 			memory_type_range* range = it.Next();) {
692 
693 		kprintf("%#16" B_PRIx64 " %#16" B_PRIx64 " %#16" B_PRIx64 " % 8"
694 			B_PRId32 " %#" B_PRIx32 " %s\n", range->base,
695 			range->base + range->size, range->size, range->area, range->type,
696 			memory_type_to_string(range->type));
697 	}
698 
699 	return 0;
700 }
701 
702 
703 //	#pragma mark -
704 
705 
706 status_t
707 arch_vm_init(kernel_args *args)
708 {
709 	TRACE(("arch_vm_init: entry\n"));
710 	return 0;
711 }
712 
713 
714 /*!	Marks the DMA region as in-use and maps it into the kernel address space. */
715 status_t
716 arch_vm_init_post_area(kernel_args *args)
717 {
718 	area_id id;
719 
720 	TRACE(("arch_vm_init_post_area: entry\n"));
721 
722 	// account for DMA area and mark the pages unusable
723 	vm_mark_page_range_inuse(0x0, 0xa0000 / B_PAGE_SIZE);
724 
725 	// map 0 - 0xa0000 directly
726 	id = map_physical_memory("dma_region", 0x0, 0xa0000,
727 		B_ANY_KERNEL_ADDRESS | B_WRITE_BACK_MEMORY,
728 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, &gDmaAddress);
729 	if (id < 0) {
730 		panic("arch_vm_init_post_area: unable to map dma region\n");
731 		return B_NO_MEMORY;
732 	}
733 
734 	add_debugger_command_etc("memory_type_ranges", &dump_memory_type_ranges,
735 		"List all configured memory type ranges",
736 		"\n"
737 		"Lists all memory type ranges with their types and areas.\n", 0);
738 
739 #ifndef __x86_64__
740 	return bios_init();
741 #else
742 	return B_OK;
743 #endif
744 }
745 
746 
747 /*!	Gets rid of all not yet mapped (and therefore now unused) page tables. */
748 status_t
749 arch_vm_init_end(kernel_args *args)
750 {
751 	TRACE(("arch_vm_init_end: entry\n"));
752 
753 	// throw away anything in the kernel_args.pgtable[] that's not yet mapped
754 	vm_free_unused_boot_loader_range(KERNEL_LOAD_BASE,
755 		args->arch_args.virtual_end - KERNEL_LOAD_BASE);
756 
757 	return B_OK;
758 }
759 
760 
761 status_t
762 arch_vm_init_post_modules(kernel_args *args)
763 {
764 	// the x86 CPU modules are now accessible
765 
766 	sMemoryTypeRegisterCount = x86_count_mtrrs();
767 	if (sMemoryTypeRegisterCount == 0)
768 		return B_OK;
769 
770 	// not very likely, but play safe here
771 	if (sMemoryTypeRegisterCount > kMaxMemoryTypeRegisters)
772 		sMemoryTypeRegisterCount = kMaxMemoryTypeRegisters;
773 
774 	// set the physical memory ranges to write-back mode
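	// (An area ID of -1 marks these as the raw physical RAM ranges; without
	// PAT, add_memory_type_range() permits other ranges to overlap them.)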
775 	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
776 		add_memory_type_range(-1, args->physical_memory_range[i].start,
777 			args->physical_memory_range[i].size, B_WRITE_BACK_MEMORY, NULL);
778 	}
779 
780 	return B_OK;
781 }
782 
783 
784 void
785 arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
786 {
787 	// This function is only invoked when a userland thread is in the process
788 	// of dying. It switches to the kernel team and does whatever cleanup is
789 	// necessary (in case it is the team's main thread, it will delete the
790 	// team).
791 	// It is, however, not necessary to change the page directory: userland
792 	// teams' page directories include all kernel mappings as well. Furthermore,
793 	// our arch-specific translation map data objects are ref-counted, so they
794 	// won't go away as long as they are still used on any CPU.
795 }
796 
797 
798 bool
799 arch_vm_supports_protection(uint32 protection)
800 {
801 	// x86 always has the same read/write properties for userland and the
802 	// kernel.
803 	// That's why we do not support user-read/kernel-write access. While the
804 	// other way around is not supported either, we don't care in this case
805 	// and give the kernel full access.
806 	if ((protection & (B_READ_AREA | B_WRITE_AREA)) == B_READ_AREA
807 		&& (protection & B_KERNEL_WRITE_AREA) != 0) {
808 		return false;
809 	}
810 
811 	// Userland and the kernel share the same NX-bit setting.
812 	// That's why we do not allow any area that userland can access but not
813 	// execute, while the kernel can execute it.
814 	if ((protection & (B_READ_AREA | B_WRITE_AREA)) != 0
815 		&& (protection & B_EXECUTE_AREA) == 0
816 		&& (protection & B_KERNEL_EXECUTE_AREA) != 0) {
817 		return false;
818 	}
819 
820 	return true;
821 }
822 
823 
824 void
825 arch_vm_unset_memory_type(struct VMArea *area)
826 {
827 	if (area->MemoryType() == 0)
828 		return;
829 
830 	remove_memory_type_range(area->id);
831 }
832 
833 
834 status_t
835 arch_vm_set_memory_type(struct VMArea *area, phys_addr_t physicalBase,
836 	uint32 type, uint32 *effectiveType)
837 {
838 	return add_memory_type_range(area->id, physicalBase, area->Size(), type,
839 		effectiveType);
840 }
841