xref: /haiku/src/system/kernel/arch/x86/arch_vm_translation_map.cpp (revision e5d65858f2361fe0552495b61620c84dcee6bc00)
/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <arch/vm_translation_map.h>

#include <boot/kernel_args.h>

#ifdef __x86_64__
#	include "paging/64bit/X86PagingMethod64Bit.h"
#else
#	include "paging/32bit/X86PagingMethod32Bit.h"
#	include "paging/pae/X86PagingMethodPAE.h"
#endif


//#define TRACE_VM_TMAP
#ifdef TRACE_VM_TMAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


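// Statically allocated storage for the paging method object. It is
// constructed via placement new in arch_vm_translation_map_init(), which runs
// before the kernel heap is available. The uint64 member merely forces 8-byte
// alignment of the buffer.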
static union {
	uint64	align;
#ifdef __x86_64__
	char	sixty_four[sizeof(X86PagingMethod64Bit)];
#else
	char	thirty_two[sizeof(X86PagingMethod32Bit)];
#if B_HAIKU_PHYSICAL_BITS == 64
	char	pae[sizeof(X86PagingMethodPAE)];
#endif
#endif
} sPagingMethodBuffer;


// #pragma mark - VM API


status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	return gX86PagingMethod->CreateTranslationMap(kernel, _map);
}


status_t
arch_vm_translation_map_init(kernel_args *args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("vm_translation_map_init: entry\n");

#ifdef TRACE_VM_TMAP
	TRACE("physical memory ranges:\n");
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		phys_addr_t start = args->physical_memory_range[i].start;
		phys_addr_t end = start + args->physical_memory_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated physical ranges:\n");
	for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
		phys_addr_t start = args->physical_allocated_range[i].start;
		phys_addr_t end = start + args->physical_allocated_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated virtual ranges:\n");
	for (uint32 i = 0; i < args->num_virtual_allocated_ranges; i++) {
		addr_t start = args->virtual_allocated_range[i].start;
		addr_t end = start + args->virtual_allocated_range[i].size;
		TRACE("  %#10" B_PRIxADDR " - %#10" B_PRIxADDR "\n", start, end);
	}
#endif

#ifdef __x86_64__
	gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod64Bit;
#elif B_HAIKU_PHYSICAL_BITS == 64
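	// Check whether PAE is supported and whether we actually need it: it is
	// required for the NX bit and for addressing physical memory beyond 4 GB.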
	bool paeAvailable = x86_check_feature(IA32_FEATURE_PAE, FEATURE_COMMON);
	bool paeNeeded = x86_check_feature(IA32_FEATURE_AMD_EXT_NX,
		FEATURE_EXT_AMD);
	if (!paeNeeded) {
		for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
			phys_addr_t end = args->physical_memory_range[i].start
				+ args->physical_memory_range[i].size;
			if (end > 0x100000000LL) {
				paeNeeded = true;
				break;
			}
		}
	}

	if (paeAvailable && paeNeeded) {
		dprintf("using PAE paging\n");
		gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethodPAE;
	} else {
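		// If PAE is needed, we can only have gotten here because it isn't
		// available.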
		dprintf("using 32 bit paging (PAE not %s)\n",
			paeNeeded ? "available" : "needed");
		gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod32Bit;
	}
#else
	gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod32Bit;
#endif

	return gX86PagingMethod->Init(args, _physicalPageMapper);
}


status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
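	// Nothing to do here; the paging method doesn't require any additional
	// initialization at this point.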
	return B_OK;
}


status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
	TRACE("vm_translation_map_init_post_area: entry\n");

	return gX86PagingMethod->InitPostArea(args);
}


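/*!	Maps a page during early boot, before the VM is fully initialized.
	Page tables that have to be created are allocated via the
	\a get_free_page callback.
*/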
status_t
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
	uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
{
	TRACE("early_tmap: entry pa %#" B_PRIxPHYSADDR " va %#" B_PRIxADDR "\n",
		pa, va);

	return gX86PagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
}


/*!	Verifies that the page at the given virtual address can be accessed in the
	current context.

	This function is invoked in the kernel debugger. Paranoid checking is in
	order.

	\param virtualAddress The virtual address to be checked.
	\param protection The area protection for which to check. Valid values are
		bitwise combinations of \c B_KERNEL_READ_AREA and
		\c B_KERNEL_WRITE_AREA.
	\return \c true if the address can be accessed in all ways specified by
		\a protection, \c false otherwise.
*/
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	uint32 protection)
{
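	// Without a paging method installed there's nothing we can check;
	// optimistically report the page as accessible.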
	if (!gX86PagingMethod)
		return true;

	return gX86PagingMethod->IsKernelPageAccessible(virtualAddress, protection);
}
165