/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <arch/vm_translation_map.h>

#include <boot/kernel_args.h>
#include <safemode.h>

#ifdef __x86_64__
#	include "paging/64bit/X86PagingMethod64Bit.h"
#else
#	include "paging/32bit/X86PagingMethod32Bit.h"
#	include "paging/pae/X86PagingMethodPAE.h"
#endif

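// Uncomment the define below to get verbose tracing from this file.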
//#define TRACE_VM_TMAP
#ifdef TRACE_VM_TMAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


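// Statically allocated storage for the paging method object, constructed via
// placement new in arch_vm_translation_map_init(), so no heap allocation is
// needed at that early point of the boot process. The union reserves enough
// space for whichever implementation is chosen and aligns it suitably.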
static union {
	uint64 align;
#ifdef __x86_64__
	char sixty_four[sizeof(X86PagingMethod64Bit)];
#else
	char thirty_two[sizeof(X86PagingMethod32Bit)];
#if B_HAIKU_PHYSICAL_BITS == 64
	char pae[sizeof(X86PagingMethodPAE)];
#endif
#endif
} sPagingMethodBuffer;


// #pragma mark - VM API

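// The functions below are thin wrappers: all real work is delegated to the
// paging method object selected in arch_vm_translation_map_init().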

status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	return gX86PagingMethod->CreateTranslationMap(kernel, _map);
}


status_t
arch_vm_translation_map_init(kernel_args *args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("vm_translation_map_init: entry\n");

#ifdef TRACE_VM_TMAP
	TRACE("physical memory ranges:\n");
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		phys_addr_t start = args->physical_memory_range[i].start;
		phys_addr_t end = start + args->physical_memory_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated physical ranges:\n");
	for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
		phys_addr_t start = args->physical_allocated_range[i].start;
		phys_addr_t end = start + args->physical_allocated_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated virtual ranges:\n");
	for (uint32 i = 0; i < args->num_virtual_allocated_ranges; i++) {
		addr_t start = args->virtual_allocated_range[i].start;
		addr_t end = start + args->virtual_allocated_range[i].size;
		TRACE("  %#10" B_PRIxADDR " - %#10" B_PRIxADDR "\n", start, end);
	}
#endif

#ifdef __x86_64__
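	// Use LA57 (5-level) paging if the CPU supports the feature and it has
	// already been enabled in CR4 (presumably by the boot loader).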
	bool la57Available = x86_check_feature(IA32_FEATURE_LA57, FEATURE_7_ECX);
	bool enabled = la57Available && (x86_read_cr4() & IA32_CR4_LA57) != 0;
	if (enabled)
		dprintf("using LA57 paging\n");
	gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod64Bit(enabled);
#elif B_HAIKU_PHYSICAL_BITS == 64
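	// Decide between PAE and plain 32 bit paging: PAE is needed for the NX
	// bit and whenever physical memory extends beyond the 4 GB boundary.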
	bool paeAvailable = x86_check_feature(IA32_FEATURE_PAE, FEATURE_COMMON);
	bool paeNeeded = x86_check_feature(IA32_FEATURE_AMD_EXT_NX,
		FEATURE_EXT_AMD);
	if (!paeNeeded) {
		for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
			phys_addr_t end = args->physical_memory_range[i].start
				+ args->physical_memory_range[i].size;
			if (end > 0x100000000LL) {
				paeNeeded = true;
				break;
			}
		}
	}

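	// The user can veto PAE via the safemode 4 GB memory limit option.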
	bool paeDisabled = get_safemode_boolean_early(args,
		B_SAFEMODE_4_GB_MEMORY_LIMIT, false);

	if (paeAvailable && paeNeeded && !paeDisabled) {
		dprintf("using PAE paging\n");
		gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethodPAE;
	} else {
		dprintf("using 32 bit paging (PAE %s)\n",
			!paeAvailable
				? "not available"
				: (paeDisabled ? "disabled" : "not needed"));
		gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod32Bit;
	}
#else
	gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod32Bit;
#endif

	return gX86PagingMethod->Init(args, _physicalPageMapper);
}


status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
	return B_OK;
}


status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
	TRACE("vm_translation_map_init_post_area: entry\n");

	return gX86PagingMethod->InitPostArea(args);
}


status_t
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
	uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
{
	TRACE("early_tmap: entry pa %#" B_PRIxPHYSADDR " va %#" B_PRIxADDR "\n", pa,
		va);

	return gX86PagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
}

/*!	Verifies that the page at the given virtual address can be accessed in the
	current context.

	This function is invoked in the kernel debugger. Paranoid checking is in
	order.

	\param virtualAddress The virtual address to be checked.
	\param protection The area protection for which to check. Valid values are
		bitwise combinations of \c B_KERNEL_READ_AREA and
		\c B_KERNEL_WRITE_AREA.
	\return \c true if the address can be accessed in all ways specified by
		\a protection, \c false otherwise.
*/
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	uint32 protection)
{
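	// The debugger may be entered before the paging method has been
	// initialized; in that case simply assume the page is accessible.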
	if (gX86PagingMethod == NULL)
		return true;

	return gX86PagingMethod->IsKernelPageAccessible(virtualAddress, protection);
}