xref: /haiku/src/system/kernel/arch/x86/arch_vm_translation_map.cpp (revision 2510baa4685f8f570c607ceedfd73473d69342c4)
/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include <arch/vm_translation_map.h>

#include <boot/kernel_args.h>

#ifdef __x86_64__
#	include "paging/64bit/X86PagingMethod64Bit.h"
#else
#	include "paging/32bit/X86PagingMethod32Bit.h"
#	include "paging/pae/X86PagingMethodPAE.h"
#endif


//#define TRACE_VM_TMAP
#ifdef TRACE_VM_TMAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif

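// Statically allocated storage for the paging method object. The method is
// chosen and constructed before the kernel heap is available, so it is built
// into this buffer via placement new. The uint64 member merely forces 8-byte
// alignment; the char arrays size the union to the largest method
// implementation. A minimal sketch of the idiom (illustrative only, not part
// of this file):
//
//	static union { uint64 align; char bytes[sizeof(Method)]; } sBuffer;
//	Method* method = new(&sBuffer) Method;
//		// constructs the object in place, no heap allocation involved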
static union {
	uint64	align;
#ifdef __x86_64__
	char	sixty_four[sizeof(X86PagingMethod64Bit)];
#else
	char	thirty_two[sizeof(X86PagingMethod32Bit)];
#if B_HAIKU_PHYSICAL_BITS == 64
	char	pae[sizeof(X86PagingMethodPAE)];
#endif
#endif
} sPagingMethodBuffer;


// #pragma mark - VM API


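/*!	Creates a translation map for a new address space by delegating to the
	paging method selected in arch_vm_translation_map_init().
	\param kernel \c true to create the kernel's translation map, \c false
		for a userland address space.
	\param _map Pointer through which the new map is returned on success.
*/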
status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	return gX86PagingMethod->CreateTranslationMap(kernel, _map);
}


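/*!	Performs early architecture-specific VM initialization: picks the paging
	method appropriate for the CPU and the physical memory layout reported by
	the boot loader, constructs it in sPagingMethodBuffer, and lets it
	initialize itself.
	\param args The kernel_args passed on from the boot loader.
	\param _physicalPageMapper Pointer through which the method's physical
		page mapper is returned.
*/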
status_t
arch_vm_translation_map_init(kernel_args *args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("vm_translation_map_init: entry\n");

#ifdef TRACE_VM_TMAP
	TRACE("physical memory ranges:\n");
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		phys_addr_t start = args->physical_memory_range[i].start;
		phys_addr_t end = start + args->physical_memory_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated physical ranges:\n");
	for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
		phys_addr_t start = args->physical_allocated_range[i].start;
		phys_addr_t end = start + args->physical_allocated_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated virtual ranges:\n");
	for (uint32 i = 0; i < args->num_virtual_allocated_ranges; i++) {
		addr_t start = args->virtual_allocated_range[i].start;
		addr_t end = start + args->virtual_allocated_range[i].size;
		TRACE("  %#10" B_PRIxADDR " - %#10" B_PRIxADDR "\n", start, end);
	}
#endif

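	// Select the paging method. On x86_64 there is only the 64-bit method.
	// On 32-bit kernels built with a 64-bit phys_addr_t, PAE is used only
	// when the CPU supports it and some physical memory actually extends
	// beyond the 4 GiB boundary (0x100000000). For example, a range starting
	// at 0xfff00000 with size 0x200000 ends at 0x100100000 and would require
	// PAE; otherwise plain 32-bit paging suffices.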
#ifdef __x86_64__
	gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod64Bit;
#elif B_HAIKU_PHYSICAL_BITS == 64
	bool paeAvailable = x86_check_feature(IA32_FEATURE_PAE, FEATURE_COMMON);
	bool paeNeeded = false;
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		phys_addr_t end = args->physical_memory_range[i].start
			+ args->physical_memory_range[i].size;
		if (end > 0x100000000LL) {
			paeNeeded = true;
			break;
		}
	}

	if (paeAvailable && paeNeeded) {
		dprintf("using PAE paging\n");
		gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethodPAE;
	} else {
		dprintf("using 32 bit paging (PAE not %s)\n",
			paeNeeded ? "available" : "needed");
		gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod32Bit;
	}
#else
	gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod32Bit;
#endif

	return gX86PagingMethod->Init(args, _physicalPageMapper);
}


status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
	return B_OK;
}


status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
	TRACE("vm_translation_map_init_post_area: entry\n");

	return gX86PagingMethod->InitPostArea(args);
}


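/*!	Maps a page during early boot, before the VM is fully initialized. Page
	tables that need to be created along the way are allocated through the
	\a get_free_page callback.
*/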
status_t
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
	uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
{
	TRACE("early_tmap: entry pa %#" B_PRIxPHYSADDR " va %#" B_PRIxADDR "\n",
		pa, va);

	return gX86PagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
}


/*!	Verifies that the page at the given virtual address can be accessed in the
	current context.

	This function is invoked in the kernel debugger. Paranoid checking is in
	order.

	\param virtualAddress The virtual address to be checked.
	\param protection The area protection for which to check: a bitwise OR of
		one or more of \c B_KERNEL_READ_AREA and \c B_KERNEL_WRITE_AREA.
	\return \c true, if the address can be accessed in all ways specified by
		\a protection, \c false otherwise.
*/
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	uint32 protection)
{
	if (gX86PagingMethod == NULL)
		return true;

	return gX86PagingMethod->IsKernelPageAccessible(virtualAddress, protection);
}
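

// A hypothetical caller in a kernel debugger command might guard a memory
// dump like this (sketch only; the surrounding command is made up, but
// B_KERNEL_READ_AREA is the real protection constant):
//
//	if (arch_vm_translation_map_is_kernel_page_accessible(address,
//			B_KERNEL_READ_AREA)) {
//		// the page can safely be read in the current context
//	}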