/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <arch/vm_translation_map.h>

#include <boot/kernel_args.h>

#include "paging/32bit/X86PagingMethod32Bit.h"
#include "paging/pae/X86PagingMethodPAE.h"


//#define TRACE_VM_TMAP
#ifdef TRACE_VM_TMAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif
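// Uncomment the TRACE_VM_TMAP define above to get trace output from this file.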


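// Static storage for the paging method object, constructed in place via
// placement new in arch_vm_translation_map_init(), since the kernel heap is
// not available at that point. The union makes the buffer large enough for
// either method and 64 bit aligned. A minimal sketch of the pattern (names
// hypothetical; placement new needs the <new> header):
//
//	static union {
//		uint64	align;						// enforce 8 byte alignment
//		char	buffer[sizeof(SomeMethod)];
//	} sBuffer;
//
//	SomeMethod* method = new(&sBuffer) SomeMethod;
//		// constructed in sBuffer, no heap allocation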
static union {
	uint64	align;
	char	thirty_two[sizeof(X86PagingMethod32Bit)];
#if B_HAIKU_PHYSICAL_BITS == 64
	char	pae[sizeof(X86PagingMethodPAE)];
#endif
} sPagingMethodBuffer;


// #pragma mark - VM API


status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	return gX86PagingMethod->CreateTranslationMap(kernel, _map);
}


status_t
arch_vm_translation_map_init(kernel_args *args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("vm_translation_map_init: entry\n");

#ifdef TRACE_VM_TMAP
	TRACE("physical memory ranges:\n");
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		phys_addr_t start = args->physical_memory_range[i].start;
		phys_addr_t end = start + args->physical_memory_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated physical ranges:\n");
	for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
		phys_addr_t start = args->physical_allocated_range[i].start;
		phys_addr_t end = start + args->physical_allocated_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated virtual ranges:\n");
	for (uint32 i = 0; i < args->num_virtual_allocated_ranges; i++) {
		addr_t start = args->virtual_allocated_range[i].start;
		addr_t end = start + args->virtual_allocated_range[i].size;
		TRACE("  %#10" B_PRIxADDR " - %#10" B_PRIxADDR "\n", start, end);
	}
#endif

#if B_HAIKU_PHYSICAL_BITS == 64
	// Use PAE only if the CPU supports it and if it is actually needed, i.e.
	// if physical memory extends beyond the 4 GiB (2^32) boundary that
	// 32 bit page table entries can address.
	bool paeAvailable = x86_check_feature(IA32_FEATURE_PAE, FEATURE_COMMON);
	bool paeNeeded = false;
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		phys_addr_t end = args->physical_memory_range[i].start
			+ args->physical_memory_range[i].size;
		if (end > 0x100000000LL) {
			paeNeeded = true;
			break;
		}
	}

	if (paeAvailable && paeNeeded) {
		dprintf("using PAE paging\n");
		gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethodPAE;
	} else {
		dprintf("using 32 bit paging (PAE not %s)\n",
			paeNeeded ? "available" : "needed");
		gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod32Bit;
	}
#else
	gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod32Bit;
#endif

	return gX86PagingMethod->Init(args, _physicalPageMapper);
}


status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
	// Nothing to do on x86.
	return B_OK;
}


status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
	TRACE("vm_translation_map_init_post_area: entry\n");

	return gX86PagingMethod->InitPostArea(args);
}


status_t
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
	uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
{
	TRACE("early_tmap: entry pa %#" B_PRIxPHYSADDR " va %#" B_PRIxADDR "\n",
		pa, va);

	return gX86PagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
}
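// Usage sketch (illustrative only; allocate_early_physical_page is a
// hypothetical callback): this runs before the VM and the heap exist, so the
// caller has to supply the allocator for the physical pages needed for page
// tables, e.g.:
//
//	arch_vm_translation_map_early_map(args, virtualAddress, physicalAddress,
//		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
//		&allocate_early_physical_page);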


/*!	Verifies that the page at the given virtual address can be accessed in the
	current context.

	This function is invoked in the kernel debugger. Paranoid checking is in
	order.

	\param virtualAddress The virtual address to be checked.
	\param protection The area protection for which to check. Valid values are
		bitwise combinations of \c B_KERNEL_READ_AREA and
		\c B_KERNEL_WRITE_AREA.
	\return \c true if the address can be accessed in all ways specified by
		\a protection, \c false otherwise.
*/
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	uint32 protection)
{
	return gX86PagingMethod->IsKernelPageAccessible(virtualAddress, protection);
}
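// Usage sketch (hypothetical KDL command; illustrative only): a debugger
// command should verify accessibility before dereferencing an arbitrary
// address, e.g.:
//
//	if (arch_vm_translation_map_is_kernel_page_accessible(address,
//			B_KERNEL_READ_AREA)) {
//		uint32 value = *(uint32*)address;
//		kprintf("value: %#" B_PRIx32 "\n", value);
//	} else
//		kprintf("address %p is not readable\n", (void*)address);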
149