/*
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H
#define KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H


#include <KernelExport.h>

#include <lock.h>
#include <vm/vm_types.h>

#include "paging/64bit/paging.h"
#include "paging/X86PagingMethod.h"
#include "paging/X86PagingStructures.h"


class TranslationMapPhysicalPageMapper;
class X86PhysicalPageMapper;
struct vm_page_reservation;

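// The paging method for x86-64 long mode: translation maps are built on the
// 4-level PML4 -> PDPT -> page directory -> page table hierarchy, with 64-bit
// entries that are manipulated atomically via the static helpers below.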
class X86PagingMethod64Bit : public X86PagingMethod {
public:
								X86PagingMethod64Bit();
	virtual						~X86PagingMethod64Bit();

	virtual	status_t			Init(kernel_args* args,
									VMPhysicalPageMapper** _physicalPageMapper);
	virtual	status_t			InitPostArea(kernel_args* args);

	virtual	status_t			CreateTranslationMap(bool kernel,
									VMTranslationMap** _map);

	virtual	status_t			MapEarly(kernel_args* args,
									addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint8 attributes,
									page_num_t (*get_free_page)(kernel_args*));

	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
									uint32 protection);

	inline	X86PhysicalPageMapper* PhysicalPageMapper() const
									{ return fPhysicalPageMapper; }
	inline	TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
									{ return fKernelPhysicalPageMapper; }

	inline	uint64*				KernelVirtualPML4() const
									{ return fKernelVirtualPML4; }
	inline	phys_addr_t			KernelPhysicalPML4() const
									{ return fKernelPhysicalPML4; }

	static	X86PagingMethod64Bit* Method();

	// Look up the paging structures covering virtualAddress. When
	// allocateTables is true, missing intermediate tables are allocated from
	// the given reservation and mapCount is updated accordingly.
	static	uint64*				PageDirectoryForAddress(uint64* virtualPML4,
									addr_t virtualAddress, bool isKernel,
									bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);
	static	uint64*				PageDirectoryEntryForAddress(
									uint64* virtualPML4, addr_t virtualAddress,
									bool isKernel, bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);
	static	uint64*				PageTableForAddress(uint64* virtualPML4,
									addr_t virtualAddress, bool isKernel,
									bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);
	static	uint64*				PageTableEntryForAddress(uint64* virtualPML4,
									addr_t virtualAddress, bool isKernel,
									bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);

	// Build a page table entry from the given physical address, protection
	// attributes and memory type, and store it in *entry.
	static	void				PutPageTableEntryInTable(
									uint64* entry, phys_addr_t physicalAddress,
									uint32 attributes, uint32 memoryType,
									bool globalPage);

	// Atomic entry updates; each returns the previous value of the entry.
	static	uint64				SetTableEntry(uint64* entry, uint64 newEntry);
	static	uint64				SetTableEntryFlags(uint64* entry, uint64 flags);
	static	uint64				TestAndSetTableEntry(uint64* entry,
									uint64 newEntry, uint64 oldEntry);
	static	uint64				ClearTableEntry(uint64* entry);
	static	uint64				ClearTableEntryFlags(uint64* entry,
									uint64 flags);

	static	uint64				MemoryTypeToPageTableEntryFlags(
									uint32 memoryType);

private:
	// Per-CPU handler that enables the execute-disable (NX) feature.
	static	void				_EnableExecutionDisable(void* dummy, int cpu);

			phys_addr_t			fKernelPhysicalPML4;
			uint64*				fKernelVirtualPML4;

			X86PhysicalPageMapper* fPhysicalPageMapper;
			TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;
};


/*static*/ inline X86PagingMethod64Bit*
X86PagingMethod64Bit::Method()
{
	return static_cast<X86PagingMethod64Bit*>(gX86PagingMethod);
}


/*static*/ inline uint64
X86PagingMethod64Bit::SetTableEntry(uint64* entry, uint64 newEntry)
{
	return atomic_get_and_set64((int64*)entry, newEntry);
}


/*static*/ inline uint64
X86PagingMethod64Bit::SetTableEntryFlags(uint64* entry, uint64 flags)
{
	return atomic_or64((int64*)entry, flags);
}


// Atomically replaces *entry with newEntry only if the entry currently equals
// oldEntry; returns the previous value, which equals oldEntry on success.
/*static*/ inline uint64
X86PagingMethod64Bit::TestAndSetTableEntry(uint64* entry, uint64 newEntry,
	uint64 oldEntry)
{
	return atomic_test_and_set64((int64*)entry, newEntry, oldEntry);
}
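
// Usage sketch (hypothetical caller, assuming the X86_64_PTE_ADDRESS_MASK
// constant from "paging/64bit/paging.h" and a page-aligned
// newPhysicalAddress): a lock-free read-modify-write that must not clobber
// concurrent updates, e.g. the MMU setting the accessed/dirty bits between
// the read and the write:
//
//	while (true) {
//		uint64 oldEntry = *entry;
//		uint64 newEntry = (oldEntry & ~(uint64)X86_64_PTE_ADDRESS_MASK)
//			| newPhysicalAddress;
//		if (TestAndSetTableEntry(entry, newEntry, oldEntry) == oldEntry)
//			break;
//		// the entry changed concurrently -- retry
//	}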


/*static*/ inline uint64
X86PagingMethod64Bit::ClearTableEntry(uint64* entry)
{
	return SetTableEntry(entry, 0);
}


/*static*/ inline uint64
X86PagingMethod64Bit::ClearTableEntryFlags(uint64* entry, uint64 flags)
{
	return atomic_and64((int64*)entry, ~flags);
}
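
// Usage sketch (hypothetical caller, assuming the X86_64_PTE_* constants from
// "paging/64bit/paging.h"): write-protecting an entry while preserving its
// accessed/dirty state:
//
//	uint64 oldEntry = ClearTableEntryFlags(entry, X86_64_PTE_WRITABLE);
//	if ((oldEntry & X86_64_PTE_DIRTY) != 0) {
//		// the page was modified before the downgrade took effect
//	}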


/*static*/ inline uint64
X86PagingMethod64Bit::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
{
	// At the moment we only handle the uncacheable and write-through types
	// explicitly. For all other types we rely on the MTRRs to be set up
	// correctly. Since we set the default memory type to write-back and since
	// the uncacheable type in the PTE overrides any MTRR attribute (though,
	// as per the specs, that is not recommended for performance reasons),
	// this reduces the work we actually *have* to do with the MTRRs to
	// setting the remaining types (usually only write-combining for the
	// frame buffer).
	switch (memoryType) {
		case B_MTR_UC:
			return X86_64_PTE_CACHING_DISABLED | X86_64_PTE_WRITE_THROUGH;

		case B_MTR_WC:
			// X86_64_PTE_WRITE_THROUGH would be closer, but the combination
			// with MTRR WC is "implementation defined" for Pentium Pro/II.
			return 0;

		case B_MTR_WT:
			return X86_64_PTE_WRITE_THROUGH;

		case B_MTR_WP:
		case B_MTR_WB:
		default:
			return 0;
	}
}
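
// Example (sketch, hypothetical caller): combining the memory type flags with
// presence/protection bits when assembling an entry by hand; for normal
// mappings, PutPageTableEntryInTable() is the usual entry point:
//
//	uint64 entry = physicalAddress | X86_64_PTE_PRESENT | X86_64_PTE_WRITABLE
//		| X86PagingMethod64Bit::MemoryTypeToPageTableEntryFlags(B_MTR_WC);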


#endif	// KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H