/*
 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H
#define KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H


#include <atomic>

#include <KernelExport.h>

#include <lock.h>
#include <vm/vm_types.h>

#include "paging/64bit/paging.h"
#include "paging/X86PagingMethod.h"
#include "paging/X86PagingStructures.h"


class TranslationMapPhysicalPageMapper;
class X86PhysicalPageMapper;
struct vm_page_reservation;


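// Paging method for x86-64 long mode. It handles both the classic 4-level
// (PML4) table layout and, when constructed with la57 == true, the 5-level
// (PML5) layout; "PMLTop" throughout refers to the root table of whichever
// layout is active.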
class X86PagingMethod64Bit final : public X86PagingMethod {
public:
								X86PagingMethod64Bit(bool la57);
	virtual						~X86PagingMethod64Bit();

	virtual	status_t			Init(kernel_args* args,
									VMPhysicalPageMapper** _physicalPageMapper);
	virtual	status_t			InitPostArea(kernel_args* args);

	virtual	status_t			CreateTranslationMap(bool kernel,
									VMTranslationMap** _map);

	virtual	status_t			MapEarly(kernel_args* args,
									addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint8 attributes,
									page_num_t (*get_free_page)(kernel_args*));

	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
									uint32 protection);

	inline	X86PhysicalPageMapper* PhysicalPageMapper() const
									{ return fPhysicalPageMapper; }
	inline	TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
									{ return fKernelPhysicalPageMapper; }

	inline	uint64*				KernelVirtualPMLTop() const
									{ return fKernelVirtualPMLTop; }
	inline	phys_addr_t			KernelPhysicalPMLTop() const
									{ return fKernelPhysicalPMLTop; }

	static	X86PagingMethod64Bit* Method();

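	// Walkers for the page-table hierarchy. Each starts from the given
	// (virtually mapped) root table and returns a pointer to the wanted
	// table or entry, or NULL when an intermediate table is missing and
	// allocateTables is false; new tables are allocated from the given
	// reservation, and mapCount is adjusted for the mappings created.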
	static	uint64*				PageDirectoryForAddress(uint64* virtualPML4,
									addr_t virtualAddress, bool isKernel,
									bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);
	static	uint64*				PageDirectoryEntryForAddress(
									uint64* virtualPML4, addr_t virtualAddress,
									bool isKernel, bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);
	static	uint64*				PageTableForAddress(uint64* virtualPML4,
									addr_t virtualAddress, bool isKernel,
									bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);
	static	uint64*				PageTableEntryForAddress(uint64* virtualPML4,
									addr_t virtualAddress, bool isKernel,
									bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);

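	// Builds a page-table entry from the given physical address, protection
	// attributes, memory type and global flag, and stores it into *entry.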
	static	void				PutPageTableEntryInTable(
									uint64* entry, phys_addr_t physicalAddress,
									uint32 attributes, uint32 memoryType,
									bool globalPage);
	static	void				SetTableEntry(uint64_t* entry,
									uint64_t newEntry);
	static	uint64_t			SetTableEntryFlags(uint64_t* entryPointer,
									uint64_t flags);
	static	uint64				TestAndSetTableEntry(uint64* entry,
									uint64 newEntry, uint64 oldEntry);
	static	uint64_t			ClearTableEntry(uint64_t* entryPointer);
	static	uint64_t			ClearTableEntryFlags(uint64_t* entryPointer,
									uint64_t flags);

	static	uint64				MemoryTypeToPageTableEntryFlags(
									uint32 memoryType);

private:
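	// Per-CPU callback run during initialization, presumably to set the
	// NX enable bit in the EFER MSR so execute-disable pages take effect.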
	static	void				_EnableExecutionDisable(void* dummy, int cpu);

			phys_addr_t			fKernelPhysicalPMLTop;
			uint64*				fKernelVirtualPMLTop;

			X86PhysicalPageMapper* fPhysicalPageMapper;
			TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;

	static	bool				la57;
};


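// The inline helpers below reinterpret raw uint64_t page-table entries as
// std::atomic<uint64_t>, which is only sound if the atomic type adds no
// extra state to the plain integer's representation.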
static_assert(sizeof(std::atomic<uint64_t>) == sizeof(uint64_t),
	"Non-trivial representation of atomic uint64_t.");


/*static*/ inline X86PagingMethod64Bit*
X86PagingMethod64Bit::Method()
{
	return static_cast<X86PagingMethod64Bit*>(gX86PagingMethod);
}


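// Atomically stores a new entry value. Relaxed ordering suffices here: the
// hardware page walker sees the 64-bit store atomically, and callers take
// care of any TLB invalidation the update requires.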
/*static*/ inline void
X86PagingMethod64Bit::SetTableEntry(uint64_t* entryPointer, uint64_t newEntry)
{
	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
	entry.store(newEntry, std::memory_order_relaxed);
}


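// Atomically ORs the given flags into the entry and returns its previous
// value.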
/*static*/ inline uint64_t
X86PagingMethod64Bit::SetTableEntryFlags(uint64_t* entryPointer, uint64_t flags)
{
	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
	return entry.fetch_or(flags);
}


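// Compare-and-swap on a table entry: installs newEntry only if the entry
// still holds oldEntry, and returns the entry's previous value (the
// semantics of Haiku's atomic_test_and_set64()).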
/*static*/ inline uint64
X86PagingMethod64Bit::TestAndSetTableEntry(uint64* entry, uint64 newEntry,
	uint64 oldEntry)
{
	return atomic_test_and_set64((int64*)entry, newEntry, oldEntry);
}


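// Atomically clears a whole entry and returns the old value, so the caller
// can still inspect the accessed/dirty bits the CPU may have set meanwhile.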
/*static*/ inline uint64_t
X86PagingMethod64Bit::ClearTableEntry(uint64_t* entryPointer)
{
	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
	return entry.exchange(0);
}


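// Atomically clears the given flags in the entry and returns its previous
// value.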
/*static*/ inline uint64_t
X86PagingMethod64Bit::ClearTableEntryFlags(uint64_t* entryPointer,
	uint64_t flags)
{
	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
	return entry.fetch_and(~flags);
}


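// Maps a Haiku memory type to the PTE caching flags (the PCD, PWT and PAT
// bits). Write-combining needs the PAT; when x86_use_pat() reports it as
// unavailable, no flag is set and the range keeps the default caching
// policy.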
/*static*/ inline uint64
X86PagingMethod64Bit::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
{
	switch (memoryType) {
		case B_UNCACHED_MEMORY:
			return X86_64_PTE_CACHING_DISABLED | X86_64_PTE_WRITE_THROUGH;

		case B_WRITE_COMBINING_MEMORY:
			return x86_use_pat() ? X86_64_PTE_PAT : 0;

		case B_WRITE_THROUGH_MEMORY:
			return X86_64_PTE_WRITE_THROUGH;

		case B_WRITE_PROTECTED_MEMORY:
		case B_WRITE_BACK_MEMORY:
		default:
			return 0;
	}
}


#endif	// KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H