xref: /haiku/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.h (revision 820dca4df6c7bf955c46e8f6521b9408f50b2900)
/*
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
6 #ifndef KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H
7 #define KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H
8 
9 
10 #include <KernelExport.h>
11 
12 #include <lock.h>
13 #include <vm/vm_types.h>
14 
15 #include "paging/64bit/paging.h"
16 #include "paging/X86PagingMethod.h"
17 #include "paging/X86PagingStructures.h"
18 
19 
20 class TranslationMapPhysicalPageMapper;
21 class X86PhysicalPageMapper;
22 struct vm_page_reservation;
23 
24 
/*!	Paging method implementation for x86-64 long mode.

	Implements the X86PagingMethod interface on top of the 4-level (PML4)
	page table hierarchy. The instance owns the kernel's top-level table in
	both its physical (\c fKernelPhysicalPML4) and mapped virtual
	(\c fKernelVirtualPML4) form, and additionally provides static helpers
	for walking the table hierarchy and for atomically manipulating 64-bit
	page table entries.
*/
class X86PagingMethod64Bit : public X86PagingMethod {
public:
								X86PagingMethod64Bit();
	virtual						~X86PagingMethod64Bit();

	// X86PagingMethod interface (see paging/X86PagingMethod.h for the
	// contract of each override).
	virtual	status_t			Init(kernel_args* args,
									VMPhysicalPageMapper** _physicalPageMapper);
	virtual	status_t			InitPostArea(kernel_args* args);

	virtual	status_t			CreateTranslationMap(bool kernel,
									VMTranslationMap** _map);

	// Maps a page during early boot, using get_free_page() to allocate any
	// page tables still missing (the normal allocator is not up yet).
	virtual	status_t			MapEarly(kernel_args* args,
									addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint8 attributes,
									page_num_t (*get_free_page)(kernel_args*));

	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
									uint32 protection);

	// Accessors for the physical page mappers set up by Init().
	inline	X86PhysicalPageMapper* PhysicalPageMapper() const
									{ return fPhysicalPageMapper; }
	inline	TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
									{ return fKernelPhysicalPageMapper; }

	// The kernel's top-level page table, as kernel-virtual pointer and as
	// physical address (the latter is what gets loaded into CR3).
	inline	uint64*				KernelVirtualPML4() const
									{ return fKernelVirtualPML4; }
	inline	phys_addr_t			KernelPhysicalPML4() const
									{ return fKernelPhysicalPML4; }

	// The global paging method instance, downcast to this class.
	static	X86PagingMethod64Bit* Method();

	// Walk the hierarchy starting at virtualPML4 and return the page table
	// (resp. the page table entry) covering virtualAddress. With
	// allocateTables set, missing intermediate tables are allocated from
	// reservation; mapCount is incremented for pages mapped along the way.
	static	uint64*				PageTableForAddress(uint64* virtualPML4,
									addr_t virtualAddress, bool isKernel,
									bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);
	static	uint64*				PageTableEntryForAddress(uint64* virtualPML4,
									addr_t virtualAddress, bool isKernel,
									bool allocateTables,
									vm_page_reservation* reservation,
									TranslationMapPhysicalPageMapper*
										pageMapper, int32& mapCount);

	// Atomic helpers for 64-bit page table entries. Each returns the
	// previous value of *entry.
	static	void				PutPageTableEntryInTable(
									uint64* entry, phys_addr_t physicalAddress,
									uint32 attributes, uint32 memoryType,
									bool globalPage);
	static	uint64				SetTableEntry(uint64* entry, uint64 newEntry);
	static	uint64				SetTableEntryFlags(uint64* entry, uint64 flags);
	static	uint64				TestAndSetTableEntry(uint64* entry,
									uint64 newEntry, uint64 oldEntry);
	static	uint64				ClearTableEntry(uint64* entry);
	static	uint64				ClearTableEntryFlags(uint64* entry,
									uint64 flags);

	// Translates a B_MTR_* memory type to the PTE caching flag bits.
	static	uint64				MemoryTypeToPageTableEntryFlags(
									uint32 memoryType);

private:
			// Kernel PML4: physical address and its kernel-virtual mapping.
			phys_addr_t			fKernelPhysicalPML4;
			uint64*				fKernelVirtualPML4;

			X86PhysicalPageMapper* fPhysicalPageMapper;
			TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;
};
93 
94 
95 /*static*/ inline X86PagingMethod64Bit*
96 X86PagingMethod64Bit::Method()
97 {
98 	return static_cast<X86PagingMethod64Bit*>(gX86PagingMethod);
99 }
100 
101 
102 /*static*/ inline uint64
103 X86PagingMethod64Bit::SetTableEntry(uint64* entry, uint64 newEntry)
104 {
105 	return atomic_set64((int64*)entry, newEntry);
106 }
107 
108 
109 /*static*/ inline uint64
110 X86PagingMethod64Bit::SetTableEntryFlags(uint64* entry, uint64 flags)
111 {
112 	return atomic_or64((int64*)entry, flags);
113 }
114 
115 
116 /*static*/ inline uint64
117 X86PagingMethod64Bit::TestAndSetTableEntry(uint64* entry, uint64 newEntry,
118 	uint64 oldEntry)
119 {
120 	return atomic_test_and_set64((int64*)entry, newEntry, oldEntry);
121 }
122 
123 
124 /*static*/ inline uint64
125 X86PagingMethod64Bit::ClearTableEntry(uint64* entry)
126 {
127 	return SetTableEntry(entry, 0);
128 }
129 
130 
131 /*static*/ inline uint64
132 X86PagingMethod64Bit::ClearTableEntryFlags(uint64* entry, uint64 flags)
133 {
134 	return atomic_and64((int64*)entry, ~flags);
135 }
136 
137 
138 /*static*/ inline uint64
139 X86PagingMethod64Bit::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
140 {
141 	// ATM we only handle the uncacheable and write-through type explicitly. For
142 	// all other types we rely on the MTRRs to be set up correctly. Since we set
143 	// the default memory type to write-back and since the uncacheable type in
144 	// the PTE overrides any MTRR attribute (though, as per the specs, that is
145 	// not recommended for performance reasons), this reduces the work we
146 	// actually *have* to do with the MTRRs to setting the remaining types
147 	// (usually only write-combining for the frame buffer).
148 	switch (memoryType) {
149 		case B_MTR_UC:
150 			return X86_64_PTE_CACHING_DISABLED | X86_64_PTE_WRITE_THROUGH;
151 
152 		case B_MTR_WC:
153 			// X86_PTE_WRITE_THROUGH would be closer, but the combination with
154 			// MTRR WC is "implementation defined" for Pentium Pro/II.
155 			return 0;
156 
157 		case B_MTR_WT:
158 			return X86_64_PTE_WRITE_THROUGH;
159 
160 		case B_MTR_WP:
161 		case B_MTR_WB:
162 		default:
163 			return 0;
164 	}
165 }
166 
167 
168 #endif	// KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H
169