xref: /haiku/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.h (revision ba0223da5d79c5cd27496ee0e5712921cebb7642)
1 /*
2  * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Distributed under the terms of the MIT License.
4  */
5 #ifndef KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_METHOD_PAE_H
6 #define KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_METHOD_PAE_H
7 
8 
9 #include <KernelExport.h>
10 
11 #include <lock.h>
12 #include <vm/vm_types.h>
13 
14 #include "paging/pae/paging.h"
15 #include "paging/X86PagingMethod.h"
16 #include "paging/X86PagingStructures.h"
17 
18 
19 #if B_HAIKU_PHYSICAL_BITS == 64
20 
21 
22 class TranslationMapPhysicalPageMapper;
23 class X86PhysicalPageMapper;
24 
25 
// X86PagingMethod implementation for PAE paging (only built when
// B_HAIKU_PHYSICAL_BITS == 64). Translation uses a page directory pointer
// table (PDPT) referring to four page directories (see the [4] arrays
// below), with 64 bit directory/table entries, allowing physical
// addresses wider than 32 bits.
class X86PagingMethodPAE final : public X86PagingMethod {
public:
								X86PagingMethodPAE();
	virtual						~X86PagingMethodPAE();

	// X86PagingMethod interface: boot-time initialization and creation of
	// per-address-space translation maps.
	virtual	status_t			Init(kernel_args* args,
									VMPhysicalPageMapper** _physicalPageMapper);
	virtual	status_t			InitPostArea(kernel_args* args);

	virtual	status_t			CreateTranslationMap(bool kernel,
									VMTranslationMap** _map);

	// Maps a page before the VM is fully up, pulling page table memory
	// from the boot loader via the supplied get_free_page() callback.
	virtual	status_t			MapEarly(kernel_args* args,
									addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint8 attributes,
									page_num_t (*get_free_page)(kernel_args*));

	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
									uint32 protection);

	// Allocate/free a mapped page whose physical address fits in 32 bits
	// -- presumably for structures with 32 bit physical address fields
	// (TODO(review): confirm against callers). Backed by the fFreePages
	// list below.
			void*				Allocate32BitPage(
									phys_addr_t& _physicalAddress,
									void*& _handle);
			void				Free32BitPage(void* address,
									phys_addr_t physicalAddress, void* handle);

	inline	X86PhysicalPageMapper* PhysicalPageMapper() const
									{ return fPhysicalPageMapper; }
	inline	TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
									{ return fKernelPhysicalPageMapper; }

	// Accessors for the kernel's paging structures, as both virtual
	// (mapped) pointers and physical addresses.
	inline	pae_page_directory_pointer_table_entry*
									KernelVirtualPageDirPointerTable() const;
	inline	phys_addr_t			KernelPhysicalPageDirPointerTable() const;
	inline	pae_page_directory_entry* const* KernelVirtualPageDirs() const
									{ return fKernelVirtualPageDirs; }
	inline	const phys_addr_t*	KernelPhysicalPageDirs() const
									{ return fKernelPhysicalPageDirs; }

	// The globally installed paging method, downcast to this class.
	static	X86PagingMethodPAE*	Method();

	static	void				PutPageTableInPageDir(
									pae_page_directory_entry* entry,
									phys_addr_t physicalTable,
									uint32 attributes);
	static	void				PutPageTableEntryInTable(
									pae_page_table_entry* entry,
									phys_addr_t physicalAddress,
									uint32 attributes, uint32 memoryType,
									bool globalPage);

	// Atomic primitives on 64 bit directory/table entries (implemented
	// with the atomic_*64() kernel functions, see the inline definitions
	// below). All return the entry's previous value.
	static	uint64_t			SetTableEntry(uint64_t* entry,
									uint64_t newEntry);
	static	uint64_t			SetTableEntryFlags(uint64_t* entry,
									uint64_t flags);
	static	uint64_t			TestAndSetTableEntry(uint64_t* entry,
									uint64_t newEntry, uint64_t oldEntry);
	static	uint64_t			ClearTableEntry(uint64_t* entry);
	static	uint64_t			ClearTableEntryFlags(uint64_t* entry,
									uint64_t flags);

	static	pae_page_directory_entry* PageDirEntryForAddress(
									pae_page_directory_entry* const* pdpt,
									addr_t address);

	static	uint64				MemoryTypeToPageTableEntryFlags(
									uint32 memoryType);

private:
			struct ToPAESwitcher;
			struct PhysicalPageSlotPool;
			friend struct PhysicalPageSlotPool;

private:
	inline	int32				_GetInitialPoolCount();

			bool				_EarlyQuery(addr_t virtualAddress,
									phys_addr_t* _physicalAddress);
			pae_page_table_entry* _EarlyGetPageTable(phys_addr_t address);

private:
			X86PhysicalPageMapper* fPhysicalPageMapper;
			TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;

			// Structures allocated during early (pre-VM) initialization.
			void*				fEarlyPageStructures;
			size_t				fEarlyPageStructuresSize;

			// Kernel PDPT and its four page directories, kept as both
			// virtual pointers and physical addresses.
			pae_page_directory_pointer_table_entry*
									fKernelVirtualPageDirPointerTable;
			phys_addr_t			fKernelPhysicalPageDirPointerTable;
			pae_page_directory_entry* fKernelVirtualPageDirs[4];
			phys_addr_t			fKernelPhysicalPageDirs[4];

			// Next free early-mapping slot and the PTE backing it.
			addr_t				fFreeVirtualSlot;
			pae_page_table_entry* fFreeVirtualSlotPTE;

			// Pages reserved for Allocate32BitPage(); fFreePagesLock
			// presumably guards both the list head and the count
			// (TODO(review): confirm in the .cpp).
			mutex				fFreePagesLock;
			vm_page*			fFreePages;
			page_num_t			fFreePagesCount;
};
124 
125 
// Returns the virtual (mapped) address of the kernel's page directory
// pointer table.
pae_page_directory_pointer_table_entry*
X86PagingMethodPAE::KernelVirtualPageDirPointerTable() const
{
	return fKernelVirtualPageDirPointerTable;
}
131 
132 
// Returns the physical address of the kernel's page directory pointer
// table (the value suitable for loading into CR3).
phys_addr_t
X86PagingMethodPAE::KernelPhysicalPageDirPointerTable() const
{
	return fKernelPhysicalPageDirPointerTable;
}
138 
139 
// Returns the globally installed paging method downcast to this class.
// Only valid when PAE is the method that was stored in gX86PagingMethod.
/*static*/ inline X86PagingMethodPAE*
X86PagingMethodPAE::Method()
{
	return static_cast<X86PagingMethodPAE*>(gX86PagingMethod);
}
145 
146 
147 /*static*/ inline pae_page_directory_entry*
148 X86PagingMethodPAE::PageDirEntryForAddress(
149 	pae_page_directory_entry* const* pdpt, addr_t address)
150 {
151 	return pdpt[address >> 30]
152 		+ (address / kPAEPageTableRange) % kPAEPageDirEntryCount;
153 }
154 
155 
156 /*static*/ inline uint64_t
157 X86PagingMethodPAE::SetTableEntry(uint64_t* entry, uint64_t newEntry)
158 {
159 	return atomic_get_and_set64((int64*)entry, newEntry);
160 }
161 
162 
163 /*static*/ inline uint64_t
164 X86PagingMethodPAE::SetTableEntryFlags(uint64_t* entry, uint64_t flags)
165 {
166 	return atomic_or64((int64*)entry, flags);
167 }
168 
169 
170 /*static*/ inline uint64_t
171 X86PagingMethodPAE::TestAndSetTableEntry(uint64_t* entry,
172 	uint64_t newEntry, uint64_t oldEntry)
173 {
174 	return atomic_test_and_set64((int64*)entry, newEntry, oldEntry);
175 }
176 
177 
// Atomically clears the table entry (sets it to 0), returning the value
// it held before.
/*static*/ inline uint64_t
X86PagingMethodPAE::ClearTableEntry(uint64_t* entry)
{
	return SetTableEntry(entry, 0);
}
183 
184 
185 /*static*/ inline uint64_t
186 X86PagingMethodPAE::ClearTableEntryFlags(uint64_t* entry, uint64_t flags)
187 {
188 	return atomic_and64((int64*)entry, ~flags);
189 }
190 
191 
192 /*static*/ inline uint64
193 X86PagingMethodPAE::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
194 {
195 	// ATM we only handle the uncacheable and write-through type explicitly. For
196 	// all other types we rely on the MTRRs to be set up correctly. Since we set
197 	// the default memory type to write-back and since the uncacheable type in
198 	// the PTE overrides any MTRR attribute (though, as per the specs, that is
199 	// not recommended for performance reasons), this reduces the work we
200 	// actually *have* to do with the MTRRs to setting the remaining types
201 	// (usually only write-combining for the frame buffer).
202 	switch (memoryType) {
203 		case B_MTR_UC:
204 			return X86_PAE_PTE_CACHING_DISABLED | X86_PAE_PTE_WRITE_THROUGH;
205 
206 		case B_MTR_WC:
207 			// X86_PTE_WRITE_THROUGH would be closer, but the combination with
208 			// MTRR WC is "implementation defined" for Pentium Pro/II.
209 			return 0;
210 
211 		case B_MTR_WT:
212 			return X86_PAE_PTE_WRITE_THROUGH;
213 
214 		case B_MTR_WP:
215 		case B_MTR_WB:
216 		default:
217 			return 0;
218 	}
219 }
220 
221 
222 #endif	// B_HAIKU_PHYSICAL_BITS == 64
223 
224 
225 #endif	// KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_METHOD_PAE_H
226