xref: /haiku/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.h (revision 7749d0bb0c358a3279b1b9cc76d8376e900130a5)
1 /*
2  * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Distributed under the terms of the MIT License.
4  */
5 #ifndef KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_METHOD_PAE_H
6 #define KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_METHOD_PAE_H
7 
8 
9 #include <KernelExport.h>
10 
11 #include <lock.h>
12 #include <vm/vm_types.h>
13 
14 #include "paging/pae/paging.h"
15 #include "paging/X86PagingMethod.h"
16 #include "paging/X86PagingStructures.h"
17 
18 
19 #if B_HAIKU_PHYSICAL_BITS == 64
20 
21 
22 class TranslationMapPhysicalPageMapper;
23 class X86PhysicalPageMapper;
24 
25 
// PAE (Physical Address Extension) flavor of the x86 paging method. Only
// built when the kernel is configured for 64 bit physical addresses
// (B_HAIKU_PHYSICAL_BITS == 64, see the surrounding guard).
class X86PagingMethodPAE : public X86PagingMethod {
public:
								X86PagingMethodPAE();
	virtual						~X86PagingMethodPAE();

	// X86PagingMethod interface
	virtual	status_t			Init(kernel_args* args,
									VMPhysicalPageMapper** _physicalPageMapper);
	virtual	status_t			InitPostArea(kernel_args* args);

	virtual	status_t			CreateTranslationMap(bool kernel,
									VMTranslationMap** _map);

	// Maps a page before the VM is fully up; pages for paging structures
	// are obtained via the supplied get_free_page() callback.
	virtual	status_t			MapEarly(kernel_args* args,
									addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint8 attributes,
									phys_addr_t (*get_free_page)(kernel_args*));

	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
									uint32 protection);

	// Allocation of pages usable with 32 bit physical addresses
	// (name suggests the physical address fits 32 bits — confirm in the
	// implementation). The opaque _handle must be passed back to
	// Free32BitPage().
			void*				Allocate32BitPage(
									phys_addr_t& _physicalAddress,
									void*& _handle);
			void				Free32BitPage(void* address,
									phys_addr_t physicalAddress, void* handle);

	inline	X86PhysicalPageMapper* PhysicalPageMapper() const
									{ return fPhysicalPageMapper; }
	inline	TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
									{ return fKernelPhysicalPageMapper; }

	// Accessors for the kernel's paging structures (PDPT and the four
	// page directories it points to).
	inline	pae_page_directory_pointer_table_entry*
									KernelVirtualPageDirPointerTable() const;
	inline	phys_addr_t			KernelPhysicalPageDirPointerTable() const;
	inline	pae_page_directory_entry* const* KernelVirtualPageDirs() const
									{ return fKernelVirtualPageDirs; }
	inline	const phys_addr_t*	KernelPhysicalPageDirs() const
									{ return fKernelPhysicalPageDirs; }

	// Returns the active paging method, downcast to this class.
	static	X86PagingMethodPAE*	Method();

	// Helpers for manipulating PAE paging structure entries. The 64 bit
	// entries are updated via the kernel's atomic_*64() primitives.
	static	void				PutPageTableInPageDir(
									pae_page_directory_entry* entry,
									phys_addr_t physicalTable,
									uint32 attributes);
	static	void				PutPageTableEntryInTable(
									pae_page_table_entry* entry,
									phys_addr_t physicalAddress,
									uint32 attributes, uint32 memoryType,
									bool globalPage);
	static	pae_page_table_entry SetPageTableEntry(pae_page_table_entry* entry,
									pae_page_table_entry newEntry);
	static	pae_page_table_entry SetPageTableEntryFlags(
									pae_page_table_entry* entry, uint64 flags);
	static	pae_page_table_entry TestAndSetPageTableEntry(
									pae_page_table_entry* entry,
									pae_page_table_entry newEntry,
									pae_page_table_entry oldEntry);
	static	pae_page_table_entry ClearPageTableEntry(
									pae_page_table_entry* entry);
	static	pae_page_table_entry ClearPageTableEntryFlags(
									pae_page_table_entry* entry, uint64 flags);

	static	pae_page_directory_entry* PageDirEntryForAddress(
									pae_page_directory_entry* const* pdpt,
									addr_t address);

	static	uint64				MemoryTypeToPageTableEntryFlags(
									uint32 memoryType);

private:
			struct ToPAESwitcher;
			struct PhysicalPageSlotPool;
			friend struct PhysicalPageSlotPool;

private:
			bool				_EarlyQuery(addr_t virtualAddress,
									phys_addr_t* _physicalAddress);
			pae_page_table_entry* _EarlyGetPageTable(phys_addr_t address);

private:
			X86PhysicalPageMapper* fPhysicalPageMapper;
			TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;

			// Structures set up during early (pre-VM) initialization.
			void*				fEarlyPageStructures;
			size_t				fEarlyPageStructuresSize;
			pae_page_directory_pointer_table_entry*
									fKernelVirtualPageDirPointerTable;
			phys_addr_t			fKernelPhysicalPageDirPointerTable;
			// A PAE PDPT has four entries, hence four page directories.
			pae_page_directory_entry* fKernelVirtualPageDirs[4];
			phys_addr_t			fKernelPhysicalPageDirs[4];
			addr_t				fFreeVirtualSlot;
			pae_page_table_entry* fFreeVirtualSlotPTE;

			// Pool of free pages, guarded by fFreePagesLock; presumably
			// backing Allocate32BitPage() — confirm in the implementation.
			mutex				fFreePagesLock;
			vm_page*			fFreePages;
			page_num_t			fFreePagesCount;
};
125 
126 
pae_page_directory_pointer_table_entry*
X86PagingMethodPAE::KernelVirtualPageDirPointerTable() const
{
	// Kernel-mapped (virtual) address of the kernel's PDPT.
	return fKernelVirtualPageDirPointerTable;
}
132 
133 
phys_addr_t
X86PagingMethodPAE::KernelPhysicalPageDirPointerTable() const
{
	// Physical address of the kernel's PDPT (the value loadable into CR3).
	return fKernelPhysicalPageDirPointerTable;
}
139 
140 
/*static*/ inline X86PagingMethodPAE*
X86PagingMethodPAE::Method()
{
	// Unchecked downcast of the global paging method — callers must only
	// use this while the PAE method is the active one.
	return static_cast<X86PagingMethodPAE*>(gX86PagingMethod);
}
146 
147 
148 /*static*/ inline pae_page_directory_entry*
149 X86PagingMethodPAE::PageDirEntryForAddress(
150 	pae_page_directory_entry* const* pdpt, addr_t address)
151 {
152 	return pdpt[address >> 30]
153 		+ (address / kPAEPageTableRange) % kPAEPageDirEntryCount;
154 }
155 
156 
/*static*/ inline pae_page_table_entry
X86PagingMethodPAE::SetPageTableEntry(pae_page_table_entry* entry,
	pae_page_table_entry newEntry)
{
	// Atomically replaces *entry with newEntry, returning atomic_set64()'s
	// result (the previous entry value).
	return atomic_set64((int64*)entry, newEntry);
}
163 
164 
/*static*/ inline pae_page_table_entry
X86PagingMethodPAE::SetPageTableEntryFlags(pae_page_table_entry* entry,
	uint64 flags)
{
	// Atomically ORs the given flags into *entry, returning the previous
	// entry value.
	return atomic_or64((int64*)entry, flags);
}
171 
172 
/*static*/ inline pae_page_table_entry
X86PagingMethodPAE::TestAndSetPageTableEntry(pae_page_table_entry* entry,
	pae_page_table_entry newEntry, pae_page_table_entry oldEntry)
{
	// Compare-and-swap: writes newEntry only if *entry equals oldEntry;
	// returns the previous entry value either way.
	return atomic_test_and_set64((int64*)entry, newEntry, oldEntry);
}
179 
180 
/*static*/ inline pae_page_table_entry
X86PagingMethodPAE::ClearPageTableEntry(pae_page_table_entry* entry)
{
	// Atomically zeroes the entry, returning its previous value.
	return SetPageTableEntry(entry, 0);
}
186 
187 
/*static*/ inline pae_page_table_entry
X86PagingMethodPAE::ClearPageTableEntryFlags(pae_page_table_entry* entry,
	uint64 flags)
{
	// Atomically masks the given flags out of *entry, returning the
	// previous entry value.
	return atomic_and64((int64*)entry, ~flags);
}
194 
195 
196 /*static*/ inline uint64
197 X86PagingMethodPAE::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
198 {
199 	// ATM we only handle the uncacheable and write-through type explicitly. For
200 	// all other types we rely on the MTRRs to be set up correctly. Since we set
201 	// the default memory type to write-back and since the uncacheable type in
202 	// the PTE overrides any MTRR attribute (though, as per the specs, that is
203 	// not recommended for performance reasons), this reduces the work we
204 	// actually *have* to do with the MTRRs to setting the remaining types
205 	// (usually only write-combining for the frame buffer).
206 	switch (memoryType) {
207 		case B_MTR_UC:
208 			return X86_PAE_PTE_CACHING_DISABLED | X86_PAE_PTE_WRITE_THROUGH;
209 
210 		case B_MTR_WC:
211 			// X86_PTE_WRITE_THROUGH would be closer, but the combination with
212 			// MTRR WC is "implementation defined" for Pentium Pro/II.
213 			return 0;
214 
215 		case B_MTR_WT:
216 			return X86_PAE_PTE_WRITE_THROUGH;
217 
218 		case B_MTR_WP:
219 		case B_MTR_WB:
220 		default:
221 			return 0;
222 	}
223 }
224 
225 
226 #endif	// B_HAIKU_PHYSICAL_BITS == 64
227 
228 
229 #endif	// KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_METHOD_PAE_H
230