/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_METHOD_PAE_H
#define KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_METHOD_PAE_H


#include <KernelExport.h>

#include <lock.h>
#include <vm/vm_types.h>

#include "paging/pae/paging.h"
#include "paging/X86PagingMethod.h"
#include "paging/X86PagingStructures.h"


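// PAE is only compiled in when the kernel is configured for 64-bit physical
// addresses; its whole point is addressing physical memory beyond 4 GB.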
#if B_HAIKU_PHYSICAL_BITS == 64


class TranslationMapPhysicalPageMapper;
class X86PhysicalPageMapper;


class X86PagingMethodPAE : public X86PagingMethod {
public:
								X86PagingMethodPAE();
	virtual						~X86PagingMethodPAE();

	virtual	status_t			Init(kernel_args* args,
									VMPhysicalPageMapper** _physicalPageMapper);
	virtual	status_t			InitPostArea(kernel_args* args);

	virtual	status_t			CreateTranslationMap(bool kernel,
									VMTranslationMap** _map);

	virtual	status_t			MapEarly(kernel_args* args,
									addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint8 attributes,
									page_num_t (*get_free_page)(kernel_args*));

	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
									uint32 protection);

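	// Allocate/free a physical page that must reside below 4 GB, e.g. for a
	// page directory pointer table: in PAE mode (without long mode) CR3 can
	// only hold a 32-bit physical address.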
			void*				Allocate32BitPage(
									phys_addr_t& _physicalAddress,
									void*& _handle);
			void				Free32BitPage(void* address,
									phys_addr_t physicalAddress, void* handle);

	inline	X86PhysicalPageMapper* PhysicalPageMapper() const
									{ return fPhysicalPageMapper; }
	inline	TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
									{ return fKernelPhysicalPageMapper; }

	inline	pae_page_directory_pointer_table_entry*
									KernelVirtualPageDirPointerTable() const;
	inline	phys_addr_t			KernelPhysicalPageDirPointerTable() const;
	inline	pae_page_directory_entry* const* KernelVirtualPageDirs() const
									{ return fKernelVirtualPageDirs; }
	inline	const phys_addr_t*	KernelPhysicalPageDirs() const
									{ return fKernelPhysicalPageDirs; }

	static	X86PagingMethodPAE*	Method();

	static	void				PutPageTableInPageDir(
									pae_page_directory_entry* entry,
									phys_addr_t physicalTable,
									uint32 attributes);
	static	void				PutPageTableEntryInTable(
									pae_page_table_entry* entry,
									phys_addr_t physicalAddress,
									uint32 attributes, uint32 memoryType,
									bool globalPage);
	static	pae_page_table_entry SetPageTableEntry(pae_page_table_entry* entry,
									pae_page_table_entry newEntry);
	static	pae_page_table_entry SetPageTableEntryFlags(
									pae_page_table_entry* entry, uint64 flags);
	static	pae_page_table_entry TestAndSetPageTableEntry(
									pae_page_table_entry* entry,
									pae_page_table_entry newEntry,
									pae_page_table_entry oldEntry);
	static	pae_page_table_entry ClearPageTableEntry(
									pae_page_table_entry* entry);
	static	pae_page_table_entry ClearPageTableEntryFlags(
									pae_page_table_entry* entry, uint64 flags);

	static	pae_page_directory_entry* PageDirEntryForAddress(
									pae_page_directory_entry* const* pdpt,
									addr_t address);

	static	uint64				MemoryTypeToPageTableEntryFlags(
									uint32 memoryType);

private:
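			// ToPAESwitcher performs the one-time switch from the boot
			// loader's 32-bit paging setup to PAE; PhysicalPageSlotPool
			// manages virtual address slots used to temporarily map
			// physical pages.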
			struct ToPAESwitcher;
			struct PhysicalPageSlotPool;
			friend struct PhysicalPageSlotPool;

private:
	inline	int32				_GetInitialPoolCount();

			bool				_EarlyQuery(addr_t virtualAddress,
									phys_addr_t* _physicalAddress);
			pae_page_table_entry* _EarlyGetPageTable(phys_addr_t address);

private:
			X86PhysicalPageMapper* fPhysicalPageMapper;
			TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;

			void*				fEarlyPageStructures;
			size_t				fEarlyPageStructuresSize;
			pae_page_directory_pointer_table_entry*
									fKernelVirtualPageDirPointerTable;
			phys_addr_t			fKernelPhysicalPageDirPointerTable;
			pae_page_directory_entry* fKernelVirtualPageDirs[4];
			phys_addr_t			fKernelPhysicalPageDirs[4];
			addr_t				fFreeVirtualSlot;
			pae_page_table_entry* fFreeVirtualSlotPTE;

			mutex				fFreePagesLock;
			vm_page*			fFreePages;
			page_num_t			fFreePagesCount;
};
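
// A sketch of how the static helpers above compose (illustrative only --
// "address", "entry", "physicalAddress", "attributes" and "memoryType" are
// placeholders; the real caller is the PAE translation map implementation):
//
//	pae_page_directory_entry* pde
//		= X86PagingMethodPAE::PageDirEntryForAddress(
//			X86PagingMethodPAE::Method()->KernelVirtualPageDirs(), address);
//	// ... ensure a page table is mapped at *pde, locate the PTE, then:
//	X86PagingMethodPAE::PutPageTableEntryInTable(entry, physicalAddress,
//		attributes, memoryType, true);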


pae_page_directory_pointer_table_entry*
X86PagingMethodPAE::KernelVirtualPageDirPointerTable() const
{
	return fKernelVirtualPageDirPointerTable;
}


phys_addr_t
X86PagingMethodPAE::KernelPhysicalPageDirPointerTable() const
{
	return fKernelPhysicalPageDirPointerTable;
}


/*static*/ inline X86PagingMethodPAE*
X86PagingMethodPAE::Method()
{
	return static_cast<X86PagingMethodPAE*>(gX86PagingMethod);
}


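// Returns a pointer to the page directory entry covering the given virtual
// address. Under PAE a 32-bit virtual address decomposes into a 2-bit page
// directory pointer table index (bits 31-30), a 9-bit page directory index
// (bits 29-21), a 9-bit page table index (bits 20-12), and a 12-bit page
// offset. "address >> 30" thus selects one of the four page directories, and
// "(address / kPAEPageTableRange) % kPAEPageDirEntryCount" is the index of
// the 2 MB range within it.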
/*static*/ inline pae_page_directory_entry*
X86PagingMethodPAE::PageDirEntryForAddress(
	pae_page_directory_entry* const* pdpt, addr_t address)
{
	return pdpt[address >> 30]
		+ (address / kPAEPageTableRange) % kPAEPageDirEntryCount;
}


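// The PTE accessors below go through 64-bit atomics: PAE entries are 64 bits
// wide while this code runs on 32-bit x86, and the CPU can concurrently set
// the accessed/dirty bits of an entry, so a plain read-modify-write of the
// two 32-bit halves could race or tear.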
/*static*/ inline pae_page_table_entry
X86PagingMethodPAE::SetPageTableEntry(pae_page_table_entry* entry,
	pae_page_table_entry newEntry)
{
	return atomic_get_and_set64((int64*)entry, newEntry);
}


/*static*/ inline pae_page_table_entry
X86PagingMethodPAE::SetPageTableEntryFlags(pae_page_table_entry* entry,
	uint64 flags)
{
	return atomic_or64((int64*)entry, flags);
}


/*static*/ inline pae_page_table_entry
X86PagingMethodPAE::TestAndSetPageTableEntry(pae_page_table_entry* entry,
	pae_page_table_entry newEntry, pae_page_table_entry oldEntry)
{
	return atomic_test_and_set64((int64*)entry, newEntry, oldEntry);
}


/*static*/ inline pae_page_table_entry
X86PagingMethodPAE::ClearPageTableEntry(pae_page_table_entry* entry)
{
	return SetPageTableEntry(entry, 0);
}


/*static*/ inline pae_page_table_entry
X86PagingMethodPAE::ClearPageTableEntryFlags(pae_page_table_entry* entry,
	uint64 flags)
{
	return atomic_and64((int64*)entry, ~flags);
}


/*static*/ inline uint64
X86PagingMethodPAE::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
{
	// At the moment we only handle the uncacheable and write-through types
	// explicitly; for all other types we rely on the MTRRs being set up
	// correctly. Since we set the default memory type to write-back, and
	// since the uncacheable type in the PTE overrides any MTRR attribute
	// (though, per the specs, that is not recommended for performance
	// reasons), this reduces the work we actually *have* to do with the
	// MTRRs to setting the remaining types (usually only write-combining
	// for the frame buffer).
	switch (memoryType) {
		case B_MTR_UC:
			return X86_PAE_PTE_CACHING_DISABLED | X86_PAE_PTE_WRITE_THROUGH;

		case B_MTR_WC:
			// X86_PAE_PTE_WRITE_THROUGH would be closer, but the combination
			// with MTRR WC is "implementation defined" for Pentium Pro/II.
			return 0;

		case B_MTR_WT:
			return X86_PAE_PTE_WRITE_THROUGH;

		case B_MTR_WP:
		case B_MTR_WB:
		default:
			return 0;
	}
}
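// For example, a frame buffer mapped with B_MTR_WC gets no caching flags in
// its PTEs here; its write-combining behavior then comes entirely from the
// MTRR covering the range.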


#endif	// B_HAIKU_PHYSICAL_BITS == 64


#endif	// KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_METHOD_PAE_H