/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_METHOD_PAE_H
#define KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_METHOD_PAE_H


#include <KernelExport.h>

#include <lock.h>
#include <vm/vm_types.h>

#include "paging/pae/paging.h"
#include "paging/X86PagingMethod.h"
#include "paging/X86PagingStructures.h"


#if B_HAIKU_PHYSICAL_BITS == 64


class TranslationMapPhysicalPageMapper;
class X86PhysicalPageMapper;


class X86PagingMethodPAE final : public X86PagingMethod {
public:
								X86PagingMethodPAE();
	virtual						~X86PagingMethodPAE();

	virtual	status_t			Init(kernel_args* args,
									VMPhysicalPageMapper** _physicalPageMapper);
	virtual	status_t			InitPostArea(kernel_args* args);

	virtual	status_t			CreateTranslationMap(bool kernel,
									VMTranslationMap** _map);

	virtual	status_t			MapEarly(kernel_args* args,
									addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint8 attributes,
									page_num_t (*get_free_page)(kernel_args*));

	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
									uint32 protection);

			void*				Allocate32BitPage(
									phys_addr_t& _physicalAddress,
									void*& _handle);
			void				Free32BitPage(void* address,
									phys_addr_t physicalAddress, void* handle);

	inline	X86PhysicalPageMapper* PhysicalPageMapper() const
									{ return fPhysicalPageMapper; }
	inline	TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
									{ return fKernelPhysicalPageMapper; }

	inline	pae_page_directory_pointer_table_entry*
									KernelVirtualPageDirPointerTable() const;
	inline	phys_addr_t			KernelPhysicalPageDirPointerTable() const;
	inline	pae_page_directory_entry* const* KernelVirtualPageDirs() const
									{ return fKernelVirtualPageDirs; }
	inline	const phys_addr_t*	KernelPhysicalPageDirs() const
									{ return fKernelPhysicalPageDirs; }

	static	X86PagingMethodPAE*	Method();

	static	void				PutPageTableInPageDir(
									pae_page_directory_entry* entry,
									phys_addr_t physicalTable,
									uint32 attributes);
	static	void				PutPageTableEntryInTable(
									pae_page_table_entry* entry,
									phys_addr_t physicalAddress,
									uint32 attributes, uint32 memoryType,
									bool globalPage);
	static	uint64_t			SetTableEntry(uint64_t* entry,
									uint64_t newEntry);
	static	uint64_t			SetTableEntryFlags(uint64_t* entry,
									uint64_t flags);
	static	uint64_t			TestAndSetTableEntry(uint64_t* entry,
									uint64_t newEntry, uint64_t oldEntry);
	static	uint64_t			ClearTableEntry(uint64_t* entry);
	static	uint64_t			ClearTableEntryFlags(uint64_t* entry,
									uint64_t flags);

	static	pae_page_directory_entry* PageDirEntryForAddress(
									pae_page_directory_entry* const* pdpt,
									addr_t address);

	static	uint64				MemoryTypeToPageTableEntryFlags(
									uint32 memoryType);

private:
			struct ToPAESwitcher;
			struct PhysicalPageSlotPool;
			friend struct PhysicalPageSlotPool;

private:
	inline	int32				_GetInitialPoolCount();

			bool				_EarlyQuery(addr_t virtualAddress,
									phys_addr_t* _physicalAddress);
			pae_page_table_entry* _EarlyGetPageTable(phys_addr_t address);

private:
			X86PhysicalPageMapper* fPhysicalPageMapper;
			TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;

			void*				fEarlyPageStructures;
			size_t				fEarlyPageStructuresSize;
			pae_page_directory_pointer_table_entry*
									fKernelVirtualPageDirPointerTable;
			phys_addr_t			fKernelPhysicalPageDirPointerTable;
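			// The four PAE page directories referenced by the kernel's
			// PDPT (one per 1 GiB region), kept both as mapped virtual
			// pointers and as physical addresses.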
			pae_page_directory_entry* fKernelVirtualPageDirs[4];
			phys_addr_t			fKernelPhysicalPageDirs[4];
			addr_t				fFreeVirtualSlot;
			pae_page_table_entry* fFreeVirtualSlotPTE;

			mutex				fFreePagesLock;
			vm_page*			fFreePages;
			page_num_t			fFreePagesCount;
};


pae_page_directory_pointer_table_entry*
X86PagingMethodPAE::KernelVirtualPageDirPointerTable() const
{
	return fKernelVirtualPageDirPointerTable;
}


phys_addr_t
X86PagingMethodPAE::KernelPhysicalPageDirPointerTable() const
{
	return fKernelPhysicalPageDirPointerTable;
}


/*static*/ inline X86PagingMethodPAE*
X86PagingMethodPAE::Method()
{
	return static_cast<X86PagingMethodPAE*>(gX86PagingMethod);
}


/*static*/ inline pae_page_directory_entry*
X86PagingMethodPAE::PageDirEntryForAddress(
	pae_page_directory_entry* const* pdpt, addr_t address)
{
	return pdpt[address >> 30]
		+ (address / kPAEPageTableRange) % kPAEPageDirEntryCount;
}


/*static*/ inline uint64_t
X86PagingMethodPAE::SetTableEntry(uint64_t* entry, uint64_t newEntry)
{
	return atomic_get_and_set64((int64*)entry, newEntry);
}


/*static*/ inline uint64_t
X86PagingMethodPAE::SetTableEntryFlags(uint64_t* entry, uint64_t flags)
{
	return atomic_or64((int64*)entry, flags);
}


/*static*/ inline uint64_t
X86PagingMethodPAE::TestAndSetTableEntry(uint64_t* entry,
	uint64_t newEntry, uint64_t oldEntry)
{
	return atomic_test_and_set64((int64*)entry, newEntry, oldEntry);
}


/*static*/ inline uint64_t
X86PagingMethodPAE::ClearTableEntry(uint64_t* entry)
{
	return SetTableEntry(entry, 0);
}


/*static*/ inline uint64_t
X86PagingMethodPAE::ClearTableEntryFlags(uint64_t* entry, uint64_t flags)
{
	return atomic_and64((int64*)entry, ~flags);
}


/*static*/ inline uint64
X86PagingMethodPAE::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
{
	switch (memoryType) {
		case B_UNCACHED_MEMORY:
			return X86_PAE_PTE_CACHING_DISABLED | X86_PAE_PTE_WRITE_THROUGH;

		case B_WRITE_COMBINING_MEMORY:
			if (x86_use_pat())
				return X86_PAE_PTE_PAT;

			// X86_PTE_WRITE_THROUGH would be closer, but the combination
			// with MTRR WC is "implementation defined" for Pentium Pro/II.
			return 0;

		case B_WRITE_THROUGH_MEMORY:
			return X86_PAE_PTE_WRITE_THROUGH;

		case B_WRITE_PROTECTED_MEMORY:
		case B_WRITE_BACK_MEMORY:
		default:
			return 0;
	}
}


#endif	// B_HAIKU_PHYSICAL_BITS == 64


#endif	// KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_METHOD_PAE_H