/*
 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H
#define KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H


#include <atomic>

#include <KernelExport.h>

#include <lock.h>
#include <vm/vm_types.h>

#include "paging/64bit/paging.h"
#include "paging/X86PagingMethod.h"
#include "paging/X86PagingStructures.h"


class TranslationMapPhysicalPageMapper;
class X86PhysicalPageMapper;
struct vm_page_reservation;


class X86PagingMethod64Bit final : public X86PagingMethod {
public:
	X86PagingMethod64Bit();
	virtual ~X86PagingMethod64Bit();

	virtual status_t Init(kernel_args* args,
		VMPhysicalPageMapper** _physicalPageMapper);
	virtual status_t InitPostArea(kernel_args* args);

	virtual status_t CreateTranslationMap(bool kernel,
		VMTranslationMap** _map);

	virtual status_t MapEarly(kernel_args* args,
		addr_t virtualAddress, phys_addr_t physicalAddress,
		uint8 attributes,
		page_num_t (*get_free_page)(kernel_args*));

	virtual bool IsKernelPageAccessible(addr_t virtualAddress,
		uint32 protection);

	inline X86PhysicalPageMapper* PhysicalPageMapper() const
		{ return fPhysicalPageMapper; }
	inline TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
		{ return fKernelPhysicalPageMapper; }

	inline uint64* KernelVirtualPML4() const
		{ return fKernelVirtualPML4; }
	inline phys_addr_t KernelPhysicalPML4() const
		{ return fKernelPhysicalPML4; }

	static X86PagingMethod64Bit* Method();

	static uint64* PageDirectoryForAddress(uint64* virtualPML4,
		addr_t virtualAddress, bool isKernel, bool allocateTables,
		vm_page_reservation* reservation,
		TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount);
	static uint64* PageDirectoryEntryForAddress(uint64* virtualPML4,
		addr_t virtualAddress, bool isKernel, bool allocateTables,
		vm_page_reservation* reservation,
		TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount);
	static uint64* PageTableForAddress(uint64* virtualPML4,
		addr_t virtualAddress, bool isKernel, bool allocateTables,
		vm_page_reservation* reservation,
		TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount);
	static uint64* PageTableEntryForAddress(uint64* virtualPML4,
		addr_t virtualAddress, bool isKernel, bool allocateTables,
		vm_page_reservation* reservation,
		TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount);

	static void PutPageTableEntryInTable(uint64* entry,
		phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
		bool globalPage);
	static void SetTableEntry(uint64_t* entry, uint64_t newEntry);
	static uint64_t SetTableEntryFlags(uint64_t* entryPointer, uint64_t flags);
	static uint64 TestAndSetTableEntry(uint64* entry, uint64 newEntry,
		uint64 oldEntry);
	static uint64_t ClearTableEntry(uint64_t* entryPointer);
	static uint64_t ClearTableEntryFlags(uint64_t* entryPointer,
		uint64_t flags);

	static uint64 MemoryTypeToPageTableEntryFlags(uint32 memoryType);

private:
	static void _EnableExecutionDisable(void* dummy, int cpu);

	phys_addr_t fKernelPhysicalPML4;
	uint64* fKernelVirtualPML4;

	X86PhysicalPageMapper* fPhysicalPageMapper;
	TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;
};


// The helpers below access page table entries through std::atomic<uint64_t>
// references obtained via reinterpret_cast, which is only sound if the atomic
// type shares the representation of a plain uint64_t.
static_assert(sizeof(std::atomic<uint64_t>) == sizeof(uint64_t),
	"Non-trivial representation of atomic uint64_t.");


/*static*/ inline X86PagingMethod64Bit*
X86PagingMethod64Bit::Method()
{
	return static_cast<X86PagingMethod64Bit*>(gX86PagingMethod);
}


// Atomically replaces the entry with newEntry (relaxed store).
/*static*/ inline void
X86PagingMethod64Bit::SetTableEntry(uint64_t* entryPointer, uint64_t newEntry)
{
	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
	entry.store(newEntry, std::memory_order_relaxed);
}


// Atomically ORs the given flags into the entry and returns the previous
// entry value.
/*static*/ inline uint64_t
X86PagingMethod64Bit::SetTableEntryFlags(uint64_t* entryPointer, uint64_t flags)
{
	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
	return entry.fetch_or(flags);
}


// Atomically sets the entry to newEntry if it currently equals oldEntry;
// returns the previous entry value.
/*static*/ inline uint64
X86PagingMethod64Bit::TestAndSetTableEntry(uint64* entry, uint64 newEntry,
	uint64 oldEntry)
{
	return atomic_test_and_set64((int64*)entry, newEntry, oldEntry);
}


// Atomically clears the entry and returns its previous value.
/*static*/ inline uint64_t
X86PagingMethod64Bit::ClearTableEntry(uint64_t* entryPointer)
{
	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
	return entry.exchange(0);
}


// Atomically clears the given flags and returns the previous entry value.
/*static*/ inline uint64_t
X86PagingMethod64Bit::ClearTableEntryFlags(uint64_t* entryPointer,
	uint64_t flags)
{
	auto& entry = *reinterpret_cast<std::atomic<uint64_t>*>(entryPointer);
	return entry.fetch_and(~flags);
}

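// Illustrative usage sketch, not part of the original interface: a caller
// that wants to reset and inspect the referenced/modified state of a mapping
// can clear both flags in one atomic step. This assumes the
// X86_64_PTE_ACCESSED and X86_64_PTE_DIRTY constants from
// "paging/64bit/paging.h":
//
//	uint64_t oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(entry,
//		X86_64_PTE_ACCESSED | X86_64_PTE_DIRTY);
//	if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
//		// The page was referenced since the flags were last cleared.
//	}
//
// Since each of the helpers above is a single atomic read-modify-write that
// returns the previous entry, such sequences need no lock around the table
// access.
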
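// MemoryTypeToPageTableEntryFlags() maps a Haiku memory type constant to PTE
// caching bits. For example, B_MTR_UC yields
// X86_64_PTE_CACHING_DISABLED | X86_64_PTE_WRITE_THROUGH (PCD | PWT), which
// the CPU treats as uncacheable regardless of the MTRR type covering the
// physical range.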
/*static*/ inline uint64
X86PagingMethod64Bit::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
{
	// At the moment we only handle the uncacheable and write-through types
	// explicitly. For all other types we rely on the MTRRs to be set up
	// correctly. Since we set the default memory type to write-back and since
	// the uncacheable type in the PTE overrides any MTRR attribute (though,
	// as per the specs, that is not recommended for performance reasons),
	// this reduces the work we actually *have* to do with the MTRRs to
	// setting the remaining types (usually only write-combining for the
	// frame buffer).
	switch (memoryType) {
		case B_MTR_UC:
			return X86_64_PTE_CACHING_DISABLED | X86_64_PTE_WRITE_THROUGH;

		case B_MTR_WC:
			// X86_PTE_WRITE_THROUGH would be closer, but the combination with
			// MTRR WC is "implementation defined" for Pentium Pro/II.
			return 0;

		case B_MTR_WT:
			return X86_64_PTE_WRITE_THROUGH;

		case B_MTR_WP:
		case B_MTR_WB:
		default:
			return 0;
	}
}


#endif	// KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H