/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_X86_PAGING_32_BIT_X86_PAGING_METHOD_32_BIT_H
#define KERNEL_ARCH_X86_PAGING_32_BIT_X86_PAGING_METHOD_32_BIT_H


#include "paging/32bit/paging.h"
#include "paging/X86PagingMethod.h"
#include "paging/X86PagingStructures.h"


class TranslationMapPhysicalPageMapper;
class X86PhysicalPageMapper;


class X86PagingMethod32Bit final : public X86PagingMethod {
public:
                                X86PagingMethod32Bit();
    virtual                     ~X86PagingMethod32Bit();

    virtual status_t            Init(kernel_args* args,
                                    VMPhysicalPageMapper** _physicalPageMapper);
    virtual status_t            InitPostArea(kernel_args* args);

    virtual status_t            CreateTranslationMap(bool kernel,
                                    VMTranslationMap** _map);

    virtual status_t            MapEarly(kernel_args* args,
                                    addr_t virtualAddress,
                                    phys_addr_t physicalAddress,
                                    uint8 attributes,
                                    page_num_t (*get_free_page)(kernel_args*));

    virtual bool                IsKernelPageAccessible(addr_t virtualAddress,
                                    uint32 protection);

    inline page_table_entry*    PageHole() const
                                    { return fPageHole; }
    inline page_directory_entry* PageHolePageDir() const
                                    { return fPageHolePageDir; }
    inline uint32               KernelPhysicalPageDirectory() const
                                    { return fKernelPhysicalPageDirectory; }
    inline page_directory_entry* KernelVirtualPageDirectory() const
                                    { return fKernelVirtualPageDirectory; }
    inline X86PhysicalPageMapper* PhysicalPageMapper() const
                                    { return fPhysicalPageMapper; }
    inline TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
                                    { return fKernelPhysicalPageMapper; }

    static X86PagingMethod32Bit* Method();

    static void                 PutPageTableInPageDir(
                                    page_directory_entry* entry,
                                    phys_addr_t pgtablePhysical,
                                    uint32 attributes);
    static void                 PutPageTableEntryInTable(
                                    page_table_entry* entry,
                                    phys_addr_t physicalAddress,
                                    uint32 attributes, uint32 memoryType,
                                    bool globalPage);
    static page_table_entry     SetPageTableEntry(page_table_entry* entry,
                                    page_table_entry newEntry);
    static page_table_entry     SetPageTableEntryFlags(page_table_entry* entry,
                                    uint32 flags);
    static page_table_entry     TestAndSetPageTableEntry(
                                    page_table_entry* entry,
                                    page_table_entry newEntry,
                                    page_table_entry oldEntry);
    static page_table_entry     ClearPageTableEntry(page_table_entry* entry);
    static page_table_entry     ClearPageTableEntryFlags(
                                    page_table_entry* entry, uint32 flags);

    static uint32               MemoryTypeToPageTableEntryFlags(
                                    uint32 memoryType);

private:
            struct PhysicalPageSlotPool;
            friend struct PhysicalPageSlotPool;

private:
    inline int32                _GetInitialPoolCount();

    static void                 _EarlyPreparePageTables(
                                    page_table_entry* pageTables,
                                    addr_t address, size_t size);
    static status_t             _EarlyQuery(addr_t virtualAddress,
                                    phys_addr_t *_physicalAddress);

private:
            page_table_entry*   fPageHole;
            page_directory_entry* fPageHolePageDir;
            uint32              fKernelPhysicalPageDirectory;
            page_directory_entry* fKernelVirtualPageDirectory;

            X86PhysicalPageMapper* fPhysicalPageMapper;
            TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;
};


/*static*/ inline X86PagingMethod32Bit*
X86PagingMethod32Bit::Method()
{
    return static_cast<X86PagingMethod32Bit*>(gX86PagingMethod);
}


// Atomically replaces *entry with newEntry and returns the previous entry
// value.
/*static*/ inline page_table_entry
X86PagingMethod32Bit::SetPageTableEntry(page_table_entry* entry,
    page_table_entry newEntry)
{
    return atomic_get_and_set((int32*)entry, newEntry);
}


// Atomically ORs the given flags into *entry and returns the previous entry
// value.
/*static*/ inline page_table_entry
X86PagingMethod32Bit::SetPageTableEntryFlags(page_table_entry* entry,
    uint32 flags)
{
    return atomic_or((int32*)entry, flags);
}


// Atomically sets *entry to newEntry only if it still equals oldEntry
// (compare-and-swap); returns the entry value found before the operation.
/*static*/ inline page_table_entry
X86PagingMethod32Bit::TestAndSetPageTableEntry(page_table_entry* entry,
    page_table_entry newEntry, page_table_entry oldEntry)
{
    return atomic_test_and_set((int32*)entry, newEntry, oldEntry);
}


// Atomically clears *entry (sets it to 0) and returns the previous entry
// value.
/*static*/ inline page_table_entry
X86PagingMethod32Bit::ClearPageTableEntry(page_table_entry* entry)
{
    return SetPageTableEntry(entry, 0);
}


// Atomically clears the given flags in *entry and returns the previous entry
// value.
/*static*/ inline page_table_entry
X86PagingMethod32Bit::ClearPageTableEntryFlags(page_table_entry* entry,
    uint32 flags)
{
    return atomic_and((int32*)entry, ~flags);
}
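
// Illustrative sketch of how the atomic helpers above are typically combined
// when harvesting page state. The X86_PTE_ACCESSED/X86_PTE_DIRTY flag names
// are assumed to come from "paging/32bit/paging.h", and "entry" stands for a
// mapped page table entry obtained elsewhere:
//
//     page_table_entry oldEntry
//         = X86PagingMethod32Bit::ClearPageTableEntryFlags(entry,
//             X86_PTE_ACCESSED | X86_PTE_DIRTY);
//     if ((oldEntry & X86_PTE_DIRTY) != 0) {
//         // the page was written to since the flags were last cleared
//     }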


// Translates one of the B_*_MEMORY type constants into the caching-related
// page table entry flags (write-through, caching disabled, PAT).
/*static*/ inline uint32
X86PagingMethod32Bit::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
{
    switch (memoryType) {
        case B_UNCACHED_MEMORY:
            return X86_PTE_CACHING_DISABLED | X86_PTE_WRITE_THROUGH;

        case B_WRITE_COMBINING_MEMORY:
            if (x86_use_pat())
                return X86_PTE_PAT;

            // X86_PTE_WRITE_THROUGH would be closer, but the combination with
            // MTRR WC is "implementation defined" for Pentium Pro/II.
            return 0;

        case B_WRITE_THROUGH_MEMORY:
            return X86_PTE_WRITE_THROUGH;

        case B_WRITE_PROTECTED_MEMORY:
        case B_WRITE_BACK_MEMORY:
        default:
            return 0;
    }
}
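
// Illustrative sketch only (not the definition of PutPageTableEntryInTable()):
// a page table entry is assembled by combining the page frame address with
// protection flags and the result of MemoryTypeToPageTableEntryFlags(). The
// X86_PTE_* names are assumed to come from "paging/32bit/paging.h".
//
//     page_table_entry newEntry = (physicalAddress & X86_PTE_ADDRESS_MASK)
//         | X86_PTE_PRESENT | X86_PTE_WRITABLE
//         | X86PagingMethod32Bit::MemoryTypeToPageTableEntryFlags(
//             B_WRITE_BACK_MEMORY);
//     X86PagingMethod32Bit::SetPageTableEntry(entry, newEntry);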


#endif	// KERNEL_ARCH_X86_PAGING_32_BIT_X86_PAGING_METHOD_32_BIT_H