xref: /haiku/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.cpp (revision 225b6382637a7346d5378ee45a6581b4e2616055)
/*
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/64bit/X86PagingMethod64Bit.h"

#include <stdlib.h>
#include <string.h>

#include <boot/kernel_args.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>

#include "paging/64bit/X86PagingStructures64Bit.h"
#include "paging/64bit/X86VMTranslationMap64Bit.h"
#include "paging/x86_physical_page_mapper.h"
#include "paging/x86_physical_page_mapper_mapped.h"


//#define TRACE_X86_PAGING_METHOD_64BIT
#ifdef TRACE_X86_PAGING_METHOD_64BIT
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


// #pragma mark - X86PagingMethod64Bit


X86PagingMethod64Bit::X86PagingMethod64Bit()
	:
	fKernelPhysicalPML4(0),
	fKernelVirtualPML4(NULL),
	fPhysicalPageMapper(NULL),
	fKernelPhysicalPageMapper(NULL)
{
}


X86PagingMethod64Bit::~X86PagingMethod64Bit()
{
}


status_t
X86PagingMethod64Bit::Init(kernel_args* args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	fKernelPhysicalPML4 = args->arch_args.phys_pgdir;
	fKernelVirtualPML4 = (uint64*)(addr_t)args->arch_args.vir_pgdir;

	// If available, enable the NX bit (No eXecute).
	if (x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) {
		x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER)
			| IA32_MSR_EFER_NX);
	}

	// Ensure that the user half of the address space is clear. This removes
	// the temporary identity mapping made by the boot loader.
	memset(fKernelVirtualPML4, 0, sizeof(uint64) * 256);
	arch_cpu_global_TLB_invalidate();

	// Create the physical page mapper.
	mapped_physical_page_ops_init(args, fPhysicalPageMapper,
		fKernelPhysicalPageMapper);

	*_physicalPageMapper = fPhysicalPageMapper;
	return B_OK;
}
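
// Background note: the EFER write above turns on the CPU's No-eXecute
// support, which is what gives the X86_64_PTE_NOT_EXECUTABLE bit used by
// PutPageTableEntryInTable() (further below) its meaning. The same CPUID
// feature check guards both places, so NX page table entries are only
// created when the bit was actually enabled here.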


status_t
X86PagingMethod64Bit::InitPostArea(kernel_args* args)
{
	// Create an area covering the physical map area.
	void* address = (void*)KERNEL_PMAP_BASE;
	area_id area = vm_create_null_area(VMAddressSpace::KernelID(),
		"physical map area", &address, B_EXACT_ADDRESS,
		KERNEL_PMAP_SIZE, 0);
	if (area < B_OK)
		return area;

	// Create an area to represent the kernel PML4.
	area = create_area("kernel pml4", (void**)&fKernelVirtualPML4,
		B_EXACT_ADDRESS, B_PAGE_SIZE, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK)
		return area;

	return B_OK;
}


status_t
X86PagingMethod64Bit::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
{
	X86VMTranslationMap64Bit* map = new(std::nothrow) X86VMTranslationMap64Bit;
	if (map == NULL)
		return B_NO_MEMORY;

	status_t error = map->Init(kernel);
	if (error != B_OK) {
		delete map;
		return error;
	}

	*_map = map;
	return B_OK;
}


status_t
X86PagingMethod64Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
	phys_addr_t physicalAddress, uint8 attributes,
	page_num_t (*get_free_page)(kernel_args*))
{
	TRACE("X86PagingMethod64Bit::MapEarly(%#" B_PRIxADDR ", %#" B_PRIxPHYSADDR
		", %#" B_PRIx8 ")\n", virtualAddress, physicalAddress, attributes);

	// Get the PDPT. We should be mapping on an existing PDPT at this stage.
	uint64* pml4e = &fKernelVirtualPML4[VADDR_TO_PML4E(virtualAddress)];
	ASSERT((*pml4e & X86_64_PML4E_PRESENT) != 0);
	uint64* virtualPDPT = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
		*pml4e & X86_64_PML4E_ADDRESS_MASK);

	// Get the page directory.
	uint64* pdpte = &virtualPDPT[VADDR_TO_PDPTE(virtualAddress)];
	uint64* virtualPageDir;
	if ((*pdpte & X86_64_PDPTE_PRESENT) == 0) {
		phys_addr_t physicalPageDir = get_free_page(args) * B_PAGE_SIZE;

		TRACE("X86PagingMethod64Bit::MapEarly(): creating page directory for va"
			" %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n", virtualAddress,
			physicalPageDir);

		SetTableEntry(pdpte, (physicalPageDir & X86_64_PDPTE_ADDRESS_MASK)
			| X86_64_PDPTE_PRESENT
			| X86_64_PDPTE_WRITABLE
			| X86_64_PDPTE_USER);

		// Map it and zero it.
		virtualPageDir = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
			physicalPageDir);
		memset(virtualPageDir, 0, B_PAGE_SIZE);
	} else {
		virtualPageDir = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
			*pdpte & X86_64_PDPTE_ADDRESS_MASK);
	}

	// Get the page table.
	uint64* pde = &virtualPageDir[VADDR_TO_PDE(virtualAddress)];
	uint64* virtualPageTable;
	if ((*pde & X86_64_PDE_PRESENT) == 0) {
		phys_addr_t physicalPageTable = get_free_page(args) * B_PAGE_SIZE;

		TRACE("X86PagingMethod64Bit::MapEarly(): creating page table for va"
			" %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n", virtualAddress,
			physicalPageTable);

		SetTableEntry(pde, (physicalPageTable & X86_64_PDE_ADDRESS_MASK)
			| X86_64_PDE_PRESENT
			| X86_64_PDE_WRITABLE
			| X86_64_PDE_USER);

		// Map it and zero it.
		virtualPageTable = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
			physicalPageTable);
		memset(virtualPageTable, 0, B_PAGE_SIZE);
	} else {
		virtualPageTable = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
			*pde & X86_64_PDE_ADDRESS_MASK);
	}

	// The page table entry must not already be mapped.
	uint64* pte = &virtualPageTable[VADDR_TO_PTE(virtualAddress)];
	ASSERT_PRINT(
		(*pte & X86_64_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64,
		virtualAddress, *pte);

	// Fill in the table entry.
	PutPageTableEntryInTable(pte, physicalAddress, attributes, 0,
		IS_KERNEL_ADDRESS(virtualAddress));

	return B_OK;
}
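
// Illustrative sketch, not part of the original file (kept disabled): the
// VADDR_TO_PML4E/PDPTE/PDE/PTE macros used in MapEarly() each extract one
// 9-bit table index from the virtual address, following the standard x86-64
// 4-level, 4KB paging layout. The helper name and the dprintf() output
// format are made up for illustration only.
#if 0
static void
dump_paging_indices(addr_t virtualAddress)
{
	uint64 pml4Index = (virtualAddress >> 39) & 0x1ff;	// 512GB per entry
	uint64 pdptIndex = (virtualAddress >> 30) & 0x1ff;	// 1GB per entry
	uint64 pdIndex = (virtualAddress >> 21) & 0x1ff;	// 2MB per entry
	uint64 ptIndex = (virtualAddress >> 12) & 0x1ff;	// 4KB per entry

	// e.g. 0xffffffff80000000 -> PML4 511, PDPT 510, PD 0, PT 0
	dprintf("va %#" B_PRIxADDR ": PML4 %" B_PRIu64 ", PDPT %" B_PRIu64
		", PD %" B_PRIu64 ", PT %" B_PRIu64 "\n", virtualAddress,
		pml4Index, pdptIndex, pdIndex, ptIndex);
}
#endif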


bool
X86PagingMethod64Bit::IsKernelPageAccessible(addr_t virtualAddress,
	uint32 protection)
{
	return true;
}


/*!	Traverses down the paging structure hierarchy to find the page directory
	for a virtual address, allocating new tables if required.
*/
/*static*/ uint64*
X86PagingMethod64Bit::PageDirectoryForAddress(uint64* virtualPML4,
	addr_t virtualAddress, bool isKernel, bool allocateTables,
	vm_page_reservation* reservation,
	TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount)
{
	// Get the PDPT.
	uint64* pml4e = &virtualPML4[VADDR_TO_PML4E(virtualAddress)];
	if ((*pml4e & X86_64_PML4E_PRESENT) == 0) {
		if (!allocateTables)
			return NULL;

		// Allocate a new PDPT.
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		phys_addr_t physicalPDPT
			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("X86PagingMethod64Bit::PageDirectoryForAddress(): creating PDPT "
			"for va %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n", virtualAddress,
			physicalPDPT);

		SetTableEntry(pml4e, (physicalPDPT & X86_64_PML4E_ADDRESS_MASK)
			| X86_64_PML4E_PRESENT
			| X86_64_PML4E_WRITABLE
			| X86_64_PML4E_USER);

		mapCount++;
	}

	uint64* virtualPDPT = (uint64*)pageMapper->GetPageTableAt(
		*pml4e & X86_64_PML4E_ADDRESS_MASK);

	// Get the page directory.
	uint64* pdpte = &virtualPDPT[VADDR_TO_PDPTE(virtualAddress)];
	if ((*pdpte & X86_64_PDPTE_PRESENT) == 0) {
		if (!allocateTables)
			return NULL;

		// Allocate a new page directory.
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		phys_addr_t physicalPageDir
			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("X86PagingMethod64Bit::PageDirectoryForAddress(): creating page "
			"directory for va %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n",
			virtualAddress, physicalPageDir);

		SetTableEntry(pdpte, (physicalPageDir & X86_64_PDPTE_ADDRESS_MASK)
			| X86_64_PDPTE_PRESENT
			| X86_64_PDPTE_WRITABLE
			| X86_64_PDPTE_USER);

		mapCount++;
	}

	return (uint64*)pageMapper->GetPageTableAt(
		*pdpte & X86_64_PDPTE_ADDRESS_MASK);
}
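
// For orientation: each of these tables holds 512 64-bit entries, so one
// PML4 entry covers 512GB of address space, one PDPT entry 1GB, one page
// directory entry 2MB and one page table entry a single 4KB page. Missing
// intermediate tables are allocated from the caller's vm_page_reservation
// (pre-zeroed via VM_PAGE_ALLOC_CLEAR), and each newly created table is
// counted in mapCount.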


/*static*/ uint64*
X86PagingMethod64Bit::PageDirectoryEntryForAddress(uint64* virtualPML4,
	addr_t virtualAddress, bool isKernel, bool allocateTables,
	vm_page_reservation* reservation,
	TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount)
{
	uint64* virtualPageDirectory = PageDirectoryForAddress(virtualPML4,
		virtualAddress, isKernel, allocateTables, reservation, pageMapper,
		mapCount);
	if (virtualPageDirectory == NULL)
		return NULL;

	return &virtualPageDirectory[VADDR_TO_PDE(virtualAddress)];
}


/*!	Traverses down the paging structure hierarchy to find the page table for a
	virtual address, allocating new tables if required.
*/
/*static*/ uint64*
X86PagingMethod64Bit::PageTableForAddress(uint64* virtualPML4,
	addr_t virtualAddress, bool isKernel, bool allocateTables,
	vm_page_reservation* reservation,
	TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount)
{
	TRACE("X86PagingMethod64Bit::PageTableForAddress(%#" B_PRIxADDR ", "
		"%d)\n", virtualAddress, allocateTables);

	uint64* pde = PageDirectoryEntryForAddress(virtualPML4, virtualAddress,
		isKernel, allocateTables, reservation, pageMapper, mapCount);
	if (pde == NULL)
		return NULL;

	if ((*pde & X86_64_PDE_PRESENT) == 0) {
		if (!allocateTables)
			return NULL;

		// Allocate a new page table.
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		phys_addr_t physicalPageTable
			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("X86PagingMethod64Bit::PageTableForAddress(): creating page "
			"table for va %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n",
			virtualAddress, physicalPageTable);

		SetTableEntry(pde, (physicalPageTable & X86_64_PDE_ADDRESS_MASK)
			| X86_64_PDE_PRESENT
			| X86_64_PDE_WRITABLE
			| X86_64_PDE_USER);

		mapCount++;
	}

	// No proper large page support at the moment, but they are used for the
	// physical map area. Ensure that nothing tries to treat that as normal
	// address space.
	ASSERT(!(*pde & X86_64_PDE_LARGE_PAGE));

	return (uint64*)pageMapper->GetPageTableAt(*pde & X86_64_PDE_ADDRESS_MASK);
}


/*static*/ uint64*
X86PagingMethod64Bit::PageTableEntryForAddress(uint64* virtualPML4,
	addr_t virtualAddress, bool isKernel, bool allocateTables,
	vm_page_reservation* reservation,
	TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount)
{
	uint64* virtualPageTable = PageTableForAddress(virtualPML4, virtualAddress,
		isKernel, allocateTables, reservation, pageMapper, mapCount);
	if (virtualPageTable == NULL)
		return NULL;

	return &virtualPageTable[VADDR_TO_PTE(virtualAddress)];
}
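
// Illustrative sketch, not part of the original file (kept disabled): how a
// caller such as a translation map's Map() implementation would combine
// PageTableEntryForAddress() with PutPageTableEntryInTable(). The function
// name, parameters and error handling here are assumptions for illustration;
// real callers additionally handle locking and reservation bookkeeping.
#if 0
static status_t
example_map_one_page(uint64* virtualPML4, addr_t virtualAddress,
	phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
	vm_page_reservation* reservation,
	TranslationMapPhysicalPageMapper* pageMapper)
{
	int32 mapCount = 0;

	// Walk (and, if needed, build) PML4 -> PDPT -> page directory -> page
	// table, then get a pointer to the PTE slot for this address.
	uint64* pte = X86PagingMethod64Bit::PageTableEntryForAddress(virtualPML4,
		virtualAddress, IS_KERNEL_ADDRESS(virtualAddress), true, reservation,
		pageMapper, mapCount);
	if (pte == NULL)
		return B_NO_MEMORY;

	// Fill in the entry with the physical page and protection flags.
	X86PagingMethod64Bit::PutPageTableEntryInTable(pte, physicalAddress,
		attributes, memoryType, IS_KERNEL_ADDRESS(virtualAddress));
	return B_OK;
}
#endif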


/*static*/ void
X86PagingMethod64Bit::PutPageTableEntryInTable(uint64* entry,
	phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
	bool globalPage)
{
	uint64 page = (physicalAddress & X86_64_PTE_ADDRESS_MASK)
		| X86_64_PTE_PRESENT | (globalPage ? X86_64_PTE_GLOBAL : 0)
		| MemoryTypeToPageTableEntryFlags(memoryType);

	// if the page is user accessible, it's automatically
	// accessible in kernel space, too (but with the same
	// protection)
	if ((attributes & B_USER_PROTECTION) != 0) {
		page |= X86_64_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			page |= X86_64_PTE_WRITABLE;
		if ((attributes & B_EXECUTE_AREA) == 0
			&& x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) {
			page |= X86_64_PTE_NOT_EXECUTABLE;
		}
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		page |= X86_64_PTE_WRITABLE;

	// put it in the page table
	SetTableEntry(entry, page);
}
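
/*	Worked example of the flag translation above, assuming a CPU with NX
	support: a user area mapped with B_READ_AREA | B_WRITE_AREA ends up as

		physicalAddress | X86_64_PTE_PRESENT | X86_64_PTE_USER
			| X86_64_PTE_WRITABLE | X86_64_PTE_NOT_EXECUTABLE

	(plus X86_64_PTE_GLOBAL if globalPage was true, and any cacheability
	flags from MemoryTypeToPageTableEntryFlags()), while a kernel-only
	mapping with just B_KERNEL_READ_AREA gets only X86_64_PTE_PRESENT and
	those same optional flags, i.e. a read-only, supervisor-only page.
*/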
387