xref: /haiku/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.cpp (revision c237c4ce593ee823d9867fd997e51e4c447f5623)
/*
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/64bit/X86PagingMethod64Bit.h"

#include <stdlib.h>
#include <string.h>

#include <boot/kernel_args.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>

#include "paging/64bit/X86PagingStructures64Bit.h"
#include "paging/64bit/X86VMTranslationMap64Bit.h"
#include "paging/x86_physical_page_mapper.h"
#include "paging/x86_physical_page_mapper_mapped.h"


//#define TRACE_X86_PAGING_METHOD_64BIT
#ifdef TRACE_X86_PAGING_METHOD_64BIT
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


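// Whether 5-level paging (LA57) is in use: the top level of the paging
// hierarchy is then a PML5 table and virtual addresses are 57 bits wide;
// otherwise the top level is the usual PML4. Set by the constructor.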
bool X86PagingMethod64Bit::la57 = false;


// #pragma mark - X86PagingMethod64Bit


X86PagingMethod64Bit::X86PagingMethod64Bit(bool la57)
	:
	fKernelPhysicalPMLTop(0),
	fKernelVirtualPMLTop(NULL),
	fPhysicalPageMapper(NULL),
	fKernelPhysicalPageMapper(NULL)
{
	X86PagingMethod64Bit::la57 = la57;
}


X86PagingMethod64Bit::~X86PagingMethod64Bit()
{
}


status_t
X86PagingMethod64Bit::Init(kernel_args* args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
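	// The boot loader hands us the physical and virtual addresses of the
	// kernel's top level paging table (a PML4, or a PML5 when LA57 is
	// enabled) via the kernel args.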
	fKernelPhysicalPMLTop = args->arch_args.phys_pgdir;
	fKernelVirtualPMLTop = (uint64*)(addr_t)args->arch_args.vir_pgdir;

	// If available, enable the NX bit (No eXecute).
	if (x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD))
		call_all_cpus_sync(&_EnableExecutionDisable, NULL);

	// Ensure that the user half of the address space is clear. This removes
	// the temporary identity mapping made by the boot loader.
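	// (The top level table holds 512 64-bit entries; the lower 256 cover the
	// user half of the address space.)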
	memset(fKernelVirtualPMLTop, 0, sizeof(uint64) * 256);
	arch_cpu_global_TLB_invalidate();

	// Create the physical page mapper.
	mapped_physical_page_ops_init(args, fPhysicalPageMapper,
		fKernelPhysicalPageMapper);

	*_physicalPageMapper = fPhysicalPageMapper;
	return B_OK;
}


status_t
X86PagingMethod64Bit::InitPostArea(kernel_args* args)
{
	// Create an area covering the physical map area.
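	// The mappings for this range are established earlier (by the boot
	// loader); the null area just reserves the address range in the kernel
	// address space so nothing else gets mapped over it.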
	void* address = (void*)KERNEL_PMAP_BASE;
	area_id area = vm_create_null_area(VMAddressSpace::KernelID(),
		"physical map area", &address, B_EXACT_ADDRESS,
		KERNEL_PMAP_SIZE, 0);
	if (area < B_OK)
		return area;

	// Create an area to represent the kernel PMLTop.
	area = create_area("kernel pmltop", (void**)&fKernelVirtualPMLTop,
		B_EXACT_ADDRESS, B_PAGE_SIZE, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK)
		return area;

	return B_OK;
}


status_t
X86PagingMethod64Bit::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
{
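	// Kernel and user maps use the same paging depth; the static la57 flag is
	// passed on so the map knows whether a PML5 or a PML4 sits at the top of
	// its hierarchy.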
	X86VMTranslationMap64Bit* map = new(std::nothrow) X86VMTranslationMap64Bit(la57);
	if (map == NULL)
		return B_NO_MEMORY;

	status_t error = map->Init(kernel);
	if (error != B_OK) {
		delete map;
		return error;
	}

	*_map = map;
	return B_OK;
}


status_t
X86PagingMethod64Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
	phys_addr_t physicalAddress, uint8 attributes,
	page_num_t (*get_free_page)(kernel_args*))
{
	TRACE("X86PagingMethod64Bit::MapEarly(%#" B_PRIxADDR ", %#" B_PRIxPHYSADDR
		", %#" B_PRIx8 ")\n", virtualAddress, physicalAddress, attributes);

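	// With LA57 the kernel's top level table is a PML5; descend one extra
	// level to find the PML4 that covers this address. Without LA57 the top
	// level table is the PML4 itself.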
	uint64* virtualPML4 = fKernelVirtualPMLTop;
	if (la57) {
		// Get the PML4. We should be mapping on an existing PML4 at this stage.
		uint64* pml5e = &fKernelVirtualPMLTop[VADDR_TO_PML5E(virtualAddress)];
		ASSERT((*pml5e & X86_64_PML5E_PRESENT) != 0);
		virtualPML4 = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
			*pml5e & X86_64_PML5E_ADDRESS_MASK);
	}

	// Get the PDPT. We should be mapping on an existing PDPT at this stage.
	uint64* pml4e = &virtualPML4[VADDR_TO_PML4E(virtualAddress)];
	ASSERT((*pml4e & X86_64_PML4E_PRESENT) != 0);
	uint64* virtualPDPT = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
		*pml4e & X86_64_PML4E_ADDRESS_MASK);

	// Get the page directory.
	uint64* pdpte = &virtualPDPT[VADDR_TO_PDPTE(virtualAddress)];
	uint64* virtualPageDir;
	if ((*pdpte & X86_64_PDPTE_PRESENT) == 0) {
		phys_addr_t physicalPageDir = get_free_page(args) * B_PAGE_SIZE;

		TRACE("X86PagingMethod64Bit::MapEarly(): creating page directory for va"
			" %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n", virtualAddress,
			physicalPageDir);

		SetTableEntry(pdpte, (physicalPageDir & X86_64_PDPTE_ADDRESS_MASK)
			| X86_64_PDPTE_PRESENT
			| X86_64_PDPTE_WRITABLE
			| X86_64_PDPTE_USER);

		// Map it and zero it.
		virtualPageDir = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
			physicalPageDir);
		memset(virtualPageDir, 0, B_PAGE_SIZE);
	} else {
		virtualPageDir = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
			*pdpte & X86_64_PDPTE_ADDRESS_MASK);
	}

	// Get the page table.
	uint64* pde = &virtualPageDir[VADDR_TO_PDE(virtualAddress)];
	uint64* virtualPageTable;
	if ((*pde & X86_64_PDE_PRESENT) == 0) {
		phys_addr_t physicalPageTable = get_free_page(args) * B_PAGE_SIZE;

		TRACE("X86PagingMethod64Bit::MapEarly(): creating page table for va"
			" %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n", virtualAddress,
			physicalPageTable);

		SetTableEntry(pde, (physicalPageTable & X86_64_PDE_ADDRESS_MASK)
			| X86_64_PDE_PRESENT
			| X86_64_PDE_WRITABLE
			| X86_64_PDE_USER);

		// Map it and zero it.
		virtualPageTable = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
			physicalPageTable);
		memset(virtualPageTable, 0, B_PAGE_SIZE);
	} else {
		virtualPageTable = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
			*pde & X86_64_PDE_ADDRESS_MASK);
	}

	// The page table entry must not already be mapped.
	uint64* pte = &virtualPageTable[VADDR_TO_PTE(virtualAddress)];
	ASSERT_PRINT(
		(*pte & X86_64_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64,
		virtualAddress, *pte);

	// Fill in the table entry.
	PutPageTableEntryInTable(pte, physicalAddress, attributes, 0,
		IS_KERNEL_ADDRESS(virtualAddress));

	return B_OK;
}


bool
X86PagingMethod64Bit::IsKernelPageAccessible(addr_t virtualAddress,
	uint32 protection)
{
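	// Unlike the 32-bit paging method, no page table walk is performed here;
	// kernel pages are simply reported as accessible.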
	return true;
}


/*!	Traverses down the paging structure hierarchy to find the page directory
	for a virtual address, allocating new tables if required.
*/
/*static*/ uint64*
X86PagingMethod64Bit::PageDirectoryForAddress(uint64* virtualPMLTop,
	addr_t virtualAddress, bool isKernel, bool allocateTables,
	vm_page_reservation* reservation,
	TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount)
{
	uint64* virtualPML4 = virtualPMLTop;
	if (la57) {
		// Get the PML4.
		uint64* pml5e = &virtualPMLTop[VADDR_TO_PML5E(virtualAddress)];
		if ((*pml5e & X86_64_PML5E_PRESENT) == 0) {
			if (!allocateTables)
				return NULL;

			// Allocate a new PML4.
			vm_page* page = vm_page_allocate_page(reservation,
				PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

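			// vm_page_allocate_page() returns the page with its debug access
			// marker set; release it, we are done with the vm_page itself.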
			DEBUG_PAGE_ACCESS_END(page);

			phys_addr_t physicalPML4
				= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

			TRACE("X86PagingMethod64Bit::PageDirectoryForAddress(): creating PML4"
				" for va %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n",
				virtualAddress, physicalPML4);

			SetTableEntry(pml5e, (physicalPML4 & X86_64_PML5E_ADDRESS_MASK)
				| X86_64_PML5E_PRESENT
				| X86_64_PML5E_WRITABLE
				| X86_64_PML5E_USER);

			mapCount++;
		}

		virtualPML4 = (uint64*)pageMapper->GetPageTableAt(
			*pml5e & X86_64_PML5E_ADDRESS_MASK);
	}

	// Get the PDPT.
	uint64* pml4e = &virtualPML4[VADDR_TO_PML4E(virtualAddress)];
	if ((*pml4e & X86_64_PML4E_PRESENT) == 0) {
		if (!allocateTables)
			return NULL;

		// Allocate a new PDPT.
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		phys_addr_t physicalPDPT
			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("X86PagingMethod64Bit::PageDirectoryForAddress(): creating PDPT "
			"for va %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n", virtualAddress,
			physicalPDPT);

		SetTableEntry(pml4e, (physicalPDPT & X86_64_PML4E_ADDRESS_MASK)
			| X86_64_PML4E_PRESENT
			| X86_64_PML4E_WRITABLE
			| X86_64_PML4E_USER);

		mapCount++;
	}

	uint64* virtualPDPT = (uint64*)pageMapper->GetPageTableAt(
		*pml4e & X86_64_PML4E_ADDRESS_MASK);

	// Get the page directory.
	uint64* pdpte = &virtualPDPT[VADDR_TO_PDPTE(virtualAddress)];
	if ((*pdpte & X86_64_PDPTE_PRESENT) == 0) {
		if (!allocateTables)
			return NULL;

		// Allocate a new page directory.
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		phys_addr_t physicalPageDir
			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("X86PagingMethod64Bit::PageDirectoryForAddress(): creating page "
			"directory for va %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n",
			virtualAddress, physicalPageDir);

		SetTableEntry(pdpte, (physicalPageDir & X86_64_PDPTE_ADDRESS_MASK)
			| X86_64_PDPTE_PRESENT
			| X86_64_PDPTE_WRITABLE
			| X86_64_PDPTE_USER);

		mapCount++;
	}

	return (uint64*)pageMapper->GetPageTableAt(
		*pdpte & X86_64_PDPTE_ADDRESS_MASK);
}


/*static*/ uint64*
X86PagingMethod64Bit::PageDirectoryEntryForAddress(uint64* virtualPMLTop,
	addr_t virtualAddress, bool isKernel, bool allocateTables,
	vm_page_reservation* reservation,
	TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount)
{
	uint64* virtualPageDirectory = PageDirectoryForAddress(virtualPMLTop,
		virtualAddress, isKernel, allocateTables, reservation, pageMapper,
		mapCount);
	if (virtualPageDirectory == NULL)
		return NULL;

	return &virtualPageDirectory[VADDR_TO_PDE(virtualAddress)];
}


/*!	Traverses down the paging structure hierarchy to find the page table for a
	virtual address, allocating new tables if required.
*/
/*static*/ uint64*
X86PagingMethod64Bit::PageTableForAddress(uint64* virtualPMLTop,
	addr_t virtualAddress, bool isKernel, bool allocateTables,
	vm_page_reservation* reservation,
	TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount)
{
	TRACE("X86PagingMethod64Bit::PageTableForAddress(%#" B_PRIxADDR ", "
		"%d)\n", virtualAddress, allocateTables);

	uint64* pde = PageDirectoryEntryForAddress(virtualPMLTop, virtualAddress,
		isKernel, allocateTables, reservation, pageMapper, mapCount);
	if (pde == NULL)
		return NULL;

	if ((*pde & X86_64_PDE_PRESENT) == 0) {
		if (!allocateTables)
			return NULL;

		// Allocate a new page table.
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		phys_addr_t physicalPageTable
			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("X86PagingMethod64Bit::PageTableForAddress(): creating page "
			"table for va %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n",
			virtualAddress, physicalPageTable);

		SetTableEntry(pde, (physicalPageTable & X86_64_PDE_ADDRESS_MASK)
			| X86_64_PDE_PRESENT
			| X86_64_PDE_WRITABLE
			| X86_64_PDE_USER);

		mapCount++;
	}

	// No proper large page support at the moment, but they are used for the
	// physical map area. Ensure that nothing tries to treat that as normal
	// address space.
	ASSERT(!(*pde & X86_64_PDE_LARGE_PAGE));

	return (uint64*)pageMapper->GetPageTableAt(*pde & X86_64_PDE_ADDRESS_MASK);
}


/*static*/ uint64*
X86PagingMethod64Bit::PageTableEntryForAddress(uint64* virtualPMLTop,
	addr_t virtualAddress, bool isKernel, bool allocateTables,
	vm_page_reservation* reservation,
	TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount)
{
	uint64* virtualPageTable = PageTableForAddress(virtualPMLTop, virtualAddress,
		isKernel, allocateTables, reservation, pageMapper, mapCount);
	if (virtualPageTable == NULL)
		return NULL;

	return &virtualPageTable[VADDR_TO_PTE(virtualAddress)];
}


/*static*/ void
X86PagingMethod64Bit::PutPageTableEntryInTable(uint64* entry,
	phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
	bool globalPage)
{
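	// Build the new entry: physical frame, present bit, the global bit if
	// requested, and the caching flags derived from the memory type.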
	uint64 page = (physicalAddress & X86_64_PTE_ADDRESS_MASK)
		| X86_64_PTE_PRESENT | (globalPage ? X86_64_PTE_GLOBAL : 0)
		| MemoryTypeToPageTableEntryFlags(memoryType);

	// if the page is user accessible, it's automatically
	// accessible in kernel space, too (but with the same
	// protection)
	if ((attributes & B_USER_PROTECTION) != 0) {
		page |= X86_64_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			page |= X86_64_PTE_WRITABLE;
		if ((attributes & B_EXECUTE_AREA) == 0
			&& x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) {
			page |= X86_64_PTE_NOT_EXECUTABLE;
		}
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		page |= X86_64_PTE_WRITABLE;

	// put it in the page table
	SetTableEntry(entry, page);
}


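// Called on each CPU (via call_all_cpus_sync() from Init()) to set the NX
// enable bit in the EFER MSR, so that X86_64_PTE_NOT_EXECUTABLE takes effect.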
/*static*/ void
X86PagingMethod64Bit::_EnableExecutionDisable(void* dummy, int cpu)
{
	x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER)
		| IA32_MSR_EFER_NX);
}

439