xref: /haiku/src/system/kernel/arch/arm/paging/32bit/ARMPagingMethod32Bit.cpp (revision 4a55cc230cf7566cadcbb23b1928eefff8aea9a2)
1 /*
2  * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl
3  * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 #include "paging/32bit/ARMPagingMethod32Bit.h"
13 
14 #include <stdlib.h>
15 #include <string.h>
16 
17 #include <AutoDeleter.h>
18 
19 #include <arch/smp.h>
20 #include <arch_system_info.h>
21 #include <boot/kernel_args.h>
22 #include <int.h>
23 #include <thread.h>
24 #include <vm/vm.h>
25 #include <vm/VMAddressSpace.h>
26 
27 #include "paging/32bit/ARMPagingStructures32Bit.h"
28 #include "paging/32bit/ARMVMTranslationMap32Bit.h"
29 #include "paging/arm_physical_page_mapper.h"
30 #include "paging/arm_physical_page_mapper_large_memory.h"
31 
32 
33 //#define TRACE_ARM_PAGING_METHOD_32_BIT
34 #ifdef TRACE_ARM_PAGING_METHOD_32_BIT
35 #	define TRACE(x...) dprintf(x)
36 #else
37 #	define TRACE(x...) ;
38 #endif
39 
40 
// Worst-case number of statically allocated initial pools: enough 1024-slot
// pools to cover the per-CPU slots of SMP_MAX_CPUS CPUs plus the extra slots
// (the actual count needed at runtime is computed by _GetInitialPoolCount()).
#define MAX_INITIAL_POOLS	\
	(ROUNDUP(SMP_MAX_CPUS * TOTAL_SLOTS_PER_CPU + EXTRA_SLOTS, 1024) / 1024)
43 
44 
45 using ARMLargePhysicalPageMapper::PhysicalPageSlot;
46 
47 
48 // #pragma mark - ARMPagingMethod32Bit::PhysicalPageSlotPool
49 
/*!	A pool of 1024 virtual page slots used by the large-memory physical page
	mapper to temporarily map physical pages into the kernel address space.
	Each pool owns one page worth of ARM L2 page table entries (1024 entries,
	i.e. four contiguous coarse tables) mapping a contiguous 1024-page
	virtual range, plus the PhysicalPageSlot bookkeeping array stored right
	behind the entries.
*/
struct ARMPagingMethod32Bit::PhysicalPageSlotPool
	: ARMLargePhysicalPageMapper::PhysicalPageSlotPool {
public:
	virtual						~PhysicalPageSlotPool();

			// Early (pre-VM) setup from boot loader kernel_args; completed
			// later by InitInitialPostArea() once areas can be created.
			status_t			InitInitial(kernel_args* args);
			status_t			InitInitialPostArea(kernel_args* args);

			// Wires up the pool from already-allocated resources and builds
			// the free slot list.
			void				Init(area_id dataArea, void* data,
									area_id virtualArea, addr_t virtualBase);

	virtual	status_t			AllocatePool(
									ARMLargePhysicalPageMapper
										::PhysicalPageSlotPool*& _pool);
	virtual	void				Map(phys_addr_t physicalAddress,
									addr_t virtualAddress);

public:
	// Static storage for the pools needed before the kernel heap exists.
	static	PhysicalPageSlotPool sInitialPhysicalPagePool[MAX_INITIAL_POOLS];

private:
	area_id					fDataArea;		// area covering page table + slots
	area_id					fVirtualArea;	// null area reserving fVirtualBase
	addr_t					fVirtualBase;	// start of the 1024-page virtual range
	page_table_entry*		fPageTable;		// the pool's L2 entries
};
76 
77 
// Definition of the static initial pool storage; the objects are constructed
// in-place (placement new) in ARMPagingMethod32Bit::Init().
ARMPagingMethod32Bit::PhysicalPageSlotPool
	ARMPagingMethod32Bit::PhysicalPageSlotPool::sInitialPhysicalPagePool[
		MAX_INITIAL_POOLS];
81 
82 
// Nothing to tear down: the areas and memory owned by a pool live for the
// whole kernel lifetime.
ARMPagingMethod32Bit::PhysicalPageSlotPool::~PhysicalPageSlotPool()
{
}
86 
87 
88 status_t
89 ARMPagingMethod32Bit::PhysicalPageSlotPool::InitInitial(kernel_args* args)
90 {
91 	// allocate a virtual address range for the pages to be mapped into
92 	addr_t virtualBase = vm_allocate_early(args, 1024 * B_PAGE_SIZE, 0, 0,
93 		kPageTableAlignment);
94 	if (virtualBase == 0) {
95 		panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve "
96 			"physical page pool space in virtual address space!");
97 		return B_ERROR;
98 	}
99 
100 	// allocate memory for the page table and data
101 	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
102 	page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
103 		areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
104 	if (pageTable == 0) {
105 		panic("ARMPagingMethod32Bit::PhysicalPageSlotPool::InitInitial(): "
106 			"Failed to allocate memory for page table!");
107 		return B_ERROR;
108 	}
109 
110 	// prepare the page table
111 	_EarlyPreparePageTables(pageTable, virtualBase, 1024 * B_PAGE_SIZE);
112 
113 	// init the pool structure and add the initial pool
114 	Init(-1, pageTable, -1, (addr_t)virtualBase);
115 
116 	return B_OK;
117 }
118 
119 
120 status_t
121 ARMPagingMethod32Bit::PhysicalPageSlotPool::InitInitialPostArea(
122 	kernel_args* args)
123 {
124 	// create an area for the (already allocated) data
125 	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
126 	void* temp = fPageTable;
127 	area_id area = create_area("physical page pool", &temp,
128 		B_EXACT_ADDRESS, areaSize, B_ALREADY_WIRED,
129 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
130 	if (area < 0) {
131 		panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
132 			"create area for physical page pool.");
133 		return area;
134 	}
135 	fDataArea = area;
136 
137 	// create an area for the virtual address space
138 	temp = (void*)fVirtualBase;
139 	area = vm_create_null_area(VMAddressSpace::KernelID(),
140 		"physical page pool space", &temp, B_EXACT_ADDRESS,
141 		1024 * B_PAGE_SIZE, 0);
142 	if (area < B_OK) {
143 		panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
144 			"create area for physical page pool space.");
145 		return area;
146 	}
147 	fVirtualArea = area;
148 
149 	return B_OK;
150 }
151 
152 
153 void
154 ARMPagingMethod32Bit::PhysicalPageSlotPool::Init(area_id dataArea, void* data,
155 	area_id virtualArea, addr_t virtualBase)
156 {
157 	fDataArea = dataArea;
158 	fVirtualArea = virtualArea;
159 	fVirtualBase = virtualBase;
160 	fPageTable = (page_table_entry*)data;
161 
162 	// init slot list
163 	fSlots = (PhysicalPageSlot*)(fPageTable + 1024);
164 	addr_t slotAddress = virtualBase;
165 	for (int32 i = 0; i < 1024; i++, slotAddress += B_PAGE_SIZE) {
166 		PhysicalPageSlot* slot = &fSlots[i];
167 		slot->next = slot + 1;
168 		slot->pool = this;
169 		slot->address = slotAddress;
170 	}
171 
172 	fSlots[1023].next = NULL;
173 		// terminate list
174 }
175 
176 
/*!	Maps \a physicalAddress at the pool slot address \a virtualAddress by
	writing the corresponding L2 page table entry directly, then invalidates
	the TLB entry for that page.
	The entry is a small page, bufferable + cacheable, kernel read/write and
	execute-never (see the ARM_MMU_L2_* flags below).
*/
void
ARMPagingMethod32Bit::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress,
	addr_t virtualAddress)
{
	// the page's index within the pool's virtual range selects the entry
	page_table_entry& pte = fPageTable[
		(virtualAddress - fVirtualBase) / B_PAGE_SIZE];
	pte = (physicalAddress & ARM_PTE_ADDRESS_MASK)
		| ARM_MMU_L2_TYPE_SMALLNEW
		| ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C
		| ARM_MMU_L2_FLAG_AP_KRW | ARM_MMU_L2_FLAG_XN;

	arch_cpu_invalidate_TLB_page(virtualAddress);
}
190 
191 
/*!	Allocates and initializes a new pool at runtime (after the VM is up):
	creates an area holding one page of L2 page table entries plus the slot
	array, reserves 1024 pages of kernel address space with a null area, and
	hooks the new page table into the kernel page directory (and into all
	other page directories via UpdateAllPageDirs()).
	\param _pool set to the new pool on success; ownership is transferred to
		the caller.
	\return \c B_OK on success, another error code otherwise.
*/
status_t
ARMPagingMethod32Bit::PhysicalPageSlotPool::AllocatePool(
	ARMLargePhysicalPageMapper::PhysicalPageSlotPool*& _pool)
{
	// create the pool structure
	PhysicalPageSlotPool* pool = new(std::nothrow) PhysicalPageSlotPool;
	if (pool == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<PhysicalPageSlotPool> poolDeleter(pool);

	// create an area that can contain the page table and the slot
	// structures
	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
	void* data;
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
		PAGE_ALIGN(areaSize), B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions, &data);
	if (dataArea < 0)
		return dataArea;

	// create the null area for the virtual address space
	void* virtualBase;
	area_id virtualArea = vm_create_null_area(
		VMAddressSpace::KernelID(), "physical page pool space",
		&virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, 1024 * B_PAGE_SIZE,
		CREATE_AREA_PRIORITY_VIP);
	if (virtualArea < 0) {
		// undo the data area creation before bailing out
		delete_area(dataArea);
		return virtualArea;
	}

	// prepare the page table: zero the first page of the data area, which
	// holds the L2 entries (slots are mapped on demand via Map())
	memset(data, 0, B_PAGE_SIZE);

	// get the page table's physical address
	phys_addr_t physicalTable;
	ARMVMTranslationMap32Bit* map = static_cast<ARMVMTranslationMap32Bit*>(
		VMAddressSpace::Kernel()->TranslationMap());
	uint32 dummyFlags;
	cpu_status state = disable_interrupts();
	map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags);
	restore_interrupts(state);

	// put the page table into the page directory
	int32 index = VADDR_TO_PDENT((addr_t)virtualBase);
	page_directory_entry* entry
		= &map->PagingStructures32Bit()->pgdir_virt[index];
	PutPageTableInPageDir(entry, physicalTable, ARM_MMU_L1_FLAG_PXN);
	ARMPagingStructures32Bit::UpdateAllPageDirs(index, *entry);

	// init the pool structure
	pool->Init(dataArea, data, virtualArea, (addr_t)virtualBase);
	poolDeleter.Detach();
		// success: the pool is handed to the caller, don't delete it
	_pool = pool;
	return B_OK;
}
252 
253 
254 // #pragma mark - ARMPagingMethod32Bit
255 
256 
// Creates the paging method with all state cleared; the actual setup (taking
// over the boot loader's page directory, creating the page mapper) happens
// in Init().
ARMPagingMethod32Bit::ARMPagingMethod32Bit()
	:
	fKernelPhysicalPageDirectory(0),
	fKernelVirtualPageDirectory(NULL),
	fPhysicalPageMapper(NULL),
	fKernelPhysicalPageMapper(NULL)
{
}
265 
266 
// Nothing to release: the paging method and its mappers live for the whole
// kernel lifetime.
ARMPagingMethod32Bit::~ARMPagingMethod32Bit()
{
}
270 
271 
/*!	Initializes the paging method: takes over the page directory set up by
	the boot loader (physical and virtual addresses from \a args), creates
	the initial physical page pools, and sets up the large-memory physical
	page mapper.
	\param args kernel_args from the boot loader.
	\param _physicalPageMapper set to the created physical page mapper.
	\return \c B_OK on success, an error code if pool creation failed (after
		panicking).
*/
status_t
ARMPagingMethod32Bit::Init(kernel_args* args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("ARMPagingMethod32Bit::Init(): entry\n");

	fKernelPhysicalPageDirectory = args->arch_args.phys_pgdir;
	fKernelVirtualPageDirectory = (page_directory_entry*)
		args->arch_args.vir_pgdir;

#ifdef TRACE_ARM_PAGING_METHOD_32_BIT
	TRACE("page dir: %p (physical: %#" B_PRIx32 ")\n",
		fKernelVirtualPageDirectory, fKernelPhysicalPageDirectory);
#endif

	ARMPagingStructures32Bit::StaticInit();

	// create the initial pools for the physical page mapper
	int32 poolCount = _GetInitialPoolCount();
	PhysicalPageSlotPool* pool = PhysicalPageSlotPool::sInitialPhysicalPagePool;

	for (int32 i = 0; i < poolCount; i++) {
		// construct in-place in the static storage, then do the early init
		new(&pool[i]) PhysicalPageSlotPool;
		status_t error = pool[i].InitInitial(args);
		if (error != B_OK) {
			panic("ARMPagingMethod32Bit::Init(): Failed to create initial pool "
				"for physical page mapper!");
			return error;
		}
	}

	// create physical page mapper
	large_memory_physical_page_ops_init(args, pool, poolCount, sizeof(*pool),
		fPhysicalPageMapper, fKernelPhysicalPageMapper);
		// TODO: Select the best page mapper!

	// enable global page feature if available
	// (dead x86 code below; on ARM the global/not-global distinction is
	// handled per page table entry via the nG flag, see
	// PutPageTableEntryInTable())
#if 0 //IRA: check for ARMv6!!
	if (x86_check_feature(IA32_FEATURE_PGE, FEATURE_COMMON)) {
		// this prevents kernel pages from being flushed from TLB on
		// context-switch
		x86_write_cr4(x86_read_cr4() | IA32_CR4_GLOBAL_PAGES);
	}
#endif
	TRACE("ARMPagingMethod32Bit::Init(): done\n");

	*_physicalPageMapper = fPhysicalPageMapper;
	return B_OK;
}
321 
322 
323 status_t
324 ARMPagingMethod32Bit::InitPostArea(kernel_args* args)
325 {
326 	void *temp;
327 	area_id area;
328 
329 	temp = (void*)fKernelVirtualPageDirectory;
330 	area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, args->arch_args.next_pagetable,
331 		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
332 	ASSERT_PRINT(area >= 0, "Failed mapping the kernel page directory: 0x%08lx!", area);
333 
334 	int32 poolCount = _GetInitialPoolCount();
335 	for (int32 i = 0; i < poolCount; i++) {
336 		status_t error = PhysicalPageSlotPool::sInitialPhysicalPagePool[i]
337 			.InitInitialPostArea(args);
338 		if (error != B_OK)
339 			return error;
340 	}
341 
342 	return B_OK;
343 }
344 
345 
346 status_t
347 ARMPagingMethod32Bit::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
348 {
349 	ARMVMTranslationMap32Bit* map = new(std::nothrow) ARMVMTranslationMap32Bit;
350 	if (map == NULL)
351 		return B_NO_MEMORY;
352 
353 	status_t error = map->Init(kernel);
354 	if (error != B_OK) {
355 		delete map;
356 		return error;
357 	}
358 
359 	*_map = map;
360 	return B_OK;
361 }
362 
363 
364 static void
365 get_free_pgtable(kernel_args* args, phys_addr_t* phys_addr, addr_t* virt_addr)
366 {
367 	if (args->arch_args.next_pagetable >= args->arch_args.last_pagetable)
368 		panic("ran out of early page tables");
369 
370 	phys_addr_t phys = args->arch_args.phys_pgdir + args->arch_args.next_pagetable;
371 	addr_t virt = args->arch_args.vir_pgdir + args->arch_args.next_pagetable;
372 	args->arch_args.next_pagetable += ARM_MMU_L2_COARSE_TABLE_SIZE;
373 
374 	*phys_addr = phys;
375 	*virt_addr = virt;
376 }
377 
/*!	Maps \a virtualAddress to \a physicalAddress during early boot, before
	the VM is fully up. If the covering page directory entry is still empty,
	a coarse page table is taken from the boot-time reservoir first.
	\param args kernel_args, used for early page table allocation.
	\param attributes protection attributes for the new mapping.
	\param get_free_page unused here — page tables come from
		get_free_pgtable() instead.
	\return \c B_OK on success.
*/
status_t
ARMPagingMethod32Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
	phys_addr_t physicalAddress, uint8 attributes,
	page_num_t (*get_free_page)(kernel_args*))
{
	// check to see if a page table exists for this range
	int index = VADDR_TO_PDENT(virtualAddress);
	if ((fKernelVirtualPageDirectory[index] & ARM_PDE_TYPE_MASK) == 0) {
		phys_addr_t pgtable_phys;
		addr_t pgtable_virt;
		page_directory_entry *e;

		// we need to allocate a pgtable
		get_free_pgtable(args, &pgtable_phys, &pgtable_virt);

		TRACE("ARMPagingMethod32Bit::MapEarly(): asked for free page for "
			"pgtable. phys=%#" B_PRIxPHYSADDR ", virt=%#" B_PRIxADDR "\n",
			pgtable_phys, pgtable_virt);

		// zero it out in its new mapping
		memset((void*)pgtable_virt, 0, B_PAGE_SIZE);

		// put it in the pgdir; user-range tables get the PXN flag
		e = &fKernelVirtualPageDirectory[index];
		PutPageTableInPageDir(e, pgtable_phys,
			(virtualAddress < KERNEL_BASE) ? ARM_MMU_L1_FLAG_PXN : 0);
	}

	// the boot-time page tables sit at a fixed offset from the page
	// directory, so the table's virtual address can be computed directly
	phys_addr_t ptEntryPhys = fKernelVirtualPageDirectory[index] & ARM_PDE_ADDRESS_MASK;
	addr_t ptEntryVirt = ptEntryPhys - args->arch_args.phys_pgdir + args->arch_args.vir_pgdir;
	page_table_entry* ptEntry = (page_table_entry*)ptEntryVirt;
	ptEntry += VADDR_TO_PTENT(virtualAddress);

	// the page must not be mapped yet
	ASSERT_PRINT(
		(*ptEntry & ARM_PTE_TYPE_MASK) == 0,
		"virtual address: %#" B_PRIxADDR ", pde: %#" B_PRIx32
		", existing pte: %#" B_PRIx32, virtualAddress, fKernelVirtualPageDirectory[index],
		*ptEntry);

	// now, fill in the pentry
	PutPageTableEntryInTable(ptEntry,
		physicalAddress, attributes, 0, IS_KERNEL_ADDRESS(virtualAddress));

	return B_OK;
}
423 
424 
/*!	Intended to check whether \a virtualAddress is accessible with the given
	\a protection via the kernel's paging structures (used from debugger
	context on x86).
	TODO: Not implemented for ARM — the x86 implementation below is disabled
	with \c #if \c 0 and this function currently always returns \c true.
*/
bool
ARMPagingMethod32Bit::IsKernelPageAccessible(addr_t virtualAddress,
	uint32 protection)
{
#if 0
	// We only trust the kernel team's page directory. So switch to it first.
	// Always set it to make sure the TLBs don't contain obsolete data.
	uint32 physicalPageDirectory = x86_read_cr3();
	x86_write_cr3(fKernelPhysicalPageDirectory);

	// get the page directory entry for the address
	page_directory_entry pageDirectoryEntry;
	uint32 index = VADDR_TO_PDENT(virtualAddress);

	if (physicalPageDirectory == fKernelPhysicalPageDirectory) {
		pageDirectoryEntry = fKernelVirtualPageDirectory[index];
	} else if (fPhysicalPageMapper != NULL) {
		// map the original page directory and get the entry
		void* handle;
		addr_t virtualPageDirectory;
		status_t error = fPhysicalPageMapper->GetPageDebug(
			physicalPageDirectory, &virtualPageDirectory, &handle);
		if (error == B_OK) {
			pageDirectoryEntry
				= ((page_directory_entry*)virtualPageDirectory)[index];
			fPhysicalPageMapper->PutPageDebug(virtualPageDirectory, handle);
		} else
			pageDirectoryEntry = 0;
	} else
		pageDirectoryEntry = 0;

	// map the page table and get the entry
	page_table_entry pageTableEntry;
	index = VADDR_TO_PTENT(virtualAddress);

	if ((pageDirectoryEntry & X86_PDE_PRESENT) != 0
			&& fPhysicalPageMapper != NULL) {
		void* handle;
		addr_t virtualPageTable;
		status_t error = fPhysicalPageMapper->GetPageDebug(
			pageDirectoryEntry & X86_PDE_ADDRESS_MASK, &virtualPageTable,
			&handle);
		if (error == B_OK) {
			pageTableEntry = ((page_table_entry*)virtualPageTable)[index];
			fPhysicalPageMapper->PutPageDebug(virtualPageTable, handle);
		} else
			pageTableEntry = 0;
	} else
		pageTableEntry = 0;

	// switch back to the original page directory
	if (physicalPageDirectory != fKernelPhysicalPageDirectory)
		x86_write_cr3(physicalPageDirectory);

	if ((pageTableEntry & X86_PTE_PRESENT) == 0)
		return false;

	// present means kernel-readable, so check for writable
	return (protection & B_KERNEL_WRITE_AREA) == 0
		|| (pageTableEntry & X86_PTE_WRITABLE) != 0;
#endif
	//IRA: fix the above!
	return true;
}
489 
490 
/*!	Points the page directory (L1) \a entry at the coarse page table at
	\a pgtablePhysical, combined with the additional L1 \a attributes.
	The dsb() before the write orders earlier memory writes (e.g. zeroing
	the new table) ahead of the entry update; the dsb()/isb() pair after
	makes the update visible before subsequent instructions execute.
*/
/*static*/ void
ARMPagingMethod32Bit::PutPageTableInPageDir(page_directory_entry* entry,
	phys_addr_t pgtablePhysical, uint32 attributes)
{
	dsb();

	*entry = (pgtablePhysical & ARM_PDE_ADDRESS_MASK) | ARM_MMU_L1_TYPE_COARSE | attributes;

	dsb();
	isb();
}
502 
503 
/*!	Composes and writes a single L2 small-page entry mapping
	\a physicalAddress with the given \a attributes and \a memoryType
	(both translated to hardware flags by the respective helpers).
	\param globalPage if \c false the entry gets the not-global (nG) flag;
		kernel mappings pass \c true and stay global.
	The entry is written through a volatile pointer and followed by
	dsb()/isb() so the new mapping is visible before subsequent code
	relies on it.
*/
/*static*/ void
ARMPagingMethod32Bit::PutPageTableEntryInTable(page_table_entry* entry,
	phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
	bool globalPage)
{
	page_table_entry page = (physicalAddress & ARM_PTE_ADDRESS_MASK)
		| ARM_MMU_L2_TYPE_SMALLNEW
		| MemoryTypeToPageTableEntryFlags(memoryType)
		| AttributesToPageTableEntryFlags(attributes)
		| (globalPage ? 0 : ARM_MMU_L2_FLAG_NG);

	// put it in the page table
	*(volatile page_table_entry*)entry = page;

	dsb();
	isb();
}
521 
522 
523 inline int32
524 ARMPagingMethod32Bit::_GetInitialPoolCount()
525 {
526 	int32 requiredSlots = smp_get_num_cpus() * TOTAL_SLOTS_PER_CPU
527 			+ EXTRA_SLOTS;
528 	return (requiredSlots + 1023) / 1024;
529 }
530 
531 
532 /*static*/ void
533 ARMPagingMethod32Bit::_EarlyPreparePageTables(page_table_entry* pageTables,
534 	addr_t address, size_t size)
535 {
536 	ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();
537 	memset(pageTables, 0, 256 * (size / (B_PAGE_SIZE * 256)));
538 
539 	// put the array of pgtables directly into the kernel pagedir
540 	// these will be wired and kept mapped into virtual space to be easy to get
541 	// to
542 	{
543 		addr_t virtualTable = (addr_t)pageTables;
544 
545 		for (size_t i = 0; i < (size / (B_PAGE_SIZE * 256));
546 				i++, virtualTable += 256*sizeof(page_directory_entry)) {
547 			phys_addr_t physicalTable = 0;
548 			_EarlyQuery(virtualTable, &physicalTable);
549 			page_directory_entry* entry = method->KernelVirtualPageDirectory()
550 				+ VADDR_TO_PDENT(address) + i;
551 			PutPageTableInPageDir(entry, physicalTable,
552 				(address < KERNEL_BASE) ? ARM_MMU_L1_FLAG_PXN : 0);
553 		}
554 	}
555 }
556 
557 
//! TODO: currently assumes this translation map is active
/*!	Early (boot-time) virtual-to-physical lookup in the kernel address
	space, walking the kernel page directory and page tables directly.
	Relies on the early page tables being mapped at a fixed offset from the
	page directory (virtual = physical - phys_pgdir + vir_pgdir).
	\param virtualAddress the address to translate.
	\param _physicalAddress set to the translation on success.
	\return \c B_OK if a valid mapping exists, \c B_ERROR otherwise.
*/
/*static*/ status_t
ARMPagingMethod32Bit::_EarlyQuery(addr_t virtualAddress,
	phys_addr_t *_physicalAddress)
{
	ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();
	int index = VADDR_TO_PDENT(virtualAddress);
	if ((method->KernelVirtualPageDirectory()[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_ERROR;
	}

	// derive the page table's virtual address from its physical address
	phys_addr_t ptEntryPhys = method->KernelVirtualPageDirectory()[index] & ARM_PDE_ADDRESS_MASK;
	addr_t ptEntryVirt = ptEntryPhys -
		(uint32_t)method->KernelPhysicalPageDirectory() +
		(uint32_t)method->KernelVirtualPageDirectory();

	page_table_entry* entry = (page_table_entry*)ptEntryVirt;
	entry += VADDR_TO_PTENT(virtualAddress);

	if ((*entry & ARM_PTE_TYPE_MASK) == 0) {
		// page mapping not valid
		return B_ERROR;
	}

	// combine the frame address with the in-page offset
	*_physicalAddress = (*entry & ARM_PTE_ADDRESS_MASK)
		| VADDR_TO_PGOFF(virtualAddress);

	return B_OK;
}
588