xref: /haiku/src/system/kernel/arch/x86/paging/32bit/X86PagingMethod32Bit.cpp (revision 344ded80d400028c8f561b4b876257b94c12db4a)
/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/32bit/X86PagingMethod32Bit.h"

#include <stdlib.h>
#include <string.h>

#include <AutoDeleter.h>

#include <arch/smp.h>
#include <arch_system_info.h>
#include <boot/kernel_args.h>
#include <int.h>
#include <thread.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "paging/32bit/X86PagingStructures32Bit.h"
#include "paging/32bit/X86VMTranslationMap32Bit.h"
#include "paging/x86_physical_page_mapper.h"
#include "paging/x86_physical_page_mapper_large_memory.h"


//#define TRACE_X86_PAGING_METHOD_32_BIT
#ifdef TRACE_X86_PAGING_METHOD_32_BIT
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


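// The number of initial physical page slot pools. Each pool provides 1024
// slots (one page table's worth), so this is the total number of per-CPU and
// extra slots, rounded up to whole pools.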
#define MAX_INITIAL_POOLS	\
	(ROUNDUP(SMP_MAX_CPUS * TOTAL_SLOTS_PER_CPU + EXTRA_SLOTS, 1024) / 1024)


using X86LargePhysicalPageMapper::PhysicalPageSlot;


// #pragma mark - X86PagingMethod32Bit::PhysicalPageSlotPool


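/*!	A pool of 1024 physical page mapping slots.
	Each pool owns one page table whose 1024 entries map a contiguous,
	page table aligned 4 MB range of kernel virtual address space. Mapping a
	physical page into a slot thus only requires writing the respective page
	table entry (cf. Map()).
*/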
struct X86PagingMethod32Bit::PhysicalPageSlotPool final
	: X86LargePhysicalPageMapper::PhysicalPageSlotPool {
public:
	virtual						~PhysicalPageSlotPool();

			status_t			InitInitial(kernel_args* args);
			status_t			InitInitialPostArea(kernel_args* args);

			void				Init(area_id dataArea, void* data,
									area_id virtualArea, addr_t virtualBase);

	virtual	status_t			AllocatePool(
									X86LargePhysicalPageMapper
										::PhysicalPageSlotPool*& _pool);
	virtual	void				Map(phys_addr_t physicalAddress,
									addr_t virtualAddress);

public:
	static	PhysicalPageSlotPool sInitialPhysicalPagePool[MAX_INITIAL_POOLS];

private:
	area_id					fDataArea;
	area_id					fVirtualArea;
	addr_t					fVirtualBase;
	page_table_entry*		fPageTable;
};


X86PagingMethod32Bit::PhysicalPageSlotPool
	X86PagingMethod32Bit::PhysicalPageSlotPool::sInitialPhysicalPagePool[
		MAX_INITIAL_POOLS];


X86PagingMethod32Bit::PhysicalPageSlotPool::~PhysicalPageSlotPool()
{
}


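/*!	Initializes the pool during early boot, before the VM is fully up.
	Both the virtual address range and the memory for the page table and the
	slot array are allocated via vm_allocate_early(); the corresponding areas
	are created later, in InitInitialPostArea().
*/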
status_t
X86PagingMethod32Bit::PhysicalPageSlotPool::InitInitial(kernel_args* args)
{
	// allocate a virtual address range for the pages to be mapped into
	addr_t virtualBase = vm_allocate_early(args, 1024 * B_PAGE_SIZE, 0, 0,
		kPageTableAlignment);
	if (virtualBase == 0) {
		panic("X86PagingMethod32Bit::PhysicalPageSlotPool::InitInitial(): "
			"Failed to reserve physical page pool space in virtual address "
			"space!");
		return B_ERROR;
	}

	// allocate memory for the page table and data
	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
	page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
		areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
	if (pageTable == 0) {
		panic("X86PagingMethod32Bit::PhysicalPageSlotPool::InitInitial(): "
			"Failed to allocate memory for page table!");
		return B_ERROR;
	}

	// prepare the page table
	_EarlyPreparePageTables(pageTable, virtualBase, 1024 * B_PAGE_SIZE);

	// init the pool structure and add the initial pool
	Init(-1, pageTable, -1, (addr_t)virtualBase);

	return B_OK;
}


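/*!	Creates proper areas for the address range and the memory already
	allocated in InitInitial(), now that the VM and the area system are
	running.
*/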
status_t
X86PagingMethod32Bit::PhysicalPageSlotPool::InitInitialPostArea(
	kernel_args* args)
{
	// create an area for the (already allocated) data
	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
	void* temp = fPageTable;
	area_id area = create_area("physical page pool", &temp,
		B_EXACT_ADDRESS, areaSize, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK) {
		panic("X86PagingMethod32Bit::PhysicalPageSlotPool::"
			"InitInitialPostArea(): Failed to create area for physical page "
			"pool.");
		return area;
	}
	fDataArea = area;

	// create an area for the virtual address space
	temp = (void*)fVirtualBase;
	area = vm_create_null_area(VMAddressSpace::KernelID(),
		"physical page pool space", &temp, B_EXACT_ADDRESS,
		1024 * B_PAGE_SIZE, 0);
	if (area < B_OK) {
		panic("X86PagingMethod32Bit::PhysicalPageSlotPool::"
			"InitInitialPostArea(): Failed to create area for physical page "
			"pool space.");
		return area;
	}
	fVirtualArea = area;

	return B_OK;
}


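/*!	Common initialization: stores the areas and the backing memory, and
	builds the singly-linked free slot list. The slot array lives directly
	behind the page table's 1024 entries in the same allocation.
*/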
void
X86PagingMethod32Bit::PhysicalPageSlotPool::Init(area_id dataArea, void* data,
	area_id virtualArea, addr_t virtualBase)
{
	fDataArea = dataArea;
	fVirtualArea = virtualArea;
	fVirtualBase = virtualBase;
	fPageTable = (page_table_entry*)data;

	// init slot list
	fSlots = (PhysicalPageSlot*)(fPageTable + 1024);
	addr_t slotAddress = virtualBase;
	for (int32 i = 0; i < 1024; i++, slotAddress += B_PAGE_SIZE) {
		PhysicalPageSlot* slot = &fSlots[i];
		slot->next = slot + 1;
		slot->pool = this;
		slot->address = slotAddress;
	}

	fSlots[1023].next = NULL;
		// terminate list
}


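/*!	Maps \a physicalAddress at the slot's \a virtualAddress by writing the
	page table entry directly, and invalidates the TLB entry for that
	address. The mapping is created writable and global, so that it survives
	address space switches on CPUs supporting the global pages feature (cf.
	Init()).
*/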
void
X86PagingMethod32Bit::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress,
	addr_t virtualAddress)
{
	page_table_entry& pte = fPageTable[
		(virtualAddress - fVirtualBase) / B_PAGE_SIZE];
	pte = (physicalAddress & X86_PTE_ADDRESS_MASK)
		| X86_PTE_WRITABLE | X86_PTE_GLOBAL | X86_PTE_PRESENT;

	invalidate_TLB(virtualAddress);
}


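/*!	Allocates an additional pool at runtime. Unlike the initial pools, this
	one can use the regular VM: the page table and the slot array live in a
	fully locked area, and the new page table is entered into the kernel
	page directory as well as into all other existing page directories (via
	UpdateAllPageDirs()).
*/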
status_t
X86PagingMethod32Bit::PhysicalPageSlotPool::AllocatePool(
	X86LargePhysicalPageMapper::PhysicalPageSlotPool*& _pool)
{
	// create the pool structure
	PhysicalPageSlotPool* pool = new(std::nothrow) PhysicalPageSlotPool;
	if (pool == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<PhysicalPageSlotPool> poolDeleter(pool);

	// create an area that can contain the page table and the slot
	// structures
	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
	void* data;
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
		PAGE_ALIGN(areaSize), B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions, &data);
	if (dataArea < 0)
		return dataArea;

	// create the null area for the virtual address space
	void* virtualBase;
	area_id virtualArea = vm_create_null_area(
		VMAddressSpace::KernelID(), "physical page pool space",
		&virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, 1024 * B_PAGE_SIZE,
		CREATE_AREA_PRIORITY_VIP);
	if (virtualArea < 0) {
		delete_area(dataArea);
		return virtualArea;
	}

	// prepare the page table
	memset(data, 0, B_PAGE_SIZE);

	// get the page table's physical address
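	// (QueryInterrupt() must be called with interrupts disabled, hence the
	// disable_interrupts()/restore_interrupts() pair below)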
	phys_addr_t physicalTable;
	X86VMTranslationMap32Bit* map = static_cast<X86VMTranslationMap32Bit*>(
		VMAddressSpace::Kernel()->TranslationMap());
	uint32 dummyFlags;
	cpu_status state = disable_interrupts();
	map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags);
	restore_interrupts(state);

	// put the page table into the page directory
	int32 index = (addr_t)virtualBase / (B_PAGE_SIZE * 1024);
	page_directory_entry* entry
		= &map->PagingStructures32Bit()->pgdir_virt[index];
	PutPageTableInPageDir(entry, physicalTable,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	X86PagingStructures32Bit::UpdateAllPageDirs(index, *entry);

	// init the pool structure
	pool->Init(dataArea, data, virtualArea, (addr_t)virtualBase);
	poolDeleter.Detach();
	_pool = pool;
	return B_OK;
}


// #pragma mark - X86PagingMethod32Bit


X86PagingMethod32Bit::X86PagingMethod32Bit()
	:
	fPageHole(NULL),
	fPageHolePageDir(NULL),
	fKernelPhysicalPageDirectory(0),
	fKernelVirtualPageDirectory(NULL),
	fPhysicalPageMapper(NULL),
	fKernelPhysicalPageMapper(NULL)
{
}


X86PagingMethod32Bit::~X86PagingMethod32Bit()
{
}


status_t
X86PagingMethod32Bit::Init(kernel_args* args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("X86PagingMethod32Bit::Init(): entry\n");

	// Ignore all memory beyond the maximum 32-bit address.
	static const phys_addr_t kLimit = 1ULL << 32;
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		addr_range& range = args->physical_memory_range[i];
		if (range.start >= kLimit)
			range.size = 0;
		else if ((range.start + range.size) > kLimit)
			range.size = kLimit - range.start;
	}

	// The page hole was set up in the boot loader (stage 2): page directory
	// entry 1023 points back at the page directory itself, so the 4 MB
	// region it maps shows all page tables, with the page directory
	// appearing as the region's last page.
	fPageHole = (page_table_entry*)(addr_t)args->arch_args.page_hole;
	// calculate where the page directory appears within the page hole
	fPageHolePageDir = (page_directory_entry*)
		(((addr_t)args->arch_args.page_hole)
			+ (B_PAGE_SIZE * 1024 - B_PAGE_SIZE));
	// clear out the bottom 2 GB, unmap everything
	memset(fPageHolePageDir + FIRST_USER_PGDIR_ENT, 0,
		sizeof(page_directory_entry) * NUM_USER_PGDIR_ENTS);

	fKernelPhysicalPageDirectory = args->arch_args.phys_pgdir;
	fKernelVirtualPageDirectory = (page_directory_entry*)(addr_t)
		args->arch_args.vir_pgdir;

#ifdef TRACE_X86_PAGING_METHOD_32_BIT
	TRACE("page hole: %p, page dir: %p\n", fPageHole, fPageHolePageDir);
	TRACE("page dir: %p (physical: %#" B_PRIx32 ")\n",
		fKernelVirtualPageDirectory, fKernelPhysicalPageDirectory);
#endif

	X86PagingStructures32Bit::StaticInit();

	// create the initial pools for the physical page mapper
	int32 poolCount = _GetInitialPoolCount();
	PhysicalPageSlotPool* pool = PhysicalPageSlotPool::sInitialPhysicalPagePool;

	for (int32 i = 0; i < poolCount; i++) {
		new(&pool[i]) PhysicalPageSlotPool;
		status_t error = pool[i].InitInitial(args);
		if (error != B_OK) {
			panic("X86PagingMethod32Bit::Init(): Failed to create initial pool "
				"for physical page mapper!");
			return error;
		}
	}

	// create physical page mapper
	large_memory_physical_page_ops_init(args, pool, poolCount, sizeof(*pool),
		fPhysicalPageMapper, fKernelPhysicalPageMapper);
		// TODO: Select the best page mapper!

	// enable global page feature if available
	if (x86_check_feature(IA32_FEATURE_PGE, FEATURE_COMMON)) {
		// this prevents kernel pages from being flushed from TLB on
		// context-switch
		x86_write_cr4(x86_read_cr4() | IA32_CR4_GLOBAL_PAGES);
	}

	TRACE("X86PagingMethod32Bit::Init(): done\n");

	*_physicalPageMapper = fPhysicalPageMapper;
	return B_OK;
}


status_t
X86PagingMethod32Bit::InitPostArea(kernel_args* args)
{
	// Now that the VM is initialized, we can unmap the boot-time page hole
	// and create a proper area for the kernel page directory.
	void *temp;
	area_id area;

	// unmap the page hole hack we were using before
	fKernelVirtualPageDirectory[1023] = 0;
	fPageHolePageDir = NULL;
	fPageHole = NULL;

	temp = (void*)fKernelVirtualPageDirectory;
	area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, B_PAGE_SIZE,
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK)
		return area;

	int32 poolCount = _GetInitialPoolCount();
	for (int32 i = 0; i < poolCount; i++) {
		status_t error = PhysicalPageSlotPool::sInitialPhysicalPagePool[i]
			.InitInitialPostArea(args);
		if (error != B_OK)
			return error;
	}

	return B_OK;
}


status_t
X86PagingMethod32Bit::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
{
	X86VMTranslationMap32Bit* map = new(std::nothrow) X86VMTranslationMap32Bit;
	if (map == NULL)
		return B_NO_MEMORY;

	status_t error = map->Init(kernel);
	if (error != B_OK) {
		delete map;
		return error;
	}

	*_map = map;
	return B_OK;
}


status_t
X86PagingMethod32Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
	phys_addr_t physicalAddress, uint8 attributes,
	page_num_t (*get_free_page)(kernel_args*))
{
	// XXX horrible back door to map a page quickly regardless of translation
	// map object, etc. Used only during VM setup.
	// It relies on the 'page hole' set up in the stage 2 boot loader: one of
	// the pgdir entries is pointed back at the page directory itself,
	// effectively mapping the contents of all of the 4 MB of page tables
	// into a 4 MB region. The hole is only used here and is unmapped again
	// in InitPostArea().

	// check to see if a page table exists for this range
	int index = VADDR_TO_PDENT(virtualAddress);
	if ((fPageHolePageDir[index] & X86_PDE_PRESENT) == 0) {
		phys_addr_t pgtable;
		page_directory_entry *e;
		// we need to allocate a pgtable
		pgtable = get_free_page(args);
		// pgtable is in pages, convert to physical address
		pgtable *= B_PAGE_SIZE;

		TRACE("X86PagingMethod32Bit::MapEarly(): asked for free page for "
			"pgtable. %#" B_PRIxPHYSADDR "\n", pgtable);

		// put it in the pgdir
		e = &fPageHolePageDir[index];
		PutPageTableInPageDir(e, pgtable, attributes);

		// zero it out in its new mapping
		memset((unsigned int*)((addr_t)fPageHole
				+ (virtualAddress / B_PAGE_SIZE / 1024) * B_PAGE_SIZE),
			0, B_PAGE_SIZE);
	}

	ASSERT_PRINT(
		(fPageHole[virtualAddress / B_PAGE_SIZE] & X86_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", pde: %#" B_PRIx32
		", existing pte: %#" B_PRIx32, virtualAddress, fPageHolePageDir[index],
		fPageHole[virtualAddress / B_PAGE_SIZE]);

	// now, fill in the page table entry
	PutPageTableEntryInTable(fPageHole + virtualAddress / B_PAGE_SIZE,
		physicalAddress, attributes, 0, IS_KERNEL_ADDRESS(virtualAddress));

	return B_OK;
}


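/*!	Checks whether \a virtualAddress is mapped in the kernel address space
	with at least the given \a protection, by inspecting the paging
	structures directly. It uses the debug variants of the physical page
	mapper (GetPageDebug()/PutPageDebug()) and may temporarily switch CR3 to
	the kernel page directory.
*/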
bool
X86PagingMethod32Bit::IsKernelPageAccessible(addr_t virtualAddress,
	uint32 protection)
{
	// We only trust the kernel team's page directory. So switch to it first.
	// Always set it to make sure the TLBs don't contain obsolete data.
	uint32 physicalPageDirectory = x86_read_cr3();
	x86_write_cr3(fKernelPhysicalPageDirectory);

	// get the page directory entry for the address
	page_directory_entry pageDirectoryEntry;
	uint32 index = VADDR_TO_PDENT(virtualAddress);

	if (physicalPageDirectory == fKernelPhysicalPageDirectory) {
		pageDirectoryEntry = fKernelVirtualPageDirectory[index];
	} else if (fPhysicalPageMapper != NULL) {
		// map the original page directory and get the entry
		void* handle;
		addr_t virtualPageDirectory;
		status_t error = fPhysicalPageMapper->GetPageDebug(
			physicalPageDirectory, &virtualPageDirectory, &handle);
		if (error == B_OK) {
			pageDirectoryEntry
				= ((page_directory_entry*)virtualPageDirectory)[index];
			fPhysicalPageMapper->PutPageDebug(virtualPageDirectory, handle);
		} else
			pageDirectoryEntry = 0;
	} else
		pageDirectoryEntry = 0;

	// map the page table and get the entry
	page_table_entry pageTableEntry;
	index = VADDR_TO_PTENT(virtualAddress);

	if ((pageDirectoryEntry & X86_PDE_PRESENT) != 0
			&& fPhysicalPageMapper != NULL) {
		void* handle;
		addr_t virtualPageTable;
		status_t error = fPhysicalPageMapper->GetPageDebug(
			pageDirectoryEntry & X86_PDE_ADDRESS_MASK, &virtualPageTable,
			&handle);
		if (error == B_OK) {
			pageTableEntry = ((page_table_entry*)virtualPageTable)[index];
			fPhysicalPageMapper->PutPageDebug(virtualPageTable, handle);
		} else
			pageTableEntry = 0;
	} else
		pageTableEntry = 0;

	// switch back to the original page directory
	if (physicalPageDirectory != fKernelPhysicalPageDirectory)
		x86_write_cr3(physicalPageDirectory);

	if ((pageTableEntry & X86_PTE_PRESENT) == 0)
		return false;

	// present means kernel-readable, so check for writable
	return (protection & B_KERNEL_WRITE_AREA) == 0
		|| (pageTableEntry & X86_PTE_WRITABLE) != 0;
}


/*static*/ void
X86PagingMethod32Bit::PutPageTableInPageDir(page_directory_entry* entry,
	phys_addr_t pgtablePhysical, uint32 attributes)
{
	*entry = (pgtablePhysical & X86_PDE_ADDRESS_MASK)
		| X86_PDE_PRESENT
		| X86_PDE_WRITABLE
		| X86_PDE_USER;
		// TODO: we ignore the attributes of the page table - for compatibility
		// with BeOS we allow having user accessible areas in the kernel address
		// space. This is currently being used by some drivers, mainly for the
		// frame buffer. Our current real time data implementation makes use of
		// this fact, too.
		// We might want to get rid of this possibility one day, especially if
		// we intend to port it to a platform that does not support this.
}


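/*!	Composes a page table entry for \a physicalAddress with the given
	protection, memory type, and global flag, and writes it with a single
	volatile store, so that the hardware never sees a partially updated
	entry.
*/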
/*static*/ void
X86PagingMethod32Bit::PutPageTableEntryInTable(page_table_entry* entry,
	phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
	bool globalPage)
{
	page_table_entry page = (physicalAddress & X86_PTE_ADDRESS_MASK)
		| X86_PTE_PRESENT | (globalPage ? X86_PTE_GLOBAL : 0)
		| MemoryTypeToPageTableEntryFlags(memoryType);

	// if the page is user accessible, it's automatically
	// accessible in kernel space, too (but with the same
	// protection)
	if ((attributes & B_USER_PROTECTION) != 0) {
		page |= X86_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			page |= X86_PTE_WRITABLE;
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		page |= X86_PTE_WRITABLE;

	// put it in the page table
	*(volatile page_table_entry*)entry = page;
}


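/*!	Returns the number of initial pools needed for the boot CPUs' slots plus
	the extra slots, i.e. the runtime counterpart of MAX_INITIAL_POOLS, based
	on the actual rather than the maximum supported number of CPUs.
*/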
inline int32
X86PagingMethod32Bit::_GetInitialPoolCount()
{
	int32 requiredSlots = smp_get_num_cpus() * TOTAL_SLOTS_PER_CPU
			+ EXTRA_SLOTS;
	return (requiredSlots + 1023) / 1024;
}


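/*!	Clears the early allocated page tables and enters them into the kernel
	page directory (via the boot loader's page hole), so that the given
	virtual address range is backed by page tables before the VM is running.
*/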
/*static*/ void
X86PagingMethod32Bit::_EarlyPreparePageTables(page_table_entry* pageTables,
	addr_t address, size_t size)
{
	memset(pageTables, 0, B_PAGE_SIZE * (size / (B_PAGE_SIZE * 1024)));

	// put the array of page tables directly into the kernel page directory;
	// they will be wired and kept mapped into virtual space so that they
	// are easy to get to
	{
		addr_t virtualTable = (addr_t)pageTables;

		page_directory_entry* pageHolePageDir
			= X86PagingMethod32Bit::Method()->PageHolePageDir();

		for (size_t i = 0; i < (size / (B_PAGE_SIZE * 1024));
				i++, virtualTable += B_PAGE_SIZE) {
			phys_addr_t physicalTable = 0;
			_EarlyQuery(virtualTable, &physicalTable);
			page_directory_entry* entry = &pageHolePageDir[
				(address / (B_PAGE_SIZE * 1024)) + i];
			PutPageTableInPageDir(entry, physicalTable,
				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		}
	}
}


//! TODO: currently assumes this translation map is active
/*static*/ status_t
X86PagingMethod32Bit::_EarlyQuery(addr_t virtualAddress,
	phys_addr_t *_physicalAddress)
{
	X86PagingMethod32Bit* method = X86PagingMethod32Bit::Method();
	int index = VADDR_TO_PDENT(virtualAddress);
	if ((method->PageHolePageDir()[index] & X86_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_ERROR;
	}

	page_table_entry* entry = method->PageHole() + virtualAddress / B_PAGE_SIZE;
	if ((*entry & X86_PTE_PRESENT) == 0) {
		// page mapping not valid
		return B_ERROR;
	}

	*_physicalAddress = *entry & X86_PTE_ADDRESS_MASK;
	return B_OK;
}
604