xref: /haiku/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp (revision 922e7ba1f3228e6f28db69b0ded8f86eb32dea17)
/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/pae/X86VMTranslationMapPAE.h"

#include <int.h>
#include <slab/Slab.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/pae/X86PagingMethodPAE.h"
#include "paging/pae/X86PagingStructuresPAE.h"
#include "paging/x86_physical_page_mapper.h"


//#define TRACE_X86_VM_TRANSLATION_MAP_PAE
#ifdef TRACE_X86_VM_TRANSLATION_MAP_PAE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


#if B_HAIKU_PHYSICAL_BITS == 64

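// PAE quick reference (a summary implied by the constants used below): a
// 32-bit virtual address is decoded as 2 + 9 + 9 + 12 bits -- a PDPT index
// selecting one of 4 page directories, a page directory index
// (kPAEPageDirEntryCount == 512), a page table index
// (kPAEPageTableEntryCount == 512), and the page offset. All entries are
// 64 bits wide, and each page table therefore spans
// kPAEPageTableRange == 2 MB of virtual address space.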

X86VMTranslationMapPAE::X86VMTranslationMapPAE()
	:
	fPagingStructures(NULL)
{
}


X86VMTranslationMapPAE::~X86VMTranslationMapPAE()
{
	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL)
		fPageMapper->Delete();

	// cycle through and free all of the user space page tables

	STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);
		// assuming 1-1 split of the address space

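	// Only the two userland page directories need to be scanned here (1 GB
	// each); the upper two are shared with the kernel and are not freed with
	// an individual map.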
	for (uint32 k = 0; k < 2; k++) {
		pae_page_directory_entry* pageDir
			= fPagingStructures->VirtualPageDirs()[k];
		if (pageDir == NULL)
			continue;

		for (uint32 i = 0; i < kPAEPageDirEntryCount; i++) {
			if ((pageDir[i] & X86_PAE_PDE_PRESENT) != 0) {
				phys_addr_t address = pageDir[i] & X86_PAE_PDE_ADDRESS_MASK;
				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
				if (page == NULL)
					panic("X86VMTranslationMapPAE::~X86VMTranslationMapPAE: "
						"didn't find page table page: page address: %#"
						B_PRIxPHYSADDR ", virtual base: %#" B_PRIxADDR "\n",
						address,
						(k * kPAEPageDirEntryCount + i) * kPAEPageTableRange);
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
		}
	}

	fPagingStructures->RemoveReference();
}


status_t
X86VMTranslationMapPAE::Init(bool kernel)
{
	TRACE("X86VMTranslationMapPAE::Init()\n");

	X86VMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) X86PagingStructuresPAE;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	X86PagingMethodPAE* method = X86PagingMethodPAE::Method();

	if (kernel) {
		// kernel
		// get the physical page mapper
		fPageMapper = method->KernelPhysicalPageMapper();

		// we already know the kernel pgdir mapping
		fPagingStructures->Init(method->KernelVirtualPageDirPointerTable(),
			method->KernelPhysicalPageDirPointerTable(), NULL,
			method->KernelVirtualPageDirs(), method->KernelPhysicalPageDirs());
	} else {
		// user
		// allocate a physical page mapper
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// The following code assumes that the kernel address space occupies
		// the upper half of the virtual address space. This simplifies things
		// a lot, since it allows us to just use the upper two page directories
		// of the kernel and create two new lower page directories for the
		// userland.
		STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);

		// allocate the page directories (both at once)
		pae_page_directory_entry* virtualPageDirs[4];
		phys_addr_t physicalPageDirs[4];
		virtualPageDirs[0] = (pae_page_directory_entry*)memalign(B_PAGE_SIZE,
			2 * B_PAGE_SIZE);
		if (virtualPageDirs[0] == NULL)
			return B_NO_MEMORY;
		virtualPageDirs[1] = virtualPageDirs[0] + kPAEPageTableEntryCount;

		// clear the userland page directories
		memset(virtualPageDirs[0], 0, 2 * B_PAGE_SIZE);

		// use the upper two kernel page directories
		for (int32 i = 2; i < 4; i++) {
			virtualPageDirs[i] = method->KernelVirtualPageDirs()[i];
			physicalPageDirs[i] = method->KernelPhysicalPageDirs()[i];
		}

		// look up the page directories' physical addresses
		for (int32 i = 0; i < 2; i++) {
			vm_get_page_mapping(VMAddressSpace::KernelID(),
				(addr_t)virtualPageDirs[i], &physicalPageDirs[i]);
		}

		// allocate the PDPT -- it needs to have a 32 bit physical address
		phys_addr_t physicalPDPT;
		void* pdptHandle;
		pae_page_directory_pointer_table_entry* pdpt
			= (pae_page_directory_pointer_table_entry*)
				method->Allocate32BitPage(physicalPDPT, pdptHandle);
		if (pdpt == NULL) {
			free(virtualPageDirs[0]);
			return B_NO_MEMORY;
		}

		// init the PDPT entries
		for (int32 i = 0; i < 4; i++) {
			pdpt[i] = (physicalPageDirs[i] & X86_PAE_PDPTE_ADDRESS_MASK)
				| X86_PAE_PDPTE_PRESENT;
		}
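			// (in a user map, entries 0 and 1 now point at the freshly
			// allocated userland page directories, entries 2 and 3 at the
			// shared kernel ones)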

		// init the paging structures
		fPagingStructures->Init(pdpt, physicalPDPT, pdptHandle, virtualPageDirs,
			physicalPageDirs);
	}

	return B_OK;
}


size_t
X86VMTranslationMapPAE::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// If start == 0, the actual base address is not yet known to the caller and
	// we shall assume the worst case.
	if (start == 0) {
		// offset the range so it has the worst possible alignment
		start = kPAEPageTableRange - B_PAGE_SIZE;
		end += kPAEPageTableRange - B_PAGE_SIZE;
	}

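	// For example, with the 2 MB kPAEPageTableRange: start = 0x1ff000 and
	// end = 0x401000 yield 0x401000 / 0x200000 + 1 - 0x1ff000 / 0x200000
	// = 2 + 1 - 0 = 3, i.e. the range touches three page tables.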
	return end / kPAEPageTableRange + 1 - start / kPAEPageTableRange;
}


status_t
X86VMTranslationMapPAE::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
	uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("X86VMTranslationMapPAE::Map(): %#" B_PRIxADDR " -> %#" B_PRIxPHYSADDR
		"\n", virtualAddress, physicalAddress);

	// check to see if a page table exists for this range
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// we need to allocate a page table
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		phys_addr_t physicalPageTable
			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("X86VMTranslationMapPAE::Map(): asked for free page for "
			"page table: %#" B_PRIxPHYSADDR "\n", physicalPageTable);

		// put it in the page dir
		X86PagingMethodPAE::PutPageTableInPageDir(pageDirEntry,
			physicalPageTable,
			attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

		fMapCount++;
	}

	// now, fill in the page table entry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	pae_page_table_entry* entry = pageTable
		+ virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount;
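		// virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount extracts
		// bits 12-20 of the address, i.e. the index into the 512-entry page
		// table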

	ASSERT_PRINT((*entry & X86_PAE_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64,
		virtualAddress, *entry);

	X86PagingMethodPAE::PutPageTableEntryInTable(entry, physicalAddress,
		attributes, memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as the
	// entry was not present before and the TLB doesn't cache those entries.

	fMapCount++;

	return B_OK;
}

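// A typical call sequence, roughly sketched from how the VM core drives a
// translation map (simplified, error handling omitted): the caller reserves
// pages for the mapping and any page tables up front, so Map() can always
// pull a page table page out of the reservation:
//
//	vm_page_reservation reservation;
//	vm_page_reserve_pages(&reservation,
//		map->MaxPagesNeededToMap(base, base + size - 1), VM_PRIORITY_USER);
//	map->Lock();
//	map->Map(base, physicalAddress, B_READ_AREA | B_WRITE_AREA, 0,
//		&reservation);
//	map->Unlock();
//	vm_page_unreserve_pages(&reservation);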

status_t
X86VMTranslationMapPAE::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMapPAE::Unmap(): %#" B_PRIxADDR " - %#" B_PRIxADDR
		"\n", start, end);

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("X86VMTranslationMapPAE::Unmap(): removing page %#"
				B_PRIxADDR "\n", start);

			pae_page_table_entry oldEntry
				= X86PagingMethodPAE::ClearPageTableEntryFlags(
					&pageTable[index], X86_PAE_PTE_PRESENT);
			fMapCount--;

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could
				// have been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);
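		// ("start != 0" catches the wrap-around case, i.e. "start"
		// overflowing to 0 at the top of the 32-bit address space)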

	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/
status_t
X86VMTranslationMapPAE::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), address);

	TRACE("X86VMTranslationMapPAE::UnmapPage(%#" B_PRIxADDR ")\n", address);

	RecursiveLocker locker(fLock);

	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
		return B_ENTRY_NOT_FOUND;

	ThreadCPUPinner pinner(thread_get_current_thread());

	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

	pae_page_table_entry oldEntry = X86PagingMethodPAE::ClearPageTableEntry(
		&pageTable[address / B_PAGE_SIZE % kPAEPageTableEntryCount]);

	pinner.Unlock();

	if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
		// page mapping not valid
		return B_ENTRY_NOT_FOUND;
	}

	fMapCount--;

	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any TLB.
		InvalidatePage(address);

		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of the
		// same team) could still access the page in question via their cached
		// entry. We can obviously lose a modified flag in this case, with the
		// effect that the page looks unmodified (and might thus be recycled),
		// but is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// this is actually a problem.
		// Interestingly FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		(oldEntry & X86_PAE_PTE_ACCESSED) != 0,
		(oldEntry & X86_PAE_PTE_DIRTY) != 0, updatePageQueue);

	return B_OK;
}


void
X86VMTranslationMapPAE::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("X86VMTranslationMapPAE::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	VMAreaMappings queue;

	RecursiveLocker locker(fLock);

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			pae_page_table_entry oldEntry
				= X86PagingMethodPAE::ClearPageTableEntry(&pageTable[index]);
			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0)
				continue;

			fMapCount--;

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could
				// have been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0)
					page->accessed = true;
				if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM this
	// isn't really critical, since in all cases this method is used, the
	// unmapped area range is unmapped for good (resized/cut) and the pages
	// will likely be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


void
X86VMTranslationMapPAE::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		X86VMTranslationMapPAE::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	RecursiveLocker locker(fLock);

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			pae_page_directory_entry* pageDirEntry
				= X86PagingMethodPAE::PageDirEntryForAddress(
					fPagingStructures->VirtualPageDirs(), address);
			if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page dir entry", page, area, address);
				continue;
			}

			ThreadCPUPinner pinner(thread_get_current_thread());

			pae_page_table_entry* pageTable
				= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
					*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
			pae_page_table_entry oldEntry
				= X86PagingMethodPAE::ClearPageTableEntry(
					&pageTable[address / B_PAGE_SIZE
						% kPAEPageTableEntryCount]);

			pinner.Unlock();

			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


status_t
X86VMTranslationMapPAE::Query(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physicalAddress = 0;

	// get the page directory entry
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no page table here
		return B_OK;
	}

	// get the page table entry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	pae_page_table_entry entry
		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];

	pinner.Unlock();

	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;

	// translate the page state flags
	if ((entry & X86_PAE_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

	TRACE("X86VMTranslationMapPAE::Query(%#" B_PRIxADDR ") -> %#"
		B_PRIxPHYSADDR "\n", virtualAddress, *_physicalAddress);

	return B_OK;
}


status_t
X86VMTranslationMapPAE::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physicalAddress = 0;

	// get the page directory entry
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no page table here
		return B_OK;
	}

	// get the page table entry
	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)X86PagingMethodPAE::Method()
			->PhysicalPageMapper()->InterruptGetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	pae_page_table_entry entry
		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];

	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;

	// translate the page state flags
	if ((entry & X86_PAE_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

	TRACE("X86VMTranslationMapPAE::QueryInterrupt(%#" B_PRIxADDR ") -> %#"
		B_PRIxPHYSADDR "\n", virtualAddress, *_physicalAddress);

	return B_OK;
}


status_t
X86VMTranslationMapPAE::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMapPAE::Protect(): %#" B_PRIxADDR " - %#" B_PRIxADDR
		", attributes: %#" B_PRIx32 "\n", start, end, attributes);

	// compute protection flags
	uint64 newProtectionFlags = 0;
	if ((attributes & B_USER_PROTECTION) != 0) {
		newProtectionFlags = X86_PAE_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newProtectionFlags |= X86_PAE_PTE_WRITABLE;
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newProtectionFlags = X86_PAE_PTE_WRITABLE;

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			pae_page_table_entry entry = pageTable[index];
			if ((entry & X86_PAE_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("X86VMTranslationMapPAE::Protect(): protect page %#"
				B_PRIxADDR "\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			pae_page_table_entry oldEntry;
			while (true) {
				oldEntry = X86PagingMethodPAE::TestAndSetPageTableEntry(
					&pageTable[index],
					(entry & ~(X86_PAE_PTE_PROTECTION_MASK
							| X86_PAE_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags
						| X86PagingMethodPAE::MemoryTypeToPageTableEntryFlags(
							memoryType),
					entry);
				if (oldEntry == entry)
					break;
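				// the compare-and-swap failed, most likely because the CPU
				// set the accessed or dirty flag concurrently -- retry with
				// the updated entry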
				entry = oldEntry;
			}

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could
				// have been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


status_t
X86VMTranslationMapPAE::ClearFlags(addr_t address, uint32 flags)
{
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), address);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no page table here
		return B_OK;
	}

	uint64 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PAE_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? X86_PAE_PTE_ACCESSED : 0);

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pae_page_table_entry* entry
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
			+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;

	// clear out the flags we've been requested to clear
	pae_page_table_entry oldEntry
		= X86PagingMethodPAE::ClearPageTableEntryFlags(entry, flagsToClear);

	pinner.Unlock();

	if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(address);

	return B_OK;
}


bool
X86VMTranslationMapPAE::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	TRACE("X86VMTranslationMapPAE::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), address);

	RecursiveLocker locker(fLock);

	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	pae_page_table_entry* entry
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
			+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;

	// perform the deed
	pae_page_table_entry oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = *entry;
			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
				// page mapping not valid
				return false;
			}

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// page was accessed -- just clear the flags
				oldEntry = X86PagingMethodPAE::ClearPageTableEntryFlags(entry,
					X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
				break;
			}

			// page hasn't been accessed -- unmap it
			if (X86PagingMethodPAE::TestAndSetPageTableEntry(entry, 0, oldEntry)
					== oldEntry) {
				break;
			}

			// something changed -- check again
		}
	} else {
		oldEntry = X86PagingMethodPAE::ClearPageTableEntryFlags(entry,
			X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
	}

	pinner.Unlock();

	_modified = (oldEntry & X86_PAE_PTE_DIRTY) != 0;

	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any TLB.
		InvalidatePage(address);
		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

	return false;
}


X86PagingStructures*
X86VMTranslationMapPAE::PagingStructures() const
{
	return fPagingStructures;
}


#endif	// B_HAIKU_PHYSICAL_BITS == 64