xref: /haiku/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp (revision d382fc3a8e7edf2bb0e5e81f95bbd3ae80ea1ca0)
1 /*
2  * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include "paging/pae/X86VMTranslationMapPAE.h"
12 
13 #include <int.h>
14 #include <slab/Slab.h>
15 #include <thread.h>
16 #include <util/AutoLock.h>
17 #include <vm/vm_page.h>
18 #include <vm/VMAddressSpace.h>
19 #include <vm/VMCache.h>
20 
21 #include "paging/pae/X86PagingMethodPAE.h"
22 #include "paging/pae/X86PagingStructuresPAE.h"
23 #include "paging/x86_physical_page_mapper.h"
24 
25 
26 //#define TRACE_X86_VM_TRANSLATION_MAP_PAE
27 #ifdef TRACE_X86_VM_TRANSLATION_MAP_PAE
28 #	define TRACE(x...) dprintf(x)
29 #else
30 #	define TRACE(x...) ;
31 #endif
32 
33 
34 #if B_HAIKU_PHYSICAL_BITS == 64
35 
36 
37 X86VMTranslationMapPAE::X86VMTranslationMapPAE()
38 	:
39 	fPagingStructures(NULL)
40 {
41 }
42 
43 
44 X86VMTranslationMapPAE::~X86VMTranslationMapPAE()
45 {
46 	if (fPagingStructures == NULL)
47 		return;
48 
49 	if (fPageMapper != NULL)
50 		fPageMapper->Delete();
51 
52 	// cycle through and free all of the user space page tables
53 
54 	STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);
55 		// assuming 1-1 split of the address space
56 
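	// Only the first two page directories map userland (0 - 2 GB); the upper
	// two belong to the kernel and are shared with all address spaces, so
	// they must not be freed here.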
57 	for (uint32 k = 0; k < 2; k++) {
58 		pae_page_directory_entry* pageDir
59 			= fPagingStructures->VirtualPageDirs()[k];
60 		if (pageDir == NULL)
61 			continue;
62 
63 		for (uint32 i = 0; i < kPAEPageDirEntryCount; i++) {
64 			if ((pageDir[i] & X86_PAE_PDE_PRESENT) != 0) {
65 				phys_addr_t address = pageDir[i] & X86_PAE_PDE_ADDRESS_MASK;
66 				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
67 				if (page == NULL)
68 					panic("X86VMTranslationMapPAE::~X86VMTranslationMapPAE: "
69 						"didn't find page table page: page address: %#"
70 						B_PRIxPHYSADDR ", virtual base: %#" B_PRIxADDR "\n",
71 						address,
72 						(k * kPAEPageDirEntryCount + i) * kPAEPageTableRange);
73 				DEBUG_PAGE_ACCESS_START(page);
74 				vm_page_set_state(page, PAGE_STATE_FREE);
75 			}
76 		}
77 	}
78 
79 	fPagingStructures->RemoveReference();
80 }
81 
82 
83 status_t
84 X86VMTranslationMapPAE::Init(bool kernel)
85 {
86 	TRACE("X86VMTranslationMapPAE::Init()\n");
87 
88 	X86VMTranslationMap::Init(kernel);
89 
90 	fPagingStructures = new(std::nothrow) X86PagingStructuresPAE;
91 	if (fPagingStructures == NULL)
92 		return B_NO_MEMORY;
93 
94 	X86PagingMethodPAE* method = X86PagingMethodPAE::Method();
95 
96 	if (kernel) {
97 		// kernel
98 		// get the physical page mapper
99 		fPageMapper = method->KernelPhysicalPageMapper();
100 
101 		// we already know the kernel pgdir mapping
102 		fPagingStructures->Init(method->KernelVirtualPageDirPointerTable(),
103 			method->KernelPhysicalPageDirPointerTable(), NULL,
104 			method->KernelVirtualPageDirs(), method->KernelPhysicalPageDirs());
105 	} else {
106 		// user
107 		// allocate a physical page mapper
108 		status_t error = method->PhysicalPageMapper()
109 			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
110 		if (error != B_OK)
111 			return error;
112 
113 		// The following code assumes that the kernel address space occupies the
114 		// upper half of the virtual address space. This simplifies things a
115 		// lot, since it allows us to just use the upper two page directories
116 		// of the kernel and create two new lower page directories for the
117 		// userland.
118 		STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);
119 
120 		// allocate the page directories (both at once)
121 		pae_page_directory_entry* virtualPageDirs[4];
122 		phys_addr_t physicalPageDirs[4];
123 		virtualPageDirs[0] = (pae_page_directory_entry*)memalign(B_PAGE_SIZE,
124 			2 * B_PAGE_SIZE);
125 		if (virtualPageDirs[0] == NULL)
126 			return B_NO_MEMORY;
127 		virtualPageDirs[1] = virtualPageDirs[0] + kPAEPageTableEntryCount;
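		// (a PAE page directory is one 4 KB page holding 512 64-bit entries,
		// so the second directory starts exactly one page further)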
128 
129 		// clear the userland page directories
130 		memset(virtualPageDirs[0], 0, 2 * B_PAGE_SIZE);
131 
132 		// use the upper two kernel page directories
133 		for (int32 i = 2; i < 4; i++) {
134 			virtualPageDirs[i] = method->KernelVirtualPageDirs()[i];
135 			physicalPageDirs[i] = method->KernelPhysicalPageDirs()[i];
136 		}
137 
138 		// look up the page directories' physical addresses
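		// (memalign() only gave us virtual addresses)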
139 		for (int32 i = 0; i < 2; i++) {
140 			vm_get_page_mapping(VMAddressSpace::KernelID(),
141 				(addr_t)virtualPageDirs[i], &physicalPageDirs[i]);
142 		}
143 
144 		// allocate the PDPT -- needs to have a 32 bit physical address
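		// (in 32-bit PAE mode CR3 holds only a 32-bit pointer to the PDPT,
		// so the table itself must reside below 4 GB)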
145 		phys_addr_t physicalPDPT;
146 		void* pdptHandle;
147 		pae_page_directory_pointer_table_entry* pdpt
148 			= (pae_page_directory_pointer_table_entry*)
149 				method->Allocate32BitPage(physicalPDPT, pdptHandle);
150 		if (pdpt == NULL) {
151 			free(virtualPageDirs[0]);
			return B_NO_MEMORY;
		}
152 
153 		// init the PDPT entries
154 		for (int32 i = 0; i < 4; i++) {
155 			pdpt[i] = (physicalPageDirs[i] & X86_PAE_PDPTE_ADDRESS_MASK)
156 				| X86_PAE_PDPTE_PRESENT;
157 		}
158 
159 		// init the paging structures
160 		fPagingStructures->Init(pdpt, physicalPDPT, pdptHandle, virtualPageDirs,
161 			physicalPageDirs);
162 	}
163 
164 	return B_OK;
165 }
166 
167 
168 size_t
169 X86VMTranslationMapPAE::MaxPagesNeededToMap(addr_t start, addr_t end) const
170 {
171 	// If start == 0, the actual base address is not yet known to the caller and
172 	// we shall assume the worst case.
173 	if (start == 0) {
174 		// offset the range so it has the worst possible alignment
175 		start = kPAEPageTableRange - B_PAGE_SIZE;
176 		end += kPAEPageTableRange - B_PAGE_SIZE;
177 	}
178 
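	// One page table covers kPAEPageTableRange (2 MB) of virtual address
	// space, so we need one page table per 2 MB block touched by the range.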
179 	return end / kPAEPageTableRange + 1 - start / kPAEPageTableRange;
180 }
181 
182 
183 status_t
184 X86VMTranslationMapPAE::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
185 	uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
186 {
187 	TRACE("X86VMTranslationMapPAE::Map(): %#" B_PRIxADDR " -> %#" B_PRIxPHYSADDR
188 		"\n", virtualAddress, physicalAddress);
189 
190 	// check to see if a page table exists for this range
191 	pae_page_directory_entry* pageDirEntry
192 		= X86PagingMethodPAE::PageDirEntryForAddress(
193 			fPagingStructures->VirtualPageDirs(), virtualAddress);
194 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
195 		// we need to allocate a page table
196 		vm_page *page = vm_page_allocate_page(reservation,
197 			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
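		// (the reservation guarantees us a free page, and VM_PAGE_ALLOC_CLEAR
		// means the new page table starts out zeroed, i.e. empty)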
198 
199 		DEBUG_PAGE_ACCESS_END(page);
200 
201 		phys_addr_t physicalPageTable
202 			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
203 
204 		TRACE("X86VMTranslationMapPAE::Map(): asked for free page for "
205 			"page table: %#" B_PRIxPHYSADDR "\n", physicalPageTable);
206 
207 		// put it in the page dir
208 		X86PagingMethodPAE::PutPageTableInPageDir(pageDirEntry,
209 			physicalPageTable,
210 			attributes
211 				| ((attributes & B_USER_PROTECTION) != 0
212 						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
213 
214 		fMapCount++;
215 	}
216 
217 	// now, fill in the page table entry
218 	struct thread* thread = thread_get_current_thread();
219 	ThreadCPUPinner pinner(thread);
220 
221 	pae_page_table_entry* pageTable
222 		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
223 			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
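	// bits 12-20 of the address select the entry within the 512-entry page
	// table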
224 	pae_page_table_entry* entry = pageTable
225 		+ virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount;
226 
227 	ASSERT_PRINT((*entry & X86_PAE_PTE_PRESENT) == 0,
228 		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64,
229 		virtualAddress, *entry);
230 
231 	X86PagingMethodPAE::PutPageTableEntryInTable(entry, physicalAddress,
232 		attributes, memoryType, fIsKernelMap);
233 
234 	pinner.Unlock();
235 
236 	// Note: We don't need to invalidate the TLB for this address, as previously
237 	// the entry was not present and the TLB doesn't cache those entries.
238 
239 	fMapCount++;
240 
241 	return B_OK;
242 }
243 
244 
245 status_t
246 X86VMTranslationMapPAE::Unmap(addr_t start, addr_t end)
247 {
248 	start = ROUNDDOWN(start, B_PAGE_SIZE);
249 	if (start >= end)
250 		return B_OK;
251 
252 	TRACE("X86VMTranslationMapPAE::Unmap(): %#" B_PRIxADDR " - %#" B_PRIxADDR
253 		"\n", start, end);
254 
255 	do {
256 		pae_page_directory_entry* pageDirEntry
257 			= X86PagingMethodPAE::PageDirEntryForAddress(
258 				fPagingStructures->VirtualPageDirs(), start);
259 		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
260 			// no page table here, move the start up to access the next page
261 			// table
262 			start = ROUNDUP(start + 1, kPAEPageTableRange);
263 			continue;
264 		}
265 
266 		struct thread* thread = thread_get_current_thread();
267 		ThreadCPUPinner pinner(thread);
268 
269 		pae_page_table_entry* pageTable
270 			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
271 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
272 
273 		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
274 		for (; index < kPAEPageTableEntryCount && start < end;
275 				index++, start += B_PAGE_SIZE) {
276 			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
277 				// page mapping not valid
278 				continue;
279 			}
280 
281 			TRACE("X86VMTranslationMapPAE::Unmap(): removing page %#"
282 				B_PRIxADDR "\n", start);
283 
284 			pae_page_table_entry oldEntry
285 				= X86PagingMethodPAE::ClearPageTableEntryFlags(
286 					&pageTable[index], X86_PAE_PTE_PRESENT);
287 			fMapCount--;
288 
289 			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
290 				// Note that we only need to invalidate the address if the
291 				// accessed flag was set, since only then the entry could
292 				// have been in any TLB.
293 				InvalidatePage(start);
294 			}
295 		}
296 
297 		pinner.Unlock();
298 	} while (start != 0 && start < end);
299 
300 	return B_OK;
301 }
302 
303 
304 /*!	Caller must have locked the cache of the page to be unmapped.
305 	This object shouldn't be locked.
306 */
307 status_t
308 X86VMTranslationMapPAE::UnmapPage(VMArea* area, addr_t address,
309 	bool updatePageQueue)
310 {
311 	ASSERT(address % B_PAGE_SIZE == 0);
312 
313 	pae_page_directory_entry* pageDirEntry
314 		= X86PagingMethodPAE::PageDirEntryForAddress(
315 			fPagingStructures->VirtualPageDirs(), address);
316 
317 	TRACE("X86VMTranslationMapPAE::UnmapPage(%#" B_PRIxADDR ")\n", address);
318 
319 	RecursiveLocker locker(fLock);
320 
321 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
322 		return B_ENTRY_NOT_FOUND;
323 
324 	ThreadCPUPinner pinner(thread_get_current_thread());
325 
326 	pae_page_table_entry* pageTable
327 		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
328 			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
329 
330 	pae_page_table_entry oldEntry = X86PagingMethodPAE::ClearPageTableEntry(
331 		&pageTable[address / B_PAGE_SIZE % kPAEPageTableEntryCount]);
332 
333 	pinner.Unlock();
334 
335 	if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
336 		// page mapping not valid
337 		return B_ENTRY_NOT_FOUND;
338 	}
339 
340 	fMapCount--;
341 
342 	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
343 		// Note that we only need to invalidate the address if the accessed
344 		// flag was set, since only then the entry could have been in any
345 		// TLB.
346 		InvalidatePage(address);
347 
348 		Flush();
349 
350 		// NOTE: Between clearing the page table entry and Flush() other
351 		// processors (actually even this processor with another thread of the
352 		// same team) could still access the page in question via their cached
353 		// entry. We can obviously lose a modified flag in this case, with the
354 		// effect that the page looks unmodified (and might thus be recycled),
355 		// but is actually modified.
356 		// In most cases this is harmless, but for vm_remove_all_page_mappings()
357 		// this is actually a problem.
358 		// Interestingly FreeBSD seems to ignore this problem as well
359 		// (cf. pmap_remove_all()), unless I've missed something.
360 	}
361 
362 	if (area->cache_type == CACHE_TYPE_DEVICE)
363 		return B_OK;
364 
365 	// get the page
366 	vm_page* page = vm_lookup_page(
367 		(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
368 	ASSERT(page != NULL);
369 
370 	// transfer the accessed/dirty flags to the page
371 	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0)
372 		page->accessed = true;
373 	if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
374 		page->modified = true;
375 
376 	// TODO: Here comes a lot of paging method and even architecture independent
377 	// code. Refactor!
378 
379 	// remove the mapping object/decrement the wired_count of the page
380 	vm_page_mapping* mapping = NULL;
381 	if (area->wiring == B_NO_LOCK) {
382 		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
383 		while ((mapping = iterator.Next()) != NULL) {
384 			if (mapping->area == area) {
385 				area->mappings.Remove(mapping);
386 				page->mappings.Remove(mapping);
387 				break;
388 			}
389 		}
390 
391 		ASSERT(mapping != NULL);
392 	} else
393 		page->wired_count--;
394 
395 	locker.Unlock();
396 
397 	if (page->wired_count == 0 && page->mappings.IsEmpty()) {
398 		atomic_add(&gMappedPagesCount, -1);
399 
400 		if (updatePageQueue) {
401 			if (page->Cache()->temporary)
402 				vm_page_set_state(page, PAGE_STATE_INACTIVE);
403 			else if (page->modified)
404 				vm_page_set_state(page, PAGE_STATE_MODIFIED);
405 			else
406 				vm_page_set_state(page, PAGE_STATE_CACHED);
407 		}
408 	}
409 
410 	if (mapping != NULL) {
411 		bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
412 		object_cache_free(gPageMappingsObjectCache, mapping,
413 			CACHE_DONT_WAIT_FOR_MEMORY
414 				| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0));
415 	}
416 
417 	return B_OK;
418 }
419 
420 
421 void
422 X86VMTranslationMapPAE::UnmapPages(VMArea* area, addr_t base, size_t size,
423 	bool updatePageQueue)
424 {
425 // TODO: Implement for real!
426 	X86VMTranslationMap::UnmapPages(area, base, size, updatePageQueue);
427 }
428 
429 
430 void
431 X86VMTranslationMapPAE::UnmapArea(VMArea* area, bool deletingAddressSpace,
432 	bool ignoreTopCachePageFlags)
433 {
434 // TODO: Implement for real!
435 	X86VMTranslationMap::UnmapArea(area, deletingAddressSpace,
436 		ignoreTopCachePageFlags);
437 }
438 
439 
440 status_t
441 X86VMTranslationMapPAE::Query(addr_t virtualAddress,
442 	phys_addr_t* _physicalAddress, uint32* _flags)
443 {
444 	// default the flags to not present
445 	*_flags = 0;
446 	*_physicalAddress = 0;
447 
448 	// get the page directory entry
449 	pae_page_directory_entry* pageDirEntry
450 		= X86PagingMethodPAE::PageDirEntryForAddress(
451 			fPagingStructures->VirtualPageDirs(), virtualAddress);
452 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
453 		// no pagetable here
454 		return B_OK;
455 	}
456 
457 	// get the page table entry
458 	struct thread* thread = thread_get_current_thread();
459 	ThreadCPUPinner pinner(thread);
460 
461 	pae_page_table_entry* pageTable
462 		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
463 			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
464 	pae_page_table_entry entry
465 		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];
466 
467 	pinner.Unlock();
468 
469 	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;
470 
471 	// translate the page state flags
472 	if ((entry & X86_PAE_PTE_USER) != 0) {
473 		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
474 			| B_READ_AREA;
475 	}
476 
477 	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
478 		| B_KERNEL_READ_AREA
479 		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
480 		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
481 		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
482 
483 	TRACE("X86VMTranslationMapPAE::Query(%#" B_PRIxADDR ") -> %#"
484 		B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);
485 
486 	return B_OK;
487 }
488 
489 
490 status_t
491 X86VMTranslationMapPAE::QueryInterrupt(addr_t virtualAddress,
492 	phys_addr_t* _physicalAddress, uint32* _flags)
493 {
494 	// default the flags to not present
495 	*_flags = 0;
496 	*_physicalAddress = 0;
497 
498 	// get the page directory entry
499 	pae_page_directory_entry* pageDirEntry
500 		= X86PagingMethodPAE::PageDirEntryForAddress(
501 			fPagingStructures->VirtualPageDirs(), virtualAddress);
502 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
503 		// no pagetable here
504 		return B_OK;
505 	}
506 
507 	// get the page table entry
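	// (this variant must work in interrupt context, so we use the
	// interrupt-safe physical page mapper and don't pin the thread)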
508 	pae_page_table_entry* pageTable
509 		= (pae_page_table_entry*)X86PagingMethodPAE::Method()
510 			->PhysicalPageMapper()->InterruptGetPageTableAt(
511 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
512 	pae_page_table_entry entry
513 		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];
514 
515 	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;
516 
517 	// translate the page state flags
518 	if ((entry & X86_PAE_PTE_USER) != 0) {
519 		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
520 			| B_READ_AREA;
521 	}
522 
523 	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
524 		| B_KERNEL_READ_AREA
525 		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
526 		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
527 		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
528 
529 	TRACE("X86VMTranslationMapPAE::QueryInterrupt(%#" B_PRIxADDR ") -> %#"
530 		B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);
531 
532 	return B_OK;
533 }
534 
535 
536 status_t
537 X86VMTranslationMapPAE::Protect(addr_t start, addr_t end, uint32 attributes,
538 	uint32 memoryType)
539 {
540 	start = ROUNDDOWN(start, B_PAGE_SIZE);
541 	if (start >= end)
542 		return B_OK;
543 
544 	TRACE("X86VMTranslationMapPAE::Protect(): %#" B_PRIxADDR " - %#" B_PRIxADDR
545 		", attributes: %#" B_PRIx32 "\n", start, end, attributes);
546 
547 	// compute protection flags
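	// (read access is implied for any mapped page -- the only protection bits
	// set here are "user accessible" and "writable")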
548 	uint64 newProtectionFlags = 0;
549 	if ((attributes & B_USER_PROTECTION) != 0) {
550 		newProtectionFlags = X86_PAE_PTE_USER;
551 		if ((attributes & B_WRITE_AREA) != 0)
552 			newProtectionFlags |= X86_PAE_PTE_WRITABLE;
553 	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
554 		newProtectionFlags = X86_PAE_PTE_WRITABLE;
555 
556 	do {
557 		pae_page_directory_entry* pageDirEntry
558 			= X86PagingMethodPAE::PageDirEntryForAddress(
559 				fPagingStructures->VirtualPageDirs(), start);
560 		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
561 			// no page table here, move the start up to access the next page
562 			// table
563 			start = ROUNDUP(start + 1, kPAEPageTableRange);
564 			continue;
565 		}
566 
567 		struct thread* thread = thread_get_current_thread();
568 		ThreadCPUPinner pinner(thread);
569 
570 		pae_page_table_entry* pageTable
571 			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
572 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
573 
574 		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
575 		for (; index < kPAEPageTableEntryCount && start < end;
576 				index++, start += B_PAGE_SIZE) {
577 			pae_page_table_entry entry = pageTable[index];
578 			if ((entry & X86_PAE_PTE_PRESENT) == 0) {
579 				// page mapping not valid
580 				continue;
581 			}
582 
583 			TRACE("X86VMTranslationMapPAE::Protect(): protect page %#"
584 				B_PRIxADDR "\n", start);
585 
586 			// set the new protection flags -- we want to do that atomically,
587 			// without changing the accessed or dirty flag
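			// (the CPU can set those flags concurrently, which is why the
			// compare-and-swap below is retried until it succeeds)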
588 			pae_page_table_entry oldEntry;
589 			while (true) {
590 				oldEntry = X86PagingMethodPAE::TestAndSetPageTableEntry(
591 					&pageTable[index],
592 					(entry & ~(X86_PAE_PTE_PROTECTION_MASK
593 						| X86_PAE_PTE_MEMORY_TYPE_MASK))
594 						| newProtectionFlags
595 						| X86PagingMethodPAE::MemoryTypeToPageTableEntryFlags(
596 							memoryType),
597 					entry);
598 				if (oldEntry == entry)
599 					break;
600 				entry = oldEntry;
601 			}
602 
603 			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
604 				// Note that we only need to invalidate the address if the
605 				// accessed flag was set, since only then the entry could
606 				// have been in any TLB.
607 				InvalidatePage(start);
608 			}
609 		}
610 
611 		pinner.Unlock();
612 	} while (start != 0 && start < end);
613 
614 	return B_OK;
615 }
616 
617 
618 status_t
619 X86VMTranslationMapPAE::ClearFlags(addr_t address, uint32 flags)
620 {
621 	pae_page_directory_entry* pageDirEntry
622 		= X86PagingMethodPAE::PageDirEntryForAddress(
623 			fPagingStructures->VirtualPageDirs(), address);
624 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
625 		// no pagetable here
626 		return B_OK;
627 	}
628 
629 	uint64 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PAE_PTE_DIRTY : 0)
630 		| ((flags & PAGE_ACCESSED) ? X86_PAE_PTE_ACCESSED : 0);
631 
632 	struct thread* thread = thread_get_current_thread();
633 	ThreadCPUPinner pinner(thread);
634 
635 	pae_page_table_entry* entry
636 		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
637 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
638 			+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;
639 
640 	// clear out the flags we've been requested to clear
641 	pae_page_table_entry oldEntry
642 		= X86PagingMethodPAE::ClearPageTableEntryFlags(entry, flagsToClear);
643 
644 	pinner.Unlock();
645 
646 	if ((oldEntry & flagsToClear) != 0)
647 		InvalidatePage(address);
648 
649 	return B_OK;
650 }
651 
652 
653 bool
654 X86VMTranslationMapPAE::ClearAccessedAndModified(VMArea* area, addr_t address,
655 	bool unmapIfUnaccessed, bool& _modified)
656 {
657 	ASSERT(address % B_PAGE_SIZE == 0);
658 
659 	TRACE("X86VMTranslationMapPAE::ClearAccessedAndModified(%#" B_PRIxADDR
660 		")\n", address);
661 
662 	pae_page_directory_entry* pageDirEntry
663 		= X86PagingMethodPAE::PageDirEntryForAddress(
664 			fPagingStructures->VirtualPageDirs(), address);
665 
666 	RecursiveLocker locker(fLock);
667 
668 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
669 		return false;
670 
671 	ThreadCPUPinner pinner(thread_get_current_thread());
672 
673 	pae_page_table_entry* entry
674 		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
675 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
676 			+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;
677 
678 	// perform the deed
679 	pae_page_table_entry oldEntry;
680 
681 	if (unmapIfUnaccessed) {
682 		while (true) {
683 			oldEntry = *entry;
684 			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
685 				// page mapping not valid
686 				return false;
687 			}
688 
689 			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
690 				// page was accessed -- just clear the flags
691 				oldEntry = X86PagingMethodPAE::ClearPageTableEntryFlags(entry,
692 					X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
693 				break;
694 			}
695 
696 			// page hasn't been accessed -- unmap it
697 			if (X86PagingMethodPAE::TestAndSetPageTableEntry(entry, 0, oldEntry)
698 					== oldEntry) {
699 				break;
700 			}
701 
702 			// something changed -- check again
703 		}
704 	} else {
705 		oldEntry = X86PagingMethodPAE::ClearPageTableEntryFlags(entry,
706 			X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
707 	}
708 
709 	pinner.Unlock();
710 
711 	_modified = (oldEntry & X86_PAE_PTE_DIRTY) != 0;
712 
713 	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
714 		// Note that we only need to invalidate the address if the accessed
715 		// flag was set, since only then the entry could have been in any
716 		// TLB.
717 		InvalidatePage(address);
718 		Flush();
719 
720 		return true;
721 	}
722 
723 	if (!unmapIfUnaccessed)
724 		return false;
725 
726 	// We have unmapped the address. Do the "high level" stuff.
727 
728 	fMapCount--;
729 
730 	if (area->cache_type == CACHE_TYPE_DEVICE)
731 		return false;
732 
733 	// get the page
734 	vm_page* page = vm_lookup_page(
735 		(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
736 	ASSERT(page != NULL);
737 
738 	// TODO: Here comes a lot of paging method and even architecture independent
739 	// code. Refactor!
740 
741 	// remove the mapping object/decrement the wired_count of the page
742 	vm_page_mapping* mapping = NULL;
743 	if (area->wiring == B_NO_LOCK) {
744 		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
745 		while ((mapping = iterator.Next()) != NULL) {
746 			if (mapping->area == area) {
747 				area->mappings.Remove(mapping);
748 				page->mappings.Remove(mapping);
749 				break;
750 			}
751 		}
752 
753 		ASSERT(mapping != NULL);
754 	} else
755 		page->wired_count--;
756 
757 	locker.Unlock();
758 
759 	if (page->wired_count == 0 && page->mappings.IsEmpty())
760 		atomic_add(&gMappedPagesCount, -1);
761 
762 	if (mapping != NULL) {
763 		object_cache_free(gPageMappingsObjectCache, mapping,
764 			CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
765 			// Since this is called by the page daemon, we never want to lock
766 			// the kernel address space.
767 	}
768 
769 	return false;
770 }
771 
772 
773 X86PagingStructures*
774 X86VMTranslationMapPAE::PagingStructures() const
775 {
776 	return fPagingStructures;
777 }
778 
779 
780 #endif	// B_HAIKU_PHYSICAL_BITS == 64
781