xref: /haiku/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp (revision f5821a1aee77d3b9a979b42c68a79e50b5ebaefe)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include "paging/pae/X86VMTranslationMapPAE.h"
12 
13 #include <int.h>
14 #include <slab/Slab.h>
15 #include <thread.h>
16 #include <util/AutoLock.h>
17 #include <vm/vm_page.h>
18 #include <vm/VMAddressSpace.h>
19 #include <vm/VMCache.h>
20 
21 #include "paging/pae/X86PagingMethodPAE.h"
22 #include "paging/pae/X86PagingStructuresPAE.h"
23 #include "paging/x86_physical_page_mapper.h"
24 
25 
26 //#define TRACE_X86_VM_TRANSLATION_MAP_PAE
27 #ifdef TRACE_X86_VM_TRANSLATION_MAP_PAE
28 #	define TRACE(x...) dprintf(x)
29 #else
30 #	define TRACE(x...) ;
31 #endif
32 
33 
34 #if B_HAIKU_PHYSICAL_BITS == 64
35 
36 
37 X86VMTranslationMapPAE::X86VMTranslationMapPAE()
38 	:
39 	fPagingStructures(NULL)
40 {
41 }
42 
43 
44 X86VMTranslationMapPAE::~X86VMTranslationMapPAE()
45 {
46 	if (fPagingStructures == NULL)
47 		return;
48 
49 	if (fPageMapper != NULL)
50 		fPageMapper->Delete();
51 
52 	// cycle through and free all of the user space page tables
53 
54 	STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);
55 		// assuming an even (2 GB/2 GB) split of the address space
56 
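	// Only the lower two page directories can contain userland page tables;
	// the upper two belong to the kernel and are shared by all maps.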
57 	for (uint32 k = 0; k < 2; k++) {
58 		pae_page_directory_entry* pageDir
59 			= fPagingStructures->VirtualPageDirs()[k];
60 		if (pageDir == NULL)
61 			continue;
62 
63 		for (uint32 i = 0; i < kPAEPageDirEntryCount; i++) {
64 			if ((pageDir[i] & X86_PAE_PDE_PRESENT) != 0) {
65 				phys_addr_t address = pageDir[i] & X86_PAE_PDE_ADDRESS_MASK;
66 				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
67 				if (page == NULL)
68 					panic("X86VMTranslationMapPAE::~X86VMTranslationMapPAE: "
69 						"didn't find page table page: page address: %#"
70 						B_PRIxPHYSADDR ", virtual base: %#" B_PRIxADDR "\n",
71 						address,
72 						(k * kPAEPageDirEntryCount + i) * kPAEPageTableRange);
73 				DEBUG_PAGE_ACCESS_START(page);
74 				vm_page_set_state(page, PAGE_STATE_FREE);
75 			}
76 		}
77 	}
78 
79 	fPagingStructures->RemoveReference();
80 }
81 
82 
83 status_t
84 X86VMTranslationMapPAE::Init(bool kernel)
85 {
86 	TRACE("X86VMTranslationMapPAE::Init()\n");
87 
88 	X86VMTranslationMap::Init(kernel);
89 
90 	fPagingStructures = new(std::nothrow) X86PagingStructuresPAE;
91 	if (fPagingStructures == NULL)
92 		return B_NO_MEMORY;
93 
94 	X86PagingMethodPAE* method = X86PagingMethodPAE::Method();
95 
96 	if (kernel) {
97 		// kernel
98 		// get the physical page mapper
99 		fPageMapper = method->KernelPhysicalPageMapper();
100 
101 		// we already know the kernel pgdir mapping
102 		fPagingStructures->Init(method->KernelVirtualPageDirPointerTable(),
103 			method->KernelPhysicalPageDirPointerTable(), NULL,
104 			method->KernelVirtualPageDirs(), method->KernelPhysicalPageDirs());
105 	} else {
106 		// user
107 		// allocate a physical page mapper
108 		status_t error = method->PhysicalPageMapper()
109 			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
110 		if (error != B_OK)
111 			return error;
112 
113 		// The following code assumes that the kernel address space occupies the
114 		// upper half of the virtual address space. This simplifies things a
115 		// lot, since it allows us to just use the upper two page directories
116 		// of the kernel and create two new lower page directories for the
117 		// userland.
118 		STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);
119 
120 		// allocate the page directories (both at once)
121 		pae_page_directory_entry* virtualPageDirs[4];
122 		phys_addr_t physicalPageDirs[4];
123 		virtualPageDirs[0] = (pae_page_directory_entry*)memalign(B_PAGE_SIZE,
124 			2 * B_PAGE_SIZE);
125 		if (virtualPageDirs[0] == NULL)
126 			return B_NO_MEMORY;
127 		virtualPageDirs[1] = virtualPageDirs[0] + kPAEPageTableEntryCount;
128 
129 		// clear the userland page directories
130 		memset(virtualPageDirs[0], 0, 2 * B_PAGE_SIZE);
131 
132 		// use the upper two kernel page directories
133 		for (int32 i = 2; i < 4; i++) {
134 			virtualPageDirs[i] = method->KernelVirtualPageDirs()[i];
135 			physicalPageDirs[i] = method->KernelPhysicalPageDirs()[i];
136 		}
137 
138 		// look up the page directories' physical addresses
139 		for (int32 i = 0; i < 2; i++) {
140 			vm_get_page_mapping(VMAddressSpace::KernelID(),
141 				(addr_t)virtualPageDirs[i], &physicalPageDirs[i]);
142 		}
143 
144 		// allocate the PDPT -- it must have a 32 bit physical address for CR3
145 		phys_addr_t physicalPDPT;
146 		void* pdptHandle;
147 		pae_page_directory_pointer_table_entry* pdpt
148 			= (pae_page_directory_pointer_table_entry*)
149 				method->Allocate32BitPage(physicalPDPT, pdptHandle);
150 		if (pdpt == NULL) {
151 			free(virtualPageDirs[0]);
152 			return B_NO_MEMORY;
153 		}
154 
155 		// init the PDPT entries
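		// (each entry covers 1 GB of the virtual address space)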
156 		for (int32 i = 0; i < 4; i++) {
157 			pdpt[i] = (physicalPageDirs[i] & X86_PAE_PDPTE_ADDRESS_MASK)
158 				| X86_PAE_PDPTE_PRESENT;
159 		}
160 
161 		// init the paging structures
162 		fPagingStructures->Init(pdpt, physicalPDPT, pdptHandle, virtualPageDirs,
163 			physicalPageDirs);
164 	}
165 
166 	return B_OK;
167 }
168 
169 
170 size_t
171 X86VMTranslationMapPAE::MaxPagesNeededToMap(addr_t start, addr_t end) const
172 {
173 	// If start == 0, the actual base address is not yet known to the caller and
174 	// we shall assume the worst case.
175 	if (start == 0) {
176 		// offset the range so it has the worst possible alignment
177 		start = kPAEPageTableRange - B_PAGE_SIZE;
178 		end += kPAEPageTableRange - B_PAGE_SIZE;
179 	}
180 
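	// Each page table covers kPAEPageTableRange bytes of address space, so we
	// need one page per kPAEPageTableRange-sized slot that [start, end]
	// touches.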
181 	return end / kPAEPageTableRange + 1 - start / kPAEPageTableRange;
182 }
183 
184 
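/*!	Maps \a virtualAddress to \a physicalAddress with the given protection
	attributes and memory type, allocating a page table page from
	\a reservation if the corresponding page directory entry is not present
	yet.
*/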
185 status_t
186 X86VMTranslationMapPAE::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
187 	uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
188 {
189 	TRACE("X86VMTranslationMapPAE::Map(): %#" B_PRIxADDR " -> %#" B_PRIxPHYSADDR
190 		"\n", virtualAddress, physicalAddress);
191 
192 	// check to see if a page table exists for this range
193 	pae_page_directory_entry* pageDirEntry
194 		= X86PagingMethodPAE::PageDirEntryForAddress(
195 			fPagingStructures->VirtualPageDirs(), virtualAddress);
196 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
197 		// we need to allocate a page table
198 		vm_page *page = vm_page_allocate_page(reservation,
199 			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
200 
201 		DEBUG_PAGE_ACCESS_END(page);
202 
203 		phys_addr_t physicalPageTable
204 			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
205 
206 		TRACE("X86VMTranslationMapPAE::Map(): asked for free page for "
207 			"page table: %#" B_PRIxPHYSADDR "\n", physicalPageTable);
208 
209 		// put it in the page dir
210 		X86PagingMethodPAE::PutPageTableInPageDir(pageDirEntry,
211 			physicalPageTable,
212 			attributes
213 				| ((attributes & B_USER_PROTECTION) != 0
214 						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
215 
216 		fMapCount++;
217 	}
218 
219 	// now, fill in the page table entry
220 	Thread* thread = thread_get_current_thread();
221 	ThreadCPUPinner pinner(thread);
222 
223 	pae_page_table_entry* pageTable
224 		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
225 			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
226 	pae_page_table_entry* entry = pageTable
227 		+ virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount;
228 
229 	ASSERT_PRINT((*entry & X86_PAE_PTE_PRESENT) == 0,
230 		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64,
231 		virtualAddress, *entry);
232 
233 	X86PagingMethodPAE::PutPageTableEntryInTable(entry, physicalAddress,
234 		attributes, memoryType, fIsKernelMap);
235 
236 	pinner.Unlock();
237 
238 	// Note: We don't need to invalidate the TLB for this address, as previously
239 	// the entry was not present and the TLB doesn't cache those entries.
240 
241 	fMapCount++;
242 
243 	return B_OK;
244 }
245 
246 
247 status_t
248 X86VMTranslationMapPAE::Unmap(addr_t start, addr_t end)
249 {
250 	start = ROUNDDOWN(start, B_PAGE_SIZE);
251 	if (start >= end)
252 		return B_OK;
253 
254 	TRACE("X86VMTranslationMapPAE::Unmap(): %#" B_PRIxADDR " - %#" B_PRIxADDR
255 		"\n", start, end);
256 
257 	do {
258 		pae_page_directory_entry* pageDirEntry
259 			= X86PagingMethodPAE::PageDirEntryForAddress(
260 				fPagingStructures->VirtualPageDirs(), start);
261 		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
262 			// no page table here, move the start up to access the next page
263 			// table
264 			start = ROUNDUP(start + 1, kPAEPageTableRange);
265 			continue;
266 		}
267 
268 		Thread* thread = thread_get_current_thread();
269 		ThreadCPUPinner pinner(thread);
270 
271 		pae_page_table_entry* pageTable
272 			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
273 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
274 
275 		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
276 		for (; index < kPAEPageTableEntryCount && start < end;
277 				index++, start += B_PAGE_SIZE) {
278 			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
279 				// page mapping not valid
280 				continue;
281 			}
282 
283 			TRACE("X86VMTranslationMapPAE::Unmap(): removing page %#"
284 				B_PRIxADDR "\n", start);
285 
286 			pae_page_table_entry oldEntry
287 				= X86PagingMethodPAE::ClearPageTableEntryFlags(
288 					&pageTable[index], X86_PAE_PTE_PRESENT);
289 			fMapCount--;
290 
291 			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
292 				// Note that we only need to invalidate the address if the
293 				// accessed flag was set, since only then the entry could have
294 				// been in any TLB.
295 				InvalidatePage(start);
296 			}
297 		}
298 	} while (start != 0 && start < end);
299 
300 	return B_OK;
301 }
302 
303 
304 status_t
305 X86VMTranslationMapPAE::DebugMarkRangePresent(addr_t start, addr_t end,
306 	bool markPresent)
307 {
308 	start = ROUNDDOWN(start, B_PAGE_SIZE);
309 	if (start >= end)
310 		return B_OK;
311 
312 	do {
313 		pae_page_directory_entry* pageDirEntry
314 			= X86PagingMethodPAE::PageDirEntryForAddress(
315 				fPagingStructures->VirtualPageDirs(), start);
316 		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
317 			// no page table here, move the start up to access the next page
318 			// table
319 			start = ROUNDUP(start + 1, kPAEPageTableRange);
320 			continue;
321 		}
322 
323 		Thread* thread = thread_get_current_thread();
324 		ThreadCPUPinner pinner(thread);
325 
326 		pae_page_table_entry* pageTable
327 			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
328 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
329 
330 		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
331 		for (; index < kPAEPageTableEntryCount && start < end;
332 				index++, start += B_PAGE_SIZE) {
333 
334 			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
335 				if (!markPresent)
336 					continue;
337 
338 				X86PagingMethodPAE::SetPageTableEntryFlags(
339 					&pageTable[index], X86_PAE_PTE_PRESENT);
340 			} else {
341 				if (markPresent)
342 					continue;
343 
344 				pae_page_table_entry oldEntry
345 					= X86PagingMethodPAE::ClearPageTableEntryFlags(
346 						&pageTable[index], X86_PAE_PTE_PRESENT);
347 
348 				if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
349 					// Note that we only need to invalidate the address if the
350 					// accessed flag was set, since only then the entry could
351 					// have been in any TLB.
352 					InvalidatePage(start);
353 				}
354 			}
355 		}
356 	} while (start != 0 && start < end);
357 
358 	return B_OK;
359 }
360 
361 
362 /*!	Caller must have locked the cache of the page to be unmapped.
363 	This object shouldn't be locked.
364 */
365 status_t
366 X86VMTranslationMapPAE::UnmapPage(VMArea* area, addr_t address,
367 	bool updatePageQueue)
368 {
369 	ASSERT(address % B_PAGE_SIZE == 0);
370 
371 	pae_page_directory_entry* pageDirEntry
372 		= X86PagingMethodPAE::PageDirEntryForAddress(
373 			fPagingStructures->VirtualPageDirs(), address);
374 
375 	TRACE("X86VMTranslationMapPAE::UnmapPage(%#" B_PRIxADDR ")\n", address);
376 
377 	RecursiveLocker locker(fLock);
378 
379 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
380 		return B_ENTRY_NOT_FOUND;
381 
382 	ThreadCPUPinner pinner(thread_get_current_thread());
383 
384 	pae_page_table_entry* pageTable
385 		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
386 			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
387 
388 	pae_page_table_entry oldEntry = X86PagingMethodPAE::ClearPageTableEntry(
389 		&pageTable[address / B_PAGE_SIZE % kPAEPageTableEntryCount]);
390 
391 	pinner.Unlock();
392 
393 	if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
394 		// page mapping not valid
395 		return B_ENTRY_NOT_FOUND;
396 	}
397 
398 	fMapCount--;
399 
400 	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
401 		// Note that we only need to invalidate the address if the
402 		// accessed flag was set, since only then the entry could have been
403 		// in any TLB.
404 		InvalidatePage(address);
405 
406 		Flush();
407 
408 		// NOTE: Between clearing the page table entry and Flush() other
409 		// processors (actually even this processor with another thread of the
410 		// same team) could still access the page in question via their cached
411 		// entry. We can obviously lose a modified flag in this case, with the
412 		// effect that the page looks unmodified (and might thus be recycled),
413 		// but is actually modified.
414 		// In most cases this is harmless, but for vm_remove_all_page_mappings()
415 		// this is actually a problem.
416 		// Interestingly FreeBSD seems to ignore this problem as well
417 		// (cf. pmap_remove_all()), unless I've missed something.
418 	}
419 
420 	locker.Detach();
421 		// PageUnmapped() will unlock for us
422 
423 	PageUnmapped(area, (oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
424 		(oldEntry & X86_PAE_PTE_ACCESSED) != 0,
425 		(oldEntry & X86_PAE_PTE_DIRTY) != 0, updatePageQueue);
426 
427 	return B_OK;
428 }
429 
430 
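/*!	Unmaps all pages in the given range. For non-device areas the
	accessed/dirty flags of the cleared entries are transferred to the
	underlying vm_pages; the mapping objects of B_NO_LOCK areas are unlinked
	and freed afterwards.
*/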
431 void
432 X86VMTranslationMapPAE::UnmapPages(VMArea* area, addr_t base, size_t size,
433 	bool updatePageQueue)
434 {
435 	if (size == 0)
436 		return;
437 
438 	addr_t start = base;
439 	addr_t end = base + size - 1;
440 
441 	TRACE("X86VMTranslationMapPAE::UnmapPages(%p, %#" B_PRIxADDR ", %#"
442 		B_PRIxADDR ")\n", area, start, end);
443 
444 	VMAreaMappings queue;
445 
446 	RecursiveLocker locker(fLock);
447 
448 	do {
449 		pae_page_directory_entry* pageDirEntry
450 			= X86PagingMethodPAE::PageDirEntryForAddress(
451 				fPagingStructures->VirtualPageDirs(), start);
452 		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
453 			// no page table here, move the start up to access the next page
454 			// table
455 			start = ROUNDUP(start + 1, kPAEPageTableRange);
456 			continue;
457 		}
458 
459 		Thread* thread = thread_get_current_thread();
460 		ThreadCPUPinner pinner(thread);
461 
462 		pae_page_table_entry* pageTable
463 			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
464 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
465 
466 		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
467 		for (; index < kPAEPageTableEntryCount && start < end;
468 				index++, start += B_PAGE_SIZE) {
469 			pae_page_table_entry oldEntry
470 				= X86PagingMethodPAE::ClearPageTableEntry(&pageTable[index]);
471 			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0)
472 				continue;
473 
474 			fMapCount--;
475 
476 			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
477 				// Note that we only need to invalidate the address if the
478 				// accessed flag was set, since only then the entry could have
479 				// been in any TLB.
480 				InvalidatePage(start);
481 			}
482 
483 			if (area->cache_type != CACHE_TYPE_DEVICE) {
484 				// get the page
485 				vm_page* page = vm_lookup_page(
486 					(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
487 				ASSERT(page != NULL);
488 
489 				DEBUG_PAGE_ACCESS_START(page);
490 
491 				// transfer the accessed/dirty flags to the page
492 				if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0)
493 					page->accessed = true;
494 				if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
495 					page->modified = true;
496 
497 				// remove the mapping object/decrement the wired_count of the
498 				// page
499 				if (area->wiring == B_NO_LOCK) {
500 					vm_page_mapping* mapping = NULL;
501 					vm_page_mappings::Iterator iterator
502 						= page->mappings.GetIterator();
503 					while ((mapping = iterator.Next()) != NULL) {
504 						if (mapping->area == area)
505 							break;
506 					}
507 
508 					ASSERT(mapping != NULL);
509 
510 					area->mappings.Remove(mapping);
511 					page->mappings.Remove(mapping);
512 					queue.Add(mapping);
513 				} else
514 					page->DecrementWiredCount();
515 
516 				if (!page->IsMapped()) {
517 					atomic_add(&gMappedPagesCount, -1);
518 
519 					if (updatePageQueue) {
520 						if (page->Cache()->temporary)
521 							vm_page_set_state(page, PAGE_STATE_INACTIVE);
522 						else if (page->modified)
523 							vm_page_set_state(page, PAGE_STATE_MODIFIED);
524 						else
525 							vm_page_set_state(page, PAGE_STATE_CACHED);
526 					}
527 				}
528 
529 				DEBUG_PAGE_ACCESS_END(page);
530 			}
531 		}
532 
533 		Flush();
534 			// flush explicitly, since we directly use the lock
535 	} while (start != 0 && start < end);
536 
537 	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM that is
538 	// not really critical, as in all cases in which this method is used the
539 	// unmapped range is unmapped for good (resized/cut) and the pages will
540 	// likely be freed.
541 
542 	locker.Unlock();
543 
544 	// free removed mappings
545 	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
546 	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
547 		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
548 	while (vm_page_mapping* mapping = queue.RemoveHead())
549 		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
550 }
551 
552 
553 void
554 X86VMTranslationMapPAE::UnmapArea(VMArea* area, bool deletingAddressSpace,
555 	bool ignoreTopCachePageFlags)
556 {
557 	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
558 		X86VMTranslationMapPAE::UnmapPages(area, area->Base(), area->Size(),
559 			true);
560 		return;
561 	}
562 
563 	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
564 
565 	RecursiveLocker locker(fLock);
566 
567 	VMAreaMappings mappings;
568 	mappings.MoveFrom(&area->mappings);
569 
570 	for (VMAreaMappings::Iterator it = mappings.GetIterator();
571 			vm_page_mapping* mapping = it.Next();) {
572 		vm_page* page = mapping->page;
573 		page->mappings.Remove(mapping);
574 
575 		VMCache* cache = page->Cache();
576 
577 		bool pageFullyUnmapped = false;
578 		if (!page->IsMapped()) {
579 			atomic_add(&gMappedPagesCount, -1);
580 			pageFullyUnmapped = true;
581 		}
582 
583 		if (unmapPages || cache != area->cache) {
584 			addr_t address = area->Base()
585 				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);
586 
587 			pae_page_directory_entry* pageDirEntry
588 				= X86PagingMethodPAE::PageDirEntryForAddress(
589 					fPagingStructures->VirtualPageDirs(), address);
590 			if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
591 				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
592 					"has no page dir entry", page, area, address);
593 				continue;
594 			}
595 
596 			ThreadCPUPinner pinner(thread_get_current_thread());
597 
598 			pae_page_table_entry* pageTable
599 				= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
600 					*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
601 			pae_page_table_entry oldEntry
602 				= X86PagingMethodPAE::ClearPageTableEntry(
603 					&pageTable[address / B_PAGE_SIZE
604 						% kPAEPageTableEntryCount]);
605 
606 			pinner.Unlock();
607 
608 			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
609 				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
610 					"has no page table entry", page, area, address);
611 				continue;
612 			}
613 
614 			// transfer the accessed/dirty flags to the page and invalidate
615 			// the mapping, if necessary
616 			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
617 				page->accessed = true;
618 
619 				if (!deletingAddressSpace)
620 					InvalidatePage(address);
621 			}
622 
623 			if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
624 				page->modified = true;
625 
626 			if (pageFullyUnmapped) {
627 				DEBUG_PAGE_ACCESS_START(page);
628 
629 				if (cache->temporary)
630 					vm_page_set_state(page, PAGE_STATE_INACTIVE);
631 				else if (page->modified)
632 					vm_page_set_state(page, PAGE_STATE_MODIFIED);
633 				else
634 					vm_page_set_state(page, PAGE_STATE_CACHED);
635 
636 				DEBUG_PAGE_ACCESS_END(page);
637 			}
638 		}
639 
640 		fMapCount--;
641 	}
642 
643 	Flush();
644 		// flush explicitly, since we directly use the lock
645 
646 	locker.Unlock();
647 
648 	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
649 	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
650 		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
651 	while (vm_page_mapping* mapping = mappings.RemoveHead())
652 		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
653 }
654 
655 
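/*!	Looks up the page table entry for \a virtualAddress and returns the
	mapped physical address as well as the translated protection and state
	flags. If no page table exists for the address, \c B_OK is returned with
	\a _flags set to 0.
*/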
656 status_t
657 X86VMTranslationMapPAE::Query(addr_t virtualAddress,
658 	phys_addr_t* _physicalAddress, uint32* _flags)
659 {
660 	// default the flags to not present
661 	*_flags = 0;
662 	*_physicalAddress = 0;
663 
664 	// get the page directory entry
665 	pae_page_directory_entry* pageDirEntry
666 		= X86PagingMethodPAE::PageDirEntryForAddress(
667 			fPagingStructures->VirtualPageDirs(), virtualAddress);
668 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
669 		// no pagetable here
670 		return B_OK;
671 	}
672 
673 	// get the page table entry
674 	Thread* thread = thread_get_current_thread();
675 	ThreadCPUPinner pinner(thread);
676 
677 	pae_page_table_entry* pageTable
678 		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
679 			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
680 	pae_page_table_entry entry
681 		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];
682 
683 	pinner.Unlock();
684 
685 	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;
686 
687 	// translate the page state flags
688 	if ((entry & X86_PAE_PTE_USER) != 0) {
689 		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
690 			| B_READ_AREA
691 			| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0);
692 	}
693 
694 	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
695 		| B_KERNEL_READ_AREA
696 		| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0
697 			? B_KERNEL_EXECUTE_AREA : 0)
698 		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
699 		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
700 		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
701 
702 	TRACE("X86VMTranslationMapPAE::Query(%#" B_PRIxADDR ") -> %#"
703 		B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);
704 
705 	return B_OK;
706 }
707 
708 
709 status_t
710 X86VMTranslationMapPAE::QueryInterrupt(addr_t virtualAddress,
711 	phys_addr_t* _physicalAddress, uint32* _flags)
712 {
713 	// default the flags to not present
714 	*_flags = 0;
715 	*_physicalAddress = 0;
716 
717 	// get the page directory entry
718 	pae_page_directory_entry* pageDirEntry
719 		= X86PagingMethodPAE::PageDirEntryForAddress(
720 			fPagingStructures->VirtualPageDirs(), virtualAddress);
721 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
722 		// no pagetable here
723 		return B_OK;
724 	}
725 
726 	// get the page table entry
727 	pae_page_table_entry* pageTable
728 		= (pae_page_table_entry*)X86PagingMethodPAE::Method()
729 			->PhysicalPageMapper()->InterruptGetPageTableAt(
730 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
731 	pae_page_table_entry entry
732 		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];
733 
734 	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;
735 
736 	// translate the page state flags
737 	if ((entry & X86_PAE_PTE_USER) != 0) {
738 		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
739 			| B_READ_AREA
740 			| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0);
741 	}
742 
743 	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
744 		| B_KERNEL_READ_AREA
745 		| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0
746 			? B_KERNEL_EXECUTE_AREA : 0)
747 		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
748 		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
749 		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
750 
751 	TRACE("X86VMTranslationMapPAE::QueryInterrupt(%#" B_PRIxADDR ") -> %#"
752 		B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);
753 
754 	return B_OK;
755 }
756 
757 
758 status_t
759 X86VMTranslationMapPAE::Protect(addr_t start, addr_t end, uint32 attributes,
760 	uint32 memoryType)
761 {
762 	start = ROUNDDOWN(start, B_PAGE_SIZE);
763 	if (start >= end)
764 		return B_OK;
765 
766 	TRACE("X86VMTranslationMapPAE::Protect(): %#" B_PRIxADDR " - %#" B_PRIxADDR
767 		", attributes: %#" B_PRIx32 "\n", start, end, attributes);
768 
769 	// compute protection flags
770 	uint64 newProtectionFlags = 0;
771 	if ((attributes & B_USER_PROTECTION) != 0) {
772 		newProtectionFlags = X86_PAE_PTE_USER;
773 		if ((attributes & B_WRITE_AREA) != 0)
774 			newProtectionFlags |= X86_PAE_PTE_WRITABLE;
775 		if ((attributes & B_EXECUTE_AREA) == 0
776 			&& x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) {
777 			newProtectionFlags |= X86_PAE_PTE_NOT_EXECUTABLE;
778 		}
779 	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
780 		newProtectionFlags = X86_PAE_PTE_WRITABLE;
781 
782 	do {
783 		pae_page_directory_entry* pageDirEntry
784 			= X86PagingMethodPAE::PageDirEntryForAddress(
785 				fPagingStructures->VirtualPageDirs(), start);
786 		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
787 			// no page table here, move the start up to access the next page
788 			// table
789 			start = ROUNDUP(start + 1, kPAEPageTableRange);
790 			continue;
791 		}
792 
793 		Thread* thread = thread_get_current_thread();
794 		ThreadCPUPinner pinner(thread);
795 
796 		pae_page_table_entry* pageTable
797 			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
798 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
799 
800 		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
801 		for (; index < kPAEPageTableEntryCount && start < end;
802 				index++, start += B_PAGE_SIZE) {
803 			pae_page_table_entry entry = pageTable[index];
804 			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
805 				// page mapping not valid
806 				continue;
807 			}
808 
809 			TRACE("X86VMTranslationMapPAE::Protect(): protect page %#"
810 				B_PRIxADDR "\n", start);
811 
812 			// set the new protection flags -- we want to do that atomically,
813 			// without changing the accessed or dirty flag
814 			pae_page_table_entry oldEntry;
815 			while (true) {
816 				oldEntry = X86PagingMethodPAE::TestAndSetPageTableEntry(
817 					&pageTable[index],
818 					(entry & ~(X86_PAE_PTE_PROTECTION_MASK
819 							| X86_PAE_PTE_MEMORY_TYPE_MASK))
820 						| newProtectionFlags
821 						| X86PagingMethodPAE::MemoryTypeToPageTableEntryFlags(
822 							memoryType),
823 					entry);
824 				if (oldEntry == entry)
825 					break;
826 				entry = oldEntry;
827 			}
828 
829 			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
830 				// Note that we only need to invalidate the address if the
831 				// accessed flag was set, since only then the entry could have
832 				// been in any TLB.
833 				InvalidatePage(start);
834 			}
835 		}
836 	} while (start != 0 && start < end);
837 
838 	return B_OK;
839 }
840 
841 
842 status_t
843 X86VMTranslationMapPAE::ClearFlags(addr_t address, uint32 flags)
844 {
845 	pae_page_directory_entry* pageDirEntry
846 		= X86PagingMethodPAE::PageDirEntryForAddress(
847 			fPagingStructures->VirtualPageDirs(), address);
848 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
849 		// no pagetable here
850 		return B_OK;
851 	}
852 
853 	uint64 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PAE_PTE_DIRTY : 0)
854 		| ((flags & PAGE_ACCESSED) ? X86_PAE_PTE_ACCESSED : 0);
855 
856 	Thread* thread = thread_get_current_thread();
857 	ThreadCPUPinner pinner(thread);
858 
859 	pae_page_table_entry* entry
860 		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
861 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
862 			+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;
863 
864 	// clear out the flags we've been requested to clear
865 	pae_page_table_entry oldEntry
866 		= X86PagingMethodPAE::ClearPageTableEntryFlags(entry, flagsToClear);
867 
868 	pinner.Unlock();
869 
870 	if ((oldEntry & flagsToClear) != 0)
871 		InvalidatePage(address);
872 
873 	return B_OK;
874 }
875 
876 
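/*!	Clears the accessed and dirty flags of the page table entry for
	\a address, setting \a _modified to whether the entry was dirty. If
	\a unmapIfUnaccessed is \c true and the entry was not accessed, the page
	is unmapped instead. Returns whether the entry was accessed.
*/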
877 bool
878 X86VMTranslationMapPAE::ClearAccessedAndModified(VMArea* area, addr_t address,
879 	bool unmapIfUnaccessed, bool& _modified)
880 {
881 	ASSERT(address % B_PAGE_SIZE == 0);
882 
883 	TRACE("X86VMTranslationMapPAE::ClearAccessedAndModified(%#" B_PRIxADDR
884 		")\n", address);
885 
886 	pae_page_directory_entry* pageDirEntry
887 		= X86PagingMethodPAE::PageDirEntryForAddress(
888 			fPagingStructures->VirtualPageDirs(), address);
889 
890 	RecursiveLocker locker(fLock);
891 
892 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
893 		return false;
894 
895 	ThreadCPUPinner pinner(thread_get_current_thread());
896 
897 	pae_page_table_entry* entry
898 		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
899 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
900 			+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;
901 
902 	// perform the deed
903 	pae_page_table_entry oldEntry;
904 
905 	if (unmapIfUnaccessed) {
906 		while (true) {
907 			oldEntry = *entry;
908 			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
909 				// page mapping not valid
910 				return false;
911 			}
912 
913 			if (oldEntry & X86_PAE_PTE_ACCESSED) {
914 				// page was accessed -- just clear the flags
915 				oldEntry = X86PagingMethodPAE::ClearPageTableEntryFlags(entry,
916 					X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
917 				break;
918 			}
919 
920 			// page hasn't been accessed -- unmap it
921 			if (X86PagingMethodPAE::TestAndSetPageTableEntry(entry, 0, oldEntry)
922 					== oldEntry) {
923 				break;
924 			}
925 
926 			// something changed -- check again
927 		}
928 	} else {
929 		oldEntry = X86PagingMethodPAE::ClearPageTableEntryFlags(entry,
930 			X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
931 	}
932 
933 	pinner.Unlock();
934 
935 	_modified = (oldEntry & X86_PAE_PTE_DIRTY) != 0;
936 
937 	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
938 		// Note that we only need to invalidate the address if the
939 		// accessed flag was set, since only then the entry could have been
940 		// in any TLB.
941 		InvalidatePage(address);
942 		Flush();
943 
944 		return true;
945 	}
946 
947 	if (!unmapIfUnaccessed)
948 		return false;
949 
950 	// We have unmapped the address. Do the "high level" stuff.
951 
952 	fMapCount--;
953 
954 	locker.Detach();
955 		// UnaccessedPageUnmapped() will unlock for us
956 
957 	UnaccessedPageUnmapped(area,
958 		(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
959 
960 	return false;
961 }
962 
963 
964 X86PagingStructures*
965 X86VMTranslationMapPAE::PagingStructures() const
966 {
967 	return fPagingStructures;
968 }
969 
970 
971 #endif	// B_HAIKU_PHYSICAL_BITS == 64
972