xref: /haiku/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp (revision 60c26cd332a044bb9003091b9196cc404ebe5482)
1 /*
2  * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include "paging/pae/X86VMTranslationMapPAE.h"
12 
13 #include <int.h>
14 #include <slab/Slab.h>
15 #include <thread.h>
16 #include <util/AutoLock.h>
17 #include <vm/vm_page.h>
18 #include <vm/VMAddressSpace.h>
19 #include <vm/VMCache.h>
20 
21 #include "paging/pae/X86PagingMethodPAE.h"
22 #include "paging/pae/X86PagingStructuresPAE.h"
23 #include "paging/x86_physical_page_mapper.h"
24 
25 
26 //#define TRACE_X86_VM_TRANSLATION_MAP_PAE
27 #ifdef TRACE_X86_VM_TRANSLATION_MAP_PAE
28 #	define TRACE(x...) dprintf(x)
29 #else
30 #	define TRACE(x...) ;
31 #endif
32 
33 
34 #if B_HAIKU_PHYSICAL_BITS == 64
35 
36 
37 X86VMTranslationMapPAE::X86VMTranslationMapPAE()
38 	:
39 	fPagingStructures(NULL)
40 {
41 }
42 
43 
44 X86VMTranslationMapPAE::~X86VMTranslationMapPAE()
45 {
46 	if (fPagingStructures == NULL)
47 		return;
48 
49 	if (fPageMapper != NULL)
50 		fPageMapper->Delete();
51 
52 	// cycle through and free all of the user space page tables
53 
54 	STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);
55 		// assuming 1-1 split of the address space
56 
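	// Only the lower two page directories cover user space (0 - 2 GB) and
	// thus hold page tables owned by this map; the upper half belongs to the
	// shared kernel page directories.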
57 	for (uint32 k = 0; k < 2; k++) {
58 		pae_page_directory_entry* pageDir
59 			= fPagingStructures->VirtualPageDirs()[k];
60 		if (pageDir == NULL)
61 			continue;
62 
63 		for (uint32 i = 0; i < kPAEPageDirEntryCount; i++) {
64 			if ((pageDir[i] & X86_PAE_PDE_PRESENT) != 0) {
65 				phys_addr_t address = pageDir[i] & X86_PAE_PDE_ADDRESS_MASK;
66 				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
67 				if (page == NULL)
68 					panic("X86VMTranslationMapPAE::~X86VMTranslationMapPAE: "
69 						"didn't find page table page: page address: %#"
70 						B_PRIxPHYSADDR ", virtual base: %#" B_PRIxADDR "\n",
71 						address,
72 						(k * kPAEPageDirEntryCount + i) * kPAEPageTableRange);
73 				DEBUG_PAGE_ACCESS_START(page);
74 				vm_page_set_state(page, PAGE_STATE_FREE);
75 			}
76 		}
77 	}
78 
79 	fPagingStructures->RemoveReference();
80 }
81 
82 
83 status_t
84 X86VMTranslationMapPAE::Init(bool kernel)
85 {
86 	TRACE("X86VMTranslationMapPAE::Init()\n");
87 
88 	X86VMTranslationMap::Init(kernel);
89 
90 	fPagingStructures = new(std::nothrow) X86PagingStructuresPAE;
91 	if (fPagingStructures == NULL)
92 		return B_NO_MEMORY;
93 
94 	X86PagingMethodPAE* method = X86PagingMethodPAE::Method();
95 
96 	if (kernel) {
97 		// kernel
98 		// get the physical page mapper
99 		fPageMapper = method->KernelPhysicalPageMapper();
100 
101 		// we already know the kernel pgdir mapping
102 		fPagingStructures->Init(method->KernelVirtualPageDirPointerTable(),
103 			method->KernelPhysicalPageDirPointerTable(), NULL,
104 			method->KernelVirtualPageDirs(), method->KernelPhysicalPageDirs());
105 	} else {
106 		// user
107 		// allocate a physical page mapper
108 		status_t error = method->PhysicalPageMapper()
109 			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
110 		if (error != B_OK)
111 			return error;
112 
113 		// The following code assumes that the kernel address space occupies the
114 		// upper half of the virtual address space. This simplifies things a
115 		// lot, since it allows us to just use the upper two page directories
116 		// of the kernel and create two new lower page directories for the
117 		// userland.
118 		STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);
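		// Each PAE page directory covers 1 GB of the virtual address space,
		// so directories 0 and 1 map userland (0 - 2 GB) and directories 2
		// and 3 map the kernel half (2 - 4 GB).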
119 
120 		// allocate the page directories (both at once)
121 		pae_page_directory_entry* virtualPageDirs[4];
122 		phys_addr_t physicalPageDirs[4];
123 		virtualPageDirs[0] = (pae_page_directory_entry*)memalign(B_PAGE_SIZE,
124 			2 * B_PAGE_SIZE);
125 		if (virtualPageDirs[0] == NULL)
126 			return B_NO_MEMORY;
127 		virtualPageDirs[1] = virtualPageDirs[0] + kPAEPageTableEntryCount;
128 
129 		// clear the userland page directories
130 		memset(virtualPageDirs[0], 0, 2 * B_PAGE_SIZE);
131 
132 		// use the upper two kernel page directories
133 		for (int32 i = 2; i < 4; i++) {
134 			virtualPageDirs[i] = method->KernelVirtualPageDirs()[i];
135 			physicalPageDirs[i] = method->KernelPhysicalPageDirs()[i];
136 		}
137 
138 		// look up the page directories' physical addresses
139 		for (int32 i = 0; i < 2; i++) {
140 			vm_get_page_mapping(VMAddressSpace::KernelID(),
141 				(addr_t)virtualPageDirs[i], &physicalPageDirs[i]);
142 		}
143 
144 		// allocate the PDPT -- needs to have a 32 bit physical address
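		// (in PAE mode CR3 holds a 32 bit pointer to the PDPT, so the table
		// must lie below 4 GB)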
145 		phys_addr_t physicalPDPT;
146 		void* pdptHandle;
147 		pae_page_directory_pointer_table_entry* pdpt
148 			= (pae_page_directory_pointer_table_entry*)
149 				method->Allocate32BitPage(physicalPDPT, pdptHandle);
150 		if (pdpt == NULL) {
151 			free(virtualPageDirs[0]);
			return B_NO_MEMORY;
		}
152 
153 		// init the PDPT entries
154 		for (int32 i = 0; i < 4; i++) {
155 			pdpt[i] = (physicalPageDirs[i] & X86_PAE_PDPTE_ADDRESS_MASK)
156 				| X86_PAE_PDPTE_PRESENT;
157 		}
158 
159 		// init the paging structures
160 		fPagingStructures->Init(pdpt, physicalPDPT, pdptHandle, virtualPageDirs,
161 			physicalPageDirs);
162 	}
163 
164 	return B_OK;
165 }
166 
167 
168 size_t
169 X86VMTranslationMapPAE::MaxPagesNeededToMap(addr_t start, addr_t end) const
170 {
171 	// If start == 0, the actual base address is not yet known to the caller and
172 	// we shall assume the worst case.
173 	if (start == 0) {
174 		// offset the range so it has the worst possible alignment
175 		start = kPAEPageTableRange - B_PAGE_SIZE;
176 		end += kPAEPageTableRange - B_PAGE_SIZE;
177 	}
178 
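	// A PAE page table maps kPAEPageTableRange (2 MB) of address space, so
	// the number of page table pages needed equals the number of 2 MB ranges
	// the interval [start, end] intersects.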
179 	return end / kPAEPageTableRange + 1 - start / kPAEPageTableRange;
180 }
181 
182 
183 status_t
184 X86VMTranslationMapPAE::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
185 	uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
186 {
187 	TRACE("X86VMTranslationMapPAE::Map(): %#" B_PRIxADDR " -> %#" B_PRIxPHYSADDR
188 		"\n", virtualAddress, physicalAddress);
189 
190 	// check to see if a page table exists for this range
191 	pae_page_directory_entry* pageDirEntry
192 		= X86PagingMethodPAE::PageDirEntryForAddress(
193 			fPagingStructures->VirtualPageDirs(), virtualAddress);
194 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
195 		// we need to allocate a page table
196 		vm_page *page = vm_page_allocate_page(reservation,
197 			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
198 
199 		DEBUG_PAGE_ACCESS_END(page);
200 
201 		phys_addr_t physicalPageTable
202 			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
203 
204 		TRACE("X86VMTranslationMapPAE::Map(): asked for free page for "
205 			"page table: %#" B_PRIxPHYSADDR "\n", physicalPageTable);
206 
207 		// put it in the page dir
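		// Note that the directory entry is always created writable; the
		// actual per-page protection is enforced by the page table entries.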
208 		X86PagingMethodPAE::PutPageTableInPageDir(pageDirEntry,
209 			physicalPageTable,
210 			attributes
211 				| ((attributes & B_USER_PROTECTION) != 0
212 						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
213 
214 		fMapCount++;
215 	}
216 
217 	// now, fill in the page table entry
218 	struct thread* thread = thread_get_current_thread();
219 	ThreadCPUPinner pinner(thread);
220 
221 	pae_page_table_entry* pageTable
222 		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
223 			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
224 	pae_page_table_entry* entry = pageTable
225 		+ virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount;
226 
227 	ASSERT_PRINT((*entry & X86_PAE_PTE_PRESENT) == 0,
228 		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64,
229 		virtualAddress, *entry);
230 
231 	X86PagingMethodPAE::PutPageTableEntryInTable(entry, physicalAddress,
232 		attributes, memoryType, fIsKernelMap);
233 
234 	pinner.Unlock();
235 
236 	// Note: We don't need to invalidate the TLB for this address, as previously
237 	// the entry was not present and the TLB doesn't cache those entries.
238 
239 	fMapCount++;
240 
241 	return B_OK;
242 }
243 
244 
245 status_t
246 X86VMTranslationMapPAE::Unmap(addr_t start, addr_t end)
247 {
248 	start = ROUNDDOWN(start, B_PAGE_SIZE);
249 	if (start >= end)
250 		return B_OK;
251 
252 	TRACE("X86VMTranslationMapPAE::Unmap(): %#" B_PRIxADDR " - %#" B_PRIxADDR
253 		"\n", start, end);
254 
255 	do {
256 		pae_page_directory_entry* pageDirEntry
257 			= X86PagingMethodPAE::PageDirEntryForAddress(
258 				fPagingStructures->VirtualPageDirs(), start);
259 		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
260 			// no page table here, move the start up to access the next page
261 			// table
262 			start = ROUNDUP(start + 1, kPAEPageTableRange);
263 			continue;
264 		}
265 
266 		struct thread* thread = thread_get_current_thread();
267 		ThreadCPUPinner pinner(thread);
268 
269 		pae_page_table_entry* pageTable
270 			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
271 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
272 
273 		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
274 		for (; index < kPAEPageTableEntryCount && start < end;
275 				index++, start += B_PAGE_SIZE) {
276 			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
277 				// page mapping not valid
278 				continue;
279 			}
280 
281 			TRACE("X86VMTranslationMapPAE::Unmap(): removing page %#"
282 				B_PRIxADDR "\n", start);
283 
284 			pae_page_table_entry oldEntry
285 				= X86PagingMethodPAE::ClearPageTableEntryFlags(
286 					&pageTable[index], X86_PAE_PTE_PRESENT);
287 			fMapCount--;
288 
289 			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
290 				// Note that we only need to invalidate the address if the
291 				// accessed flag was set, since only then could the entry have
292 				// been in any TLB.
293 				InvalidatePage(start);
294 			}
295 		}
296 	} while (start != 0 && start < end);
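		// ("start != 0" catches the overflow that occurs when the range
		// reaches the top of the address space)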
297 
298 	return B_OK;
299 }
300 
301 
302 /*!	Caller must have locked the cache of the page to be unmapped.
303 	This object shouldn't be locked.
304 */
305 status_t
306 X86VMTranslationMapPAE::UnmapPage(VMArea* area, addr_t address,
307 	bool updatePageQueue)
308 {
309 	ASSERT(address % B_PAGE_SIZE == 0);
310 
311 	pae_page_directory_entry* pageDirEntry
312 		= X86PagingMethodPAE::PageDirEntryForAddress(
313 			fPagingStructures->VirtualPageDirs(), address);
314 
315 	TRACE("X86VMTranslationMapPAE::UnmapPage(%#" B_PRIxADDR ")\n", address);
316 
317 	RecursiveLocker locker(fLock);
318 
319 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
320 		return B_ENTRY_NOT_FOUND;
321 
322 	ThreadCPUPinner pinner(thread_get_current_thread());
323 
324 	pae_page_table_entry* pageTable
325 		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
326 			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
327 
328 	pae_page_table_entry oldEntry = X86PagingMethodPAE::ClearPageTableEntry(
329 		&pageTable[address / B_PAGE_SIZE % kPAEPageTableEntryCount]);
330 
331 	pinner.Unlock();
332 
333 	if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
334 		// page mapping not valid
335 		return B_ENTRY_NOT_FOUND;
336 	}
337 
338 	fMapCount--;
339 
340 	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
341 		// Note that we only need to invalidate the address if the
342 		// accessed flag was set, since only then could the entry have been
343 		// in any TLB.
344 		InvalidatePage(address);
345 
346 		Flush();
347 
348 		// NOTE: Between clearing the page table entry and Flush() other
349 		// processors (actually even this processor with another thread of the
350 		// same team) could still access the page in question via their cached
351 		// entry. We can obviously lose a modified flag in this case, with the
352 		// effect that the page looks unmodified (and might thus be recycled),
353 		// but is actually modified.
354 		// In most cases this is harmless, but for vm_remove_all_page_mappings()
355 		// this is actually a problem.
356 		// Interestingly FreeBSD seems to ignore this problem as well
357 		// (cf. pmap_remove_all()), unless I've missed something.
358 	}
359 
360 	locker.Detach();
361 		// PageUnmapped() will unlock for us
362 
363 	PageUnmapped(area, (oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
364 		(oldEntry & X86_PAE_PTE_ACCESSED) != 0,
365 		(oldEntry & X86_PAE_PTE_DIRTY) != 0, updatePageQueue);
366 
367 	return B_OK;
368 }
369 
370 
371 void
372 X86VMTranslationMapPAE::UnmapPages(VMArea* area, addr_t base, size_t size,
373 	bool updatePageQueue)
374 {
375 	if (size == 0)
376 		return;
377 
378 	addr_t start = base;
379 	addr_t end = base + size - 1;
380 
381 	TRACE("X86VMTranslationMapPAE::UnmapPages(%p, %#" B_PRIxADDR ", %#"
382 		B_PRIxADDR ")\n", area, start, end);
383 
384 	VMAreaMappings queue;
385 
386 	RecursiveLocker locker(fLock);
387 
388 	do {
389 		pae_page_directory_entry* pageDirEntry
390 			= X86PagingMethodPAE::PageDirEntryForAddress(
391 				fPagingStructures->VirtualPageDirs(), start);
392 		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
393 			// no page table here, move the start up to access the next page
394 			// table
395 			start = ROUNDUP(start + 1, kPAEPageTableRange);
396 			continue;
397 		}
398 
399 		struct thread* thread = thread_get_current_thread();
400 		ThreadCPUPinner pinner(thread);
401 
402 		pae_page_table_entry* pageTable
403 			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
404 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
405 
406 		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
407 		for (; index < kPAEPageTableEntryCount && start < end;
408 				index++, start += B_PAGE_SIZE) {
409 			pae_page_table_entry oldEntry
410 				= X86PagingMethodPAE::ClearPageTableEntry(&pageTable[index]);
411 			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0)
412 				continue;
413 
414 			fMapCount--;
415 
416 			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
417 				// Note that we only need to invalidate the address if the
418 				// accessed flag was set, since only then could the entry have
419 				// been in any TLB.
420 				InvalidatePage(start);
421 			}
422 
423 			if (area->cache_type != CACHE_TYPE_DEVICE) {
424 				// get the page
425 				vm_page* page = vm_lookup_page(
426 					(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
427 				ASSERT(page != NULL);
428 
429 				DEBUG_PAGE_ACCESS_START(page);
430 
431 				// transfer the accessed/dirty flags to the page
432 				if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0)
433 					page->accessed = true;
434 				if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
435 					page->modified = true;
436 
437 				// remove the mapping object/decrement the wired_count of the
438 				// page
439 				if (area->wiring == B_NO_LOCK) {
440 					vm_page_mapping* mapping = NULL;
441 					vm_page_mappings::Iterator iterator
442 						= page->mappings.GetIterator();
443 					while ((mapping = iterator.Next()) != NULL) {
444 						if (mapping->area == area)
445 							break;
446 					}
447 
448 					ASSERT(mapping != NULL);
449 
450 					area->mappings.Remove(mapping);
451 					page->mappings.Remove(mapping);
452 					queue.Add(mapping);
453 				} else
454 					page->wired_count--;
455 
456 				if (page->wired_count == 0 && page->mappings.IsEmpty()) {
457 					atomic_add(&gMappedPagesCount, -1);
458 
459 					if (updatePageQueue) {
460 						if (page->Cache()->temporary)
461 							vm_page_set_state(page, PAGE_STATE_INACTIVE);
462 						else if (page->modified)
463 							vm_page_set_state(page, PAGE_STATE_MODIFIED);
464 						else
465 							vm_page_set_state(page, PAGE_STATE_CACHED);
466 					}
467 				}
468 
469 				DEBUG_PAGE_ACCESS_END(page);
470 			}
471 		}
472 
473 		Flush();
474 			// flush explicitly, since we directly use the lock
475 	} while (start != 0 && start < end);
476 
477 	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
478 	// really critical here, as in all cases this method is used, the unmapped
479 	// area range is unmapped for good (resized/cut) and the pages will likely
480 	// be freed.
481 
482 	locker.Unlock();
483 
484 	// free removed mappings
485 	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
486 	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
487 		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
488 	while (vm_page_mapping* mapping = queue.RemoveHead())
489 		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
490 }
491 
492 
493 void
494 X86VMTranslationMapPAE::UnmapArea(VMArea* area, bool deletingAddressSpace,
495 	bool ignoreTopCachePageFlags)
496 {
497 	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
498 		X86VMTranslationMapPAE::UnmapPages(area, area->Base(), area->Size(),
499 			true);
500 		return;
501 	}
502 
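	// The page table entries only need to be cleared if the address space is
	// not being deleted as a whole, or if the caller still cares about the
	// accessed/dirty flags of pages in the area's top cache.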
503 	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
504 
505 	RecursiveLocker locker(fLock);
506 
507 	VMAreaMappings mappings;
508 	mappings.MoveFrom(&area->mappings);
509 
510 	for (VMAreaMappings::Iterator it = mappings.GetIterator();
511 			vm_page_mapping* mapping = it.Next();) {
512 		vm_page* page = mapping->page;
513 		page->mappings.Remove(mapping);
514 
515 		VMCache* cache = page->Cache();
516 
517 		bool pageFullyUnmapped = false;
518 		if (page->wired_count == 0 && page->mappings.IsEmpty()) {
519 			atomic_add(&gMappedPagesCount, -1);
520 			pageFullyUnmapped = true;
521 		}
522 
523 		if (unmapPages || cache != area->cache) {
524 			addr_t address = area->Base()
525 				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);
526 
527 			pae_page_directory_entry* pageDirEntry
528 				= X86PagingMethodPAE::PageDirEntryForAddress(
529 					fPagingStructures->VirtualPageDirs(), address);
530 			if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
531 				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
532 					"has no page dir entry", page, area, address);
533 				continue;
534 			}
535 
536 			ThreadCPUPinner pinner(thread_get_current_thread());
537 
538 			pae_page_table_entry* pageTable
539 				= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
540 					*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
541 			pae_page_table_entry oldEntry
542 				= X86PagingMethodPAE::ClearPageTableEntry(
543 					&pageTable[address / B_PAGE_SIZE
544 						% kPAEPageTableEntryCount]);
545 
546 			pinner.Unlock();
547 
548 			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
549 				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
550 					"has no page table entry", page, area, address);
551 				continue;
552 			}
553 
554 			// transfer the accessed/dirty flags to the page and invalidate
555 			// the mapping, if necessary
556 			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
557 				page->accessed = true;
558 
559 				if (!deletingAddressSpace)
560 					InvalidatePage(address);
561 			}
562 
563 			if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
564 				page->modified = true;
565 
566 			if (pageFullyUnmapped) {
567 				DEBUG_PAGE_ACCESS_START(page);
568 
569 				if (cache->temporary)
570 					vm_page_set_state(page, PAGE_STATE_INACTIVE);
571 				else if (page->modified)
572 					vm_page_set_state(page, PAGE_STATE_MODIFIED);
573 				else
574 					vm_page_set_state(page, PAGE_STATE_CACHED);
575 
576 				DEBUG_PAGE_ACCESS_END(page);
577 			}
578 		}
579 
580 		fMapCount--;
581 	}
582 
583 	Flush();
584 		// flush explicitly, since we directly use the lock
585 
586 	locker.Unlock();
587 
588 	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
589 	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
590 		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
591 	while (vm_page_mapping* mapping = mappings.RemoveHead())
592 		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
593 }
594 
595 
596 status_t
597 X86VMTranslationMapPAE::Query(addr_t virtualAddress,
598 	phys_addr_t* _physicalAddress, uint32* _flags)
599 {
600 	// default the flags to not present
601 	*_flags = 0;
602 	*_physicalAddress = 0;
603 
604 	// get the page directory entry
605 	pae_page_directory_entry* pageDirEntry
606 		= X86PagingMethodPAE::PageDirEntryForAddress(
607 			fPagingStructures->VirtualPageDirs(), virtualAddress);
608 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
609 		// no page table here
610 		return B_OK;
611 	}
612 
613 	// get the page table entry
614 	struct thread* thread = thread_get_current_thread();
615 	ThreadCPUPinner pinner(thread);
616 
617 	pae_page_table_entry* pageTable
618 		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
619 			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
620 	pae_page_table_entry entry
621 		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];
622 
623 	pinner.Unlock();
624 
625 	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;
626 
627 	// translate the page state flags
628 	if ((entry & X86_PAE_PTE_USER) != 0) {
629 		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
630 			| B_READ_AREA;
631 	}
632 
633 	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
634 		| B_KERNEL_READ_AREA
635 		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
636 		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
637 		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
638 
639 	TRACE("X86VMTranslationMapPAE::Query(%#" B_PRIxADDR ") -> %#"
640 		B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);
641 
642 	return B_OK;
643 }
644 
645 
646 status_t
647 X86VMTranslationMapPAE::QueryInterrupt(addr_t virtualAddress,
648 	phys_addr_t* _physicalAddress, uint32* _flags)
649 {
650 	// default the flags to not present
651 	*_flags = 0;
652 	*_physicalAddress = 0;
653 
654 	// get the page directory entry
655 	pae_page_directory_entry* pageDirEntry
656 		= X86PagingMethodPAE::PageDirEntryForAddress(
657 			fPagingStructures->VirtualPageDirs(), virtualAddress);
658 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
659 		// no page table here
660 		return B_OK;
661 	}
662 
663 	// get the page table entry
664 	pae_page_table_entry* pageTable
665 		= (pae_page_table_entry*)X86PagingMethodPAE::Method()
666 			->PhysicalPageMapper()->InterruptGetPageTableAt(
667 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
668 	pae_page_table_entry entry
669 		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];
670 
671 	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;
672 
673 	// translate the page state flags
674 	if ((entry & X86_PAE_PTE_USER) != 0) {
675 		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
676 			| B_READ_AREA;
677 	}
678 
679 	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
680 		| B_KERNEL_READ_AREA
681 		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
682 		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
683 		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
684 
685 	TRACE("X86VMTranslationMapPAE::QueryInterrupt(%#" B_PRIxADDR ") -> %#"
686 		B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);
687 
688 	return B_OK;
689 }
690 
691 
692 status_t
693 X86VMTranslationMapPAE::Protect(addr_t start, addr_t end, uint32 attributes,
694 	uint32 memoryType)
695 {
696 	start = ROUNDDOWN(start, B_PAGE_SIZE);
697 	if (start >= end)
698 		return B_OK;
699 
700 	TRACE("X86VMTranslationMapPAE::Protect(): %#" B_PRIxADDR " - %#" B_PRIxADDR
701 		", attributes: %#" B_PRIx32 "\n", start, end, attributes);
702 
703 	// compute protection flags
704 	uint64 newProtectionFlags = 0;
705 	if ((attributes & B_USER_PROTECTION) != 0) {
706 		newProtectionFlags = X86_PAE_PTE_USER;
707 		if ((attributes & B_WRITE_AREA) != 0)
708 			newProtectionFlags |= X86_PAE_PTE_WRITABLE;
709 	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
710 		newProtectionFlags = X86_PAE_PTE_WRITABLE;
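	// (read access needs no extra bit -- a present x86 page is always
	// readable)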
711 
712 	do {
713 		pae_page_directory_entry* pageDirEntry
714 			= X86PagingMethodPAE::PageDirEntryForAddress(
715 				fPagingStructures->VirtualPageDirs(), start);
716 		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
717 			// no page table here, move the start up to access the next page
718 			// table
719 			start = ROUNDUP(start + 1, kPAEPageTableRange);
720 			continue;
721 		}
722 
723 		struct thread* thread = thread_get_current_thread();
724 		ThreadCPUPinner pinner(thread);
725 
726 		pae_page_table_entry* pageTable
727 			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
728 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
729 
730 		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
731 		for (; index < kPAEPageTableEntryCount && start < end;
732 				index++, start += B_PAGE_SIZE) {
733 			pae_page_table_entry entry = pageTable[index];
734 			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
735 				// page mapping not valid
736 				continue;
737 			}
738 
739 			TRACE("X86VMTranslationMapPAE::Protect(): protect page %#"
740 				B_PRIxADDR "\n", start);
741 
742 			// set the new protection flags -- we want to do that atomically,
743 			// without changing the accessed or dirty flag
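			// If the accessed/dirty flags get set concurrently, the
			// test-and-set below fails and we retry with the updated value.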
744 			pae_page_table_entry oldEntry;
745 			while (true) {
746 				oldEntry = X86PagingMethodPAE::TestAndSetPageTableEntry(
747 					&pageTable[index],
748 					(entry & ~(X86_PAE_PTE_PROTECTION_MASK
749 							| X86_PAE_PTE_MEMORY_TYPE_MASK))
750 						| newProtectionFlags
751 						| X86PagingMethodPAE::MemoryTypeToPageTableEntryFlags(
752 							memoryType),
753 					entry);
754 				if (oldEntry == entry)
755 					break;
756 				entry = oldEntry;
757 			}
758 
759 			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
760 				// Note that we only need to invalidate the address if the
761 				// accessed flag was set, since only then could the entry have
762 				// been in any TLB.
763 				InvalidatePage(start);
764 			}
765 		}
766 	} while (start != 0 && start < end);
767 
768 	return B_OK;
769 }
770 
771 
772 status_t
773 X86VMTranslationMapPAE::ClearFlags(addr_t address, uint32 flags)
774 {
775 	pae_page_directory_entry* pageDirEntry
776 		= X86PagingMethodPAE::PageDirEntryForAddress(
777 			fPagingStructures->VirtualPageDirs(), address);
778 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
779 		// no page table here
780 		return B_OK;
781 	}
782 
783 	uint64 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PAE_PTE_DIRTY : 0)
784 		| ((flags & PAGE_ACCESSED) ? X86_PAE_PTE_ACCESSED : 0);
785 
786 	struct thread* thread = thread_get_current_thread();
787 	ThreadCPUPinner pinner(thread);
788 
789 	pae_page_table_entry* entry
790 		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
791 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
792 			+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;
793 
794 	// clear out the flags we've been requested to clear
795 	pae_page_table_entry oldEntry
796 		= X86PagingMethodPAE::ClearPageTableEntryFlags(entry, flagsToClear);
797 
798 	pinner.Unlock();
799 
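	// Only invalidate if one of the flags was actually set; otherwise no TLB
	// can hold a stale copy of the cleared flags.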
800 	if ((oldEntry & flagsToClear) != 0)
801 		InvalidatePage(address);
802 
803 	return B_OK;
804 }
805 
806 
807 bool
808 X86VMTranslationMapPAE::ClearAccessedAndModified(VMArea* area, addr_t address,
809 	bool unmapIfUnaccessed, bool& _modified)
810 {
811 	ASSERT(address % B_PAGE_SIZE == 0);
812 
813 	TRACE("X86VMTranslationMapPAE::ClearAccessedAndModified(%#" B_PRIxADDR
814 		")\n", address);
815 
816 	pae_page_directory_entry* pageDirEntry
817 		= X86PagingMethodPAE::PageDirEntryForAddress(
818 			fPagingStructures->VirtualPageDirs(), address);
819 
820 	RecursiveLocker locker(fLock);
821 
822 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
823 		return false;
824 
825 	ThreadCPUPinner pinner(thread_get_current_thread());
826 
827 	pae_page_table_entry* entry
828 		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
829 				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
830 			+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;
831 
832 	// perform the deed
833 	pae_page_table_entry oldEntry;
834 
835 	if (unmapIfUnaccessed) {
836 		while (true) {
837 			oldEntry = *entry;
838 			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
839 				// page mapping not valid
840 				return false;
841 			}
842 
843 			if (oldEntry & X86_PAE_PTE_ACCESSED) {
844 				// page was accessed -- just clear the flags
845 				oldEntry = X86PagingMethodPAE::ClearPageTableEntryFlags(entry,
846 					X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
847 				break;
848 			}
849 
850 			// page hasn't been accessed -- unmap it
851 			if (X86PagingMethodPAE::TestAndSetPageTableEntry(entry, 0, oldEntry)
852 					== oldEntry) {
853 				break;
854 			}
855 
856 			// something changed -- check again
857 		}
858 	} else {
859 		oldEntry = X86PagingMethodPAE::ClearPageTableEntryFlags(entry,
860 			X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
861 	}
862 
863 	pinner.Unlock();
864 
865 	_modified = (oldEntry & X86_PAE_PTE_DIRTY) != 0;
866 
867 	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
868 		// Note that we only need to invalidate the address if the
869 		// accessed flag was set, since only then could the entry have been
870 		// in any TLB.
871 		InvalidatePage(address);
872 		Flush();
873 
874 		return true;
875 	}
876 
877 	if (!unmapIfUnaccessed)
878 		return false;
879 
880 	// We have unmapped the address. Do the "high level" stuff.
881 
882 	fMapCount--;
883 
884 	locker.Detach();
885 		// UnaccessedPageUnmapped() will unlock for us
886 
887 	UnaccessedPageUnmapped(area,
888 		(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
889 
890 	return false;
891 }
892 
893 
894 X86PagingStructures*
895 X86VMTranslationMapPAE::PagingStructures() const
896 {
897 	return fPagingStructures;
898 }
899 
900 
901 #endif	// B_HAIKU_PHYSICAL_BITS == 64
902