xref: /haiku/src/system/kernel/arch/x86/paging/32bit/X86VMTranslationMap32Bit.cpp (revision caed67a8cba83913b9c21ac2b06ebc6bd1cb3111)
/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/32bit/X86VMTranslationMap32Bit.h"

#include <stdlib.h>
#include <string.h>

#include <int.h>
#include <thread.h>
#include <slab/Slab.h>
#include <smp.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>
#include <util/queue.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/32bit/X86PagingMethod32Bit.h"
#include "paging/32bit/X86PagingStructures32Bit.h"
#include "paging/x86_physical_page_mapper.h"


//#define TRACE_X86_VM_TRANSLATION_MAP_32_BIT
#ifdef TRACE_X86_VM_TRANSLATION_MAP_32_BIT
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


X86VMTranslationMap32Bit::X86VMTranslationMap32Bit()
	:
	fPagingStructures(NULL)
{
}


X86VMTranslationMap32Bit::~X86VMTranslationMap32Bit()
{
	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL)
		fPageMapper->Delete();

	if (fPagingStructures->pgdir_virt != NULL) {
		// cycle through and free all of the user space pgtables
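		// (Only the user-space half is torn down here; the kernel's page
		// tables are shared by all address spaces and remain in place.)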
		for (uint32 i = VADDR_TO_PDENT(USER_BASE);
				i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
			if ((fPagingStructures->pgdir_virt[i] & X86_PDE_PRESENT) != 0) {
				addr_t address = fPagingStructures->pgdir_virt[i]
					& X86_PDE_ADDRESS_MASK;
				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
				if (!page)
					panic("destroy_tmap: didn't find pgtable page\n");
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
		}
	}

	fPagingStructures->RemoveReference();
}


status_t
X86VMTranslationMap32Bit::Init(bool kernel)
{
	TRACE("X86VMTranslationMap32Bit::Init()\n");

	X86VMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) X86PagingStructures32Bit;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	X86PagingMethod32Bit* method = X86PagingMethod32Bit::Method();

	if (!kernel) {
		// user
		// allocate a physical page mapper
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// allocate the page directory
		page_directory_entry* virtualPageDir = (page_directory_entry*)memalign(
			B_PAGE_SIZE, B_PAGE_SIZE);
		if (virtualPageDir == NULL)
			return B_NO_MEMORY;

		// look up the page directory's physical address
		phys_addr_t physicalPageDir;
		vm_get_page_mapping(VMAddressSpace::KernelID(),
			(addr_t)virtualPageDir, &physicalPageDir);

		fPagingStructures->Init(virtualPageDir, physicalPageDir,
			method->KernelVirtualPageDirectory());
	} else {
		// kernel
		// get the physical page mapper
		fPageMapper = method->KernelPhysicalPageMapper();

		// we already know the kernel pgdir mapping
		fPagingStructures->Init(method->KernelVirtualPageDirectory(),
			method->KernelPhysicalPageDirectory(), NULL);
	}

	return B_OK;
}


size_t
X86VMTranslationMap32Bit::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// If start == 0, the actual base address is not yet known to the caller
	// and we shall assume the worst case.
	if (start == 0) {
		// offset the range so it has the worst possible alignment
		start = 1023 * B_PAGE_SIZE;
		end += 1023 * B_PAGE_SIZE;
	}
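	// Illustrative worst case: one page table covers 1024 pages (4 MB), so
	// a two-page range starting at 1023 * B_PAGE_SIZE straddles a page table
	// boundary -- VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start) then
	// yields 2 page tables for only 8 KB of mappings.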

	return VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start);
}


status_t
X86VMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
	uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);

/*
	dprintf("pgdir at 0x%x\n", pgdir);
	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
*/
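	// A 32-bit virtual address splits into three fields (standard non-PAE
	// paging): bits 31-22 index the page directory, bits 21-12 index the
	// page table, and bits 11-0 are the byte offset within the page; e.g.
	// va 0x00801234 -> pgdir entry 2, pgtable entry 1, offset 0x234.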
	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	// check to see if a page table exists for this range
	uint32 index = VADDR_TO_PDENT(va);
	if ((pd[index] & X86_PDE_PRESENT) == 0) {
		phys_addr_t pgtable;
		vm_page *page;

		// we need to allocate a pgtable
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);

		// put it in the pgdir
		X86PagingMethod32Bit::PutPageTableInPageDir(&pd[index], pgtable,
			attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
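		// (The directory entry is always made writable here; per-page
		// protection is enforced at the page table entry level instead.)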

		// update any other page directories, if it maps kernel space
		if (index >= FIRST_KERNEL_PGDIR_ENT
			&& index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
			X86PagingStructures32Bit::UpdateAllPageDirs(index, pd[index]);
		}

		fMapCount++;
	}

	// now, fill in the page table entry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & X86_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	ASSERT_PRINT((pt[index] & X86_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
		pt[index]);

	X86PagingMethod32Bit::PutPageTableEntryInTable(&pt[index], pa, attributes,
		memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as the
	// entry was not present before, and the TLB doesn't cache non-present
	// entries.

	fMapCount++;

	return B_OK;
}


status_t
X86VMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & X86_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}
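		// (kPageTableAlignment is the 4 MB range covered by one page table,
		// so the ROUNDUP above jumps to the first address served by the next
		// page table.)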

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & X86_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if ((pt[index] & X86_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("unmap_tmap: removing page 0x%lx\n", start);

			page_table_entry oldEntry
				= X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
					X86_PTE_PRESENT);
			fMapCount--;

			if ((oldEntry & X86_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);
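		// (start == 0 means the address arithmetic wrapped around the top of
		// the address space, i.e. the range is exhausted.)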

	return B_OK;
}


status_t
X86VMTranslationMap32Bit::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & X86_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & X86_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if ((pt[index] & X86_PTE_PRESENT) == 0) {
				if (!markPresent)
					continue;

				X86PagingMethod32Bit::SetPageTableEntryFlags(&pt[index],
					X86_PTE_PRESENT);
			} else {
				if (markPresent)
					continue;

				page_table_entry oldEntry
					= X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
						X86_PTE_PRESENT);

				if ((oldEntry & X86_PTE_ACCESSED) != 0) {
					// Note that we only need to invalidate the address if the
					// accessed flag was set, since only then the entry could
					// have been in any TLB.
					InvalidatePage(start);
				}
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/
status_t
X86VMTranslationMap32Bit::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	TRACE("X86VMTranslationMap32Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);

	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & X86_PDE_PRESENT) == 0)
		return B_ENTRY_NOT_FOUND;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & X86_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);
	page_table_entry oldEntry = X86PagingMethod32Bit::ClearPageTableEntry(
		&pt[index]);

	pinner.Unlock();

	if ((oldEntry & X86_PTE_PRESENT) == 0) {
		// page mapping not valid
		return B_ENTRY_NOT_FOUND;
	}

	fMapCount--;

	if ((oldEntry & X86_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any TLB.
		InvalidatePage(address);
		Flush();

		// NOTE: Between clearing the page table entry and Flush(), other
		// processors (actually even this processor with another thread of the
		// same team) could still access the page in question via their cached
		// entry. We can obviously lose a modified flag in this case, with the
		// effect that the page looks unmodified (and might thus be recycled),
		// although it is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// it is actually a problem.
		// Interestingly, FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & X86_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		(oldEntry & X86_PTE_ACCESSED) != 0, (oldEntry & X86_PTE_DIRTY) != 0,
		updatePageQueue);

	return B_OK;
}


void
X86VMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("X86VMTranslationMap32Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	VMAreaMappings queue;
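	// Mappings removed below are collected in this queue and freed only
	// after the map's lock has been released again (see the end of this
	// method).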

	RecursiveLocker locker(fLock);

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & X86_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & X86_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
				index++, start += B_PAGE_SIZE) {
			page_table_entry oldEntry
				= X86PagingMethod32Bit::ClearPageTableEntry(&pt[index]);
			if ((oldEntry & X86_PTE_PRESENT) == 0)
				continue;

			fMapCount--;

			if ((oldEntry & X86_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & X86_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if ((oldEntry & X86_PTE_ACCESSED) != 0)
					page->accessed = true;
				if ((oldEntry & X86_PTE_DIRTY) != 0)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical, since in all cases where this method is used the
	// unmapped area range is unmapped for good (resized/cut) and the pages
	// will likely be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead()) {
		vm_free_page_mapping(mapping->page->physical_page_number, mapping,
			freeFlags);
	}
}


void
X86VMTranslationMap32Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		X86VMTranslationMap32Bit::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
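	// If the whole address space is being deleted and the caller does not
	// care about the top cache's page flags, entries for pages belonging to
	// the area's own (top) cache need not be cleared individually (see the
	// cache check below).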

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	RecursiveLocker locker(fLock);

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			int index = VADDR_TO_PDENT(address);
			if ((pd[index] & X86_PDE_PRESENT) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page dir entry", page, area, address);
				continue;
			}

			ThreadCPUPinner pinner(thread_get_current_thread());

			page_table_entry* pt
				= (page_table_entry*)fPageMapper->GetPageTableAt(
					pd[index] & X86_PDE_ADDRESS_MASK);
			page_table_entry oldEntry
				= X86PagingMethod32Bit::ClearPageTableEntry(
					&pt[VADDR_TO_PTENT(address)]);

			pinner.Unlock();

			if ((oldEntry & X86_PTE_PRESENT) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if ((oldEntry & X86_PTE_ACCESSED) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if ((oldEntry & X86_PTE_DIRTY) != 0)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead()) {
		vm_free_page_mapping(mapping->page->physical_page_number, mapping,
			freeFlags);
	}
}


status_t
X86VMTranslationMap32Bit::Query(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry *pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & X86_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & X86_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	*_physical = entry & X86_PTE_ADDRESS_MASK;

	// read in the page state flags
	if ((entry & X86_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
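	// For example, a present, accessed, dirty, user-writable page yields
	// B_READ_AREA | B_WRITE_AREA | B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA
	// | PAGE_ACCESSED | PAGE_MODIFIED | PAGE_PRESENT.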

	pinner.Unlock();

	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);

	return B_OK;
}


status_t
X86VMTranslationMap32Bit::QueryInterrupt(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & X86_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	// map page table entry
	page_table_entry* pt = (page_table_entry*)X86PagingMethod32Bit::Method()
		->PhysicalPageMapper()->InterruptGetPageTableAt(
			pd[index] & X86_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	*_physical = entry & X86_PTE_ADDRESS_MASK;

	// read in the page state flags
	if ((entry & X86_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

	return B_OK;
}


status_t
X86VMTranslationMap32Bit::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
		attributes);

	// compute protection flags
	uint32 newProtectionFlags = 0;
	if ((attributes & B_USER_PROTECTION) != 0) {
		newProtectionFlags = X86_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newProtectionFlags |= X86_PTE_WRITABLE;
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newProtectionFlags = X86_PTE_WRITABLE;

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & X86_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & X86_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); index < 1024 && start < end;
				index++, start += B_PAGE_SIZE) {
			page_table_entry entry = pt[index];
			if ((entry & X86_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("protect_tmap: protect page 0x%lx\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			page_table_entry oldEntry;
			while (true) {
				oldEntry = X86PagingMethod32Bit::TestAndSetPageTableEntry(
					&pt[index],
					(entry & ~(X86_PTE_PROTECTION_MASK
							| X86_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags
						| X86PagingMethod32Bit::MemoryTypeToPageTableEntryFlags(
							memoryType),
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}
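			// (TestAndSetPageTableEntry() is a compare-and-swap: the new
			// value is stored only if the entry still equals the expected
			// one. If another CPU toggled the accessed/dirty bits in the
			// meantime, we retry with the value that was read back.)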

			if ((oldEntry & X86_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


status_t
X86VMTranslationMap32Bit::ClearFlags(addr_t va, uint32 flags)
{
	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & X86_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? X86_PTE_ACCESSED : 0);

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & X86_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	// clear out the flags we've been requested to clear
	page_table_entry oldEntry
		= X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
			flagsToClear);

	pinner.Unlock();

	if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(va);

	return B_OK;
}


bool
X86VMTranslationMap32Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	TRACE("X86VMTranslationMap32Bit::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & X86_PDE_PRESENT) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & X86_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);

	// perform the deed
	page_table_entry oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = pt[index];
			if ((oldEntry & X86_PTE_PRESENT) == 0) {
				// page mapping not valid
				return false;
			}

			if ((oldEntry & X86_PTE_ACCESSED) != 0) {
				// page was accessed -- just clear the flags
				oldEntry = X86PagingMethod32Bit::ClearPageTableEntryFlags(
					&pt[index], X86_PTE_ACCESSED | X86_PTE_DIRTY);
				break;
			}

			// page hasn't been accessed -- unmap it
			if (X86PagingMethod32Bit::TestAndSetPageTableEntry(&pt[index], 0,
					oldEntry) == oldEntry) {
				break;
			}

			// something changed -- check again
		}
	} else {
		oldEntry = X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
			X86_PTE_ACCESSED | X86_PTE_DIRTY);
	}

	pinner.Unlock();

	_modified = (oldEntry & X86_PTE_DIRTY) != 0;

	if ((oldEntry & X86_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any TLB.
		InvalidatePage(address);

		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & X86_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

	return false;
}


X86PagingStructures*
X86VMTranslationMap32Bit::PagingStructures() const
{
	return fPagingStructures;
}
