1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include "paging/32bit/X86VMTranslationMap32Bit.h"
12 
13 #include <stdlib.h>
14 #include <string.h>
15 
16 #include <int.h>
17 #include <thread.h>
18 #include <slab/Slab.h>
19 #include <smp.h>
20 #include <util/AutoLock.h>
21 #include <util/queue.h>
22 #include <vm/vm_page.h>
23 #include <vm/vm_priv.h>
24 #include <vm/VMAddressSpace.h>
25 #include <vm/VMCache.h>
26 
27 #include "paging/32bit/X86PagingMethod32Bit.h"
28 #include "paging/32bit/X86PagingStructures32Bit.h"
29 #include "paging/x86_physical_page_mapper.h"
30 
31 
32 //#define TRACE_X86_VM_TRANSLATION_MAP_32_BIT
33 #ifdef TRACE_X86_VM_TRANSLATION_MAP_32_BIT
34 #	define TRACE(x...) dprintf(x)
35 #else
36 #	define TRACE(x...) ;
37 #endif
38 
39 
40 X86VMTranslationMap32Bit::X86VMTranslationMap32Bit()
41 	:
42 	fPagingStructures(NULL)
43 {
44 }
45 
46 
47 X86VMTranslationMap32Bit::~X86VMTranslationMap32Bit()
48 {
49 	if (fPagingStructures == NULL)
50 		return;
51 
52 	if (fPageMapper != NULL)
53 		fPageMapper->Delete();
54 
55 	if (fPagingStructures->pgdir_virt != NULL) {
56 		// cycle through and free all of the user space pgtables
57 		for (uint32 i = VADDR_TO_PDENT(USER_BASE);
58 				i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
59 			if ((fPagingStructures->pgdir_virt[i] & X86_PDE_PRESENT) != 0) {
60 				addr_t address = fPagingStructures->pgdir_virt[i]
61 					& X86_PDE_ADDRESS_MASK;
62 				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
63 				if (!page)
64 					panic("destroy_tmap: didn't find pgtable page\n");
65 				DEBUG_PAGE_ACCESS_START(page);
66 				vm_page_set_state(page, PAGE_STATE_FREE);
67 			}
68 		}
69 	}
70 
71 	fPagingStructures->RemoveReference();
72 }
73 
74 
75 status_t
76 X86VMTranslationMap32Bit::Init(bool kernel)
77 {
78 	TRACE("X86VMTranslationMap32Bit::Init()\n");
79 
80 	X86VMTranslationMap::Init(kernel);
81 
82 	fPagingStructures = new(std::nothrow) X86PagingStructures32Bit;
83 	if (fPagingStructures == NULL)
84 		return B_NO_MEMORY;
85 
86 	X86PagingMethod32Bit* method = X86PagingMethod32Bit::Method();
87 
88 	if (!kernel) {
89 		// user
90 		// allocate a physical page mapper
91 		status_t error = method->PhysicalPageMapper()
92 			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
93 		if (error != B_OK)
94 			return error;
95 
96 		// allocate the page directory
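		// (allocated with memalign(), since the hardware requires the page
		// directory loaded into CR3 to be page-aligned)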
97 		page_directory_entry* virtualPageDir = (page_directory_entry*)memalign(
98 			B_PAGE_SIZE, B_PAGE_SIZE);
99 		if (virtualPageDir == NULL)
100 			return B_NO_MEMORY;
101 
102 		// look up the page directory's physical address
103 		phys_addr_t physicalPageDir;
104 		vm_get_page_mapping(VMAddressSpace::KernelID(),
105 			(addr_t)virtualPageDir, &physicalPageDir);
106 
107 		fPagingStructures->Init(virtualPageDir, physicalPageDir,
108 			method->KernelVirtualPageDirectory());
109 	} else {
110 		// kernel
111 		// get the physical page mapper
112 		fPageMapper = method->KernelPhysicalPageMapper();
113 
114 		// we already know the kernel pgdir mapping
115 		fPagingStructures->Init(method->KernelVirtualPageDirectory(),
116 			method->KernelPhysicalPageDirectory(), NULL);
117 	}
118 
119 	return B_OK;
120 }
121 
122 
123 size_t
124 X86VMTranslationMap32Bit::MaxPagesNeededToMap(addr_t start, addr_t end) const
125 {
126 	// If start == 0, the actual base address is not yet known to the caller and
127 	// we shall assume the worst case.
128 	if (start == 0) {
129 		// offset the range so it has the worst possible alignment
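		// (a 32-bit page table covers 1024 pages, so starting at the last slot
		// of a table makes the range span the maximum number of page tables)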
130 		start = 1023 * B_PAGE_SIZE;
131 		end += 1023 * B_PAGE_SIZE;
132 	}
133 
134 	return VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start);
135 }
136 
137 
138 status_t
139 X86VMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
140 	uint32 memoryType, vm_page_reservation* reservation)
141 {
142 	TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
143 
144 /*
145 	dprintf("pgdir at 0x%x\n", pgdir);
146 	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
147 	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
148 	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
149 	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
150 	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
151 */
152 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
153 
154 	// check to see if a page table exists for this range
155 	uint32 index = VADDR_TO_PDENT(va);
156 	if ((pd[index] & X86_PDE_PRESENT) == 0) {
157 		phys_addr_t pgtable;
158 		vm_page *page;
159 
160 		// we need to allocate a pgtable
161 		page = vm_page_allocate_page(reservation,
162 			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
163 
164 		DEBUG_PAGE_ACCESS_END(page);
165 
166 		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
167 
168 		TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);
169 
170 		// put it in the pgdir
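		// (the directory entry is always given write permission; for user pages
		// the effective protection is the combination of PDE and PTE flags, so
		// write access is restricted at the page table entry level)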
171 		X86PagingMethod32Bit::PutPageTableInPageDir(&pd[index], pgtable,
172 			attributes
173 				| ((attributes & B_USER_PROTECTION) != 0
174 						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
175 
176 		// update any other page directories, if the new table maps kernel space
177 		if (index >= FIRST_KERNEL_PGDIR_ENT
178 			&& index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
179 			X86PagingStructures32Bit::UpdateAllPageDirs(index, pd[index]);
180 		}
181 
182 		fMapCount++;
183 	}
184 
185 	// now, fill in the page table entry
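	// (pin the thread to this CPU while the page table is accessed through the
	// physical page mapper, since the mapping it returns may be CPU-local)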
186 	Thread* thread = thread_get_current_thread();
187 	ThreadCPUPinner pinner(thread);
188 
189 	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
190 		pd[index] & X86_PDE_ADDRESS_MASK);
191 	index = VADDR_TO_PTENT(va);
192 
193 	ASSERT_PRINT((pt[index] & X86_PTE_PRESENT) == 0,
194 		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
195 		pt[index]);
196 
197 	X86PagingMethod32Bit::PutPageTableEntryInTable(&pt[index], pa, attributes,
198 		memoryType, fIsKernelMap);
199 
200 	pinner.Unlock();
201 
202 	// Note: We don't need to invalidate the TLB for this address, since the
203 	// entry was not present before and the TLB doesn't cache non-present entries.
204 
205 	fMapCount++;
206 
207 	return B_OK;
208 }
209 
210 
211 status_t
212 X86VMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
213 {
214 	start = ROUNDDOWN(start, B_PAGE_SIZE);
215 	if (start >= end)
216 		return B_OK;
217 
218 	TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);
219 
220 	page_directory_entry *pd = fPagingStructures->pgdir_virt;
221 
222 	do {
223 		int index = VADDR_TO_PDENT(start);
224 		if ((pd[index] & X86_PDE_PRESENT) == 0) {
225 			// no page table here, move the start up to access the next page
226 			// table
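			// (kPageTableAlignment is the address range one page table maps,
			// i.e. 1024 pages or 4 MB)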
227 			start = ROUNDUP(start + 1, kPageTableAlignment);
228 			continue;
229 		}
230 
231 		Thread* thread = thread_get_current_thread();
232 		ThreadCPUPinner pinner(thread);
233 
234 		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
235 			pd[index] & X86_PDE_ADDRESS_MASK);
236 
237 		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
238 				index++, start += B_PAGE_SIZE) {
239 			if ((pt[index] & X86_PTE_PRESENT) == 0) {
240 				// page mapping not valid
241 				continue;
242 			}
243 
244 			TRACE("unmap_tmap: removing page 0x%lx\n", start);
245 
246 			page_table_entry oldEntry
247 				= X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
248 					X86_PTE_PRESENT);
249 			fMapCount--;
250 
251 			if ((oldEntry & X86_PTE_ACCESSED) != 0) {
252 				// Note that we only need to invalidate the address if the
253 				// accessed flag was set, since only then could the entry have
254 				// been in any TLB.
255 				InvalidatePage(start);
256 			}
257 		}
258 	} while (start != 0 && start < end);
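		// (start == 0 catches the wrap-around past the top of the address space)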
259 
260 	return B_OK;
261 }
262 
263 
264 status_t
265 X86VMTranslationMap32Bit::DebugMarkRangePresent(addr_t start, addr_t end,
266 	bool markPresent)
267 {
268 	start = ROUNDDOWN(start, B_PAGE_SIZE);
269 	if (start >= end)
270 		return B_OK;
271 
272 	page_directory_entry *pd = fPagingStructures->pgdir_virt;
273 
274 	do {
275 		int index = VADDR_TO_PDENT(start);
276 		if ((pd[index] & X86_PDE_PRESENT) == 0) {
277 			// no page table here, move the start up to access the next page
278 			// table
279 			start = ROUNDUP(start + 1, kPageTableAlignment);
280 			continue;
281 		}
282 
283 		Thread* thread = thread_get_current_thread();
284 		ThreadCPUPinner pinner(thread);
285 
286 		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
287 			pd[index] & X86_PDE_ADDRESS_MASK);
288 
289 		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
290 				index++, start += B_PAGE_SIZE) {
291 			if ((pt[index] & X86_PTE_PRESENT) == 0) {
292 				if (!markPresent)
293 					continue;
294 
295 				X86PagingMethod32Bit::SetPageTableEntryFlags(&pt[index],
296 					X86_PTE_PRESENT);
297 			} else {
298 				if (markPresent)
299 					continue;
300 
301 				page_table_entry oldEntry
302 					= X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
303 						X86_PTE_PRESENT);
304 
305 				if ((oldEntry & X86_PTE_ACCESSED) != 0) {
306 					// Note that we only need to invalidate the address if the
307 					// accessed flag was set, since only then could the entry
308 					// have been in any TLB.
309 					InvalidatePage(start);
310 				}
311 			}
312 		}
313 	} while (start != 0 && start < end);
314 
315 	return B_OK;
316 }
317 
318 
319 /*!	Caller must have locked the cache of the page to be unmapped.
320 	This object shouldn't be locked.
321 */
322 status_t
323 X86VMTranslationMap32Bit::UnmapPage(VMArea* area, addr_t address,
324 	bool updatePageQueue)
325 {
326 	ASSERT(address % B_PAGE_SIZE == 0);
327 
328 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
329 
330 	TRACE("X86VMTranslationMap32Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);
331 
332 	RecursiveLocker locker(fLock);
333 
334 	int index = VADDR_TO_PDENT(address);
335 	if ((pd[index] & X86_PDE_PRESENT) == 0)
336 		return B_ENTRY_NOT_FOUND;
337 
338 	ThreadCPUPinner pinner(thread_get_current_thread());
339 
340 	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
341 		pd[index] & X86_PDE_ADDRESS_MASK);
342 
343 	index = VADDR_TO_PTENT(address);
344 	page_table_entry oldEntry = X86PagingMethod32Bit::ClearPageTableEntry(
345 		&pt[index]);
346 
347 	pinner.Unlock();
348 
349 	if ((oldEntry & X86_PTE_PRESENT) == 0) {
350 		// page mapping not valid
351 		return B_ENTRY_NOT_FOUND;
352 	}
353 
354 	fMapCount--;
355 
356 	if ((oldEntry & X86_PTE_ACCESSED) != 0) {
357 		// Note that we only need to invalidate the address if the
358 		// accessed flag was set, since only then could the entry have been
359 		// in any TLB.
360 		InvalidatePage(address);
361 		Flush();
362 
363 		// NOTE: Between clearing the page table entry and Flush() other
364 		// processors (actually even this processor with another thread of the
365 		// same team) could still access the page in question via their cached
366 		// entry. We can obviously lose a modified flag in this case, with the
367 		// effect that the page looks unmodified (and might thus be recycled),
368 		// but is actually modified.
369 		// In most cases this is harmless, but for vm_remove_all_page_mappings()
370 		// this is actually a problem.
371 		// Interestingly FreeBSD seems to ignore this problem as well
372 		// (cf. pmap_remove_all()), unless I've missed something.
373 	}
374 
375 	locker.Detach();
376 		// PageUnmapped() will unlock for us
377 
378 	PageUnmapped(area, (oldEntry & X86_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
379 		(oldEntry & X86_PTE_ACCESSED) != 0, (oldEntry & X86_PTE_DIRTY) != 0,
380 		updatePageQueue);
381 
382 	return B_OK;
383 }
384 
385 
386 void
387 X86VMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
388 	bool updatePageQueue)
389 {
390 	if (size == 0)
391 		return;
392 
393 	addr_t start = base;
394 	addr_t end = base + size - 1;
395 
396 	TRACE("X86VMTranslationMap32Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
397 		B_PRIxADDR ")\n", area, start, end);
398 
399 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
400 
401 	VMAreaMappings queue;
402 
403 	RecursiveLocker locker(fLock);
404 
405 	do {
406 		int index = VADDR_TO_PDENT(start);
407 		if ((pd[index] & X86_PDE_PRESENT) == 0) {
408 			// no page table here, move the start up to access the next page
409 			// table
410 			start = ROUNDUP(start + 1, kPageTableAlignment);
411 			continue;
412 		}
413 
414 		Thread* thread = thread_get_current_thread();
415 		ThreadCPUPinner pinner(thread);
416 
417 		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
418 			pd[index] & X86_PDE_ADDRESS_MASK);
419 
420 		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
421 				index++, start += B_PAGE_SIZE) {
422 			page_table_entry oldEntry
423 				= X86PagingMethod32Bit::ClearPageTableEntry(&pt[index]);
424 			if ((oldEntry & X86_PTE_PRESENT) == 0)
425 				continue;
426 
427 			fMapCount--;
428 
429 			if ((oldEntry & X86_PTE_ACCESSED) != 0) {
430 				// Note that we only need to invalidate the address if the
431 				// accessed flag was set, since only then could the entry have
432 				// been in any TLB.
433 				InvalidatePage(start);
434 			}
435 
436 			if (area->cache_type != CACHE_TYPE_DEVICE) {
437 				// get the page
438 				vm_page* page = vm_lookup_page(
439 					(oldEntry & X86_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
440 				ASSERT(page != NULL);
441 
442 				DEBUG_PAGE_ACCESS_START(page);
443 
444 				// transfer the accessed/dirty flags to the page
445 				if ((oldEntry & X86_PTE_ACCESSED) != 0)
446 					page->accessed = true;
447 				if ((oldEntry & X86_PTE_DIRTY) != 0)
448 					page->modified = true;
449 
450 				// remove the mapping object/decrement the wired_count of the
451 				// page
452 				if (area->wiring == B_NO_LOCK) {
453 					vm_page_mapping* mapping = NULL;
454 					vm_page_mappings::Iterator iterator
455 						= page->mappings.GetIterator();
456 					while ((mapping = iterator.Next()) != NULL) {
457 						if (mapping->area == area)
458 							break;
459 					}
460 
461 					ASSERT(mapping != NULL);
462 
463 					area->mappings.Remove(mapping);
464 					page->mappings.Remove(mapping);
465 					queue.Add(mapping);
466 				} else
467 					page->DecrementWiredCount();
468 
469 				if (!page->IsMapped()) {
470 					atomic_add(&gMappedPagesCount, -1);
471 
472 					if (updatePageQueue) {
473 						if (page->Cache()->temporary)
474 							vm_page_set_state(page, PAGE_STATE_INACTIVE);
475 						else if (page->modified)
476 							vm_page_set_state(page, PAGE_STATE_MODIFIED);
477 						else
478 							vm_page_set_state(page, PAGE_STATE_CACHED);
479 					}
480 				}
481 
482 				DEBUG_PAGE_ACCESS_END(page);
483 			}
484 		}
485 
486 		Flush();
487 			// flush explicitly, since we directly use the lock
488 	} while (start != 0 && start < end);
489 
490 	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM that's not
491 	// really critical, since in all cases where this method is used the unmapped
492 	// address range is unmapped for good (resized/cut) and the pages will likely
493 	// be freed.
494 
495 	locker.Unlock();
496 
497 	// free removed mappings
498 	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
499 	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
500 		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
501 	while (vm_page_mapping* mapping = queue.RemoveHead())
502 		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
503 }
504 
505 
506 void
507 X86VMTranslationMap32Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
508 	bool ignoreTopCachePageFlags)
509 {
510 	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
511 		X86VMTranslationMap32Bit::UnmapPages(area, area->Base(), area->Size(),
512 			true);
513 		return;
514 	}
515 
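	// When the whole address space is being deleted and the top cache's page
	// flags may be ignored, we don't have to clear the page table entries of
	// pages belonging to the area's top cache; only the bookkeeping remains.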
516 	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
517 
518 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
519 
520 	RecursiveLocker locker(fLock);
521 
522 	VMAreaMappings mappings;
523 	mappings.MoveFrom(&area->mappings);
524 
525 	for (VMAreaMappings::Iterator it = mappings.GetIterator();
526 			vm_page_mapping* mapping = it.Next();) {
527 		vm_page* page = mapping->page;
528 		page->mappings.Remove(mapping);
529 
530 		VMCache* cache = page->Cache();
531 
532 		bool pageFullyUnmapped = false;
533 		if (!page->IsMapped()) {
534 			atomic_add(&gMappedPagesCount, -1);
535 			pageFullyUnmapped = true;
536 		}
537 
538 		if (unmapPages || cache != area->cache) {
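			// compute the page's virtual address within the area
			// (page->cache_offset is in pages, area->cache_offset in bytes)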
539 			addr_t address = area->Base()
540 				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);
541 
542 			int index = VADDR_TO_PDENT(address);
543 			if ((pd[index] & X86_PDE_PRESENT) == 0) {
544 				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
545 					"has no page dir entry", page, area, address);
546 				continue;
547 			}
548 
549 			ThreadCPUPinner pinner(thread_get_current_thread());
550 
551 			page_table_entry* pt
552 				= (page_table_entry*)fPageMapper->GetPageTableAt(
553 					pd[index] & X86_PDE_ADDRESS_MASK);
554 			page_table_entry oldEntry
555 				= X86PagingMethod32Bit::ClearPageTableEntry(
556 					&pt[VADDR_TO_PTENT(address)]);
557 
558 			pinner.Unlock();
559 
560 			if ((oldEntry & X86_PTE_PRESENT) == 0) {
561 				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
562 					"has no page table entry", page, area, address);
563 				continue;
564 			}
565 
566 			// transfer the accessed/dirty flags to the page and invalidate
567 			// the mapping, if necessary
568 			if ((oldEntry & X86_PTE_ACCESSED) != 0) {
569 				page->accessed = true;
570 
571 				if (!deletingAddressSpace)
572 					InvalidatePage(address);
573 			}
574 
575 			if ((oldEntry & X86_PTE_DIRTY) != 0)
576 				page->modified = true;
577 
578 			if (pageFullyUnmapped) {
579 				DEBUG_PAGE_ACCESS_START(page);
580 
581 				if (cache->temporary)
582 					vm_page_set_state(page, PAGE_STATE_INACTIVE);
583 				else if (page->modified)
584 					vm_page_set_state(page, PAGE_STATE_MODIFIED);
585 				else
586 					vm_page_set_state(page, PAGE_STATE_CACHED);
587 
588 				DEBUG_PAGE_ACCESS_END(page);
589 			}
590 		}
591 
592 		fMapCount--;
593 	}
594 
595 	Flush();
596 		// flush explicitly, since we directly use the lock
597 
598 	locker.Unlock();
599 
600 	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
601 	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
602 		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
603 	while (vm_page_mapping* mapping = mappings.RemoveHead())
604 		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
605 }
606 
607 
608 status_t
609 X86VMTranslationMap32Bit::Query(addr_t va, phys_addr_t *_physical,
610 	uint32 *_flags)
611 {
612 	// default the flags to not present
613 	*_flags = 0;
614 	*_physical = 0;
615 
616 	int index = VADDR_TO_PDENT(va);
617 	page_directory_entry *pd = fPagingStructures->pgdir_virt;
618 	if ((pd[index] & X86_PDE_PRESENT) == 0) {
619 		// no page table here
620 		return B_OK;
621 	}
622 
623 	Thread* thread = thread_get_current_thread();
624 	ThreadCPUPinner pinner(thread);
625 
626 	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
627 		pd[index] & X86_PDE_ADDRESS_MASK);
628 	page_table_entry entry = pt[VADDR_TO_PTENT(va)];
629 
630 	*_physical = entry & X86_PDE_ADDRESS_MASK;
631 
632 	// read in the page state flags
633 	if ((entry & X86_PTE_USER) != 0) {
634 		*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
635 			| B_READ_AREA;
636 	}
637 
638 	*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
639 		| B_KERNEL_READ_AREA
640 		| ((entry & X86_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
641 		| ((entry & X86_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
642 		| ((entry & X86_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
643 
644 	pinner.Unlock();
645 
646 	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);
647 
648 	return B_OK;
649 }
650 
651 
652 status_t
653 X86VMTranslationMap32Bit::QueryInterrupt(addr_t va, phys_addr_t *_physical,
654 	uint32 *_flags)
655 {
656 	*_flags = 0;
657 	*_physical = 0;
658 
659 	int index = VADDR_TO_PDENT(va);
660 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
661 	if ((pd[index] & X86_PDE_PRESENT) == 0) {
662 		// no page table here
663 		return B_OK;
664 	}
665 
666 	// map page table entry
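	// (use the interrupt-safe entry point of the physical page mapper; this
	// method may run in contexts where pinning and locking aren't possible)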
667 	page_table_entry* pt = (page_table_entry*)X86PagingMethod32Bit::Method()
668 		->PhysicalPageMapper()->InterruptGetPageTableAt(
669 			pd[index] & X86_PDE_ADDRESS_MASK);
670 	page_table_entry entry = pt[VADDR_TO_PTENT(va)];
671 
672 	*_physical = entry & X86_PDE_ADDRESS_MASK;
673 
674 	// read in the page state flags
675 	if ((entry & X86_PTE_USER) != 0) {
676 		*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
677 			| B_READ_AREA;
678 	}
679 
680 	*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
681 		| B_KERNEL_READ_AREA
682 		| ((entry & X86_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
683 		| ((entry & X86_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
684 		| ((entry & X86_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
685 
686 	return B_OK;
687 }
688 
689 
690 status_t
691 X86VMTranslationMap32Bit::Protect(addr_t start, addr_t end, uint32 attributes,
692 	uint32 memoryType)
693 {
694 	start = ROUNDDOWN(start, B_PAGE_SIZE);
695 	if (start >= end)
696 		return B_OK;
697 
698 	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
699 		attributes);
700 
701 	// compute protection flags
702 	uint32 newProtectionFlags = 0;
703 	if ((attributes & B_USER_PROTECTION) != 0) {
704 		newProtectionFlags = X86_PTE_USER;
705 		if ((attributes & B_WRITE_AREA) != 0)
706 			newProtectionFlags |= X86_PTE_WRITABLE;
707 	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
708 		newProtectionFlags = X86_PTE_WRITABLE;
709 
710 	page_directory_entry *pd = fPagingStructures->pgdir_virt;
711 
712 	do {
713 		int index = VADDR_TO_PDENT(start);
714 		if ((pd[index] & X86_PDE_PRESENT) == 0) {
715 			// no page table here, move the start up to access the next page
716 			// table
717 			start = ROUNDUP(start + 1, kPageTableAlignment);
718 			continue;
719 		}
720 
721 		Thread* thread = thread_get_current_thread();
722 		ThreadCPUPinner pinner(thread);
723 
724 		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
725 			pd[index] & X86_PDE_ADDRESS_MASK);
726 
727 		for (index = VADDR_TO_PTENT(start); index < 1024 && start < end;
728 				index++, start += B_PAGE_SIZE) {
729 			page_table_entry entry = pt[index];
730 			if ((entry & X86_PTE_PRESENT) == 0) {
731 				// page mapping not valid
732 				continue;
733 			}
734 
735 			TRACE("protect_tmap: protect page 0x%lx\n", start);
736 
737 			// set the new protection flags -- we want to do that atomically,
738 			// without changing the accessed or dirty flag
739 			page_table_entry oldEntry;
740 			while (true) {
741 				oldEntry = X86PagingMethod32Bit::TestAndSetPageTableEntry(
742 					&pt[index],
743 					(entry & ~(X86_PTE_PROTECTION_MASK
744 							| X86_PTE_MEMORY_TYPE_MASK))
745 						| newProtectionFlags
746 						| X86PagingMethod32Bit::MemoryTypeToPageTableEntryFlags(
747 							memoryType),
748 					entry);
749 				if (oldEntry == entry)
750 					break;
751 				entry = oldEntry;
752 			}
753 
754 			if ((oldEntry & X86_PTE_ACCESSED) != 0) {
755 				// Note that we only need to invalidate the address if the
756 				// accessed flag was set, since only then could the entry have
757 				// been in any TLB.
758 				InvalidatePage(start);
759 			}
760 		}
761 	} while (start != 0 && start < end);
762 
763 	return B_OK;
764 }
765 
766 
767 status_t
768 X86VMTranslationMap32Bit::ClearFlags(addr_t va, uint32 flags)
769 {
770 	int index = VADDR_TO_PDENT(va);
771 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
772 	if ((pd[index] & X86_PDE_PRESENT) == 0) {
773 		// no page table here
774 		return B_OK;
775 	}
776 
777 	uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PTE_DIRTY : 0)
778 		| ((flags & PAGE_ACCESSED) ? X86_PTE_ACCESSED : 0);
779 
780 	Thread* thread = thread_get_current_thread();
781 	ThreadCPUPinner pinner(thread);
782 
783 	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
784 		pd[index] & X86_PDE_ADDRESS_MASK);
785 	index = VADDR_TO_PTENT(va);
786 
787 	// clear out the flags we've been requested to clear
788 	page_table_entry oldEntry
789 		= X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
790 			flagsToClear);
791 
792 	pinner.Unlock();
793 
794 	if ((oldEntry & flagsToClear) != 0)
795 		InvalidatePage(va);
796 
797 	return B_OK;
798 }
799 
800 
801 bool
802 X86VMTranslationMap32Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
803 	bool unmapIfUnaccessed, bool& _modified)
804 {
805 	ASSERT(address % B_PAGE_SIZE == 0);
806 
807 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
808 
809 	TRACE("X86VMTranslationMap32Bit::ClearAccessedAndModified(%#" B_PRIxADDR
810 		")\n", address);
811 
812 	RecursiveLocker locker(fLock);
813 
814 	int index = VADDR_TO_PDENT(address);
815 	if ((pd[index] & X86_PDE_PRESENT) == 0)
816 		return false;
817 
818 	ThreadCPUPinner pinner(thread_get_current_thread());
819 
820 	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
821 		pd[index] & X86_PDE_ADDRESS_MASK);
822 
823 	index = VADDR_TO_PTENT(address);
824 
825 	// perform the deed
826 	page_table_entry oldEntry;
827 
828 	if (unmapIfUnaccessed) {
829 		while (true) {
830 			oldEntry = pt[index];
831 			if ((oldEntry & X86_PTE_PRESENT) == 0) {
832 				// page mapping not valid
833 				return false;
834 			}
835 
836 			if (oldEntry & X86_PTE_ACCESSED) {
837 				// page was accessed -- just clear the flags
838 				oldEntry = X86PagingMethod32Bit::ClearPageTableEntryFlags(
839 					&pt[index], X86_PTE_ACCESSED | X86_PTE_DIRTY);
840 				break;
841 			}
842 
843 			// page hasn't been accessed -- unmap it
844 			if (X86PagingMethod32Bit::TestAndSetPageTableEntry(&pt[index], 0,
845 					oldEntry) == oldEntry) {
846 				break;
847 			}
848 
849 			// something changed -- check again
850 		}
851 	} else {
852 		oldEntry = X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
853 			X86_PTE_ACCESSED | X86_PTE_DIRTY);
854 	}
855 
856 	pinner.Unlock();
857 
858 	_modified = (oldEntry & X86_PTE_DIRTY) != 0;
859 
860 	if ((oldEntry & X86_PTE_ACCESSED) != 0) {
861 		// Note that we only need to invalidate the address if the
862 		// accessed flag was set, since only then could the entry have been
863 		// in any TLB.
864 		InvalidatePage(address);
865 
866 		Flush();
867 
868 		return true;
869 	}
870 
871 	if (!unmapIfUnaccessed)
872 		return false;
873 
874 	// We have unmapped the address. Do the "high level" stuff.
875 
876 	fMapCount--;
877 
878 	locker.Detach();
879 		// UnaccessedPageUnmapped() will unlock for us
880 
881 	UnaccessedPageUnmapped(area,
882 		(oldEntry & X86_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
883 
884 	return false;
885 }
886 
887 
888 X86PagingStructures*
889 X86VMTranslationMap32Bit::PagingStructures() const
890 {
891 	return fPagingStructures;
892 }
893