xref: /haiku/src/system/kernel/arch/ppc/paging/classic/PPCVMTranslationMapClassic.cpp (revision a5061ecec55353a5f394759473f1fd6df04890da)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 /*	(bonefish) Some explanatory words on how address translation is implemented
11 	for the 32 bit PPC architecture.
12 
13 	I use the address type nomenclature as used in the PPC architecture
14 	specs, i.e.
15 	- effective address: An address as used by program instructions, i.e.
16 	  that's what elsewhere (e.g. in the VM implementation) is called
17 	  virtual address.
18 	- virtual address: An intermediate address computed from the effective
19 	  address via the segment registers.
20 	- physical address: An address referring to physical storage.
21 
22 	The hardware translates an effective address to a physical address using
23 	either of two mechanisms: 1) Block Address Translation (BAT) or
24 	2) segment + page translation. The first mechanism does this directly
25 	using two sets (for data/instructions) of special purpose registers.
26 	The latter mechanism is of more relevance here, though:
27 
28 	effective address (32 bit):	     [ 0 ESID  3 | 4  PIX 19 | 20 Byte 31 ]
29 								           |           |            |
30 							     (segment registers)   |            |
31 									       |           |            |
32 	virtual address (52 bit):   [ 0      VSID 23 | 24 PIX 39 | 40 Byte 51 ]
33 	                            [ 0             VPN       39 | 40 Byte 51 ]
34 								                 |                  |
35 										   (page table)             |
36 											     |                  |
37 	physical address (32 bit):       [ 0        PPN       19 | 20 Byte 31 ]
38 
39 
40 	ESID: Effective Segment ID
41 	VSID: Virtual Segment ID
42 	PIX:  Page Index
43 	VPN:  Virtual Page Number
44 	PPN:  Physical Page Number
45 
46 
47 	Unlike on x86, we can't switch the context to another team simply by
48 	setting a register to another page directory, since we only have one
49 	page table containing both kernel and user address mappings. Instead we
50 	map the effective address spaces of the kernel and *all* teams into
51 	disjoint parts of the virtual address space (which fortunately is
52 	20 bits wider), and use the segment registers to select the section of
53 	the virtual address space for the current team. Half of the 16 segment
54 	registers (8 - 15) map the kernel addresses, so they remain unchanged.
55 
56 	The range of the virtual address space a team's effective address space
57 	is mapped to is defined by its PPCVMTranslationMap::fVSIDBase,
58 	which is the first of the 8 successive VSID values used for the team.
59 
60 	Which fVSIDBase values are already taken is defined by the set bits in
61 	the bitmap sVSIDBaseBitmap.
62 
63 
64 	TODO:
65 	* If we want to continue to use the OF services, we would need to add
66 	  its address mappings to the kernel space. Unfortunately some stuff
67 	  (especially RAM) is mapped in an address range outside the kernel
68 	  address space. We probably need to map those into each team's address
69 	  space as kernel read/write areas.
70 	* The current locking scheme is insufficient. The page table is a resource
71 	  shared by all teams. We need to synchronize access to it. Probably via a
72 	  spinlock.
73  */
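
/*	A worked example (the concrete numbers are illustrative): the kernel
	reserves VSID bases 0 and 8 (see Init() below), so the first user team
	created gets fVSIDBase 0x10. Its effective addresses
	0x00000000 - 0x7fffffff are translated through segment registers 0 - 7,
	which then hold VSIDs 0x10 - 0x17, while kernel addresses (0x80000000
	and up) always go through segment registers 8 - 15 holding VSIDs 8 - 15.
	A second team would get fVSIDBase 0x18, so the two teams occupy disjoint
	slices of the 52 bit virtual address space and can share the single
	hashed page table without colliding.
*/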
74 
75 #include "paging/classic/PPCVMTranslationMapClassic.h"
76 
77 #include <stdlib.h>
78 #include <string.h>
79 
80 #include <arch/cpu.h>
81 #include <arch_mmu.h>
82 #include <int.h>
83 #include <thread.h>
84 #include <slab/Slab.h>
85 #include <smp.h>
86 #include <util/AutoLock.h>
87 #include <util/queue.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_priv.h>
90 #include <vm/VMAddressSpace.h>
91 #include <vm/VMCache.h>
92 
93 #include "paging/classic/PPCPagingMethodClassic.h"
94 #include "paging/classic/PPCPagingStructuresClassic.h"
95 #include "generic_vm_physical_page_mapper.h"
96 #include "generic_vm_physical_page_ops.h"
97 #include "GenericVMPhysicalPageMapper.h"
98 
99 
100 //#define TRACE_PPC_VM_TRANSLATION_MAP_CLASSIC
101 #ifdef TRACE_PPC_VM_TRANSLATION_MAP_CLASSIC
102 #	define TRACE(x...) dprintf(x)
103 #else
104 #	define TRACE(x...) ;
105 #endif
106 
107 
108 // The VSID is a 24 bit number. The lower three bits are defined by the
109 // (effective) segment number, which leaves us with a 21 bit space of
110 // VSID bases (= 2 * 1024 * 1024), of which only the first MAX_VSID_BASES
// are actually handed out.
111 #define MAX_VSID_BASES (B_PAGE_SIZE * 8)
112 static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)];
113 static spinlock sVSIDBaseBitmapLock;
114 
115 #define VSID_BASE_SHIFT 3
116 #define VADDR_TO_VSID(vsidBase, vaddr) ((vsidBase) + ((vaddr) >> 28))
117 
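
// Illustrative sketch only (nothing in the kernel uses it): how an effective
// address breaks down under the scheme described in the header comment,
// using the macro above. The function and variable names are hypothetical.
#if 0
static void
example_decompose_effective_address(uint32 vsidBase, addr_t effectiveAddress)
{
	// bits 0-3 select one of the 16 segment registers; for a user segment
	// the register holds vsidBase + segment number
	uint32 virtualSegmentID = VADDR_TO_VSID(vsidBase, effectiveAddress);

	// bits 4-19 form the 16 bit page index, bits 20-31 the byte offset
	uint32 pageIndex = (effectiveAddress >> 12) & 0xffff;
	uint32 byteOffset = effectiveAddress & 0xfff;

	dprintf("ea 0x%lx: vsid 0x%lx, page index 0x%lx, byte offset 0x%lx\n",
		(unsigned long)effectiveAddress, (unsigned long)virtualSegmentID,
		(unsigned long)pageIndex, (unsigned long)byteOffset);
}
#endif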
118 
119 // #pragma mark -
120 
121 
122 PPCVMTranslationMapClassic::PPCVMTranslationMapClassic()
123 	:
124 	fPagingStructures(NULL)
125 {
126 }
127 
128 
129 PPCVMTranslationMapClassic::~PPCVMTranslationMapClassic()
130 {
131 	if (fPagingStructures == NULL)
132 		return;
133 
134 #if 0//X86
135 	if (fPageMapper != NULL)
136 		fPageMapper->Delete();
137 #endif
138 
139 	if (fMapCount > 0) {
140 		panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n",
141 			this, fMapCount);
142 	}
143 
144 	// mark the vsid base not in use
145 	int baseBit = fVSIDBase >> VSID_BASE_SHIFT;
146 	atomic_and((int32 *)&sVSIDBaseBitmap[baseBit / 32],
147 			~(1 << (baseBit % 32)));
148 
149 #if 0//X86
150 	if (fPagingStructures->pgdir_virt != NULL) {
151 		// cycle through and free all of the user space pgtables
152 		for (uint32 i = VADDR_TO_PDENT(USER_BASE);
153 				i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
154 			if ((fPagingStructures->pgdir_virt[i] & PPC_PDE_PRESENT) != 0) {
155 				addr_t address = fPagingStructures->pgdir_virt[i]
156 					& PPC_PDE_ADDRESS_MASK;
157 				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
158 				if (!page)
159 					panic("destroy_tmap: didn't find pgtable page\n");
160 				DEBUG_PAGE_ACCESS_START(page);
161 				vm_page_set_state(page, PAGE_STATE_FREE);
162 			}
163 		}
164 	}
165 #endif
166 
167 	fPagingStructures->RemoveReference();
168 }
169 
170 
171 status_t
172 PPCVMTranslationMapClassic::Init(bool kernel)
173 {
174 	TRACE("PPCVMTranslationMapClassic::Init()\n");
175 
176 	PPCVMTranslationMap::Init(kernel);
177 
178 	cpu_status state = disable_interrupts();
179 	acquire_spinlock(&sVSIDBaseBitmapLock);
180 
181 	// allocate a VSID base for this one
182 	if (kernel) {
183 		// The boot loader has set up the segment registers for identical
184 		// mapping. Two VSID bases are reserved for the kernel: 0 and 8. The
185 		// latter one for mapping the kernel address space (0x80000000...), the
186 		// former one for the lower addresses required by the Open Firmware
187 		// services.
188 		fVSIDBase = 0;
189 		sVSIDBaseBitmap[0] |= 0x3;
190 	} else {
191 		int i = 0;
192 
193 		while (i < MAX_VSID_BASES) {
194 			if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
195 				i += 32;
196 				continue;
197 			}
198 			if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
199 				// we found it
200 				sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
201 				break;
202 			}
203 			i++;
204 		}
205 		if (i >= MAX_VSID_BASES)
206 			panic("vm_translation_map_create: out of VSID bases\n");
207 		fVSIDBase = i << VSID_BASE_SHIFT;
208 	}
209 
210 	release_spinlock(&sVSIDBaseBitmapLock);
211 	restore_interrupts(state);
212 
213 	fPagingStructures = new(std::nothrow) PPCPagingStructuresClassic;
214 	if (fPagingStructures == NULL)
215 		return B_NO_MEMORY;
216 
217 	PPCPagingMethodClassic* method = PPCPagingMethodClassic::Method();
218 
219 	if (!kernel) {
220 		// user
221 #if 0//X86
222 		// allocate a physical page mapper
223 		status_t error = method->PhysicalPageMapper()
224 			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
225 		if (error != B_OK)
226 			return error;
227 #endif
228 #if 0//X86
229 		// allocate the page directory
230 		page_directory_entry* virtualPageDir = (page_directory_entry*)memalign(
231 			B_PAGE_SIZE, B_PAGE_SIZE);
232 		if (virtualPageDir == NULL)
233 			return B_NO_MEMORY;
234 
235 		// look up the page directory's physical address
236 		phys_addr_t physicalPageDir;
237 		vm_get_page_mapping(VMAddressSpace::KernelID(),
238 			(addr_t)virtualPageDir, &physicalPageDir);
239 #endif
240 
241 		fPagingStructures->Init(/*NULL, 0,
242 			method->KernelVirtualPageDirectory()*/method->PageTable());
243 	} else {
244 		// kernel
245 #if 0//X86
246 		// get the physical page mapper
247 		fPageMapper = method->KernelPhysicalPageMapper();
248 #endif
249 
250 		// we already know the kernel pgdir mapping
251 		fPagingStructures->Init(/*method->KernelVirtualPageDirectory(),
252 			method->KernelPhysicalPageDirectory(), NULL*/method->PageTable());
253 	}
254 
255 	return B_OK;
256 }
257 
258 
259 void
260 PPCVMTranslationMapClassic::ChangeASID()
261 {
262 // this code depends on the kernel being at 0x80000000, fix if we change that
263 #if KERNEL_BASE != 0x80000000
264 #error fix me
265 #endif
266 	int vsidBase = VSIDBase();
267 
268 	isync();	// synchronize context
269 	asm("mtsr	0,%0" : : "g"(vsidBase));
270 	asm("mtsr	1,%0" : : "g"(vsidBase + 1));
271 	asm("mtsr	2,%0" : : "g"(vsidBase + 2));
272 	asm("mtsr	3,%0" : : "g"(vsidBase + 3));
273 	asm("mtsr	4,%0" : : "g"(vsidBase + 4));
274 	asm("mtsr	5,%0" : : "g"(vsidBase + 5));
275 	asm("mtsr	6,%0" : : "g"(vsidBase + 6));
276 	asm("mtsr	7,%0" : : "g"(vsidBase + 7));
277 	isync();	// synchronize context
278 }
279 
280 
281 page_table_entry *
282 PPCVMTranslationMapClassic::LookupPageTableEntry(addr_t virtualAddress)
283 {
284 	// lookup the vsid based off the va
285 	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);
286 
287 //	dprintf("vm_translation_map.lookup_page_table_entry: vsid %ld, va 0x%lx\n", virtualSegmentID, virtualAddress);
288 
289 	PPCPagingMethodClassic* m = PPCPagingMethodClassic::Method();
290 
291 	// Search for the page table entry using the primary hash value
292 
293 	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
294 	page_table_entry_group *group = &(m->PageTable())[hash & m->PageTableHashMask()];
295 
296 	for (int i = 0; i < 8; i++) {
297 		page_table_entry *entry = &group->entry[i];
298 
299 		if (entry->virtual_segment_id == virtualSegmentID
300 			&& entry->secondary_hash == false
301 			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
302 			return entry;
303 	}
304 
305 	// didn't find it, try the secondary hash value
306 
307 	hash = page_table_entry::SecondaryHash(hash);
308 	group = &(m->PageTable())[hash & m->PageTableHashMask()];
309 
310 	for (int i = 0; i < 8; i++) {
311 		page_table_entry *entry = &group->entry[i];
312 
313 		if (entry->virtual_segment_id == virtualSegmentID
314 			&& entry->secondary_hash == true
315 			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
316 			return entry;
317 	}
318 
319 	return NULL;
320 }
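

// Illustration only: a sketch of the hash computation that
// page_table_entry::PrimaryHash() and SecondaryHash() (defined elsewhere) are
// expected to perform for the classic hashed page table, following the 32 bit
// PowerPC architecture. This is not the authoritative implementation.
#if 0
static uint32
example_primary_hash(uint32 virtualSegmentID, addr_t virtualAddress)
{
	// the low 19 bits of the VSID are XORed with the 16 bit page index
	uint32 pageIndex = (virtualAddress >> 12) & 0xffff;
	return (virtualSegmentID & 0x7ffff) ^ pageIndex;
}

static uint32
example_secondary_hash(uint32 primaryHash)
{
	// the secondary hash is the one's complement of the primary hash;
	// callers mask the result with the page table hash mask
	return ~primaryHash;
}
#endif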
321 
322 
323 bool
324 PPCVMTranslationMapClassic::RemovePageTableEntry(addr_t virtualAddress)
325 {
326 	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
327 	if (entry == NULL)
328 		return false;
329 
330 	entry->valid = 0;
331 	ppc_sync();
332 	tlbie(virtualAddress);
333 	eieio();
334 	tlbsync();
335 	ppc_sync();
336 
337 	return true;
338 }
339 
340 
341 size_t
342 PPCVMTranslationMapClassic::MaxPagesNeededToMap(addr_t start, addr_t end) const
343 {
344 	// The classic paging method uses a single, fixed-size hashed page table
	// set up at boot time, so mapping never requires allocating additional
	// page table pages.
	return 0;
345 }
346 
347 
348 status_t
349 PPCVMTranslationMapClassic::Map(addr_t virtualAddress,
350 	phys_addr_t physicalAddress, uint32 attributes,
351 	uint32 memoryType, vm_page_reservation* reservation)
352 {
353 	TRACE("map_tmap: entry pa 0x%" B_PRIxPHYSADDR " va 0x%" B_PRIxADDR "\n",
		physicalAddress, virtualAddress);
354 
355 	// lookup the vsid based off the va
356 	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);
357 	uint32 protection = 0;
358 
359 	// ToDo: check this
360 	// all kernel mappings are R/W to supervisor code
361 	if (attributes & (B_READ_AREA | B_WRITE_AREA))
362 		protection = (attributes & B_WRITE_AREA) ? PTE_READ_WRITE : PTE_READ_ONLY;
363 
364 	//dprintf("vm_translation_map.map_tmap: vsid %d, pa 0x%lx, va 0x%lx\n", vsid, pa, va);
365 
366 	PPCPagingMethodClassic* m = PPCPagingMethodClassic::Method();
367 
368 	// Search for a free page table slot using the primary hash value
369 	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
370 	page_table_entry_group *group = &(m->PageTable())[hash & m->PageTableHashMask()];
371 
372 	for (int i = 0; i < 8; i++) {
373 		page_table_entry *entry = &group->entry[i];
374 
375 		if (entry->valid)
376 			continue;
377 
378 		m->FillPageTableEntry(entry, virtualSegmentID, virtualAddress,
379 			physicalAddress, protection, memoryType, false);
380 		fMapCount++;
381 		return B_OK;
382 	}
383 
384 	// Didn't find one, try the secondary hash value
385 
386 	hash = page_table_entry::SecondaryHash(hash);
387 	group = &(m->PageTable())[hash & m->PageTableHashMask()];
388 
389 	for (int i = 0; i < 8; i++) {
390 		page_table_entry *entry = &group->entry[i];
391 
392 		if (entry->valid)
393 			continue;
394 
395 		m->FillPageTableEntry(entry, virtualSegmentID, virtualAddress,
396 			physicalAddress, protection, memoryType, false);
397 		fMapCount++;
398 		return B_OK;
399 	}
400 
401 	panic("vm_translation_map.map_tmap: hash table full\n");
402 	return B_ERROR;
403 
404 #if 0//X86
405 /*
406 	dprintf("pgdir at 0x%x\n", pgdir);
407 	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
408 	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
409 	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
410 	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
411 	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
412 */
413 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
414 
415 	// check to see if a page table exists for this range
416 	uint32 index = VADDR_TO_PDENT(va);
417 	if ((pd[index] & PPC_PDE_PRESENT) == 0) {
418 		phys_addr_t pgtable;
419 		vm_page *page;
420 
421 		// we need to allocate a pgtable
422 		page = vm_page_allocate_page(reservation,
423 			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
424 
425 		DEBUG_PAGE_ACCESS_END(page);
426 
427 		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
428 
429 		TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);
430 
431 		// put it in the pgdir
432 		PPCPagingMethodClassic::PutPageTableInPageDir(&pd[index], pgtable,
433 			attributes
434 				| ((attributes & B_USER_PROTECTION) != 0
435 						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
436 
437 		// update any other page directories, if it maps kernel space
438 		if (index >= FIRST_KERNEL_PGDIR_ENT
439 			&& index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
440 			PPCPagingStructuresClassic::UpdateAllPageDirs(index, pd[index]);
441 		}
442 
443 		fMapCount++;
444 	}
445 
446 	// now, fill in the pentry
447 	Thread* thread = thread_get_current_thread();
448 	ThreadCPUPinner pinner(thread);
449 
450 	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
451 		pd[index] & PPC_PDE_ADDRESS_MASK);
452 	index = VADDR_TO_PTENT(va);
453 
454 	ASSERT_PRINT((pt[index] & PPC_PTE_PRESENT) == 0,
455 		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
456 		pt[index]);
457 
458 	PPCPagingMethodClassic::PutPageTableEntryInTable(&pt[index], pa, attributes,
459 		memoryType, fIsKernelMap);
460 
461 	pinner.Unlock();
462 
463 	// Note: We don't need to invalidate the TLB for this address, as previously
464 	// the entry was not present and the TLB doesn't cache those entries.
465 
466 	fMapCount++;
467 
468 	return 0;
469 #endif
470 }
471 
472 
473 status_t
474 PPCVMTranslationMapClassic::Unmap(addr_t start, addr_t end)
475 {
476 	page_table_entry *entry;
477 
478 	start = ROUNDDOWN(start, B_PAGE_SIZE);
479 	end = ROUNDUP(end, B_PAGE_SIZE);
480 
481 	if (start >= end)
482 		return B_OK;
483 
484 	TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);
485 
486 //	dprintf("vm_translation_map.unmap_tmap: start 0x%lx, end 0x%lx\n", start, end);
487 
488 	while (start < end) {
489 		if (RemovePageTableEntry(start))
490 			fMapCount--;
491 
492 		start += B_PAGE_SIZE;
493 	}
494 
495 	return B_OK;
496 
497 #if 0//X86
498 
499 	start = ROUNDDOWN(start, B_PAGE_SIZE);
500 	if (start >= end)
501 		return B_OK;
502 
503 	TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);
504 
505 	page_directory_entry *pd = fPagingStructures->pgdir_virt;
506 
507 	do {
508 		int index = VADDR_TO_PDENT(start);
509 		if ((pd[index] & PPC_PDE_PRESENT) == 0) {
510 			// no page table here, move the start up to access the next page
511 			// table
512 			start = ROUNDUP(start + 1, kPageTableAlignment);
513 			continue;
514 		}
515 
516 		Thread* thread = thread_get_current_thread();
517 		ThreadCPUPinner pinner(thread);
518 
519 		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
520 			pd[index] & PPC_PDE_ADDRESS_MASK);
521 
522 		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
523 				index++, start += B_PAGE_SIZE) {
524 			if ((pt[index] & PPC_PTE_PRESENT) == 0) {
525 				// page mapping not valid
526 				continue;
527 			}
528 
529 			TRACE("unmap_tmap: removing page 0x%lx\n", start);
530 
531 			page_table_entry oldEntry
532 				= PPCPagingMethodClassic::ClearPageTableEntryFlags(&pt[index],
533 					PPC_PTE_PRESENT);
534 			fMapCount--;
535 
536 			if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
537 				// Note that we only need to invalidate the address if the
538 				// accessed flag was set, since only then the entry could have
539 				// been in any TLB.
540 				InvalidatePage(start);
541 			}
542 		}
543 	} while (start != 0 && start < end);
544 
545 	return B_OK;
546 #endif
547 }
548 
549 
550 status_t
551 PPCVMTranslationMapClassic::RemapAddressRange(addr_t *_virtualAddress,
552 	size_t size, bool unmap)
553 {
554 	addr_t virtualAddress = ROUNDDOWN(*_virtualAddress, B_PAGE_SIZE);
555 	size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);
556 
557 	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();
558 
559 	// reserve space in the address space
560 	void *newAddress = NULL;
561 	status_t error = vm_reserve_address_range(addressSpace->ID(), &newAddress,
562 		B_ANY_KERNEL_ADDRESS, size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
563 	if (error != B_OK)
564 		return error;
565 
566 	// get the area's first physical page
567 	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
568 	if (!entry)
569 		return B_ERROR;
570 	phys_addr_t physicalBase = (phys_addr_t)entry->physical_page_number << 12;
571 
572 	// map the pages
573 	error = ppc_map_address_range((addr_t)newAddress, physicalBase, size);
574 	if (error != B_OK)
575 		return error;
576 
577 	*_virtualAddress = (addr_t)newAddress;
578 
579 	// unmap the old pages
580 	if (unmap)
581 		ppc_unmap_address_range(virtualAddress, size);
582 
583 	return B_OK;
584 }
585 
586 
587 status_t
588 PPCVMTranslationMapClassic::DebugMarkRangePresent(addr_t start, addr_t end,
589 	bool markPresent)
590 {
591 	panic("%s: UNIMPLEMENTED", __FUNCTION__);
592 	return B_ERROR;
593 #if 0//X86
594 	start = ROUNDDOWN(start, B_PAGE_SIZE);
595 	if (start >= end)
596 		return B_OK;
597 
598 	page_directory_entry *pd = fPagingStructures->pgdir_virt;
599 
600 	do {
601 		int index = VADDR_TO_PDENT(start);
602 		if ((pd[index] & PPC_PDE_PRESENT) == 0) {
603 			// no page table here, move the start up to access the next page
604 			// table
605 			start = ROUNDUP(start + 1, kPageTableAlignment);
606 			continue;
607 		}
608 
609 		Thread* thread = thread_get_current_thread();
610 		ThreadCPUPinner pinner(thread);
611 
612 		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
613 			pd[index] & PPC_PDE_ADDRESS_MASK);
614 
615 		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
616 				index++, start += B_PAGE_SIZE) {
617 			if ((pt[index] & PPC_PTE_PRESENT) == 0) {
618 				if (!markPresent)
619 					continue;
620 
621 				PPCPagingMethodClassic::SetPageTableEntryFlags(&pt[index],
622 					PPC_PTE_PRESENT);
623 			} else {
624 				if (markPresent)
625 					continue;
626 
627 				page_table_entry oldEntry
628 					= PPCPagingMethodClassic::ClearPageTableEntryFlags(&pt[index],
629 						PPC_PTE_PRESENT);
630 
631 				if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
632 					// Note that we only need to invalidate the address if the
633 					// accessed flag was set, since only then the entry could
634 					// have been in any TLB.
635 					InvalidatePage(start);
636 				}
637 			}
638 		}
639 	} while (start != 0 && start < end);
640 
641 	return B_OK;
642 #endif
643 }
644 
645 
646 /*!	Caller must have locked the cache of the page to be unmapped.
647 	This object shouldn't be locked.
648 */
649 status_t
650 PPCVMTranslationMapClassic::UnmapPage(VMArea* area, addr_t address,
651 	bool updatePageQueue)
652 {
653 	ASSERT(address % B_PAGE_SIZE == 0);
654 
655 	RecursiveLocker locker(fLock);
656 
657 	if (area->cache_type == CACHE_TYPE_DEVICE) {
658 		if (!RemovePageTableEntry(address))
659 			return B_ENTRY_NOT_FOUND;
660 
661 		fMapCount--;
662 		return B_OK;
663 	}
664 
665 	page_table_entry* entry = LookupPageTableEntry(address);
666 	if (entry == NULL)
667 		return B_ENTRY_NOT_FOUND;
668 
669 	page_num_t pageNumber = entry->physical_page_number;
670 	bool accessed = entry->referenced;
671 	bool modified = entry->changed;
672 
673 	RemovePageTableEntry(address);
674 
675 	fMapCount--;
676 
677 	locker.Detach();
678 		// PageUnmapped() will unlock for us
679 
680 	PageUnmapped(area, pageNumber, accessed, modified, updatePageQueue);
681 
682 	return B_OK;
683 
684 #if 0//X86
685 
686 	ASSERT(address % B_PAGE_SIZE == 0);
687 
688 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
689 
690 	TRACE("PPCVMTranslationMapClassic::UnmapPage(%#" B_PRIxADDR ")\n", address);
691 
692 	RecursiveLocker locker(fLock);
693 
694 	int index = VADDR_TO_PDENT(address);
695 	if ((pd[index] & PPC_PDE_PRESENT) == 0)
696 		return B_ENTRY_NOT_FOUND;
697 
698 	ThreadCPUPinner pinner(thread_get_current_thread());
699 
700 	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
701 		pd[index] & PPC_PDE_ADDRESS_MASK);
702 
703 	index = VADDR_TO_PTENT(address);
704 	page_table_entry oldEntry = PPCPagingMethodClassic::ClearPageTableEntry(
705 		&pt[index]);
706 
707 	pinner.Unlock();
708 
709 	if ((oldEntry & PPC_PTE_PRESENT) == 0) {
710 		// page mapping not valid
711 		return B_ENTRY_NOT_FOUND;
712 	}
713 
714 	fMapCount--;
715 
716 	if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
717 		// Note that we only need to invalidate the address if the
718 		// accessed flag was set, since only then the entry could have been
719 		// in any TLB.
720 		InvalidatePage(address);
721 		Flush();
722 
723 		// NOTE: Between clearing the page table entry and Flush() other
724 		// processors (actually even this processor with another thread of the
725 		// same team) could still access the page in question via their cached
726 		// entry. We can obviously lose a modified flag in this case, with the
727 		// effect that the page looks unmodified (and might thus be recycled),
728 		// but is actually modified.
729 		// In most cases this is harmless, but for vm_remove_all_page_mappings()
730 		// this is actually a problem.
731 		// Interestingly FreeBSD seems to ignore this problem as well
732 		// (cf. pmap_remove_all()), unless I've missed something.
733 	}
734 
735 	locker.Detach();
736 		// PageUnmapped() will unlock for us
737 
738 	PageUnmapped(area, (oldEntry & PPC_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
739 		(oldEntry & PPC_PTE_ACCESSED) != 0, (oldEntry & PPC_PTE_DIRTY) != 0,
740 		updatePageQueue);
741 
742 	return B_OK;
743 #endif
744 }
745 
746 
747 void
748 PPCVMTranslationMapClassic::UnmapPages(VMArea* area, addr_t base, size_t size,
749 	bool updatePageQueue)
750 {
751 	panic("%s: UNIMPLEMENTED", __FUNCTION__);
752 #if 0//X86
753 	if (size == 0)
754 		return;
755 
756 	addr_t start = base;
757 	addr_t end = base + size - 1;
758 
759 	TRACE("PPCVMTranslationMapClassic::UnmapPages(%p, %#" B_PRIxADDR ", %#"
760 		B_PRIxADDR ")\n", area, start, end);
761 
762 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
763 
764 	VMAreaMappings queue;
765 
766 	RecursiveLocker locker(fLock);
767 
768 	do {
769 		int index = VADDR_TO_PDENT(start);
770 		if ((pd[index] & PPC_PDE_PRESENT) == 0) {
771 			// no page table here, move the start up to access the next page
772 			// table
773 			start = ROUNDUP(start + 1, kPageTableAlignment);
774 			continue;
775 		}
776 
777 		Thread* thread = thread_get_current_thread();
778 		ThreadCPUPinner pinner(thread);
779 
780 		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
781 			pd[index] & PPC_PDE_ADDRESS_MASK);
782 
783 		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
784 				index++, start += B_PAGE_SIZE) {
785 			page_table_entry oldEntry
786 				= PPCPagingMethodClassic::ClearPageTableEntry(&pt[index]);
787 			if ((oldEntry & PPC_PTE_PRESENT) == 0)
788 				continue;
789 
790 			fMapCount--;
791 
792 			if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
793 				// Note that we only need to invalidate the address if the
794 				// accessed flag was set, since only then the entry could have
795 				// been in any TLB.
796 				InvalidatePage(start);
797 			}
798 
799 			if (area->cache_type != CACHE_TYPE_DEVICE) {
800 				// get the page
801 				vm_page* page = vm_lookup_page(
802 					(oldEntry & PPC_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
803 				ASSERT(page != NULL);
804 
805 				DEBUG_PAGE_ACCESS_START(page);
806 
807 				// transfer the accessed/dirty flags to the page
808 				if ((oldEntry & PPC_PTE_ACCESSED) != 0)
809 					page->accessed = true;
810 				if ((oldEntry & PPC_PTE_DIRTY) != 0)
811 					page->modified = true;
812 
813 				// remove the mapping object/decrement the wired_count of the
814 				// page
815 				if (area->wiring == B_NO_LOCK) {
816 					vm_page_mapping* mapping = NULL;
817 					vm_page_mappings::Iterator iterator
818 						= page->mappings.GetIterator();
819 					while ((mapping = iterator.Next()) != NULL) {
820 						if (mapping->area == area)
821 							break;
822 					}
823 
824 					ASSERT(mapping != NULL);
825 
826 					area->mappings.Remove(mapping);
827 					page->mappings.Remove(mapping);
828 					queue.Add(mapping);
829 				} else
830 					page->DecrementWiredCount();
831 
832 				if (!page->IsMapped()) {
833 					atomic_add(&gMappedPagesCount, -1);
834 
835 					if (updatePageQueue) {
836 						if (page->Cache()->temporary)
837 							vm_page_set_state(page, PAGE_STATE_INACTIVE);
838 						else if (page->modified)
839 							vm_page_set_state(page, PAGE_STATE_MODIFIED);
840 						else
841 							vm_page_set_state(page, PAGE_STATE_CACHED);
842 					}
843 				}
844 
845 				DEBUG_PAGE_ACCESS_END(page);
846 			}
847 		}
848 
849 		Flush();
850 			// flush explicitly, since we directly use the lock
851 	} while (start != 0 && start < end);
852 
853 	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
854 	// really critical here, as in all cases this method is used, the unmapped
855 	// area range is unmapped for good (resized/cut) and the pages will likely
856 	// be freed.
857 
858 	locker.Unlock();
859 
860 	// free removed mappings
861 	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
862 	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
863 		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
864 	while (vm_page_mapping* mapping = queue.RemoveHead())
865 		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
866 #endif
867 }
868 
869 
870 void
871 PPCVMTranslationMapClassic::UnmapArea(VMArea* area, bool deletingAddressSpace,
872 	bool ignoreTopCachePageFlags)
873 {
874 	panic("%s: UNIMPLEMENTED", __FUNCTION__);
875 #if 0//X86
876 	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
877 		PPCVMTranslationMapClassic::UnmapPages(area, area->Base(), area->Size(),
878 			true);
879 		return;
880 	}
881 
882 	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
883 
884 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
885 
886 	RecursiveLocker locker(fLock);
887 
888 	VMAreaMappings mappings;
889 	mappings.MoveFrom(&area->mappings);
890 
891 	for (VMAreaMappings::Iterator it = mappings.GetIterator();
892 			vm_page_mapping* mapping = it.Next();) {
893 		vm_page* page = mapping->page;
894 		page->mappings.Remove(mapping);
895 
896 		VMCache* cache = page->Cache();
897 
898 		bool pageFullyUnmapped = false;
899 		if (!page->IsMapped()) {
900 			atomic_add(&gMappedPagesCount, -1);
901 			pageFullyUnmapped = true;
902 		}
903 
904 		if (unmapPages || cache != area->cache) {
905 			addr_t address = area->Base()
906 				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);
907 
908 			int index = VADDR_TO_PDENT(address);
909 			if ((pd[index] & PPC_PDE_PRESENT) == 0) {
910 				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
911 					"has no page dir entry", page, area, address);
912 				continue;
913 			}
914 
915 			ThreadCPUPinner pinner(thread_get_current_thread());
916 
917 			page_table_entry* pt
918 				= (page_table_entry*)fPageMapper->GetPageTableAt(
919 					pd[index] & PPC_PDE_ADDRESS_MASK);
920 			page_table_entry oldEntry
921 				= PPCPagingMethodClassic::ClearPageTableEntry(
922 					&pt[VADDR_TO_PTENT(address)]);
923 
924 			pinner.Unlock();
925 
926 			if ((oldEntry & PPC_PTE_PRESENT) == 0) {
927 				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
928 					"has no page table entry", page, area, address);
929 				continue;
930 			}
931 
932 			// transfer the accessed/dirty flags to the page and invalidate
933 			// the mapping, if necessary
934 			if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
935 				page->accessed = true;
936 
937 				if (!deletingAddressSpace)
938 					InvalidatePage(address);
939 			}
940 
941 			if ((oldEntry & PPC_PTE_DIRTY) != 0)
942 				page->modified = true;
943 
944 			if (pageFullyUnmapped) {
945 				DEBUG_PAGE_ACCESS_START(page);
946 
947 				if (cache->temporary)
948 					vm_page_set_state(page, PAGE_STATE_INACTIVE);
949 				else if (page->modified)
950 					vm_page_set_state(page, PAGE_STATE_MODIFIED);
951 				else
952 					vm_page_set_state(page, PAGE_STATE_CACHED);
953 
954 				DEBUG_PAGE_ACCESS_END(page);
955 			}
956 		}
957 
958 		fMapCount--;
959 	}
960 
961 	Flush();
962 		// flush explicitly, since we directly use the lock
963 
964 	locker.Unlock();
965 
966 	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
967 	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
968 		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
969 	while (vm_page_mapping* mapping = mappings.RemoveHead())
970 		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
971 #endif
972 }
973 
974 
975 status_t
976 PPCVMTranslationMapClassic::Query(addr_t va, phys_addr_t *_outPhysical,
977 	uint32 *_outFlags)
978 {
979 	page_table_entry *entry;
980 
981 	// default the flags to not present
982 	*_outFlags = 0;
983 	*_outPhysical = 0;
984 
985 	entry = LookupPageTableEntry(va);
986 	if (entry == NULL)
987 		return B_NO_ERROR;
988 
989 	// ToDo: check this!
990 	if (IS_KERNEL_ADDRESS(va)) {
991 		*_outFlags |= B_KERNEL_READ_AREA
			| (entry->page_protection == PTE_READ_ONLY
				? 0 : B_KERNEL_WRITE_AREA);
992 	} else {
993 		*_outFlags |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_READ_AREA
			| (entry->page_protection == PTE_READ_ONLY ? 0 : B_WRITE_AREA);
	}
994 
995 	*_outFlags |= entry->changed ? PAGE_MODIFIED : 0;
996 	*_outFlags |= entry->referenced ? PAGE_ACCESSED : 0;
997 	*_outFlags |= entry->valid ? PAGE_PRESENT : 0;
998 
999 	*_outPhysical = entry->physical_page_number * B_PAGE_SIZE;
1000 
1001 	return B_OK;
1002 
1003 #if 0//X86
1004 	// default the flags to not present
1005 	*_flags = 0;
1006 	*_physical = 0;
1007 
1008 	int index = VADDR_TO_PDENT(va);
1009 	page_directory_entry *pd = fPagingStructures->pgdir_virt;
1010 	if ((pd[index] & PPC_PDE_PRESENT) == 0) {
1011 		// no pagetable here
1012 		return B_OK;
1013 	}
1014 
1015 	Thread* thread = thread_get_current_thread();
1016 	ThreadCPUPinner pinner(thread);
1017 
1018 	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
1019 		pd[index] & PPC_PDE_ADDRESS_MASK);
1020 	page_table_entry entry = pt[VADDR_TO_PTENT(va)];
1021 
1022 	*_physical = entry & PPC_PDE_ADDRESS_MASK;
1023 
1024 	// read in the page state flags
1025 	if ((entry & PPC_PTE_USER) != 0) {
1026 		*_flags |= ((entry & PPC_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
1027 			| B_READ_AREA;
1028 	}
1029 
1030 	*_flags |= ((entry & PPC_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
1031 		| B_KERNEL_READ_AREA
1032 		| ((entry & PPC_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
1033 		| ((entry & PPC_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
1034 		| ((entry & PPC_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
1035 
1036 	pinner.Unlock();
1037 
1038 	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);
1039 
1040 	return B_OK;
1041 #endif
1042 }
1043 
1044 
1045 status_t
1046 PPCVMTranslationMapClassic::QueryInterrupt(addr_t virtualAddress,
1047 	phys_addr_t *_physicalAddress, uint32 *_flags)
1048 {
1049 	return PPCVMTranslationMapClassic::Query(virtualAddress,
		_physicalAddress, _flags);
1050 
1051 #if 0//X86
1052 	*_flags = 0;
1053 	*_physical = 0;
1054 
1055 	int index = VADDR_TO_PDENT(va);
1056 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
1057 	if ((pd[index] & PPC_PDE_PRESENT) == 0) {
1058 		// no pagetable here
1059 		return B_OK;
1060 	}
1061 
1062 	// map page table entry
1063 	page_table_entry* pt = (page_table_entry*)PPCPagingMethodClassic::Method()
1064 		->PhysicalPageMapper()->InterruptGetPageTableAt(
1065 			pd[index] & PPC_PDE_ADDRESS_MASK);
1066 	page_table_entry entry = pt[VADDR_TO_PTENT(va)];
1067 
1068 	*_physical = entry & PPC_PDE_ADDRESS_MASK;
1069 
1070 	// read in the page state flags
1071 	if ((entry & PPC_PTE_USER) != 0) {
1072 		*_flags |= ((entry & PPC_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
1073 			| B_READ_AREA;
1074 	}
1075 
1076 	*_flags |= ((entry & PPC_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
1077 		| B_KERNEL_READ_AREA
1078 		| ((entry & PPC_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
1079 		| ((entry & PPC_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
1080 		| ((entry & PPC_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
1081 
1082 	return B_OK;
1083 #endif
1084 }
1085 
1086 
1087 status_t
1088 PPCVMTranslationMapClassic::Protect(addr_t start, addr_t end, uint32 attributes,
1089 	uint32 memoryType)
1090 {
1091 	// XXX finish
1092 	return B_ERROR;
1093 #if 0//X86
1094 	start = ROUNDDOWN(start, B_PAGE_SIZE);
1095 	if (start >= end)
1096 		return B_OK;
1097 
1098 	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
1099 		attributes);
1100 
1101 	// compute protection flags
1102 	uint32 newProtectionFlags = 0;
1103 	if ((attributes & B_USER_PROTECTION) != 0) {
1104 		newProtectionFlags = PPC_PTE_USER;
1105 		if ((attributes & B_WRITE_AREA) != 0)
1106 			newProtectionFlags |= PPC_PTE_WRITABLE;
1107 	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
1108 		newProtectionFlags = PPC_PTE_WRITABLE;
1109 
1110 	page_directory_entry *pd = fPagingStructures->pgdir_virt;
1111 
1112 	do {
1113 		int index = VADDR_TO_PDENT(start);
1114 		if ((pd[index] & PPC_PDE_PRESENT) == 0) {
1115 			// no page table here, move the start up to access the next page
1116 			// table
1117 			start = ROUNDUP(start + 1, kPageTableAlignment);
1118 			continue;
1119 		}
1120 
1121 		Thread* thread = thread_get_current_thread();
1122 		ThreadCPUPinner pinner(thread);
1123 
1124 		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
1125 			pd[index] & PPC_PDE_ADDRESS_MASK);
1126 
1127 		for (index = VADDR_TO_PTENT(start); index < 1024 && start < end;
1128 				index++, start += B_PAGE_SIZE) {
1129 			page_table_entry entry = pt[index];
1130 			if ((entry & PPC_PTE_PRESENT) == 0) {
1131 				// page mapping not valid
1132 				continue;
1133 			}
1134 
1135 			TRACE("protect_tmap: protect page 0x%lx\n", start);
1136 
1137 			// set the new protection flags -- we want to do that atomically,
1138 			// without changing the accessed or dirty flag
1139 			page_table_entry oldEntry;
1140 			while (true) {
1141 				oldEntry = PPCPagingMethodClassic::TestAndSetPageTableEntry(
1142 					&pt[index],
1143 					(entry & ~(PPC_PTE_PROTECTION_MASK
1144 							| PPC_PTE_MEMORY_TYPE_MASK))
1145 						| newProtectionFlags
1146 						| PPCPagingMethodClassic::MemoryTypeToPageTableEntryFlags(
1147 							memoryType),
1148 					entry);
1149 				if (oldEntry == entry)
1150 					break;
1151 				entry = oldEntry;
1152 			}
1153 
1154 			if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
1155 				// Note that we only need to invalidate the address if the
1156 				// accessed flag was set, since only then the entry could have
1157 				// been in any TLB.
1158 				InvalidatePage(start);
1159 			}
1160 		}
1161 	} while (start != 0 && start < end);
1162 
1163 	return B_OK;
1164 #endif
1165 }
1166 
1167 
1168 status_t
1169 PPCVMTranslationMapClassic::ClearFlags(addr_t virtualAddress, uint32 flags)
1170 {
1171 	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
1172 	if (entry == NULL)
1173 		return B_NO_ERROR;
1174 
1175 	bool modified = false;
1176 
1177 	// clear the bits
1178 	if (flags & PAGE_MODIFIED && entry->changed) {
1179 		entry->changed = false;
1180 		modified = true;
1181 	}
1182 	if (flags & PAGE_ACCESSED && entry->referenced) {
1183 		entry->referenced = false;
1184 		modified = true;
1185 	}
1186 
1187 	// synchronize
1188 	if (modified) {
1189 		tlbie(virtualAddress);
1190 		eieio();
1191 		tlbsync();
1192 		ppc_sync();
1193 	}
1194 
1195 	return B_OK;
1196 
1197 #if 0//X86
1198 	int index = VADDR_TO_PDENT(va);
1199 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
1200 	if ((pd[index] & PPC_PDE_PRESENT) == 0) {
1201 		// no pagetable here
1202 		return B_OK;
1203 	}
1204 
1205 	uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? PPC_PTE_DIRTY : 0)
1206 		| ((flags & PAGE_ACCESSED) ? PPC_PTE_ACCESSED : 0);
1207 
1208 	Thread* thread = thread_get_current_thread();
1209 	ThreadCPUPinner pinner(thread);
1210 
1211 	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
1212 		pd[index] & PPC_PDE_ADDRESS_MASK);
1213 	index = VADDR_TO_PTENT(va);
1214 
1215 	// clear out the flags we've been requested to clear
1216 	page_table_entry oldEntry
1217 		= PPCPagingMethodClassic::ClearPageTableEntryFlags(&pt[index],
1218 			flagsToClear);
1219 
1220 	pinner.Unlock();
1221 
1222 	if ((oldEntry & flagsToClear) != 0)
1223 		InvalidatePage(va);
1224 
1225 	return B_OK;
1226 #endif
1227 }
1228 
1229 
1230 bool
1231 PPCVMTranslationMapClassic::ClearAccessedAndModified(VMArea* area,
1232 	addr_t address, bool unmapIfUnaccessed, bool& _modified)
1233 {
1234 	ASSERT(address % B_PAGE_SIZE == 0);
1235 
1236 	// TODO: Implement for real! ATM this is just an approximation using
1237 	// Query(), ClearFlags(), and UnmapPage(). See below!
1238 
1239 	RecursiveLocker locker(fLock);
1240 
1241 	uint32 flags;
1242 	phys_addr_t physicalAddress;
1243 	if (Query(address, &physicalAddress, &flags) != B_OK
1244 		|| (flags & PAGE_PRESENT) == 0) {
1245 		return false;
1246 	}
1247 
1248 	_modified = (flags & PAGE_MODIFIED) != 0;
1249 
1250 	if ((flags & (PAGE_ACCESSED | PAGE_MODIFIED)) != 0)
1251 		ClearFlags(address, flags & (PAGE_ACCESSED | PAGE_MODIFIED));
1252 
1253 	if ((flags & PAGE_ACCESSED) != 0)
1254 		return true;
1255 
1256 	if (!unmapIfUnaccessed)
1257 		return false;
1258 
1259 	locker.Unlock();
1260 
1261 	UnmapPage(area, address, false);
1262 		// TODO: Obvious race condition: Between querying and unmapping the
1263 		// page could have been accessed. We try to compensate by considering
1264 		// vm_page::{accessed,modified} (which would have been updated by
1265 		// UnmapPage()) below, but that doesn't quite match the required
1266 		// semantics of the method.
1267 
1268 	vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
1269 	if (page == NULL)
1270 		return false;
1271 
1272 	_modified |= page->modified;
1273 
1274 	return page->accessed;
1275 
1276 #if 0//X86
1277 	page_directory_entry* pd = fPagingStructures->pgdir_virt;
1278 
1279 	TRACE("PPCVMTranslationMapClassic::ClearAccessedAndModified(%#" B_PRIxADDR
1280 		")\n", address);
1281 
1282 	RecursiveLocker locker(fLock);
1283 
1284 	int index = VADDR_TO_PDENT(address);
1285 	if ((pd[index] & PPC_PDE_PRESENT) == 0)
1286 		return false;
1287 
1288 	ThreadCPUPinner pinner(thread_get_current_thread());
1289 
1290 	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
1291 		pd[index] & PPC_PDE_ADDRESS_MASK);
1292 
1293 	index = VADDR_TO_PTENT(address);
1294 
1295 	// perform the deed
1296 	page_table_entry oldEntry;
1297 
1298 	if (unmapIfUnaccessed) {
1299 		while (true) {
1300 			oldEntry = pt[index];
1301 			if ((oldEntry & PPC_PTE_PRESENT) == 0) {
1302 				// page mapping not valid
1303 				return false;
1304 			}
1305 
1306 			if (oldEntry & PPC_PTE_ACCESSED) {
1307 				// page was accessed -- just clear the flags
1308 				oldEntry = PPCPagingMethodClassic::ClearPageTableEntryFlags(
1309 					&pt[index], PPC_PTE_ACCESSED | PPC_PTE_DIRTY);
1310 				break;
1311 			}
1312 
1313 			// page hasn't been accessed -- unmap it
1314 			if (PPCPagingMethodClassic::TestAndSetPageTableEntry(&pt[index], 0,
1315 					oldEntry) == oldEntry) {
1316 				break;
1317 			}
1318 
1319 			// something changed -- check again
1320 		}
1321 	} else {
1322 		oldEntry = PPCPagingMethodClassic::ClearPageTableEntryFlags(&pt[index],
1323 			PPC_PTE_ACCESSED | PPC_PTE_DIRTY);
1324 	}
1325 
1326 	pinner.Unlock();
1327 
1328 	_modified = (oldEntry & PPC_PTE_DIRTY) != 0;
1329 
1330 	if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
1331 		// Note that we only need to invalidate the address if the
1332 		// accessed flag was set, since only then the entry could have been
1333 		// in any TLB.
1334 		InvalidatePage(address);
1335 
1336 		Flush();
1337 
1338 		return true;
1339 	}
1340 
1341 	if (!unmapIfUnaccessed)
1342 		return false;
1343 
1344 	// We have unmapped the address. Do the "high level" stuff.
1345 
1346 	fMapCount--;
1347 
1348 	locker.Detach();
1349 		// UnaccessedPageUnmapped() will unlock for us
1350 
1351 	UnaccessedPageUnmapped(area,
1352 		(oldEntry & PPC_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
1353 
1354 	return false;
1355 #endif
1356 }
1357 
1358 
1359 PPCPagingStructures*
1360 PPCVMTranslationMapClassic::PagingStructures() const
1361 {
1362 	return fPagingStructures;
1363 }
1364