/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/040/M68KVMTranslationMap040.h"

#include <stdlib.h>
#include <string.h>

#include <int.h>
#include <thread.h>
#include <slab/Slab.h>
#include <smp.h>
#include <util/AutoLock.h>
#include <util/queue.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/040/M68KPagingMethod040.h"
#include "paging/040/M68KPagingStructures040.h"
#include "paging/m68k_physical_page_mapper.h"


#define TRACE_M68K_VM_TRANSLATION_MAP_040
#ifdef TRACE_M68K_VM_TRANSLATION_MAP_040
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


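// Note on the translation tree (a summary of the standard 68040 4K-page
// layout, assumed here): a virtual address is resolved in three levels --
// a 7-bit index into the page root table (PRE, DT_ROOT entries), a 7-bit
// index into a page directory table (PDE, DT_DIR entries), and a 6-bit
// index into a page table (PTE, DT_PAGE or DT_INDIRECT entries). All these
// tables are smaller than B_PAGE_SIZE, so several of them are packed into
// each allocated physical page; the NUM_*_PER_PAGE constants describe that
// packing and explain the "group" allocations below.

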
M68KVMTranslationMap040::M68KVMTranslationMap040()
	:
	fPagingStructures(NULL)
{
}


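/*!	Frees all user-space page directory and page tables of this map's
	translation tree. Since tables are allocated in page-sized groups (see
	Map()), a directory page is only freed after its last directory table
	has been visited.
*/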
M68KVMTranslationMap040::~M68KVMTranslationMap040()
{
	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL)
		fPageMapper->Delete();

	if (fPagingStructures->pgroot_virt != NULL) {
		page_root_entry *pgroot_virt = fPagingStructures->pgroot_virt;

		// cycle through and free all of the user space pgdirs & pgtables;
		// since the table sizes don't match B_PAGE_SIZE, several tables are
		// allocated per page and, based on the index modulo, either all of
		// them are in the tree or none is.
		for (uint32 i = VADDR_TO_PRENT(USER_BASE);
				i <= VADDR_TO_PRENT(USER_BASE + (USER_SIZE - 1)); i++) {
			addr_t pgdir_pn;
			page_directory_entry *pgdir;
			vm_page *dirpage;

			if (PRE_TYPE(pgroot_virt[i]) == DT_INVALID)
				continue;
			if (PRE_TYPE(pgroot_virt[i]) != DT_ROOT) {
				panic("rtdir[%ld]: buggy descriptor type", i);
				return;
			}
			// XXX: suboptimal (done NUM_DIRTBL_PER_PAGE times per dir page)
			pgdir_pn = PRE_TO_PN(pgroot_virt[i]);
			dirpage = vm_lookup_page(pgdir_pn);
			// XXX: dirpage is a vm_page*; the page ought to be mapped
			// before its directory tables are read from it.
			pgdir = &((page_directory_entry *)dirpage)[
				(i % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL];

			for (uint32 j = 0; j < NUM_DIRENT_PER_TBL;
					j += NUM_PAGETBL_PER_PAGE) {
				addr_t pgtbl_pn;
				vm_page *page;
				if (PDE_TYPE(pgdir[j]) == DT_INVALID)
					continue;
				if (PDE_TYPE(pgdir[j]) != DT_DIR) {
					panic("pgroot[%ld][%ld]: buggy descriptor type", i, j);
					return;
				}
				pgtbl_pn = PDE_TO_PN(pgdir[j]);
				page = vm_lookup_page(pgtbl_pn);
				if (page == NULL) {
					panic("destroy_tmap: didn't find pgtable page\n");
					return;
				}
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
			if (((i + 1) % NUM_DIRTBL_PER_PAGE) == 0) {
				DEBUG_PAGE_ACCESS_END(dirpage);
				vm_page_set_state(dirpage, PAGE_STATE_FREE);
			}
		}

#if 0
//X86
		for (uint32 i = VADDR_TO_PDENT(USER_BASE);
				i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
			if ((fPagingStructures->pgdir_virt[i] & M68K_PDE_PRESENT) != 0) {
				addr_t address = fPagingStructures->pgdir_virt[i]
					& M68K_PDE_ADDRESS_MASK;
				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
				if (!page)
					panic("destroy_tmap: didn't find pgtable page\n");
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
		}
#endif
	}

	fPagingStructures->RemoveReference();
}


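/*!	Initializes the map. A user map allocates its own page root table and
	looks up its physical address; the kernel map reuses the page root and
	physical page mapper set up at boot.
*/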
status_t
M68KVMTranslationMap040::Init(bool kernel)
{
	TRACE("M68KVMTranslationMap040::Init()\n");

	M68KVMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) M68KPagingStructures040;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	M68KPagingMethod040* method = M68KPagingMethod040::Method();

	if (!kernel) {
		// user
		// allocate a physical page mapper
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// allocate the page root
		page_root_entry* virtualPageRoot = (page_root_entry*)memalign(
			SIZ_ROOTTBL, SIZ_ROOTTBL);
		if (virtualPageRoot == NULL)
			return B_NO_MEMORY;

		// look up the page root's physical address
		phys_addr_t physicalPageRoot;
		vm_get_page_mapping(VMAddressSpace::KernelID(),
			(addr_t)virtualPageRoot, &physicalPageRoot);

		fPagingStructures->Init(virtualPageRoot, physicalPageRoot,
			method->KernelVirtualPageRoot());
	} else {
		// kernel
		// get the physical page mapper
		fPageMapper = method->KernelPhysicalPageMapper();

		// we already know the kernel page root mapping
		fPagingStructures->Init(method->KernelVirtualPageRoot(),
			method->KernelPhysicalPageRoot(), NULL);
	}

	return B_OK;
}


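/*!	Returns an upper bound for the number of physical pages needed to hold
	the paging structures for mapping the range [start, end]. Whole pages
	are counted, since directory and page tables are allocated one physical
	page (one group of tables) at a time.
	A rough worked example, assuming the usual 040 constants (8 directory
	tables and 16 page tables per page, 128 entries per directory table):
	an aligned 1 MB range within a single root entry covers 4 directory
	entries, hence at most ceil(4 / 16) = 1 page of page tables plus 1 page
	for the directory group -- the pgdirs == 1 branch below returns 2.
*/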
size_t
M68KVMTranslationMap040::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	size_t need;
	size_t pgdirs;

	// If start == 0, the actual base address is not yet known to the caller
	// and we shall assume the worst case.
	if (start == 0) {
		// offset the range so it has the worst possible alignment
#warning M68K: FIXME?
		start = 1023 * B_PAGE_SIZE;
		end += 1023 * B_PAGE_SIZE;
	}

	pgdirs = VADDR_TO_PRENT(end) + 1 - VADDR_TO_PRENT(start);
	// how many pages for the page directories
	need = (pgdirs + NUM_DIRTBL_PER_PAGE - 1) / NUM_DIRTBL_PER_PAGE;
	// and for the page tables themselves
	need += ((pgdirs * NUM_DIRENT_PER_TBL) + NUM_PAGETBL_PER_PAGE - 1)
		/ NUM_PAGETBL_PER_PAGE;

	// better rounding when only 1 pgdir
	// XXX: do better for other cases
	if (pgdirs == 1) {
		need = 1;
		need += (VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start)
			+ NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE;
	}

	return need;
}


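/*!	Maps the page at physical address \a pa at virtual address \a va.
	Missing intermediate tables are allocated on the way down; because the
	tables are smaller than a physical page, a whole page worth of them (a
	group) is allocated and entered at the group-aligned indices in one go.
	The caller must have reserved enough pages (cf. MaxPagesNeededToMap()).
*/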
status_t
M68KVMTranslationMap040::Map(addr_t va, phys_addr_t pa, uint32 attributes,
	uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("M68KVMTranslationMap040::Map: entry pa 0x%lx va 0x%lx\n", pa, va);

/*
	dprintf("pgdir at 0x%x\n", pgdir);
	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
*/
	page_root_entry *pr = fPagingStructures->pgroot_virt;
	page_directory_entry *pd;
	page_table_entry *pt;
	uint32 rindex, dindex, pindex;

	// check to see if a page directory exists for this range
	rindex = VADDR_TO_PRENT(va);
	if (PRE_TYPE(pr[rindex]) != DT_ROOT) {
		phys_addr_t pgdir;
		vm_page *page;
		uint32 i;

		// we need to allocate a pgdir group
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgdir = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("::Map: asked for free page for pgdir. 0x%lx\n", pgdir);

		// for each pgdir on the allocated page:
		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
			uint32 aindex = rindex & ~(NUM_DIRTBL_PER_PAGE - 1); /* aligned */
			page_root_entry *apr = &pr[aindex + i];

			// put in the pgroot
			M68KPagingMethod040::PutPageDirInPageRoot(apr, pgdir, attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

			// update any other page roots, if it maps kernel space
			// XXX: suboptimal, should batch them
			if ((aindex + i) >= FIRST_KERNEL_PGDIR_ENT && (aindex + i)
					< (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS))
				M68KPagingStructures040::UpdateAllPageDirs((aindex + i),
					pr[aindex + i]);

			pgdir += SIZ_DIRTBL;
		}
		fMapCount++;
	}

	// now find the directory table
	// XXX: is the pinning required here?
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pd = (page_directory_entry*)MapperGetPageTableAt(
		PRE_TO_PA(pr[rindex]));

	//pinner.Unlock();

	// we want the table at rindex, not at rindex%(tbl/page)
	//pd += (rindex % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;

	// check to see if a page table exists for this range
	dindex = VADDR_TO_PDENT(va);
	if (PDE_TYPE(pd[dindex]) != DT_DIR) {
		phys_addr_t pgtable;
		vm_page *page;
		uint32 i;

		// we need to allocate a pgtable group
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("::Map: asked for free page for pgtable. 0x%lx\n", pgtable);

		// for each pgtable on the allocated page:
		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
			uint32 aindex = dindex & ~(NUM_PAGETBL_PER_PAGE - 1); /* aligned */
			page_directory_entry *apd = &pd[aindex + i];

			// put in the pgdir
			M68KPagingMethod040::PutPageTableInPageDir(apd, pgtable, attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

			// no need to update other page directories for kernel space;
			// the root level already points to us.

			pgtable += SIZ_PAGETBL;
		}

#warning M68K: really mean map_count++ ??
		fMapCount++;
	}

	// now, fill in the pentry
	//ThreadCPUPinner pinner(thread);

	pt = (page_table_entry*)MapperGetPageTableAt(PDE_TO_PA(pd[dindex]));
	// we want the table at dindex, not at dindex%(tbl/page)
	//pt += (dindex % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

	pindex = VADDR_TO_PTENT(va);

	ASSERT_PRINT(PTE_TYPE(pt[pindex]) == DT_INVALID,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
		pt[pindex]);

	M68KPagingMethod040::PutPageTableEntryInTable(&pt[pindex], pa, attributes,
		memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as
	// previously the entry was not present and the TLB doesn't cache those
	// entries.

	fMapCount++;

	return B_OK;
}


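/*!	Unmaps the given virtual address range. Ranges without a page directory
	or page table are skipped by rounding \a start up to the next group
	boundary, so tables that were never allocated are not touched.
*/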
status_t
M68KVMTranslationMap040::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("M68KVMTranslationMap040::Unmap: asked to free pages 0x%lx to "
		"0x%lx\n", start, end);

	page_root_entry *pr = fPagingStructures->pgroot_virt;
	page_directory_entry *pd;
	page_table_entry *pt;
	int index;

	do {
		index = VADDR_TO_PRENT(start);
		if (PRE_TYPE(pr[index]) != DT_ROOT) {
			// no page directory here, move the start up to access the next
			// page dir group
			start = ROUNDUP(start + 1, kPageDirAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pd = (page_directory_entry*)MapperGetPageTableAt(
			PRE_TO_PA(pr[index]));
		// we want the table at index, not at index%(tbl/page)
		//pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;

		index = VADDR_TO_PDENT(start);
		if (PDE_TYPE(pd[index]) != DT_DIR) {
			// no page table here, move the start up to access the next page
			// table group
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		pt = (page_table_entry*)MapperGetPageTableAt(
			PDE_TO_PA(pd[index]));
		// we want the table at index, not at index%(tbl/page)
		//pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

		for (index = VADDR_TO_PTENT(start);
				(index < NUM_PAGEENT_PER_TBL) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if (PTE_TYPE(pt[index]) != DT_PAGE
				&& PTE_TYPE(pt[index]) != DT_INDIRECT) {
				// page mapping not valid
				continue;
			}

			TRACE("::Unmap: removing page 0x%lx\n", start);

			page_table_entry oldEntry
				= M68KPagingMethod040::ClearPageTableEntry(&pt[index]);
			fMapCount--;

			if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then could the entry
				// have been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/
status_t
M68KVMTranslationMap040::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_root_entry* pr = fPagingStructures->pgroot_virt;

	TRACE("M68KVMTranslationMap040::UnmapPage(%#" B_PRIxADDR ")\n", address);

	RecursiveLocker locker(fLock);

	int index;

	index = VADDR_TO_PRENT(address);
	if (PRE_TYPE(pr[index]) != DT_ROOT)
		return B_ENTRY_NOT_FOUND;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_directory_entry* pd = (page_directory_entry*)MapperGetPageTableAt(
		pr[index] & M68K_PRE_ADDRESS_MASK);

	index = VADDR_TO_PDENT(address);
	if (PDE_TYPE(pd[index]) != DT_DIR)
		return B_ENTRY_NOT_FOUND;

	page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
		pd[index] & M68K_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);
	if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
		phys_addr_t indirectAddress = PIE_TO_TA(pt[index]);
		pt = (page_table_entry*)MapperGetPageTableAt(indirectAddress, true);
		index = 0; // an indirect group holds a single descriptor
	}

	page_table_entry oldEntry = M68KPagingMethod040::ClearPageTableEntry(
		&pt[index]);

	pinner.Unlock();

	if (PTE_TYPE(oldEntry) != DT_PAGE) {
		// page mapping not valid
		return B_ENTRY_NOT_FOUND;
	}

	fMapCount--;

	if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then could the entry have been in any TLB.
		InvalidatePage(address);
		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of the
		// same team) could still access the page in question via their cached
		// entry. We can obviously lose a modified flag in this case, with the
		// effect that the page looks unmodified (and might thus be recycled),
		// but is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// this is actually a problem.
		// Interestingly FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & M68K_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		(oldEntry & M68K_PTE_ACCESSED) != 0, (oldEntry & M68K_PTE_DIRTY) != 0,
		updatePageQueue);

	return B_OK;
}


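/*!	Unmaps all pages of the given range in \a area, transferring the
	accessed/dirty flags of each cleared entry to the respective vm_page.
	Mapping objects are collected in a queue and freed only after the map
	lock has been released.
*/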
void
M68KVMTranslationMap040::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	int index;

	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("M68KVMTranslationMap040::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	page_root_entry* pr = fPagingStructures->pgroot_virt;

	VMAreaMappings queue;

	RecursiveLocker locker(fLock);

	do {
		index = VADDR_TO_PRENT(start);
		if (PRE_TYPE(pr[index]) != DT_ROOT) {
			// no page directory here, move the start up to access the next
			// page dir group
			start = ROUNDUP(start + 1, kPageDirAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_directory_entry* pd = (page_directory_entry*)MapperGetPageTableAt(
			pr[index] & M68K_PRE_ADDRESS_MASK);

		index = VADDR_TO_PDENT(start);
		if (PDE_TYPE(pd[index]) != DT_DIR) {
			// no page table here, move the start up to access the next page
			// table group
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
			pd[index] & M68K_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start);
				(index < NUM_PAGEENT_PER_TBL) && (start < end);
				index++, start += B_PAGE_SIZE) {
			page_table_entry *e = &pt[index];
			// fetch the indirect descriptor, if any
			// XXX: clear the indirect descriptor too??
			if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
				phys_addr_t indirectAddress = PIE_TO_TA(pt[index]);
				e = (page_table_entry*)MapperGetPageTableAt(
					indirectAddress, true);
			}

			page_table_entry oldEntry
				= M68KPagingMethod040::ClearPageTableEntry(e);
			if (PTE_TYPE(oldEntry) != DT_PAGE)
				continue;

			fMapCount--;

			if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then could the entry
				// have been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & M68K_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if ((oldEntry & M68K_PTE_ACCESSED) != 0)
					page->accessed = true;
				if ((oldEntry & M68K_PTE_DIRTY) != 0)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical here, as in all cases this method is used, the unmapped
	// area range is unmapped for good (resized/cut) and the pages will likely
	// be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


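/*!	Unmaps the whole \a area. Device and wired areas are delegated to
	UnmapPages(); otherwise the area's mapping objects are iterated directly,
	and the page table entries only need to be cleared as long as the address
	space isn't being deleted or the top cache's page flags still matter.
*/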
void
M68KVMTranslationMap040::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		M68KVMTranslationMap040::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	page_root_entry* pr = fPagingStructures->pgroot_virt;

	RecursiveLocker locker(fLock);

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			int index;
			index = VADDR_TO_PRENT(address);
			if (PRE_TYPE(pr[index]) != DT_ROOT) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page root entry", page, area, address);
				continue;
			}

			ThreadCPUPinner pinner(thread_get_current_thread());

			page_directory_entry* pd
				= (page_directory_entry*)MapperGetPageTableAt(
					pr[index] & M68K_PRE_ADDRESS_MASK);

			index = VADDR_TO_PDENT(address);
			if (PDE_TYPE(pd[index]) != DT_DIR) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page dir entry", page, area, address);
				continue;
			}

			page_table_entry* pt
				= (page_table_entry*)MapperGetPageTableAt(
					pd[index] & M68K_PDE_ADDRESS_MASK);

			// XXX: M68K: handle DT_INDIRECT here?

			page_table_entry oldEntry
				= M68KPagingMethod040::ClearPageTableEntry(
					&pt[VADDR_TO_PTENT(address)]);

			pinner.Unlock();

			if (PTE_TYPE(oldEntry) != DT_PAGE) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if ((oldEntry & M68K_PTE_DIRTY) != 0)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


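/*!	Looks up the physical page and protection/state flags mapped at \a va.
	Returns B_OK even if nothing is mapped; in that case *_physical is 0 and
	PAGE_PRESENT is not set in *_flags.
*/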
status_t
M68KVMTranslationMap040::Query(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physical = 0;
	TRACE("040::Query(0x%lx,)\n", va);

	int index = VADDR_TO_PRENT(va);
	page_root_entry *pr = fPagingStructures->pgroot_virt;
	if (PRE_TYPE(pr[index]) != DT_ROOT) {
		// no page directory here
		return B_OK;
	}

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_directory_entry* pd = (page_directory_entry*)MapperGetPageTableAt(
		pr[index] & M68K_PRE_ADDRESS_MASK);

	index = VADDR_TO_PDENT(va);
	if (PDE_TYPE(pd[index]) != DT_DIR) {
		// no page table here
		return B_OK;
	}

	page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
		pd[index] & M68K_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(va);
	if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
		pt = (page_table_entry*)MapperGetPageTableAt(
			pt[index] & M68K_PIE_ADDRESS_MASK, true);
		index = 0;
	}

	page_table_entry entry = pt[index];

	*_physical = entry & M68K_PTE_ADDRESS_MASK;

	// read in the page state flags
	if ((entry & M68K_PTE_SUPERVISOR) == 0) {
		*_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & M68K_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & M68K_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((PTE_TYPE(entry) == DT_PAGE) ? PAGE_PRESENT : 0);

	pinner.Unlock();

	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);

	return B_OK;
}


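/*!	Like Query(), but usable in interrupt context: the tables are accessed
	through the physical page mapper's interrupt API instead of pinning the
	thread to the current CPU.
*/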
status_t
M68KVMTranslationMap040::QueryInterrupt(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	*_flags = 0;
	*_physical = 0;
	TRACE("040::QueryInterrupt(0x%lx,)\n", va);

	int index = VADDR_TO_PRENT(va);
	page_root_entry* pr = fPagingStructures->pgroot_virt;
	if (PRE_TYPE(pr[index]) != DT_ROOT) {
		// no page directory here
		return B_OK;
	}

	// map the page directory table
	phys_addr_t ppr = pr[index] & M68K_PRE_ADDRESS_MASK;
	page_directory_entry* pd = (page_directory_entry*)((char *)
		M68KPagingMethod040::Method()->PhysicalPageMapper()
		->InterruptGetPageTableAt(ppr & ~(B_PAGE_SIZE - 1))
		+ (ppr % B_PAGE_SIZE));

	index = VADDR_TO_PDENT(va);
	if (PDE_TYPE(pd[index]) != DT_DIR) {
		// no page table here
		return B_OK;
	}

	phys_addr_t ppd = pd[index] & M68K_PDE_ADDRESS_MASK;
	page_table_entry* pt = (page_table_entry*)((char *)
		M68KPagingMethod040::Method()->PhysicalPageMapper()
		->InterruptGetPageTableAt(ppd & ~(B_PAGE_SIZE - 1))
		+ (ppd % B_PAGE_SIZE));

	index = VADDR_TO_PTENT(va);
	if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
		phys_addr_t ppt = pt[index] & M68K_PIE_ADDRESS_MASK;
		pt = (page_table_entry*)((char *)
			M68KPagingMethod040::Method()->PhysicalPageMapper()
			->InterruptGetPageTableAt(ppt & ~(B_PAGE_SIZE - 1))
			+ (ppt % B_PAGE_SIZE));
		index = 0;
	}

	page_table_entry entry = pt[index];

	*_physical = entry & M68K_PTE_ADDRESS_MASK;

	// read in the page state flags
	if ((entry & M68K_PTE_SUPERVISOR) == 0) {
		*_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & M68K_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & M68K_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((PTE_TYPE(entry) == DT_PAGE) ? PAGE_PRESENT : 0);

	return B_OK;
}


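/*!	Changes the protection of the given address range. Not yet implemented
	for the 040: the method returns ENOSYS before the disabled, x86-derived
	code below is reached.
*/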
status_t
M68KVMTranslationMap040::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
		attributes);

	return ENOSYS;
#if 0
	// compute protection flags
	uint32 newProtectionFlags = 0;
	if ((attributes & B_USER_PROTECTION) != 0) {
		newProtectionFlags = M68K_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newProtectionFlags |= M68K_PTE_WRITABLE;
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newProtectionFlags = M68K_PTE_WRITABLE;

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & M68K_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		struct thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
			pd[index] & M68K_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); index < 1024 && start < end;
				index++, start += B_PAGE_SIZE) {
			page_table_entry entry = pt[index];
			if ((entry & M68K_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("protect_tmap: protect page 0x%lx\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			page_table_entry oldEntry;
			while (true) {
				oldEntry = M68KPagingMethod040::TestAndSetPageTableEntry(
					&pt[index],
					(entry & ~(M68K_PTE_PROTECTION_MASK
							| M68K_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags
						| M68KPagingMethod040::MemoryTypeToPageTableEntryFlags(
							memoryType),
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then could the entry
				// have been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);
	return B_OK;
#endif
}


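/*!	Clears the requested accessed/modified flags of the page mapped at
	\a va. Like Protect() this is not yet implemented for the 040 and
	currently returns ENOSYS; the disabled code below still follows the
	two-level x86 layout.
*/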
status_t
M68KVMTranslationMap040::ClearFlags(addr_t va, uint32 flags)
{
	return ENOSYS;
#if 0
	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & M68K_PDE_PRESENT) == 0) {
		// no page table here
		return B_OK;
	}

	uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? M68K_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? M68K_PTE_ACCESSED : 0);

	struct thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
		pd[index] & M68K_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	// clear out the flags we've been requested to clear
	page_table_entry oldEntry
		= M68KPagingMethod040::ClearPageTableEntryFlags(&pt[index],
			flagsToClear);

	pinner.Unlock();

	if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(va);

	return B_OK;
#endif
}


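/*!	Meant to clear the accessed and modified flags of the page at \a address
	(and to unmap an unaccessed page if \a unmapIfUnaccessed is set). The 040
	implementation is still disabled, so the method always returns false.
*/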
bool
M68KVMTranslationMap040::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_root_entry* pr = fPagingStructures->pgroot_virt;

	TRACE("M68KVMTranslationMap040::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

#if 0
	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & M68K_PDE_PRESENT) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
		pd[index] & M68K_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);

	// perform the deed
	page_table_entry oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = pt[index];
			if ((oldEntry & M68K_PTE_PRESENT) == 0) {
				// page mapping not valid
				return false;
			}

			if (oldEntry & M68K_PTE_ACCESSED) {
				// page was accessed -- just clear the flags
				oldEntry = M68KPagingMethod040::ClearPageTableEntryFlags(
					&pt[index], M68K_PTE_ACCESSED | M68K_PTE_DIRTY);
				break;
			}

			// page hasn't been accessed -- unmap it
			if (M68KPagingMethod040::TestAndSetPageTableEntry(&pt[index], 0,
					oldEntry) == oldEntry) {
				break;
			}

			// something changed -- check again
		}
	} else {
		oldEntry = M68KPagingMethod040::ClearPageTableEntryFlags(&pt[index],
			M68K_PTE_ACCESSED | M68K_PTE_DIRTY);
	}

	pinner.Unlock();

	_modified = (oldEntry & M68K_PTE_DIRTY) != 0;

	if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then could the entry have been in any TLB.
		InvalidatePage(address);

		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & M68K_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

#endif
	return false;
}


M68KPagingStructures*
M68KVMTranslationMap040::PagingStructures() const
{
	return fPagingStructures;
}


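/*!	Returns a virtual pointer to the table at \a physicalAddress. Since
	several sub-page-sized tables share one physical page, the containing
	page is mapped as a whole and the in-page offset is re-applied to the
	returned address. Indirect descriptors (\a indirect) are only 4-byte
	aligned; everything else must be table-aligned.
*/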
inline void *
M68KVMTranslationMap040::MapperGetPageTableAt(phys_addr_t physicalAddress,
	bool indirect)
{
	// M68K fits several page tables in a single page...
	uint32 offset = physicalAddress % B_PAGE_SIZE;
	ASSERT((indirect && (offset % 4) == 0) || (offset % SIZ_ROOTTBL) == 0);
	physicalAddress &= ~(B_PAGE_SIZE - 1);
	void *va = fPageMapper->GetPageTableAt(physicalAddress);
	return (void *)((addr_t)va + offset);
}