/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/040/M68KVMTranslationMap040.h"

#include <stdlib.h>
#include <string.h>

#include <int.h>
#include <thread.h>
#include <slab/Slab.h>
#include <smp.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>
#include <util/queue.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/040/M68KPagingMethod040.h"
#include "paging/040/M68KPagingStructures040.h"
#include "paging/m68k_physical_page_mapper.h"


#define TRACE_M68K_VM_TRANSLATION_MAP_040
#ifdef TRACE_M68K_VM_TRANSLATION_MAP_040
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif
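
// The 68040 MMU translation tree has three levels: a 128-entry page root
// (pointed to by URP/SRP), 128-entry page directories and, with the 4 kB
// page size used here, 64-entry page tables. Each table is smaller than
// B_PAGE_SIZE (512, 512 and 256 bytes respectively), so several tables of
// the same level share one physical page and are always allocated and
// freed as a group.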


M68KVMTranslationMap040::M68KVMTranslationMap040()
	:
	fPagingStructures(NULL)
{
}


M68KVMTranslationMap040::~M68KVMTranslationMap040()
{
	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL)
		fPageMapper->Delete();

	if (fPagingStructures->pgroot_virt != NULL) {
		page_root_entry *pgroot_virt = fPagingStructures->pgroot_virt;

		// Cycle through and free all of the user space pgdirs and pgtables.
		// Since the table sizes don't match B_PAGE_SIZE, several tables are
		// allocated per page based on the index modulo, and a group is
		// always put into the tree either completely or not at all.
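		// (E.g. with 512-byte directory tables NUM_DIRTBL_PER_PAGE is 8:
		// root entries 0-7 share one physical page, 8-15 the next one, and
		// the page backing a group is freed only once its last slot has
		// been processed below.)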
		for (uint32 i = VADDR_TO_PRENT(USER_BASE);
				i <= VADDR_TO_PRENT(USER_BASE + (USER_SIZE - 1)); i++) {
			addr_t pgdir_pn;
			page_directory_entry *pgdir;
			vm_page *dirpage;

			if (PRE_TYPE(pgroot_virt[i]) == DT_INVALID)
				continue;
			if (PRE_TYPE(pgroot_virt[i]) != DT_ROOT) {
				panic("pgroot[%ld]: buggy descriptor type", i);
				return;
			}
			// XXX: suboptimal (done 8 times)
			pgdir_pn = PRE_TO_PN(pgroot_virt[i]);
			dirpage = vm_lookup_page(pgdir_pn);
			pgdir = &(((page_directory_entry *)dirpage)[i % NUM_DIRTBL_PER_PAGE]);

			for (uint32 j = 0; j < NUM_DIRENT_PER_TBL;
					j += NUM_PAGETBL_PER_PAGE) {
				addr_t pgtbl_pn;
				vm_page *page;

				if (PDE_TYPE(pgdir[j]) == DT_INVALID)
					continue;
				if (PDE_TYPE(pgdir[j]) != DT_DIR) {
					panic("pgroot[%ld][%ld]: buggy descriptor type", i, j);
					return;
				}
				pgtbl_pn = PDE_TO_PN(pgdir[j]);
				page = vm_lookup_page(pgtbl_pn);
				if (page == NULL) {
					panic("destroy_tmap: didn't find pgtable page\n");
					return;
				}
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
			if (((i + 1) % NUM_DIRTBL_PER_PAGE) == 0) {
				DEBUG_PAGE_ACCESS_END(dirpage);
				vm_page_set_state(dirpage, PAGE_STATE_FREE);
			}
		}

#if 0
//X86
		for (uint32 i = VADDR_TO_PDENT(USER_BASE);
				i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
			if ((fPagingStructures->pgdir_virt[i] & M68K_PDE_PRESENT) != 0) {
				addr_t address = fPagingStructures->pgdir_virt[i]
					& M68K_PDE_ADDRESS_MASK;
				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
				if (!page)
					panic("destroy_tmap: didn't find pgtable page\n");
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
		}
#endif
	}

	fPagingStructures->RemoveReference();
}


status_t
M68KVMTranslationMap040::Init(bool kernel)
{
	TRACE("M68KVMTranslationMap040::Init()\n");

	M68KVMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) M68KPagingStructures040;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	M68KPagingMethod040* method = M68KPagingMethod040::Method();

	if (!kernel) {
		// user
		// allocate a physical page mapper
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// allocate the page root
		page_root_entry* virtualPageRoot = (page_root_entry*)memalign(
			SIZ_ROOTTBL, SIZ_ROOTTBL);
		if (virtualPageRoot == NULL)
			return B_NO_MEMORY;

		// look up the page root's physical address
		phys_addr_t physicalPageRoot;
		vm_get_page_mapping(VMAddressSpace::KernelID(),
			(addr_t)virtualPageRoot, &physicalPageRoot);

		fPagingStructures->Init(virtualPageRoot, physicalPageRoot,
			method->KernelVirtualPageRoot());
	} else {
		// kernel
		// get the physical page mapper
		fPageMapper = method->KernelPhysicalPageMapper();

		// we already know the kernel page root mapping
		fPagingStructures->Init(method->KernelVirtualPageRoot(),
			method->KernelPhysicalPageRoot(), NULL);
	}

	return B_OK;
}


size_t
M68KVMTranslationMap040::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	size_t need;
	size_t pgdirs;

	// If start == 0, the actual base address is not yet known to the caller
	// and we shall assume the worst case.
	if (start == 0) {
		// offset the range so it has the worst possible alignment
#warning M68K: FIXME?
		start = 1023 * B_PAGE_SIZE;
		end += 1023 * B_PAGE_SIZE;
	}

	pgdirs = VADDR_TO_PRENT(end) + 1 - VADDR_TO_PRENT(start);
	// pages needed for the page directories...
	need = (pgdirs + NUM_DIRTBL_PER_PAGE - 1) / NUM_DIRTBL_PER_PAGE;
	// ...plus those needed for the page tables themselves
	need += ((pgdirs * NUM_DIRENT_PER_TBL) + NUM_PAGETBL_PER_PAGE - 1)
		/ NUM_PAGETBL_PER_PAGE;
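
	// E.g., assuming the 7/7/6 address split with 4 kB pages (one root
	// entry spans 32 MB and one directory entry 256 kB), a 1 MB range
	// touches at most 5 directory entries within a single root entry, so
	// one page of directory tables plus one page of page tables suffices.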

	// better rounding when only 1 pgdir
	// XXX: do better for other cases
	if (pgdirs == 1) {
		need = 1;
		need += (VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start)
			+ NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE;
	}

	return need;
}


status_t
M68KVMTranslationMap040::Map(addr_t va, phys_addr_t pa, uint32 attributes,
	uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("M68KVMTranslationMap040::Map: entry pa 0x%lx va 0x%lx\n", pa, va);

/*
	dprintf("pgdir at 0x%x\n", pgdir);
	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
*/
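	// The mapping is done in three steps: look up (and allocate, if
	// missing) the page directory group referenced by the root entry for
	// va, then the page table group referenced by the directory entry, and
	// finally fill in the page table entry itself. Missing intermediate
	// tables are allocated a full page (a whole group) at a time.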
	page_root_entry *pr = fPagingStructures->pgroot_virt;
	page_directory_entry *pd;
	page_table_entry *pt;
	uint32 rindex, dindex, pindex;

	// check to see if a page directory exists for this range
	rindex = VADDR_TO_PRENT(va);
	if (PRE_TYPE(pr[rindex]) != DT_ROOT) {
		phys_addr_t pgdir;
		vm_page *page;
		uint32 i;

		// we need to allocate a pgdir group
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgdir = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("::Map: asked for free page for pgdir. 0x%lx\n", pgdir);

		// for each pgdir on the allocated page:
		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
			uint32 aindex = rindex & ~(NUM_DIRTBL_PER_PAGE - 1); /* aligned */
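			// (with NUM_DIRTBL_PER_PAGE == 8, e.g. rindex 13 yields
			// aindex 8, so root entries 8-15 all get their directory
			// tables from this page)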
			page_root_entry *apr = &pr[aindex + i];

			// put it in the pgroot
			M68KPagingMethod040::PutPageDirInPageRoot(apr, pgdir, attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

			// update any other page roots if it maps kernel space
			// XXX: suboptimal, should batch them
			if ((aindex + i) >= FIRST_KERNEL_PGDIR_ENT && (aindex + i)
					< (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS))
				M68KPagingStructures040::UpdateAllPageDirs((aindex + i),
					pr[aindex + i]);

			pgdir += SIZ_DIRTBL;
		}
		fMapCount++;
	}
	// now, look up the page directory
	// XXX: is this required?
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pd = (page_directory_entry*)MapperGetPageTableAt(
		PRE_TO_PA(pr[rindex]));

	//pinner.Unlock();

	// we want the table at rindex, not at rindex % (tbl/page)
	//pd += (rindex % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;

	// check to see if a page table exists for this range
	dindex = VADDR_TO_PDENT(va);
	if (PDE_TYPE(pd[dindex]) != DT_DIR) {
		phys_addr_t pgtable;
		vm_page *page;
		uint32 i;

		// we need to allocate a pgtable group
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("::Map: asked for free page for pgtable. 0x%lx\n", pgtable);

		// for each pgtable on the allocated page:
		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
			uint32 aindex = dindex & ~(NUM_PAGETBL_PER_PAGE - 1); /* aligned */
			page_directory_entry *apd = &pd[aindex + i];

			// put it in the pgdir
			M68KPagingMethod040::PutPageTableInPageDir(apd, pgtable, attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

			// no need to update other page directories for kernel space;
			// the root level already points to us

			pgtable += SIZ_PAGETBL;
		}

#warning M68K: really mean map_count++ ??
		fMapCount++;
	}

	// now fill in the page table entry
	//ThreadCPUPinner pinner(thread);

	pt = (page_table_entry*)MapperGetPageTableAt(PDE_TO_PA(pd[dindex]));
	// we want the table at dindex, not at dindex % (tbl/page)
	//pt += (dindex % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

	pindex = VADDR_TO_PTENT(va);

	ASSERT_PRINT(PTE_TYPE(pt[pindex]) == DT_INVALID,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
		pt[pindex]);

	M68KPagingMethod040::PutPageTableEntryInTable(&pt[pindex], pa, attributes,
		memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as
	// previously the entry was not present and the TLB doesn't cache those
	// entries.

	fMapCount++;

	return B_OK;
}


status_t
M68KVMTranslationMap040::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("M68KVMTranslationMap040::Unmap: asked to free pages 0x%lx to "
		"0x%lx\n", start, end);

	page_root_entry *pr = fPagingStructures->pgroot_virt;
	page_directory_entry *pd;
	page_table_entry *pt;
	int index;

	do {
		index = VADDR_TO_PRENT(start);
		if (PRE_TYPE(pr[index]) != DT_ROOT) {
			// no pagedir here, move the start up to access the next page
			// dir group
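			// (kPageDirAlignment is presumably the range covered by a
			// single root entry: 32 MB with the 7/7/6 address split)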
			start = ROUNDUP(start + 1, kPageDirAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pd = (page_directory_entry*)MapperGetPageTableAt(
			PRE_TO_PA(pr[index]));
		// we want the table at rindex, not at rindex % (tbl/page)
		//pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;

		index = VADDR_TO_PDENT(start);
		if (PDE_TYPE(pd[index]) != DT_DIR) {
			// no page table here, move the start up to access the next page
			// table group
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		pt = (page_table_entry*)MapperGetPageTableAt(
			PDE_TO_PA(pd[index]));
		// we want the table at dindex, not at dindex % (tbl/page)
		//pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

		for (index = VADDR_TO_PTENT(start);
				(index < NUM_PAGEENT_PER_TBL) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if (PTE_TYPE(pt[index]) != DT_PAGE
				&& PTE_TYPE(pt[index]) != DT_INDIRECT) {
				// page mapping not valid
				continue;
			}

			TRACE("::Unmap: removing page 0x%lx\n", start);

			page_table_entry oldEntry
				= M68KPagingMethod040::ClearPageTableEntry(&pt[index]);
			fMapCount--;

			if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could
				// have been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/
status_t
M68KVMTranslationMap040::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_root_entry* pr = fPagingStructures->pgroot_virt;

	TRACE("M68KVMTranslationMap040::UnmapPage(%#" B_PRIxADDR ")\n", address);

	RecursiveLocker locker(fLock);

	int index;

	index = VADDR_TO_PRENT(address);
	if (PRE_TYPE(pr[index]) != DT_ROOT)
		return B_ENTRY_NOT_FOUND;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_directory_entry* pd = (page_directory_entry*)MapperGetPageTableAt(
		pr[index] & M68K_PRE_ADDRESS_MASK);

	index = VADDR_TO_PDENT(address);
	if (PDE_TYPE(pd[index]) != DT_DIR)
		return B_ENTRY_NOT_FOUND;

	page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
		pd[index] & M68K_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);
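	// On the 040 a page table entry may be indirect: the descriptor then
	// holds the physical address of the real, single descriptor to operate
	// on (used when a page is mapped at several virtual addresses and its
	// status bits must be maintained in one place).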
	if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
		phys_addr_t indirectAddress = PIE_TO_TA(pt[index]);
		pt = (page_table_entry*)MapperGetPageTableAt(indirectAddress, true);
		index = 0; // single descriptor
	}

	page_table_entry oldEntry = M68KPagingMethod040::ClearPageTableEntry(
		&pt[index]);

	pinner.Unlock();

	if (PTE_TYPE(oldEntry) != DT_PAGE) {
		// page mapping not valid
		return B_ENTRY_NOT_FOUND;
	}

	fMapCount--;

	if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any
		// TLB.
		InvalidatePage(address);
		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of
		// the same team) could still access the page in question via their
		// cached entry. We can obviously lose a modified flag in this case,
		// with the effect that the page looks unmodified (and might thus be
		// recycled), but is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// this is actually a problem.
		// Interestingly FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & M68K_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		(oldEntry & M68K_PTE_ACCESSED) != 0, (oldEntry & M68K_PTE_DIRTY) != 0,
		updatePageQueue);

	return B_OK;
}


void
M68KVMTranslationMap040::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	int index;

	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("M68KVMTranslationMap040::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	page_root_entry* pr = fPagingStructures->pgroot_virt;

	VMAreaMappings queue;

	RecursiveLocker locker(fLock);

	do {
		index = VADDR_TO_PRENT(start);
		if (PRE_TYPE(pr[index]) != DT_ROOT) {
			// no page directory here, move the start up to access the next
			// page directory group
			start = ROUNDUP(start + 1, kPageDirAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_directory_entry* pd = (page_directory_entry*)MapperGetPageTableAt(
			pr[index] & M68K_PRE_ADDRESS_MASK);

		index = VADDR_TO_PDENT(start);
		if (PDE_TYPE(pd[index]) != DT_DIR) {
			// no page table here, move the start up to access the next page
			// table group
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
			pd[index] & M68K_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start);
				(index < NUM_PAGEENT_PER_TBL) && (start < end);
				index++, start += B_PAGE_SIZE) {
			page_table_entry *e = &pt[index];
			// fetch the indirect descriptor
			// XXX: clear the indirect descriptor too??
			if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
				phys_addr_t indirectAddress = PIE_TO_TA(pt[index]);
				e = (page_table_entry*)MapperGetPageTableAt(indirectAddress,
					true);
			}

			page_table_entry oldEntry
				= M68KPagingMethod040::ClearPageTableEntry(e);
			if (PTE_TYPE(oldEntry) != DT_PAGE)
				continue;

			fMapCount--;

			if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could
				// have been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & M68K_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if ((oldEntry & M68K_PTE_ACCESSED) != 0)
					page->accessed = true;
				if ((oldEntry & M68K_PTE_DIRTY) != 0)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical here, as in all cases this method is used, the unmapped
	// area range is unmapped for good (resized/cut) and the pages will likely
	// be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


void
M68KVMTranslationMap040::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		M68KVMTranslationMap040::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	page_root_entry* pr = fPagingStructures->pgroot_virt;

	RecursiveLocker locker(fLock);

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			int index;
			index = VADDR_TO_PRENT(address);
			if (PRE_TYPE(pr[index]) != DT_ROOT) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page root entry", page, area, address);
				continue;
			}

			ThreadCPUPinner pinner(thread_get_current_thread());

			page_directory_entry* pd
				= (page_directory_entry*)MapperGetPageTableAt(
					pr[index] & M68K_PRE_ADDRESS_MASK);

			index = VADDR_TO_PDENT(address);
			if (PDE_TYPE(pd[index]) != DT_DIR) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page dir entry", page, area, address);
				continue;
			}

			page_table_entry* pt
				= (page_table_entry*)MapperGetPageTableAt(
					pd[index] & M68K_PDE_ADDRESS_MASK);

			// XXX: M68K: handle DT_INDIRECT here?

			page_table_entry oldEntry
				= M68KPagingMethod040::ClearPageTableEntry(
					&pt[VADDR_TO_PTENT(address)]);

			pinner.Unlock();

			if (PTE_TYPE(oldEntry) != DT_PAGE) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if ((oldEntry & M68K_PTE_DIRTY) != 0)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


status_t
M68KVMTranslationMap040::Query(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physical = 0;
	TRACE("040::Query(0x%lx,)\n", va);

	int index = VADDR_TO_PRENT(va);
	page_root_entry *pr = fPagingStructures->pgroot_virt;
	if (PRE_TYPE(pr[index]) != DT_ROOT) {
		// no page directory here
		return B_OK;
	}

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_directory_entry* pd = (page_directory_entry*)MapperGetPageTableAt(
		pr[index] & M68K_PRE_ADDRESS_MASK);

	index = VADDR_TO_PDENT(va);
	if (PDE_TYPE(pd[index]) != DT_DIR) {
		// no page table here
		return B_OK;
	}

	page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
		pd[index] & M68K_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(va);
	if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
		pt = (page_table_entry*)MapperGetPageTableAt(
			pt[index] & M68K_PIE_ADDRESS_MASK, true);
		index = 0;
	}

	page_table_entry entry = pt[index];

	*_physical = entry & M68K_PTE_ADDRESS_MASK;

	// read in the page state flags
	if ((entry & M68K_PTE_SUPERVISOR) == 0) {
		*_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & M68K_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & M68K_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((PTE_TYPE(entry) == DT_PAGE) ? PAGE_PRESENT : 0);

	pinner.Unlock();

	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);

	return B_OK;
}


status_t
M68KVMTranslationMap040::QueryInterrupt(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	*_flags = 0;
	*_physical = 0;
	TRACE("040::QueryInterrupt(0x%lx,)\n", va);

	int index = VADDR_TO_PRENT(va);
	page_root_entry* pr = fPagingStructures->pgroot_virt;
	if (PRE_TYPE(pr[index]) != DT_ROOT) {
		// no page directory here
		return B_OK;
	}

	// map the page directory
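	// (the tables are smaller than B_PAGE_SIZE, so map the page containing
	// the table and re-apply the table's offset within that page)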
	phys_addr_t ppr = pr[index] & M68K_PRE_ADDRESS_MASK;
	page_directory_entry* pd = (page_directory_entry*)((char *)
		M68KPagingMethod040::Method()->PhysicalPageMapper()
		->InterruptGetPageTableAt(ppr & ~(B_PAGE_SIZE - 1))
		+ (ppr % B_PAGE_SIZE));

	index = VADDR_TO_PDENT(va);
	if (PDE_TYPE(pd[index]) != DT_DIR) {
		// no page table here
		return B_OK;
	}

	phys_addr_t ppd = pd[index] & M68K_PDE_ADDRESS_MASK;
	page_table_entry* pt = (page_table_entry*)((char *)
		M68KPagingMethod040::Method()->PhysicalPageMapper()
		->InterruptGetPageTableAt(ppd & ~(B_PAGE_SIZE - 1))
		+ (ppd % B_PAGE_SIZE));

	index = VADDR_TO_PTENT(va);
	if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
		phys_addr_t ppt = pt[index] & M68K_PIE_ADDRESS_MASK;
		pt = (page_table_entry*)((char *)
			M68KPagingMethod040::Method()->PhysicalPageMapper()
			->InterruptGetPageTableAt(ppt & ~(B_PAGE_SIZE - 1))
			+ (ppt % B_PAGE_SIZE));
		index = 0;
	}

	page_table_entry entry = pt[index];

	*_physical = entry & M68K_PTE_ADDRESS_MASK;

	// read in the page state flags
	if ((entry & M68K_PTE_SUPERVISOR) == 0) {
		*_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & M68K_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & M68K_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((PTE_TYPE(entry) == DT_PAGE) ? PAGE_PRESENT : 0);

	return B_OK;
}


status_t
M68KVMTranslationMap040::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
		attributes);

	return ENOSYS;
#if 0
	// compute protection flags
	uint32 newProtectionFlags = 0;
	if ((attributes & B_USER_PROTECTION) != 0) {
		newProtectionFlags = M68K_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newProtectionFlags |= M68K_PTE_WRITABLE;
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newProtectionFlags = M68K_PTE_WRITABLE;

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & M68K_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		struct thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
			pd[index] & M68K_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); index < 1024 && start < end;
				index++, start += B_PAGE_SIZE) {
			page_table_entry entry = pt[index];
			if ((entry & M68K_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("protect_tmap: protect page 0x%lx\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			page_table_entry oldEntry;
			while (true) {
				oldEntry = M68KPagingMethod040::TestAndSetPageTableEntry(
					&pt[index],
					(entry & ~(M68K_PTE_PROTECTION_MASK
							| M68K_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags
						| M68KPagingMethod040::MemoryTypeToPageTableEntryFlags(
							memoryType),
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could
				// have been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);
	return B_OK;
#endif
}


status_t
M68KVMTranslationMap040::ClearFlags(addr_t va, uint32 flags)
{
	return ENOSYS;
#if 0
	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & M68K_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? M68K_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? M68K_PTE_ACCESSED : 0);

	struct thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
		pd[index] & M68K_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	// clear out the flags we've been requested to clear
	page_table_entry oldEntry
		= M68KPagingMethod040::ClearPageTableEntryFlags(&pt[index],
			flagsToClear);

	pinner.Unlock();

	if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(va);

	return B_OK;
#endif
}


bool
M68KVMTranslationMap040::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_root_entry* pr = fPagingStructures->pgroot_virt;

	TRACE("M68KVMTranslationMap040::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

#if 0
	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & M68K_PDE_PRESENT) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
		pd[index] & M68K_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);

	// perform the deed
	page_table_entry oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = pt[index];
			if ((oldEntry & M68K_PTE_PRESENT) == 0) {
				// page mapping not valid
				return false;
			}

			if (oldEntry & M68K_PTE_ACCESSED) {
				// page was accessed -- just clear the flags
				oldEntry = M68KPagingMethod040::ClearPageTableEntryFlags(
					&pt[index], M68K_PTE_ACCESSED | M68K_PTE_DIRTY);
				break;
			}

			// page hasn't been accessed -- unmap it
			if (M68KPagingMethod040::TestAndSetPageTableEntry(&pt[index], 0,
					oldEntry) == oldEntry) {
				break;
			}

			// something changed -- check again
		}
	} else {
		oldEntry = M68KPagingMethod040::ClearPageTableEntryFlags(&pt[index],
			M68K_PTE_ACCESSED | M68K_PTE_DIRTY);
	}

	pinner.Unlock();

	_modified = (oldEntry & M68K_PTE_DIRTY) != 0;

	if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any
		// TLB.
		InvalidatePage(address);

		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & M68K_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

#endif
	return false;
}


M68KPagingStructures*
M68KVMTranslationMap040::PagingStructures() const
{
	return fPagingStructures;
}


inline void *
M68KVMTranslationMap040::MapperGetPageTableAt(phys_addr_t physicalAddress,
	bool indirect)
{
	// M68K fits several page tables in a single page...
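	// (e.g. a 256-byte page table at physical 0x12345600 is reached by
	// mapping the page at 0x12345000 and adding back the 0x600 offset)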
	uint32 offset = physicalAddress % B_PAGE_SIZE;
	ASSERT((indirect && (offset % 4) == 0) || (offset % SIZ_ROOTTBL) == 0);
	physicalAddress &= ~(B_PAGE_SIZE - 1);
	void *va = fPageMapper->GetPageTableAt(physicalAddress);
	return (void *)((addr_t)va + offset);
}
