xref: /haiku/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp (revision b671e9bbdbd10268a042b4f4cc4317ccd03d105e)
1 /*
2  * Copyright 2007, Haiku Inc. All rights reserved.
3  * Distributed under the terms of the MIT License.
4  *
5  * Authors:
6  * 		François Revol <revol@free.fr>
7  *
8  * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
9  * Distributed under the terms of the MIT License.
10  *
11  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
12  * Distributed under the terms of the NewOS License.
13  */
14 
15 #ifndef ARCH_M68K_MMU_TYPE
16 #error This file is included from arch_*_mmu.cpp
17 #endif
18 
19 /*  (mmu_man) Implementation details on 68030 and others:
20 
21 	Unlike on x86, we can't switch the context to another team simply by
22 	setting a register to another page directory, since we only have one
23 	page table tree containing both kernel and user address mappings.
24 	The 030 supports an arbitrary layout of the page directory tree,
25 	including a 1-bit first level (a 2-entry top-level table) that would map
26 	kernel and userland in a single place. But the 040 and later only
27 	support a fixed 7/7/6 split for 4K pages.
28 
29 	Since 68k SMP hardware is rare enough that we don't want to support it,
30 	we can take some shortcuts.
31 
32 	As we don't want separate user and kernel spaces, we'll use a single
33 	tree. With the 7/7/6 split the 2nd level would require 32KB of tables,
34 	which is small enough that we don't want to use the list hack from x86.
35 	XXX: we use the hack for now, check later
36 
37 	Since page directories/tables don't exactly fit a page, we stuff more
38 	than one per page, allocate them all at once, and add them to the tree
39 	at the same time. So all higher-level entries are guaranteed, modulo
40 	the number of tables per page, to be either all invalid or all present.
41  */
42 
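/*	For illustration only (the real lookups go through the VADDR_TO_PRENT/
	VADDR_TO_PDENT/VADDR_TO_PTENT macros from the MMU headers): with the
	7/7/6 split and 4K pages, a 32-bit virtual address breaks down roughly as

		rindex = (va >> 25) & 0x7f;	// 128 root table entries
		dindex = (va >> 18) & 0x7f;	// 128 entries per directory table
		pindex = (va >> 12) & 0x3f;	// 64 entries per page table
		offset = va & 0xfff;		// offset within the 4K page
 */
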
43 #include <KernelExport.h>
44 #include <kernel.h>
45 #include <heap.h>
46 #include <vm.h>
47 #include <vm_address_space.h>
48 #include <vm_page.h>
49 #include <vm_priv.h>
50 #include <int.h>
51 #include <boot/kernel_args.h>
52 #include <arch/vm_translation_map.h>
53 #include <arch/cpu.h>
54 #include <arch_mmu.h>
55 #include <stdlib.h>
56 
57 #include "generic_vm_physical_page_mapper.h"
58 #include "generic_vm_physical_page_ops.h"
59 
60 
61 #define TRACE_VM_TMAP
62 #ifdef TRACE_VM_TMAP
63 #	define TRACE(x) dprintf x
64 #else
65 #	define TRACE(x) ;
66 #endif
67 
68 // 4 MB of iospace
69 //#define IOSPACE_SIZE (4*1024*1024)
70 #define IOSPACE_SIZE (16*1024*1024)
71 // 256K = 2^6*4K
72 #define IOSPACE_CHUNK_SIZE (NUM_PAGEENT_PER_TBL*B_PAGE_SIZE)
73 
74 static page_table_entry *iospace_pgtables = NULL;
75 
76 #define PAGE_INVALIDATE_CACHE_SIZE 64
77 
78 // vm_translation object stuff
79 typedef struct vm_translation_map_arch_info {
80 	page_root_entry *rtdir_virt;
81 	page_root_entry *rtdir_phys;
82 	int num_invalidate_pages;
83 	addr_t pages_to_invalidate[PAGE_INVALIDATE_CACHE_SIZE];
84 } vm_translation_map_arch_info;
85 
86 #if 1//XXX:HOLE
87 static page_table_entry *page_hole = NULL;
88 static page_directory_entry *page_hole_pgdir = NULL;
89 #endif
90 static page_root_entry *sKernelPhysicalPageRoot = NULL;
91 static page_root_entry *sKernelVirtualPageRoot = NULL;
92 static addr_t sQueryPage = NULL;
93 //static page_table_entry *sQueryPageTable;
94 //static page_directory_entry *sQueryPageDir;
95 // MUST be aligned
96 static page_table_entry sQueryDesc __attribute__ (( aligned (4) ));
97 
98 static vm_translation_map *tmap_list;
99 static spinlock tmap_list_lock;
100 
101 static addr_t sIOSpaceBase;
102 
103 #define CHATTY_TMAP 0
104 
105 #if 0
106 // use P*E_TO_* and TA_TO_P*EA !
107 #define ADDR_SHIFT(x) ((x)>>12)
108 #define ADDR_REVERSE_SHIFT(x) ((x)<<12)
109 #endif
110 
111 #define FIRST_USER_PGROOT_ENT    (VADDR_TO_PRENT(USER_BASE))
112 #define FIRST_USER_PGDIR_ENT    (VADDR_TO_PDENT(USER_BASE))
113 #define NUM_USER_PGROOT_ENTS     (VADDR_TO_PRENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64 * 128)))
114 #define NUM_USER_PGDIR_ENTS     (VADDR_TO_PDENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64)))
115 #define FIRST_KERNEL_PGROOT_ENT  (VADDR_TO_PRENT(KERNEL_BASE))
116 #define FIRST_KERNEL_PGDIR_ENT  (VADDR_TO_PDENT(KERNEL_BASE))
117 #define NUM_KERNEL_PGROOT_ENTS   (VADDR_TO_PRENT(KERNEL_SIZE))
118 #define NUM_KERNEL_PGDIR_ENTS   (VADDR_TO_PDENT(KERNEL_SIZE))
119 #define IS_KERNEL_MAP(map)		(map->arch_data->rtdir_phys == sKernelPhysicalPageRoot)
120 
121 static status_t early_query(addr_t va, addr_t *out_physical);
122 static status_t get_physical_page_tmap_internal(addr_t pa, addr_t *va, uint32 flags);
123 static status_t put_physical_page_tmap_internal(addr_t va);
124 
125 static void flush_tmap(vm_translation_map *map);
126 
127 
128 #warning M68K: RENAME
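// Returns the physical address of this map's page root table, for use when
// switching address spaces (presumably loaded into the MMU root pointer by
// the context switch code).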
129 static void *
130 _m68k_translation_map_get_pgdir(vm_translation_map *map)
131 {
132 	return map->arch_data->rtdir_phys;
133 }
134 
135 
136 static inline void
137 init_page_root_entry(page_root_entry *entry)
138 {
139 	// DT_INVALID is 0
140 	*(page_root_entry_scalar *)entry = DFL_ROOTENT_VAL;
141 }
142 
143 
144 static inline void
145 update_page_root_entry(page_root_entry *entry, page_root_entry *with)
146 {
147 	// update page root entry atomically
148 	*(page_root_entry_scalar *)entry = *(page_root_entry_scalar *)with;
149 }
150 
151 
152 static inline void
153 init_page_directory_entry(page_directory_entry *entry)
154 {
155 	*(page_directory_entry_scalar *)entry = DFL_DIRENT_VAL;
156 }
157 
158 
159 static inline void
160 update_page_directory_entry(page_directory_entry *entry, page_directory_entry *with)
161 {
162 	// update page directory entry atomically
163 	*(page_directory_entry_scalar *)entry = *(page_directory_entry_scalar *)with;
164 }
165 
166 
167 static inline void
168 init_page_table_entry(page_table_entry *entry)
169 {
170 	*(page_table_entry_scalar *)entry = DFL_PAGEENT_VAL;
171 }
172 
173 
174 static inline void
175 update_page_table_entry(page_table_entry *entry, page_table_entry *with)
176 {
177 	// update page table entry atomically
178 	// XXX: is it ?? (long desc?)
179 	*(page_table_entry_scalar *)entry = *(page_table_entry_scalar *)with;
180 }
181 
182 
183 static inline void
184 init_page_indirect_entry(page_indirect_entry *entry)
185 {
186 #warning M68K: is it correct ?
187 	*(page_indirect_entry_scalar *)entry = DFL_PAGEENT_VAL;
188 }
189 
190 
191 static inline void
192 update_page_indirect_entry(page_indirect_entry *entry, page_indirect_entry *with)
193 {
194 	// update page table entry atomically
195 	// XXX: is it ?? (long desc?)
196 	*(page_indirect_entry_scalar *)entry = *(page_indirect_entry_scalar *)with;
197 }
198 
199 
200 #warning M68K: allocate all kernel pgdirs at boot and remove this (also don't remove them anymore from unmap)
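// Propagates a page root entry to the root tables of all translation maps in
// the global list, so that newly created kernel-space page directories become
// visible in every address space.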
201 static void
202 _update_all_pgdirs(int index, page_root_entry e)
203 {
204 	vm_translation_map *entry;
205 	unsigned int state = disable_interrupts();
206 
207 	acquire_spinlock(&tmap_list_lock);
208 
209 	for(entry = tmap_list; entry != NULL; entry = entry->next)
210 		entry->arch_data->rtdir_virt[index] = e;
211 
212 	release_spinlock(&tmap_list_lock);
213 	restore_interrupts(state);
214 }
215 
216 
217 // This is used before the VM is fully up; it relies on the
218 // transparent translation of the first 256MB
219 // as set up by the bootloader.
220 static status_t
221 early_query(addr_t va, addr_t *_physicalAddress)
222 {
223 	page_root_entry *pr = sKernelVirtualPageRoot;
224 	page_directory_entry *pd;
225 	page_indirect_entry *pi;
226 	page_table_entry *pt;
227 	addr_t pa;
228 	int32 index;
229 	status_t err = B_ERROR;	// no pagetable here
230 	TRACE(("%s(%p,)\n", __FUNCTION__, va));
231 
232 	index = VADDR_TO_PRENT(va);
233 	TRACE(("%s: pr[%d].type %d\n", __FUNCTION__, index, pr[index].type));
234 	if (pr && pr[index].type == DT_ROOT) {
235 		pa = PRE_TO_TA(pr[index]);
236 		// pa == va when in TT
237 		// and no need to fiddle with cache
238 		pd = (page_directory_entry *)pa;
239 
240 		index = VADDR_TO_PDENT(va);
241 		TRACE(("%s: pd[%d].type %d\n", __FUNCTION__, index,
242 				pd?(pd[index].type):-1));
243 		if (pd && pd[index].type == DT_DIR) {
244 			pa = PDE_TO_TA(pd[index]);
245 			pt = (page_table_entry *)pa;
246 
247 			index = VADDR_TO_PTENT(va);
248 			TRACE(("%s: pt[%d].type %d\n", __FUNCTION__, index,
249 					pt?(pt[index].type):-1));
250 			if (pt && pt[index].type == DT_INDIRECT) {
251 				pi = (page_indirect_entry *)pt;
252 				pa = PIE_TO_TA(pi[index]);
253 				pt = (page_table_entry *)pa;
254 				index = 0; // single descriptor
255 			}
256 
257 			if (pt && pt[index].type == DT_PAGE) {
258 				*_physicalAddress = PTE_TO_PA(pt[index]);
259 				// we should only be passed page va, but just in case.
260 				*_physicalAddress += va % B_PAGE_SIZE;
261 				err = B_OK;
262 			}
263 		}
264 	}
265 
266 	return err;
267 }
268 
269 
270 /*!	Acquires the map's recursive lock, and resets the invalidate pages counter
271 	in case it's the first locking recursion.
272 */
273 static status_t
274 lock_tmap(vm_translation_map *map)
275 {
276 	TRACE(("lock_tmap: map %p\n", map));
277 
278 	recursive_lock_lock(&map->lock);
279 	if (recursive_lock_get_recursion(&map->lock) == 1) {
280 		// we were the first one to grab the lock
281 		TRACE(("clearing invalidated page count\n"));
282 		map->arch_data->num_invalidate_pages = 0;
283 	}
284 
285 	return B_OK;
286 }
287 
288 
289 /*!	Unlocks the map and, if we're actually losing the recursive lock,
290 	flushes all pending changes of this map (i.e. flushes TLB caches as
291 	needed).
292 */
293 static status_t
294 unlock_tmap(vm_translation_map *map)
295 {
296 	TRACE(("unlock_tmap: map %p\n", map));
297 
298 	if (recursive_lock_get_recursion(&map->lock) == 1) {
299 		// we're about to release it for the last time
300 		flush_tmap(map);
301 	}
302 
303 	recursive_lock_unlock(&map->lock);
304 	return B_OK;
305 }
306 
307 
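// Removes the map from the global list and frees the user-space page tables
// and directories it still references, as well as the root table itself; the
// shared kernel tables are left untouched.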
308 static void
309 destroy_tmap(vm_translation_map *map)
310 {
311 	int state;
312 	vm_translation_map *entry;
313 	vm_translation_map *last = NULL;
314 	unsigned int i, j;
315 
316 	if (map == NULL)
317 		return;
318 
319 	// remove it from the tmap list
320 	state = disable_interrupts();
321 	acquire_spinlock(&tmap_list_lock);
322 
323 	entry = tmap_list;
324 	while (entry != NULL) {
325 		if (entry == map) {
326 			if (last != NULL)
327 				last->next = entry->next;
328 			else
329 				tmap_list = entry->next;
330 
331 			break;
332 		}
333 		last = entry;
334 		entry = entry->next;
335 	}
336 
337 	release_spinlock(&tmap_list_lock);
338 	restore_interrupts(state);
339 
340 	if (map->arch_data->rtdir_virt != NULL) {
341 		// cycle through and free all of the user space pgtables;
342 		// since the size of the tables doesn't match B_PAGE_SIZE,
343 		// we allocate several at once and, based on modulos,
344 		// make sure they are either all in the tree or none at all.
345 		for (i = VADDR_TO_PRENT(USER_BASE); i <= VADDR_TO_PRENT(USER_BASE + (USER_SIZE - 1)); i++) {
346 			addr_t pgdir_pn;
347 			page_directory_entry *pgdir;
348 			vm_page *dirpage;
349 
350 			if (map->arch_data->rtdir_virt[i].type == DT_INVALID)
351 				continue;
352 			if (map->arch_data->rtdir_virt[i].type != DT_ROOT) {
353 				panic("rtdir[%d]: buggy descriptor type", i);
354 				return;
355 			}
356 			// suboptimal (done 8 times)
357 			pgdir_pn = PRE_TO_PA(map->arch_data->rtdir_virt[i]);
358 			dirpage = vm_lookup_page(pgdir_pn);
359 			pgdir = &(((page_directory_entry *)dirpage)[i%NUM_DIRTBL_PER_PAGE]);
360 
361 			for (j = 0; j < NUM_DIRENT_PER_TBL; j+=NUM_PAGETBL_PER_PAGE) {
362 				addr_t pgtbl_pn;
363 				page_table_entry *pgtbl;
364 				vm_page *page;
365 				if (pgdir[j].type == DT_INVALID)
366 					continue;
367 				if (pgdir[j].type != DT_DIR) {
368 					panic("rtdir[%d][%d]: buggy descriptor type", i, j);
369 					return;
370 				}
371 				pgtbl_pn = PDE_TO_PN(pgdir[j]);
372 				page = vm_lookup_page(pgtbl_pn);
373 				pgtbl = (page_table_entry *)page;
374 
375 				if (!page) {
376 					panic("destroy_tmap: didn't find pgtable page\n");
377 					return;
378 				}
379 				vm_page_set_state(page, PAGE_STATE_FREE);
380 			}
381 			if (((i+1)%NUM_DIRTBL_PER_PAGE) == 0)
382 				vm_page_set_state(dirpage, PAGE_STATE_FREE);
383 		}
384 		free(map->arch_data->rtdir_virt);
385 	}
386 
387 	free(map->arch_data);
388 	recursive_lock_destroy(&map->lock);
389 }
390 
391 
392 static void
393 put_pgdir_in_pgroot(page_root_entry *entry,
394 	addr_t pgdir_phys, uint32 attributes)
395 {
396 	page_root_entry dir;
397 	// put it in the page root
398 	init_page_root_entry(&dir);
399 	dir.addr = TA_TO_PREA(pgdir_phys);
400 
401 	// ToDo: we ignore the attributes of the page table - for compatibility
402 	//	with BeOS we allow having user accessible areas in the kernel address
403 	//	space. This is currently being used by some drivers, mainly for the
404 	//	frame buffer. Our current real time data implementation makes use of
405 	//	this fact, too.
406 	//	We might want to get rid of this possibility one day, especially if
407 	//	we intend to port it to a platform that does not support this.
408 	//dir.user = 1;
409 	//dir.rw = 1;
410 	dir.type = DT_ROOT;
411 	update_page_root_entry(entry, &dir);
412 }
413 
414 
415 static void
416 put_pgtable_in_pgdir(page_directory_entry *entry,
417 	addr_t pgtable_phys, uint32 attributes)
418 {
419 	page_directory_entry table;
420 	// put it in the pgdir
421 	init_page_directory_entry(&table);
422 	table.addr = TA_TO_PDEA(pgtable_phys);
423 
424 	// ToDo: we ignore the attributes of the page table - for compatibility
425 	//	with BeOS we allow having user accessible areas in the kernel address
426 	//	space. This is currently being used by some drivers, mainly for the
427 	//	frame buffer. Our current real time data implementation makes use of
428 	//	this fact, too.
429 	//	We might want to get rid of this possibility one day, especially if
430 	//	we intend to port it to a platform that does not support this.
431 	//table.user = 1;
432 	//table.rw = 1;
433 	table.type = DT_DIR;
434 	update_page_directory_entry(entry, &table);
435 }
436 
437 
438 static void
439 put_page_table_entry_in_pgtable(page_table_entry *entry,
440 	addr_t physicalAddress, uint32 attributes, bool globalPage)
441 {
442 	page_table_entry page;
443 	init_page_table_entry(&page);
444 
445 	page.addr = TA_TO_PTEA(physicalAddress);
446 
447 	// if the page is user accessible, it's automatically
448 	// accessible in kernel space, too (but with the same
449 	// protection)
450 	page.supervisor = (attributes & B_USER_PROTECTION) == 0;
451 	if (page.supervisor)
452 		page.write_protect = (attributes & B_KERNEL_WRITE_AREA) == 0;
453 	else
454 		page.write_protect = (attributes & B_WRITE_AREA) == 0;
455 	page.type = DT_PAGE;
456 
457 #ifdef PAGE_HAS_GLOBAL_BIT
458 	if (globalPage)
459 		page.global = 1;
460 #endif
461 
462 	// put it in the page table
463 	update_page_table_entry(entry, &page);
464 }
465 
466 
467 static void
468 put_page_indirect_entry_in_pgtable(page_indirect_entry *entry,
469 	addr_t physicalAddress, uint32 attributes, bool globalPage)
470 {
471 	page_indirect_entry page;
472 	init_page_indirect_entry(&page);
473 
474 	page.addr = TA_TO_PIEA(physicalAddress);
475 	page.type = DT_INDIRECT;
476 
477 	// there are no protection bits in indirect descriptor usually.
478 
479 	// put it in the page table
480 	update_page_indirect_entry(entry, &page);
481 }
482 
483 
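// Upper bound on the number of physical pages map_tmap() might have to
// allocate for intermediate directory/page tables when mapping the given
// range.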
484 static size_t
485 map_max_pages_need(vm_translation_map */*map*/, addr_t start, addr_t end)
486 {
487 	size_t need;
488 	size_t pgdirs;
489 	// If start == 0, the actual base address is not yet known to the caller
490 	// and we shall assume the worst case.
491 	if (start == 0) {
492 #warning M68K: FIXME?
493 		start = (1023) * B_PAGE_SIZE;
494 		end += start;
495 	}
496 	pgdirs = VADDR_TO_PRENT(end) + 1 - VADDR_TO_PRENT(start);
497 	// how much for page directories
498 	need = (pgdirs + NUM_DIRTBL_PER_PAGE - 1) / NUM_DIRTBL_PER_PAGE;
499 	// and page tables themselves
500 	need += ((pgdirs * NUM_DIRENT_PER_TBL) + NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE;
501 
502 	// better rounding when only 1 pgdir
503 	// XXX: do better for other cases
504 	if (pgdirs == 1) {
505 		need = 1;
506 		need += (VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start) + NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE;
507 	}
508 
509 	return need;
510 }
511 
512 
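// Maps the page at va to pa with the given protection, allocating the
// intermediate page directory and page table groups on demand. Since several
// tables share one physical page, a whole aligned group of higher-level
// entries is filled in at once (see the comment at the top of this file).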
513 static status_t
514 map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
515 {
516 	page_root_entry *pr;
517 	page_directory_entry *pd;
518 	page_table_entry *pt;
519 	addr_t pd_pg, pt_pg;
520 	unsigned int rindex, dindex, pindex;
521 	int err;
522 
523 	TRACE(("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va));
524 
525 /*
526 	dprintf("pgdir at 0x%x\n", pgdir);
527 	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
528 	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
529 	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
530 	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
531 	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
532 */
533 	pr = map->arch_data->rtdir_virt;
534 
535 	// check to see if a page directory exists for this range
536 	rindex = VADDR_TO_PRENT(va);
537 	if (pr[rindex].type != DT_ROOT) {
538 		addr_t pgdir;
539 		vm_page *page;
540 		unsigned int i;
541 
542 		// we need to allocate a page of page directory tables
543 		page = vm_page_allocate_page(PAGE_STATE_CLEAR, true);
544 
545 		// mark the page WIRED
546 		vm_page_set_state(page, PAGE_STATE_WIRED);
547 
548 		pgdir = page->physical_page_number * B_PAGE_SIZE;
549 
550 		TRACE(("map_tmap: asked for free page for pgdir. 0x%lx\n", pgdir));
551 
552 		// for each pgdir on the allocated page:
553 		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
554 			unsigned aindex = rindex & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
555 			page_root_entry *apr = &pr[aindex + i];
556 
557 			// put it in the page root
558 			put_pgdir_in_pgroot(apr, pgdir, attributes
559 				| (attributes & B_USER_PROTECTION ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
560 
561 			// update any other page roots, if it maps kernel space
562 			//XXX: suboptimal, should batch them
563 			if ((aindex+i) >= FIRST_KERNEL_PGROOT_ENT
564 				&& (aindex+i) < (FIRST_KERNEL_PGROOT_ENT + NUM_KERNEL_PGROOT_ENTS))
565 				_update_all_pgdirs((aindex+i), pr[aindex+i]);
566 
567 			pgdir += SIZ_DIRTBL;
568 		}
569 #warning M68K: really mean map_count++ ??
570 		map->map_count++;
571 	}
572 	// now, fill in the pentry
573 	do {
574 		err = get_physical_page_tmap_internal(PRE_TO_PA(pr[rindex]),
575 				&pd_pg, PHYSICAL_PAGE_DONT_WAIT);
576 	} while (err < 0);
577 	pd = (page_directory_entry *)pd_pg;
578 	// we want the table at rindex, not at rindex%(tbl/page)
579 	pd += (rindex % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
580 
581 	// check to see if a page table exists for this range
582 	dindex = VADDR_TO_PDENT(va);
583 	if (pd[dindex].type != DT_DIR) {
584 		addr_t pgtable;
585 		vm_page *page;
586 		unsigned int i;
587 
588 		// we need to allocate a pgtable
589 		page = vm_page_allocate_page(PAGE_STATE_CLEAR, true);
590 
591 		// mark the page WIRED
592 		vm_page_set_state(page, PAGE_STATE_WIRED);
593 
594 		pgtable = page->physical_page_number * B_PAGE_SIZE;
595 
596 		TRACE(("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable));
597 
598 		// for each pgtable on the allocated page:
599 		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
600 			unsigned aindex = dindex & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
601 			page_directory_entry *apd = &pd[aindex + i];
602 
603 			// put in the pgdir
604 			put_pgtable_in_pgdir(apd, pgtable, attributes
605 				| (attributes & B_USER_PROTECTION ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
606 
607 			// no need to update other translation maps for kernel space;
608 			// the root level already points to us.
609 
610 			pgtable += SIZ_PAGETBL;
611 		}
612 
613 #warning M68K: really mean map_count++ ??
614 		map->map_count++;
615 	}
616 	// now, fill in the pentry
617 	do {
618 		err = get_physical_page_tmap_internal(PDE_TO_PA(pd[dindex]),
619 				&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
620 	} while (err < 0);
621 	pt = (page_table_entry *)pt_pg;
622 	// we want the table at dindex, not at dindex%(tbl/page)
623 	pt += (dindex % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
624 
625 	pindex = VADDR_TO_PTENT(va);
626 
627 	put_page_table_entry_in_pgtable(&pt[pindex], pa, attributes,
628 		IS_KERNEL_MAP(map));
629 
630 	put_physical_page_tmap_internal(pt_pg);
631 	put_physical_page_tmap_internal(pd_pg);
632 
633 	if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
634 		map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = va;
635 
636 	map->arch_data->num_invalidate_pages++;
637 
638 	map->map_count++;
639 
640 	return 0;
641 }
642 
643 
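// Invalidates all page mappings in the given range. The page tables
// themselves are left in place; the affected pages are only queued for TLB
// invalidation on the next flush.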
644 static status_t
645 unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
646 {
647 	page_table_entry *pt;
648 	page_directory_entry *pd;
649 	page_root_entry *pr = map->arch_data->rtdir_virt;
650 	addr_t pd_pg, pt_pg;
651 	status_t status;
652 	int index;
653 
654 	start = ROUNDDOWN(start, B_PAGE_SIZE);
655 	end = ROUNDUP(end, B_PAGE_SIZE);
656 
657 	TRACE(("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end));
658 
659 restart:
660 	if (start >= end)
661 		return B_OK;
662 
663 	index = VADDR_TO_PRENT(start);
664 	if (pr[index].type != DT_ROOT) {
665 		// no pagedir here, move the start up to access the next page table
666 		start = ROUNDUP(start + 1, B_PAGE_SIZE);
667 		goto restart;
668 	}
669 
670 	do {
671 		status = get_physical_page_tmap_internal(PRE_TO_PA(pr[index]),
672 			&pd_pg, PHYSICAL_PAGE_DONT_WAIT);
673 	} while (status < B_OK);
674 	pd = (page_directory_entry *)pd_pg;
675 	// we want the table at rindex, not at rindex%(tbl/page)
676 	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
677 
678 	index = VADDR_TO_PDENT(start);
679 	if (pd[index].type != DT_DIR) {
680 		// no pagetable here, move the start up to access the next page table
681 		start = ROUNDUP(start + 1, B_PAGE_SIZE);
682 		put_physical_page_tmap_internal(pd_pg);
683 		goto restart;
684 	}
685 
686 	do {
687 		status = get_physical_page_tmap_internal(PDE_TO_PA(pd[index]),
688 			&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
689 	} while (status < B_OK);
690 	pt = (page_table_entry *)pt_pg;
691 	// we want the table at the dir index, not at index%(tbl/page)
692 	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
693 
694 	for (index = VADDR_TO_PTENT(start);
695 			(index < NUM_PAGEENT_PER_TBL) && (start < end);
696 			index++, start += B_PAGE_SIZE) {
697 		if (pt[index].type != DT_PAGE && pt[index].type != DT_INDIRECT) {
698 			// page mapping not valid
699 			continue;
700 		}
701 
702 		TRACE(("unmap_tmap: removing page 0x%lx\n", start));
703 
704 		pt[index].type = DT_INVALID;
705 		map->map_count--;
706 
707 		if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
708 			map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = start;
709 
710 		map->arch_data->num_invalidate_pages++;
711 	}
712 
713 	put_physical_page_tmap_internal(pt_pg);
714 	put_physical_page_tmap_internal(pd_pg);
715 
716 	goto restart;
717 }
718 
719 // XXX: 040 should be able to do that with PTEST (but not 030 or 060)
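// Variant of query_tmap() that can be used from interrupt context: instead of
// going through the physical page mapper it walks the tree by remapping
// sQueryPage on the fly via the statically allocated indirect descriptor
// sQueryDesc.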
720 static status_t
721 query_tmap_interrupt(vm_translation_map *map, addr_t va, addr_t *_physical,
722 	uint32 *_flags)
723 {
724 	page_root_entry *pr = map->arch_data->rtdir_virt;
725 	page_directory_entry *pd;
726 	page_indirect_entry *pi;
727 	page_table_entry *pt;
728 	addr_t physicalPageTable;
729 	int32 index;
730 	status_t err = B_ERROR;	// no pagetable here
731 
732 	if (sQueryPage == NULL)
733 		return err; // not yet initialized !?
734 
735 	index = VADDR_TO_PRENT(va);
736 	if (pr && pr[index].type == DT_ROOT) {
737 		put_page_table_entry_in_pgtable(&sQueryDesc, PRE_TO_TA(pr[index]), B_KERNEL_READ_AREA, false);
738 		arch_cpu_invalidate_TLB_range(sQueryPage, sQueryPage);
739 		pd = (page_directory_entry *)sQueryPage;
740 
741 		index = VADDR_TO_PDENT(va);
742 		if (pd && pd[index].type == DT_DIR) {
743 			put_page_table_entry_in_pgtable(&sQueryDesc, PDE_TO_TA(pd[index]), B_KERNEL_READ_AREA, false);
744 			arch_cpu_invalidate_TLB_range(sQueryPage, sQueryPage);
745 			pt = (page_table_entry *)sQueryPage;
746 
747 			index = VADDR_TO_PTENT(va);
748 			if (pt && pt[index].type == DT_INDIRECT) {
749 				pi = (page_indirect_entry *)pt;
750 				put_page_table_entry_in_pgtable(&sQueryDesc, PIE_TO_TA(pi[index]), B_KERNEL_READ_AREA, false);
751 				arch_cpu_invalidate_TLB_range(sQueryPage, sQueryPage);
752 				pt = (page_table_entry *)sQueryPage;
753 				index = 0; // single descriptor
754 			}
755 
756 			if (pt /*&& pt[index].type == DT_PAGE*/) {
757 				*_physical = PTE_TO_PA(pt[index]);
758 				// we should only be passed page va, but just in case.
759 				*_physical += va % B_PAGE_SIZE;
760 				*_flags |= ((pt[index].write_protect ? 0 : B_KERNEL_WRITE_AREA) | B_KERNEL_READ_AREA)
761 						| (pt[index].dirty ? PAGE_MODIFIED : 0)
762 						| (pt[index].accessed ? PAGE_ACCESSED : 0)
763 						| ((pt[index].type == DT_PAGE) ? PAGE_PRESENT : 0);
764 				err = B_OK;
765 			}
766 		}
767 	}
768 
769 	// unmap the pg table from the indirect desc.
770 	sQueryDesc.type = DT_INVALID;
771 
772 	return err;
773 }
774 
775 
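// Returns the physical address and protection/status flags of whatever is
// currently mapped at va, following an indirect descriptor if one is present.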
776 static status_t
777 query_tmap(vm_translation_map *map, addr_t va, addr_t *_physical, uint32 *_flags)
778 {
779 	page_table_entry *pt;
780 	page_indirect_entry *pi;
781 	page_directory_entry *pd;
782 	page_root_entry *pr = map->arch_data->rtdir_virt;
783 	addr_t pd_pg, pt_pg, pi_pg;
784 	status_t status;
785 	int32 index;
786 
787 	// default the flags to not present
788 	*_flags = 0;
789 	*_physical = 0;
790 
791 	index = VADDR_TO_PRENT(va);
792 	if (pr[index].type != DT_ROOT) {
793 		// no pagetable here
794 		return B_NO_ERROR;
795 	}
796 
797 	do {
798 		status = get_physical_page_tmap_internal(PRE_TO_PA(pr[index]),
799 			&pd_pg, PHYSICAL_PAGE_DONT_WAIT);
800 	} while (status < B_OK);
801 	pd = (page_directory_entry *)pd_pg;
802 	// we want the table at rindex, not at rindex%(tbl/page)
803 	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
804 
805 
806 	index = VADDR_TO_PDENT(va);
807 	if (pd[index].type != DT_DIR) {
808 		// no pagetable here
809 		put_physical_page_tmap_internal(pd_pg);
810 		return B_NO_ERROR;
811 	}
812 
813 	do {
814 		status = get_physical_page_tmap_internal(PDE_TO_PA(pd[index]),
815 			&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
816 	} while (status < B_OK);
817 	pt = (page_table_entry *)pt_pg;
818 	// we want the table at the dir index, not at index%(tbl/page)
819 	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
820 
821 	index = VADDR_TO_PTENT(va);
822 
823 	// handle indirect descriptor
824 	if (pt[index].type == DT_INDIRECT) {
825 		pi = (page_indirect_entry *)pt;
826 		pi_pg = pt_pg;
827 		do {
828 			status = get_physical_page_tmap_internal(PIE_TO_PA(pi[index]),
829 				&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
830 		} while (status < B_OK);
831 		pt = (page_table_entry *)pt_pg;
832 		// add offset from start of page
833 		pt += PIE_TO_PO(pi[index]) / sizeof(page_table_entry);
834 		// release the indirect table page
835 		put_physical_page_tmap_internal(pi_pg);
836 	}
837 
838 	*_physical = PTE_TO_PA(pt[index]);
839 
840 	// read in the page state flags
841 	if (!pt[index].supervisor)
842 		*_flags |= (pt[index].write_protect ? 0 : B_WRITE_AREA) | B_READ_AREA;
843 
844 	*_flags |= (pt[index].write_protect ? 0 : B_KERNEL_WRITE_AREA)
845 		| B_KERNEL_READ_AREA
846 		| (pt[index].dirty ? PAGE_MODIFIED : 0)
847 		| (pt[index].accessed ? PAGE_ACCESSED : 0)
848 		| ((pt[index].type == DT_PAGE) ? PAGE_PRESENT : 0);
849 
850 	put_physical_page_tmap_internal(pt_pg);
851 	put_physical_page_tmap_internal(pd_pg);
852 
853 	TRACE(("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va));
854 
855 	return B_OK;
856 }
857 
858 
859 static addr_t
860 get_mapped_size_tmap(vm_translation_map *map)
861 {
862 	return map->map_count;
863 }
864 
865 
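// Changes the protection bits of all existing mappings in the given range;
// ranges without page directories or page tables are simply skipped.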
866 static status_t
867 protect_tmap(vm_translation_map *map, addr_t start, addr_t end, uint32 attributes)
868 {
869 	page_table_entry *pt;
870 	page_directory_entry *pd;
871 	page_root_entry *pr = map->arch_data->rtdir_virt;
872 	addr_t pd_pg, pt_pg;
873 	status_t status;
874 	int index;
875 
876 	start = ROUNDDOWN(start, B_PAGE_SIZE);
877 	end = ROUNDUP(end, B_PAGE_SIZE);
878 
879 	TRACE(("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end, attributes));
880 
881 restart:
882 	if (start >= end)
883 		return B_OK;
884 
885 	index = VADDR_TO_PRENT(start);
886 	if (pr[index].type != DT_ROOT) {
887 		// no pagedir here, move the start up to access the next page table
888 		start = ROUNDUP(start + 1, B_PAGE_SIZE);
889 		goto restart;
890 	}
891 
892 	do {
893 		status = get_physical_page_tmap_internal(PRE_TO_PA(pr[index]),
894 			&pd_pg, PHYSICAL_PAGE_DONT_WAIT);
895 	} while (status < B_OK);
896 	pd = (page_directory_entry *)pd_pg;
897 	// we want the table at rindex, not at rindex%(tbl/page)
898 	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
899 
900 	index = VADDR_TO_PDENT(start);
901 	if (pd[index].type != DT_DIR) {
902 		// no pagetable here, move the start up to access the next page table
903 		start = ROUNDUP(start + 1, B_PAGE_SIZE);
904 		put_physical_page_tmap_internal(pd_pg);
905 		goto restart;
906 	}
907 
908 	do {
909 		status = get_physical_page_tmap_internal(PDE_TO_PA(pd[index]),
910 			&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
911 	} while (status < B_OK);
912 	pt = (page_table_entry *)pt_pg;
913 	// we want the table at the dir index, not at index%(tbl/page)
914 	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
915 
916 	for (index = VADDR_TO_PTENT(start);
917 			(index < NUM_PAGEENT_PER_TBL) && (start < end);
918 			index++, start += B_PAGE_SIZE) {
919 		// XXX: handle indirect ?
920 		if (pt[index].type != DT_PAGE /*&& pt[index].type != DT_INDIRECT*/) {
921 			// page mapping not valid
922 			continue;
923 		}
924 
925 		TRACE(("protect_tmap: protect page 0x%lx\n", start));
926 
927 		pt[index].supervisor = (attributes & B_USER_PROTECTION) == 0;
928 		if ((attributes & B_USER_PROTECTION) != 0)
929 			pt[index].write_protect = (attributes & B_WRITE_AREA) == 0;
930 		else
931 			pt[index].write_protect = (attributes & B_KERNEL_WRITE_AREA) == 0;
932 
933 		if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
934 			map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = start;
935 
936 		map->arch_data->num_invalidate_pages++;
937 	}
938 
939 	put_physical_page_tmap_internal(pt_pg);
940 	put_physical_page_tmap_internal(pd_pg);
941 
942 	goto restart;
943 }
944 
945 
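// Clears the requested accessed/modified flags of the page mapped at va and
// queues a TLB invalidation if anything actually changed.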
946 static status_t
947 clear_flags_tmap(vm_translation_map *map, addr_t va, uint32 flags)
948 {
949 	page_table_entry *pt;
950 	page_indirect_entry *pi;
951 	page_directory_entry *pd;
952 	page_root_entry *pr = map->arch_data->rtdir_virt;
953 	addr_t pd_pg, pt_pg, pi_pg;
954 	status_t status;
955 	int index;
956 	int tlb_flush = false;
957 
958 	index = VADDR_TO_PRENT(va);
959 	if (pr[index].type != DT_ROOT) {
960 		// no pagetable here
961 		return B_NO_ERROR;
962 	}
963 
964 	do {
965 		status = get_physical_page_tmap_internal(PRE_TO_PA(pr[index]),
966 			&pd_pg, PHYSICAL_PAGE_DONT_WAIT);
967 	} while (status < B_OK);
968 	pd = (page_directory_entry *)pd_pg;
969 	// we want the table at rindex, not at rindex%(tbl/page)
970 	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
971 
972 
973 	index = VADDR_TO_PDENT(va);
974 	if (pd[index].type != DT_DIR) {
975 		// no pagetable here
976 		put_physical_page_tmap_internal(pd_pg);
977 		return B_NO_ERROR;
978 	}
979 
980 	do {
981 		status = get_physical_page_tmap_internal(PDE_TO_PA(pd[index]),
982 			&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
983 	} while (status < B_OK);
984 	pt = (page_table_entry *)pt_pg;
985 	// we want the table at the dir index, not at index%(tbl/page)
986 	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
987 
988 	index = VADDR_TO_PTENT(va);
989 
990 	// handle indirect descriptor
991 	if (pt[index].type == DT_INDIRECT) {
992 		pi = (page_indirect_entry *)pt;
993 		pi_pg = pt_pg;
994 		do {
995 			status = get_physical_page_tmap_internal(PIE_TO_PA(pi[index]),
996 				&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
997 		} while (status < B_OK);
998 		pt = (page_table_entry *)pt_pg;
999 		// add offset from start of page
1000 		pt += PIE_TO_PO(pi[index]) / sizeof(page_table_entry);
1001 		// release the indirect table page
1002 		put_physical_page_tmap_internal(pi_pg);
1003 	}
1004 
1005 	// clear out the flags we've been requested to clear
1006 	if (flags & PAGE_MODIFIED) {
1007 		pt[index].dirty = 0;
1008 		tlb_flush = true;
1009 	}
1010 	if (flags & PAGE_ACCESSED) {
1011 		pt[index].accessed = 0;
1012 		tlb_flush = true;
1013 	}
1014 
1015 	put_physical_page_tmap_internal(pt_pg);
1016 	put_physical_page_tmap_internal(pd_pg);
1017 
1018 	if (tlb_flush) {
1019 		if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
1020 			map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = va;
1021 
1022 		map->arch_data->num_invalidate_pages++;
1023 	}
1024 
1025 	return B_OK;
1026 }
1027 
1028 
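// Performs the TLB invalidations queued on this map. If more pages were
// queued than fit in the cache, the whole TLB (or just its user entries) is
// invalidated instead of individual pages.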
1029 static void
1030 flush_tmap(vm_translation_map *map)
1031 {
1032 	cpu_status state;
1033 
1034 	if (map->arch_data->num_invalidate_pages <= 0)
1035 		return;
1036 
1037 	state = disable_interrupts();
1038 
1039 	if (map->arch_data->num_invalidate_pages > PAGE_INVALIDATE_CACHE_SIZE) {
1040 		// invalidate all pages
1041 		TRACE(("flush_tmap: %d pages to invalidate, invalidate all\n",
1042 			map->arch_data->num_invalidate_pages));
1043 
1044 		if (IS_KERNEL_MAP(map)) {
1045 			arch_cpu_global_TLB_invalidate();
1046 		} else {
1047 			arch_cpu_user_TLB_invalidate();
1048 		}
1049 	} else {
1050 		TRACE(("flush_tmap: %d pages to invalidate, invalidate list\n",
1051 			map->arch_data->num_invalidate_pages));
1052 
1053 		arch_cpu_invalidate_TLB_list(map->arch_data->pages_to_invalidate,
1054 			map->arch_data->num_invalidate_pages);
1055 	}
1056 	map->arch_data->num_invalidate_pages = 0;
1057 
1058 	restore_interrupts(state);
1059 }
1060 
1061 
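// Callback for the generic physical page mapper: points one iospace chunk's
// worth of wired page table entries at the given physical range and flushes
// the corresponding TLB entries.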
1062 static status_t
1063 map_iospace_chunk(addr_t va, addr_t pa, uint32 flags)
1064 {
1065 	int i;
1066 	page_table_entry *pt;
1067 	int state;
1068 
1069 	pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
1070 	va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
1071 	if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
1072 		panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);
1073 
1074 	pt = &iospace_pgtables[(va - sIOSpaceBase) / B_PAGE_SIZE];
1075 	for (i = 0; i < NUM_PAGEENT_PER_TBL; i++, pa += B_PAGE_SIZE) {
1076 		init_page_table_entry(&pt[i]);
1077 		pt[i].addr = TA_TO_PTEA(pa);
1078 		pt[i].supervisor = 1;
1079 		pt[i].write_protect = 0;
1080 		pt[i].type = DT_PAGE;
1081 		//XXX: not cachable ?
1082 		// 040 or 060 only
1083 #ifdef MMU_HAS_GLOBAL_PAGES
1084 		pt[i].global = 1;
1085 #endif
1086 	}
1087 
1088 	state = disable_interrupts();
1089 	arch_cpu_invalidate_TLB_range(va, va + (IOSPACE_CHUNK_SIZE - B_PAGE_SIZE));
1090 	//smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_RANGE,
1091 	//	va, va + (IOSPACE_CHUNK_SIZE - B_PAGE_SIZE), 0,
1092 	//	NULL, SMP_MSG_FLAG_SYNC);
1093 	restore_interrupts(state);
1094 
1095 	return B_OK;
1096 }
1097 
1098 
1099 static status_t
1100 get_physical_page_tmap_internal(addr_t pa, addr_t *va, uint32 flags)
1101 {
1102 	return generic_get_physical_page(pa, va, flags);
1103 }
1104 
1105 
1106 static status_t
1107 put_physical_page_tmap_internal(addr_t va)
1108 {
1109 	return generic_put_physical_page(va);
1110 }
1111 
1112 
1113 static status_t
1114 get_physical_page_tmap(addr_t physicalAddress, addr_t *_virtualAddress,
1115 	void **handle)
1116 {
1117 	return generic_get_physical_page(physicalAddress, _virtualAddress, 0);
1118 }
1119 
1120 
1121 static status_t
1122 put_physical_page_tmap(addr_t virtualAddress, void *handle)
1123 {
1124 	return generic_put_physical_page(virtualAddress);
1125 }
1126 
1127 
1128 static vm_translation_map_ops tmap_ops = {
1129 	destroy_tmap,
1130 	lock_tmap,
1131 	unlock_tmap,
1132 	map_max_pages_need,
1133 	map_tmap,
1134 	unmap_tmap,
1135 	query_tmap,
1136 	query_tmap_interrupt,
1137 	get_mapped_size_tmap,
1138 	protect_tmap,
1139 	clear_flags_tmap,
1140 	flush_tmap,
1141 	get_physical_page_tmap,
1142 	put_physical_page_tmap,
1143 	get_physical_page_tmap,	// *_current_cpu()
1144 	put_physical_page_tmap,	// *_current_cpu()
1145 	get_physical_page_tmap,	// *_debug()
1146 	put_physical_page_tmap,	// *_debug()
1147 		// TODO: Replace the *_current_cpu() and *_debug() versions!
1148 
1149 	generic_vm_memset_physical,
1150 	generic_vm_memcpy_from_physical,
1151 	generic_vm_memcpy_to_physical,
1152 	generic_vm_memcpy_physical_page
1153 		// TODO: Verify that this is safe to use!
1154 };
1155 
1156 
1157 //	#pragma mark -
1158 //	VM API
1159 
1160 
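// Initializes a translation map object. The kernel map reuses the global
// kernel page root; user maps get a freshly allocated root table whose kernel
// portion is copied from the kernel map.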
1161 static status_t
1162 m68k_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
1163 {
1164 	if (map == NULL)
1165 		return B_BAD_VALUE;
1166 
1167 	TRACE(("vm_translation_map_create\n"));
1168 
1169 	// initialize the new object
1170 	map->ops = &tmap_ops;
1171 	map->map_count = 0;
1172 
1173 	recursive_lock_init(&map->lock, "translation map");
1174 
1175 	map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
1176 	if (map->arch_data == NULL) {
1177 		recursive_lock_destroy(&map->lock);
1178 		return B_NO_MEMORY;
1179 	}
1180 
1181 	map->arch_data->num_invalidate_pages = 0;
1182 
1183 	if (!kernel) {
1184 		// user
1185 		// allocate a rtdir
1186 		map->arch_data->rtdir_virt = (page_root_entry *)memalign(
1187 			SIZ_ROOTTBL, SIZ_ROOTTBL);
1188 		if (map->arch_data->rtdir_virt == NULL) {
1189 			free(map->arch_data);
1190 			recursive_lock_destroy(&map->lock);
1191 			return B_NO_MEMORY;
1192 		}
1193 		vm_get_page_mapping(vm_kernel_address_space_id(),
1194 			(addr_t)map->arch_data->rtdir_virt, (addr_t *)&map->arch_data->rtdir_phys);
1195 	} else {
1196 		// kernel
1197 		// we already know the kernel pgdir mapping
1198 		map->arch_data->rtdir_virt = sKernelVirtualPageRoot;
1199 		map->arch_data->rtdir_phys = sKernelPhysicalPageRoot;
1200 	}
1201 
1202 	// zero out the bottom portion of the new rtdir
1203 	memset(map->arch_data->rtdir_virt + FIRST_USER_PGROOT_ENT, 0,
1204 		NUM_USER_PGROOT_ENTS * sizeof(page_root_entry));
1205 
1206 	// insert this new map into the map list
1207 	{
1208 		int state = disable_interrupts();
1209 		acquire_spinlock(&tmap_list_lock);
1210 
1211 		// copy the top (kernel) portion of the rtdir from the kernel map
1212 		memcpy(map->arch_data->rtdir_virt + FIRST_KERNEL_PGROOT_ENT,
1213 			sKernelVirtualPageRoot + FIRST_KERNEL_PGROOT_ENT,
1214 			NUM_KERNEL_PGROOT_ENTS * sizeof(page_root_entry));
1215 
1216 		map->next = tmap_list;
1217 		tmap_list = map;
1218 
1219 		release_spinlock(&tmap_list_lock);
1220 		restore_interrupts(state);
1221 	}
1222 
1223 	return B_OK;
1224 }
1225 
1226 
1227 static status_t
1228 m68k_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
1229 {
1230 	return B_OK;
1231 }
1232 
1233 
1234 static status_t
1235 m68k_vm_translation_map_init(kernel_args *args)
1236 {
1237 	status_t error;
1238 
1239 	TRACE(("vm_translation_map_init: entry\n"));
1240 #if 0//XXX:HOLE
1241 	// page hole set up in stage2
1242 	page_hole = (page_table_entry *)args->arch_args.page_hole;
1243 	// calculate where the pgdir would be
1244 	page_hole_pgdir = (page_directory_entry *)(((unsigned int)args->arch_args.page_hole) + (B_PAGE_SIZE * 1024 - B_PAGE_SIZE));
1245 	// clear out the bottom 2 GB, unmap everything
1246 	memset(page_hole_pgdir + FIRST_USER_PGDIR_ENT, 0, sizeof(page_directory_entry) * NUM_USER_PGDIR_ENTS);
1247 #endif
1248 
1249 	sKernelPhysicalPageRoot = (page_root_entry *)args->arch_args.phys_pgroot;
1250 	sKernelVirtualPageRoot = (page_root_entry *)args->arch_args.vir_pgroot;
1251 
1252 	sQueryDesc.type = DT_INVALID;
1253 
1254 	B_INITIALIZE_SPINLOCK(&tmap_list_lock);
1255 	tmap_list = NULL;
1256 
1257 	// allocate some space to hold physical page mapping info
1258 	//XXX: check page count
1259 	// we already have all page directories allocated by the bootloader,
1260 	// we only need page tables
1261 
1262 	iospace_pgtables = (page_table_entry *)vm_allocate_early(args,
1263 		B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL * NUM_PAGETBL_PER_PAGE)), ~0L,
1264 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1265 
1266 	TRACE(("iospace_pgtables %p\n", iospace_pgtables));
1267 
1268 	// init physical page mapper
1269 	error = generic_vm_physical_page_mapper_init(args, map_iospace_chunk,
1270 		&sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
1271 	if (error != B_OK)
1272 		return error;
1273 	TRACE(("iospace at %p\n", sIOSpaceBase));
1274 	// initialize our data structures
1275 	memset(iospace_pgtables, 0, B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL * NUM_PAGETBL_PER_PAGE)));
1276 
1277 	TRACE(("mapping iospace_pgtables\n"));
1278 
1279 	// put the array of pgtables directly into the kernel pagedir
1280 	// these will be wired and kept mapped into virtual space to be
1281 	// easy to get to.
1282 	// note the bootloader allocates all page directories for us
1283 	// as a contiguous block.
1284 	// we also still have transparent translation enabled, va==pa.
1285 	{
1286 		addr_t phys_pgtable;
1287 		addr_t virt_pgtable;
1288 		page_root_entry *pr = sKernelVirtualPageRoot;
1289 		page_directory_entry *pd;
1290 		page_directory_entry *e;
1291 		int index;
1292 		int i;
1293 
1294 		virt_pgtable = (addr_t)iospace_pgtables;
1295 
1296 		for (i = 0; i < (IOSPACE_SIZE / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL));
1297 			 i++, virt_pgtable += SIZ_PAGETBL) {
1298 			// early_query handles non-page-aligned addresses
1299 			early_query(virt_pgtable, &phys_pgtable);
1300 			index = VADDR_TO_PRENT(sIOSpaceBase) + i / NUM_DIRENT_PER_TBL;
1301 			pd = (page_directory_entry *)PRE_TO_TA(pr[index]);
1302 			e = &pd[(VADDR_TO_PDENT(sIOSpaceBase) + i) % NUM_DIRENT_PER_TBL];
1303 			put_pgtable_in_pgdir(e, phys_pgtable,
1304 				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1305 		}
1306 	}
1307 
1308 	TRACE(("vm_translation_map_init: done\n"));
1309 
1310 	return B_OK;
1311 }
1312 
1313 
1314 static status_t
1315 m68k_vm_translation_map_init_post_sem(kernel_args *args)
1316 {
1317 	return generic_vm_physical_page_mapper_init_post_sem(args);
1318 }
1319 
1320 
1321 static status_t
1322 m68k_vm_translation_map_init_post_area(kernel_args *args)
1323 	// now that the vm is initialized, create areas that represent the
1324 	// structures we set up early
1325 	// the page hole
1326 	void *temp;
1327 	status_t error;
1328 	area_id area;
1329 	addr_t queryPage;
1330 
1331 	TRACE(("vm_translation_map_init_post_area: entry\n"));
1332 
1333 	// unmap the page hole hack we were using before
1334 #warning M68K: FIXME
1335 	//sKernelVirtualPageRoot[1023].present = 0;
1336 #if 0
1337 	page_hole_pgdir = NULL;
1338 	page_hole = NULL;
1339 #endif
1340 
1341 	temp = (void *)sKernelVirtualPageRoot;
1342 	area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, B_PAGE_SIZE,
1343 		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1344 	if (area < B_OK)
1345 		return area;
1346 
1347 	temp = (void *)iospace_pgtables;
1348 	area = create_area("iospace_pgtables", &temp, B_EXACT_ADDRESS,
1349 		B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL * NUM_PAGETBL_PER_PAGE)),
1350 		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1351 	if (area < B_OK)
1352 		return area;
1353 
1354 	error = generic_vm_physical_page_mapper_init_post_area(args);
1355 	if (error != B_OK)
1356 		return error;
1357 
1358 	// this area is used for query_tmap_interrupt()
1359 	// TODO: Note, this only works as long as all pages belong to the same
1360 	//	page table, which is not yet enforced (or even tested)!
1361 	// Note we don't support SMP which makes things simpler.
1362 
1363 	area = vm_create_null_area(vm_kernel_address_space_id(),
1364 		"interrupt query pages", (void **)&queryPage, B_ANY_ADDRESS,
1365 		B_PAGE_SIZE);
1366 	if (area < B_OK)
1367 		return area;
1368 
1369 	// insert the indirect descriptor in the tree so we can map the page we want from it.
1370 
1371 	{
1372 		page_directory_entry *pageDirEntry;
1373 		page_indirect_entry *pageTableEntry;
1374 		addr_t physicalPageDir, physicalPageTable;
1375 		addr_t physicalIndirectDesc;
1376 		int32 index;
1377 
1378 		// first get pa for the indirect descriptor
1379 
1380 		index = VADDR_TO_PRENT((addr_t)&sQueryDesc);
1381 		physicalPageDir = PRE_TO_PA(sKernelVirtualPageRoot[index]);
1382 
1383 		get_physical_page_tmap_internal(physicalPageDir,
1384 			(addr_t *)&pageDirEntry, PHYSICAL_PAGE_DONT_WAIT);
1385 
1386 		index = VADDR_TO_PDENT((addr_t)&sQueryDesc);
1387 		physicalPageTable = PDE_TO_PA(pageDirEntry[index]);
1388 
1389 		get_physical_page_tmap_internal(physicalPageTable,
1390 			(addr_t *)&pageTableEntry, PHYSICAL_PAGE_DONT_WAIT);
1391 
1392 		index = VADDR_TO_PTENT((addr_t)&sQueryDesc);
1393 
1394 		// pa of the page
1395 		physicalIndirectDesc = PTE_TO_PA(pageTableEntry[index]);
1396 		// add offset
1397 		physicalIndirectDesc += ((addr_t)&sQueryDesc) % B_PAGE_SIZE;
1398 
1399 		put_physical_page_tmap_internal((addr_t)pageTableEntry);
1400 		put_physical_page_tmap_internal((addr_t)pageDirEntry);
1401 
1402 		// then the va for the page table for the query page.
1403 
1404 		//sQueryPageTable = (page_indirect_entry *)(queryPage);
1405 
1406 		index = VADDR_TO_PRENT(queryPage);
1407 		physicalPageDir = PRE_TO_PA(sKernelVirtualPageRoot[index]);
1408 
1409 		get_physical_page_tmap_internal(physicalPageDir,
1410 			(addr_t *)&pageDirEntry, PHYSICAL_PAGE_DONT_WAIT);
1411 
1412 		index = VADDR_TO_PDENT(queryPage);
1413 		physicalPageTable = PDE_TO_PA(pageDirEntry[index]);
1414 
1415 		get_physical_page_tmap_internal(physicalPageTable,
1416 			(addr_t *)&pageTableEntry, PHYSICAL_PAGE_DONT_WAIT);
1417 
1418 		index = VADDR_TO_PTENT(queryPage);
1419 
1420 		put_page_indirect_entry_in_pgtable(&pageTableEntry[index], physicalIndirectDesc,
1421 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
1422 
1423 		put_physical_page_tmap_internal((addr_t)pageTableEntry);
1424 		put_physical_page_tmap_internal((addr_t)pageDirEntry);
1425 		//invalidate_TLB(sQueryPageTable);
1426 	}
1427 	// query_tmap_interrupt() checks for NULL, now it can use the page
1428 	sQueryPage = queryPage;
1429 
1430 	TRACE(("vm_translation_map_init_post_area: done\n"));
1431 	return B_OK;
1432 }
1433 
1434 
1435 // almost directly taken from boot mmu code
1436 // x86:
1437 // XXX horrible back door to map a page quickly regardless of translation map object, etc.
1438 // used only during VM setup.
1439 // uses a 'page hole' set up in the stage 2 bootloader. The page hole is created by pointing one of
1440 // the pgdir entries back at itself, effectively mapping the contents of all of the 4MB of pagetables
1441 // into a 4 MB region. It's only used here, and is later unmapped.
1442 
1443 static status_t
1444 m68k_vm_translation_map_early_map(kernel_args *args, addr_t va, addr_t pa,
1445 	uint8 attributes, addr_t (*get_free_page)(kernel_args *))
1446 {
1447 	page_root_entry *pr = (page_root_entry *)sKernelPhysicalPageRoot;
1448 	page_directory_entry *pd;
1449 	page_table_entry *pt;
1450 	addr_t tbl;
1451 	uint32 index;
1452 	uint32 i;
1453 	TRACE(("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va));
1454 
1455 	// everything is much simpler here because pa == va,
1456 	// thanks to transparent translation, which hasn't been disabled yet
1457 
1458 	index = VADDR_TO_PRENT(va);
1459 	if (pr[index].type != DT_ROOT) {
1460 		unsigned aindex = index & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
1461 		TRACE(("missing page root entry %d ai %d\n", index, aindex));
1462 		tbl = get_free_page(args) * B_PAGE_SIZE;
1463 		if (!tbl)
1464 			return ENOMEM;
1465 		TRACE(("early_map: asked for free page for pgdir. 0x%lx\n", tbl));
1466 		// zero-out
1467 		memset((void *)tbl, 0, B_PAGE_SIZE);
1468 		// for each pgdir on the allocated page:
1469 		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
1470 			put_pgdir_in_pgroot(&pr[aindex + i], tbl, attributes);
1471 			//TRACE(("inserting tbl @ %p as %08x pr[%d] %08x\n", tbl, TA_TO_PREA(tbl), aindex + i, *(uint32 *)apr));
1472 			// clear the table
1473 			//TRACE(("clearing table[%d]\n", i));
1474 			pd = (page_directory_entry *)tbl;
1475 			for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
1476 				*(page_directory_entry_scalar *)(&pd[j]) = DFL_DIRENT_VAL;
1477 			tbl += SIZ_DIRTBL;
1478 		}
1479 	}
1480 	pd = (page_directory_entry *)PRE_TO_TA(pr[index]);
1481 
1482 	index = VADDR_TO_PDENT(va);
1483 	if (pd[index].type != DT_DIR) {
1484 		unsigned aindex = index & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
1485 		TRACE(("missing page dir entry %d ai %d\n", index, aindex));
1486 		tbl = get_free_page(args) * B_PAGE_SIZE;
1487 		if (!tbl)
1488 			return ENOMEM;
1489 		TRACE(("early_map: asked for free page for pgtable. 0x%lx\n", tbl));
1490 		// zero-out
1491 		memset((void *)tbl, 0, B_PAGE_SIZE);
1492 		// for each pgtable on the allocated page:
1493 		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
1494 			put_pgtable_in_pgdir(&pd[aindex + i], tbl, attributes);
1495 			// clear the table
1496 			//TRACE(("clearing table[%d]\n", i));
1497 			pt = (page_table_entry *)tbl;
1498 			for (int32 j = 0; j < NUM_PAGEENT_PER_TBL; j++)
1499 				*(page_table_entry_scalar *)(&pt[j]) = DFL_PAGEENT_VAL;
1500 			tbl += SIZ_PAGETBL;
1501 		}
1502 	}
1503 	pt = (page_table_entry *)PDE_TO_TA(pd[index]);
1504 
1505 	index = VADDR_TO_PTENT(va);
1506 	put_page_table_entry_in_pgtable(&pt[index], pa, attributes,
1507 		IS_KERNEL_ADDRESS(va));
1508 
1509 	arch_cpu_invalidate_TLB_range(va, va);
1510 
1511 	return B_OK;
1512 }
1513 
1514 
1515 static bool
1516 m68k_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
1517 	uint32 protection)
1518 {
1519 	// TODO: Implement!
1520 	return false;
1521 }
1522