1 /*
2  * Copyright 2007, Haiku Inc. All rights reserved.
3  * Distributed under the terms of the MIT License.
4  *
5  * Authors:
6  * 		François Revol <revol@free.fr>
7  *
8  * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
9  * Distributed under the terms of the MIT License.
10  *
11  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
12  * Distributed under the terms of the NewOS License.
13  */
14 
15 #ifndef ARCH_M68K_MMU_TYPE
16 #error This file is included from arch_*_mmu.cpp
17 #endif
18 
19 /*  (mmu_man) Implementation details on 68030 and others:
20 
21 	Unlike on x86, we can't switch the context to another team simply by
22 	setting a register to another page directory, since we only have one
23 	page table containing both kernel and user address mappings.
24 	The 030 supports arbitrary layout of the page directory tree, including
25 	a 1-bit first level (a 2-entry top-level table) that would map kernel
26 	and user land in a single place. But the 040 and later only support a
27 	fixed 7/7/6 split for 4K pages.
28 
29 	Since 68k SMP hardware is rare enough that we don't want to support it,
30 	we can take some shortcuts.
31 
32 	As we don't want separate user and kernel spaces, we'll use a single
33 	table. With the 7/7/6 split the 2nd level would require 32KB of tables,
34 	which is small enough that we don't need the list hack from x86.
35 	XXX: we use the hack for now, check later
36 
37 	Since page directories/tables don't exactly fill a page, we stuff more
38 	than one per page, allocate them all at once, and add them to the tree
39 	at the same time. So, within each group of tables sharing a page, the
40 	higher-level entries are guaranteed to be either all invalid or all present.
41  */
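
/* A minimal sketch (illustrative only, never compiled) of the invariant
 * described above: since the higher-level tables that share a physical page
 * are allocated and inserted as one aligned group, the corresponding entries
 * all carry the same descriptor type (e.g. all DT_INVALID or all DT_ROOT).
 * The helper name is hypothetical.
 */
#if 0
static bool
check_root_group_invariant(page_root_entry *pr, unsigned int index)
{
	unsigned int base = index & ~(NUM_DIRTBL_PER_PAGE - 1);
	unsigned int i;

	for (i = 1; i < NUM_DIRTBL_PER_PAGE; i++) {
		if (pr[base + i].type != pr[base].type)
			return false;
	}
	return true;
}
#endif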
42 
43 #include <KernelExport.h>
44 #include <kernel.h>
45 #include <heap.h>
46 #include <vm.h>
47 #include <vm_address_space.h>
48 #include <vm_page.h>
49 #include <vm_priv.h>
50 #include <int.h>
51 #include <boot/kernel_args.h>
52 #include <arch/vm_translation_map.h>
53 #include <arch/cpu.h>
54 #include <arch_mmu.h>
55 #include <stdlib.h>
56 
57 #include "generic_vm_physical_page_mapper.h"
58 
59 
60 
61 //#define TRACE_VM_TMAP
62 #ifdef TRACE_VM_TMAP
63 #	define TRACE(x) dprintf x
64 #else
65 #	define TRACE(x) ;
66 #endif
67 
68 //XXX: that's platform specific!
69 // 14 MB of iospace
70 #define IOSPACE_SIZE (14*1024*1024)
71 // x86 uses 4 MB chunks here, to optimize for 4 MB pages
72 // XXX: no such thing on 68k (060 ?), so use smaller chunks:
73 // 256K
74 #define IOSPACE_CHUNK_SIZE (256*1024)
75 
76 static page_table_entry *iospace_pgtables = NULL;
77 
78 #define PAGE_INVALIDATE_CACHE_SIZE 64
79 
80 // vm_translation object stuff
81 typedef struct vm_translation_map_arch_info {
82 	page_root_entry *rtdir_virt;
83 	page_root_entry *rtdir_phys;
84 	int num_invalidate_pages;
85 	addr_t pages_to_invalidate[PAGE_INVALIDATE_CACHE_SIZE];
86 } vm_translation_map_arch_info;
87 
88 #if 1//XXX ?
89 static page_table_entry *page_hole = NULL;
90 static page_directory_entry *page_hole_pgdir = NULL;
91 #endif
92 static page_root_entry *sKernelPhysicalPageRoot = NULL;
93 static page_root_entry *sKernelVirtualPageRoot = NULL;
94 static addr_t sQueryPage = NULL;
95 //static page_table_entry *sQueryPageTable;
96 //static page_directory_entry *sQueryPageDir;
97 // MUST be aligned
98 static page_table_entry sQueryDesc __attribute__ (( aligned (4) ));
99 
100 static vm_translation_map *tmap_list;
101 static spinlock tmap_list_lock;
102 
103 static addr_t sIOSpaceBase;
104 
105 #define CHATTY_TMAP 0
106 
107 #if 0
108 // use P*E_TO_* and TA_TO_P*EA !
109 #define ADDR_SHIFT(x) ((x)>>12)
110 #define ADDR_REVERSE_SHIFT(x) ((x)<<12)
111 #endif
112 
113 /* 7/7/6 split */
114 #define VADDR_TO_PRENT(va) (((va) / B_PAGE_SIZE) / (64*128))
115 #define VADDR_TO_PDENT(va) ((((va) / B_PAGE_SIZE) / 64) % 128)
116 #define VADDR_TO_PTENT(va) (((va) / B_PAGE_SIZE) % 64)
117 
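/* Worked example (illustrative only, never compiled): with 4K pages,
 * va = 0x8042A123 decomposes under the 7/7/6 split above as
 *   VADDR_TO_PRENT(va) = va >> 25          = 64
 *   VADDR_TO_PDENT(va) = (va >> 18) & 0x7f = 16
 *   VADDR_TO_PTENT(va) = (va >> 12) & 0x3f = 42
 * with the low 12 bits (0x123) left as the offset within the page.
 * The helper below is hypothetical and only prints that decomposition.
 */
#if 0
static void
dump_vaddr_split(addr_t va)
{
	dprintf("va 0x%lx -> root %lu dir %lu page %lu offset 0x%lx\n",
		va, (unsigned long)VADDR_TO_PRENT(va),
		(unsigned long)VADDR_TO_PDENT(va),
		(unsigned long)VADDR_TO_PTENT(va),
		(unsigned long)(va % B_PAGE_SIZE));
}
#endif
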
118 #define FIRST_USER_PGROOT_ENT    (VADDR_TO_PRENT(USER_BASE))
119 #define FIRST_USER_PGDIR_ENT    (VADDR_TO_PDENT(USER_BASE))
120 #define NUM_USER_PGROOT_ENTS     (VADDR_TO_PRENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64 * 128)))
121 #define NUM_USER_PGDIR_ENTS     (VADDR_TO_PDENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64)))
122 #define FIRST_KERNEL_PGROOT_ENT  (VADDR_TO_PRENT(KERNEL_BASE))
123 #define FIRST_KERNEL_PGDIR_ENT  (VADDR_TO_PDENT(KERNEL_BASE))
124 #define NUM_KERNEL_PGROOT_ENTS   (VADDR_TO_PRENT(KERNEL_SIZE))
125 #define NUM_KERNEL_PGDIR_ENTS   (VADDR_TO_PDENT(KERNEL_SIZE))
126 #define IS_KERNEL_MAP(map)		(map->arch_data->rtdir_phys == sKernelPhysicalPageRoot)
127 
128 static status_t early_query(addr_t va, addr_t *out_physical);
129 static status_t get_physical_page_tmap(addr_t pa, addr_t *va, uint32 flags);
130 static status_t put_physical_page_tmap(addr_t va);
131 
132 static void flush_tmap(vm_translation_map *map);
133 
134 
135 #warning M68K: RENAME
136 static void *
137 _m68k_translation_map_get_pgdir(vm_translation_map *map)
138 {
139 	return map->arch_data->rtdir_phys;
140 }
141 
142 
143 static inline void
144 init_page_root_entry(page_root_entry *entry)
145 {
146 	// DT_INVALID is 0
147 	*(page_root_entry_scalar *)entry = DFL_ROOTENT_VAL;
148 }
149 
150 
151 static inline void
152 update_page_root_entry(page_root_entry *entry, page_root_entry *with)
153 {
154 	// update page directory entry atomically
155 	*(page_root_entry_scalar *)entry = *(page_root_entry_scalar *)with;
156 }
157 
158 
159 static inline void
160 init_page_directory_entry(page_directory_entry *entry)
161 {
162 	*(page_directory_entry_scalar *)entry = DFL_DIRENT_VAL;
163 }
164 
165 
166 static inline void
167 update_page_directory_entry(page_directory_entry *entry, page_directory_entry *with)
168 {
169 	// update page directory entry atomically
170 	*(page_directory_entry_scalar *)entry = *(page_directory_entry_scalar *)with;
171 }
172 
173 
174 static inline void
175 init_page_table_entry(page_table_entry *entry)
176 {
177 	*(page_table_entry_scalar *)entry = DFL_PAGEENT_VAL;
178 }
179 
180 
181 static inline void
182 update_page_table_entry(page_table_entry *entry, page_table_entry *with)
183 {
184 	// update page table entry atomically
185 	// XXX: is it ?? (long desc?)
186 	*(page_table_entry_scalar *)entry = *(page_table_entry_scalar *)with;
187 }
188 
189 
190 static inline void
191 init_page_indirect_entry(page_indirect_entry *entry)
192 {
193 #warning M68K: is it correct ?
194 	*(page_indirect_entry_scalar *)entry = DFL_PAGEENT_VAL;
195 }
196 
197 
198 static inline void
199 update_page_indirect_entry(page_indirect_entry *entry, page_indirect_entry *with)
200 {
201 	// update page table entry atomically
202 	// XXX: is it ?? (long desc?)
203 	*(page_indirect_entry_scalar *)entry = *(page_indirect_entry_scalar *)with;
204 }
205 
206 
207 static void
208 _update_all_pgdirs(int index, page_root_entry e)
209 {
210 	vm_translation_map *entry;
211 	unsigned int state = disable_interrupts();
212 
213 	acquire_spinlock(&tmap_list_lock);
214 
215 	for(entry = tmap_list; entry != NULL; entry = entry->next)
216 		entry->arch_data->rtdir_virt[index] = e;
217 
218 	release_spinlock(&tmap_list_lock);
219 	restore_interrupts(state);
220 }
221 
222 
223 // XXX currently assumes this translation map is active
224 
225 static status_t
226 early_query(addr_t va, addr_t *_physicalAddress)
227 {
228 	page_table_entry *pentry;
229 
230 	if (page_hole_pgdir[VADDR_TO_PDENT(va)].type != DT_DIR) {
231 		// no pagetable here
232 		return B_ERROR;
233 	}
234 #warning M68K: va or VADDR_TO_PTENT(va) ??
235 	pentry = page_hole + va / B_PAGE_SIZE;
236 	if (pentry->type != DT_PAGE) {
237 		// page mapping not valid
238 		return B_ERROR;
239 	}
240 
241 	*_physicalAddress = PTE_TO_PA(*pentry);
242 	return B_OK;
243 }
244 
245 
246 /*!	Acquires the map's recursive lock, and resets the invalidate pages counter
247 	in case it's the first locking recursion.
248 */
249 static status_t
250 lock_tmap(vm_translation_map *map)
251 {
252 	TRACE(("lock_tmap: map %p\n", map));
253 
254 	recursive_lock_lock(&map->lock);
255 	if (recursive_lock_get_recursion(&map->lock) == 1) {
256 		// we were the first one to grab the lock
257 		TRACE(("clearing invalidated page count\n"));
258 		map->arch_data->num_invalidate_pages = 0;
259 	}
260 
261 	return B_OK;
262 }
263 
264 
265 /*!	Unlocks the map and, if the recursive lock is actually being released,
266 	flushes all pending changes of this map (i.e. flushes TLB caches as
267 	needed).
268 */
269 static status_t
270 unlock_tmap(vm_translation_map *map)
271 {
272 	TRACE(("unlock_tmap: map %p\n", map));
273 
274 	if (recursive_lock_get_recursion(&map->lock) == 1) {
275 		// we're about to release it for the last time
276 		flush_tmap(map);
277 	}
278 
279 	recursive_lock_unlock(&map->lock);
280 	return B_OK;
281 }
282 
283 
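/* A minimal usage sketch (illustrative only, never compiled) of the batching
 * described above: mappings done between lock_tmap() and the final
 * unlock_tmap() only queue their addresses in pages_to_invalidate; the TLB
 * is flushed once, by flush_tmap(), when the last lock recursion is released.
 * The ops field names (lock, unlock, map) are assumed from the order of the
 * tmap_ops table below; the helper itself is hypothetical.
 */
#if 0
static void
example_map_one_page(vm_translation_map *map, addr_t va, addr_t pa)
{
	map->ops->lock(map);
	map->ops->map(map, va, pa, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	map->ops->unlock(map);
		// the last unlock triggers flush_tmap() for the queued pages
}
#endif

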
284 static void
285 destroy_tmap(vm_translation_map *map)
286 {
287 	int state;
288 	vm_translation_map *entry;
289 	vm_translation_map *last = NULL;
290 	unsigned int i, j;
291 
292 	if (map == NULL)
293 		return;
294 
295 	// remove it from the tmap list
296 	state = disable_interrupts();
297 	acquire_spinlock(&tmap_list_lock);
298 
299 	entry = tmap_list;
300 	while (entry != NULL) {
301 		if (entry == map) {
302 			if (last != NULL)
303 				last->next = entry->next;
304 			else
305 				tmap_list = entry->next;
306 
307 			break;
308 		}
309 		last = entry;
310 		entry = entry->next;
311 	}
312 
313 	release_spinlock(&tmap_list_lock);
314 	restore_interrupts(state);
315 
316 	if (map->arch_data->rtdir_virt != NULL) {
317 		// cycle through and free all of the user space pgtables
318 		// since the size of the tables doesn't match B_PAGE_SIZE,
319 		// we allocate several at once, and based on modulos
320 		// we make sure they are either all in the tree or none.
321 		for (i = VADDR_TO_PRENT(USER_BASE); i <= VADDR_TO_PRENT(USER_BASE + (USER_SIZE - 1)); i++) {
322 			addr_t pgdir_pn;
323 			page_directory_entry *pgdir;
324 			vm_page *dirpage;
325 
326 			if (map->arch_data->rtdir_virt[i].type == DT_INVALID)
327 				continue;
328 			if (map->arch_data->rtdir_virt[i].type != DT_ROOT) {
329 				panic("rtdir[%d]: buggy descriptor type", i);
330 				return;
331 			}
332 			// suboptimal (done 8 times)
333 			pgdir_pn = PRE_TO_PA(map->arch_data->rtdir_virt[i]);
334 			dirpage = vm_lookup_page(pgdir_pn);
335 			pgdir = &(((page_directory_entry *)dirpage)[i%NUM_DIRTBL_PER_PAGE]);
336 
337 			for (j = 0; j < NUM_DIRENT_PER_TBL; j += NUM_PAGETBL_PER_PAGE) {
338 				addr_t pgtbl_pn;
339 				page_table_entry *pgtbl;
340 				vm_page *page;
341 				if (pgdir[j].type == DT_INVALID)
342 					continue;
343 				if (pgdir[j].type != DT_DIR) {
344 					panic("rtdir[%d][%d]: buggy descriptor type", i, j);
345 					return;
346 				}
347 				pgtbl_pn = PDE_TO_PN(pgdir[j]);
348 				page = vm_lookup_page(pgtbl_pn);
349 				pgtbl = (page_table_entry *)page;
350 
351 				if (!page) {
352 					panic("destroy_tmap: didn't find pgtable page\n");
353 					return;
354 				}
355 				vm_page_set_state(page, PAGE_STATE_FREE);
356 			}
357 			if (((i+1)%NUM_DIRTBL_PER_PAGE) == 0)
358 				vm_page_set_state(dirpage, PAGE_STATE_FREE);
359 		}
360 		free(map->arch_data->rtdir_virt);
361 	}
362 
363 	free(map->arch_data);
364 	recursive_lock_destroy(&map->lock);
365 }
366 
367 
368 static void
369 put_pgdir_in_pgroot(page_root_entry *entry,
370 	addr_t pgdir_phys, uint32 attributes)
371 {
372 	page_root_entry dir;
373 	// put it in the pgdir
374 	init_page_root_entry(&dir);
375 	dir.addr = TA_TO_PREA(pgdir_phys);
376 
377 	// ToDo: we ignore the attributes of the page table - for compatibility
378 	//	with BeOS we allow having user accessible areas in the kernel address
379 	//	space. This is currently being used by some drivers, mainly for the
380 	//	frame buffer. Our current real time data implementation makes use of
381 	//	this fact, too.
382 	//	We might want to get rid of this possibility one day, especially if
383 	//	we intend to port it to a platform that does not support this.
384 	//dir.user = 1;
385 	//dir.rw = 1;
386 	dir.type = DT_ROOT;
387 	update_page_root_entry(entry, &dir);
388 }
389 
390 
391 static void
392 put_pgtable_in_pgdir(page_directory_entry *entry,
393 	addr_t pgtable_phys, uint32 attributes)
394 {
395 	page_directory_entry table;
396 	// put it in the pgdir
397 	init_page_directory_entry(&table);
398 	table.addr = TA_TO_PDEA(pgtable_phys);
399 
400 	// ToDo: we ignore the attributes of the page table - for compatibility
401 	//	with BeOS we allow having user accessible areas in the kernel address
402 	//	space. This is currently being used by some drivers, mainly for the
403 	//	frame buffer. Our current real time data implementation makes use of
404 	//	this fact, too.
405 	//	We might want to get rid of this possibility one day, especially if
406 	//	we intend to port it to a platform that does not support this.
407 	//table.user = 1;
408 	//table.rw = 1;
409 	table.type = DT_DIR;
410 	update_page_directory_entry(entry, &table);
411 }
412 
413 
414 static void
415 put_page_table_entry_in_pgtable(page_table_entry *entry,
416 	addr_t physicalAddress, uint32 attributes, bool globalPage)
417 {
418 	page_table_entry page;
419 	init_page_table_entry(&page);
420 
421 	page.addr = TA_TO_PTEA(physicalAddress);
422 
423 	// if the page is user accessible, it's automatically
424 	// accessible in kernel space, too (but with the same
425 	// protection)
426 	page.supervisor = (attributes & B_USER_PROTECTION) == 0;
427 	if (page.supervisor)
428 		page.write_protect = (attributes & B_KERNEL_WRITE_AREA) == 0;
429 	else
430 		page.write_protect = (attributes & B_WRITE_AREA) == 0;
431 	page.type = DT_PAGE;
432 
433 #ifdef PAGE_HAS_GLOBAL_BIT
434 	if (globalPage)
435 		page.global = 1;
436 #endif
437 
438 	// put it in the page table
439 	update_page_table_entry(entry, &page);
440 }
441 
442 
443 static void
444 put_page_indirect_entry_in_pgtable(page_indirect_entry *entry,
445 	addr_t physicalAddress, uint32 attributes, bool globalPage)
446 {
447 	page_indirect_entry page;
448 	init_page_indirect_entry(&page);
449 
450 	page.addr = TA_TO_PIEA(physicalAddress);
451 	page.type = DT_INDIRECT;
452 
453 	// there are usually no protection bits in an indirect descriptor.
454 
455 	// put it in the page table
456 	update_page_indirect_entry(entry, &page);
457 }
458 
459 
460 static size_t
461 map_max_pages_need(vm_translation_map */*map*/, addr_t start, addr_t end)
462 {
463 	size_t need;
464 	size_t pgdirs = VADDR_TO_PRENT(end) + 1 - VADDR_TO_PRENT(start);
465 	// how much for page directories
466 	need = (pgdirs + NUM_DIRTBL_PER_PAGE - 1) / NUM_DIRTBL_PER_PAGE;
467 	// and page tables themselves
468 	need += ((pgdirs * NUM_DIRENT_PER_TBL) + NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE;
469 
470 	// better rounding when only 1 pgdir
471 	// XXX: do better for other cases
472 	if (pgdirs == 1) {
473 		need = 1;
474 		need += (VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start) + NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE;
475 	}
476 
477 	return need;
478 }
479 
480 
481 static status_t
482 map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
483 {
484 	page_root_entry *pr;
485 	page_directory_entry *pd;
486 	page_table_entry *pt;
487 	addr_t pd_pg, pt_pg;
488 	unsigned int rindex, dindex, pindex;
489 	int err;
490 
491 	TRACE(("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va));
492 
493 /*
494 	dprintf("pgdir at 0x%x\n", pgdir);
495 	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
496 	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
497 	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
498 	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
499 	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
500 */
501 	pr = map->arch_data->rtdir_virt;
502 
503 	// check to see if a page directory exists for this range
504 	rindex = VADDR_TO_PRENT(va);
505 	if (pr[rindex].type != DT_ROOT) {
506 		addr_t pgdir;
507 		vm_page *page;
508 		unsigned int i;
509 
510 		// we need to allocate a pgtable
511 		page = vm_page_allocate_page(PAGE_STATE_CLEAR, true);
512 
513 		// mark the page WIRED
514 		vm_page_set_state(page, PAGE_STATE_WIRED);
515 
516 		pgdir = page->physical_page_number * B_PAGE_SIZE;
517 
518 		TRACE(("map_tmap: asked for free page for pgdir. 0x%lx\n", pgdir));
519 
520 		// for each pgdir on the allocated page:
521 		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
522 			unsigned aindex = rindex & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
523 			page_root_entry *apr = &pr[aindex + i];
524 
525 			// put in the pgdir
526 			put_pgdir_in_pgroot(apr, pgdir, attributes
527 				| (attributes & B_USER_PROTECTION ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
528 
529 			// update any other page directories, if it maps kernel space
530 			//XXX: suboptimal, should batch them
531 			if ((aindex+i) >= FIRST_KERNEL_PGDIR_ENT
532 				&& (aindex+i) < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS))
533 				_update_all_pgdirs((aindex+i), pr[aindex+i]);
534 
535 			pgdir += SIZ_DIRTBL;
536 		}
537 #warning M68K: really mean map_count++ ??
538 		map->map_count++;
539 	}
540 	// now, fill in the pentry
541 	do {
542 		err = get_physical_page_tmap(PRE_TO_PA(pr[rindex]),
543 				&pd_pg, PHYSICAL_PAGE_NO_WAIT);
544 	} while (err < 0);
545 	pd = (page_directory_entry *)pd_pg;
546 	// we want the table at rindex, not at rindex%(tbl/page)
547 	pd += (rindex % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
548 
549 	// check to see if a page table exists for this range
550 	dindex = VADDR_TO_PDENT(va);
551 	if (pd[dindex].type != DT_DIR) {
552 		addr_t pgtable;
553 		vm_page *page;
554 		unsigned int i;
555 
556 		// we need to allocate a pgtable
557 		page = vm_page_allocate_page(PAGE_STATE_CLEAR, true);
558 
559 		// mark the page WIRED
560 		vm_page_set_state(page, PAGE_STATE_WIRED);
561 
562 		pgtable = page->physical_page_number * B_PAGE_SIZE;
563 
564 		TRACE(("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable));
565 
566 		// for each pgtable on the allocated page:
567 		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
568 			unsigned aindex = dindex & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
569 			page_directory_entry *apd = &pd[aindex + i];
570 
571 			// put in the pgdir
572 			put_pgtable_in_pgdir(apd, pgtable, attributes
573 				| (attributes & B_USER_PROTECTION ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
574 
575 			// no need to update other page directories for kernel space;
576 			// the root-level entries already point to us.
577 
578 			pgtable += SIZ_PAGETBL;
579 		}
580 
581 #warning M68K: really mean map_count++ ??
582 		map->map_count++;
583 	}
584 	// now, fill in the pentry
585 	do {
586 		err = get_physical_page_tmap(PDE_TO_PA(pd[dindex]),
587 				&pt_pg, PHYSICAL_PAGE_NO_WAIT);
588 	} while (err < 0);
589 	pt = (page_table_entry *)pt_pg;
590 	// we want the table at rindex, not at rindex%(tbl/page)
591 	pt += (dindex % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
592 
593 	pindex = VADDR_TO_PTENT(va);
594 
595 	put_page_table_entry_in_pgtable(&pt[pindex], pa, attributes,
596 		IS_KERNEL_MAP(map));
597 
598 	put_physical_page_tmap(pt_pg);
599 	put_physical_page_tmap(pd_pg);
600 
601 	if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
602 		map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = va;
603 
604 	map->arch_data->num_invalidate_pages++;
605 
606 	map->map_count++;
607 
608 	return 0;
609 }
610 
611 
612 static status_t
613 unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
614 {
615 	page_table_entry *pt;
616 	page_directory_entry *pd;
617 	page_root_entry *pr = map->arch_data->rtdir_virt;
618 	addr_t pd_pg, pt_pg;
619 	status_t status;
620 	int index;
621 
622 	start = ROUNDOWN(start, B_PAGE_SIZE);
623 	end = ROUNDUP(end, B_PAGE_SIZE);
624 
625 	TRACE(("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end));
626 
627 restart:
628 	if (start >= end)
629 		return B_OK;
630 
631 	index = VADDR_TO_PRENT(start);
632 	if (pr[index].type != DT_ROOT) {
633 		// no pagedir here, move the start up to access the next page table
634 		start = ROUNDUP(start + 1, B_PAGE_SIZE);
635 		goto restart;
636 	}
637 
638 	do {
639 		status = get_physical_page_tmap(PRE_TO_PA(pr[index]),
640 			&pd_pg, PHYSICAL_PAGE_NO_WAIT);
641 	} while (status < B_OK);
642 	pd = (page_directory_entry *)pd_pg;
643 	// we want the table at rindex, not at rindex%(tbl/page)
644 	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
645 
646 	index = VADDR_TO_PDENT(start);
647 	if (pd[index].type != DT_DIR) {
648 		// no pagetable here, move the start up to access the next page table
649 		start = ROUNDUP(start + 1, B_PAGE_SIZE);
650 		put_physical_page_tmap(pd_pg);
651 		goto restart;
652 	}
653 
654 	do {
655 		status = get_physical_page_tmap(PDE_TO_PA(pd[index]),
656 			&pt_pg, PHYSICAL_PAGE_NO_WAIT);
657 	} while (status < B_OK);
658 	pt = (page_table_entry *)pt_pg;
659 	// we want the table at rindex, not at rindex%(tbl/page)
660 	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
661 
662 	for (index = VADDR_TO_PTENT(start);
663 			(index < NUM_PAGEENT_PER_TBL) && (start < end);
664 			index++, start += B_PAGE_SIZE) {
665 		if (pt[index].type != DT_PAGE && pt[index].type != DT_INDIRECT) {
666 			// page mapping not valid
667 			continue;
668 		}
669 
670 		TRACE(("unmap_tmap: removing page 0x%lx\n", start));
671 
672 		pt[index].type = DT_INVALID;
673 		map->map_count--;
674 
675 		if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
676 			map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = start;
677 
678 		map->arch_data->num_invalidate_pages++;
679 	}
680 
681 	put_physical_page_tmap(pt_pg);
682 	put_physical_page_tmap(pd_pg);
683 
684 	goto restart;
685 }
686 
687 // XXX: 040 should be able to do that with PTEST (but not 030 or 060)
688 static status_t
689 query_tmap_interrupt(vm_translation_map *map, addr_t va, addr_t *_physical,
690 	uint32 *_flags)
691 {
692 	page_root_entry *pr = map->arch_data->rtdir_virt;
693 	page_directory_entry *pd;
694 	page_indirect_entry *pi;
695 	page_table_entry *pt;
696 	addr_t physicalPageTable;
697 	int32 index;
698 	status_t err = B_ERROR;	// no pagetable here
699 
700 	if (sQueryPage == NULL)
701 		return err; // not yet initialized !?
702 
703 	index = VADDR_TO_PRENT(va);
704 	if (pr && pr[index].type == DT_ROOT) {
705 		put_page_table_entry_in_pgtable(&sQueryDesc, PRE_TO_TA(pr[index]), B_KERNEL_READ_AREA, false);
706 		arch_cpu_invalidate_TLB_range(sQueryPage, sQueryPage);
707 		pd = (page_directory_entry *)sQueryPage;
708 		index = VADDR_TO_PDENT(va);
709 
710 		if (pd && pd[index].type == DT_DIR) {
711 			put_page_table_entry_in_pgtable(&sQueryDesc, PDE_TO_TA(pd[index]), B_KERNEL_READ_AREA, false);
712 			arch_cpu_invalidate_TLB_range(sQueryPage, sQueryPage);
713 			pt = (page_table_entry *)sQueryPage;
714 			index = VADDR_TO_PTENT(va);
715 
716 			if (pt && pt[index].type == DT_INDIRECT) {
717 				pi = (page_indirect_entry *)pt;
718 				put_page_table_entry_in_pgtable(&sQueryDesc, PIE_TO_TA(pi[index]), B_KERNEL_READ_AREA, false);
719 				arch_cpu_invalidate_TLB_range(sQueryPage, sQueryPage);
720 				pt = (page_table_entry *)sQueryPage;
721 				index = 0; // single descriptor
722 			}
723 
724 			if (pt /*&& pt[index].type == DT_PAGE*/) {
725 				*_physical = PTE_TO_PA(pt[index]);
726 				// we should only be passed a page-aligned va, but just in case.
727 				*_physical += va % B_PAGE_SIZE;
728 				*_flags |= ((pt[index].write_protect ? 0 : B_KERNEL_WRITE_AREA) | B_KERNEL_READ_AREA)
729 						| (pt[index].dirty ? PAGE_MODIFIED : 0)
730 						| (pt[index].accessed ? PAGE_ACCESSED : 0)
731 						| ((pt[index].type == DT_PAGE) ? PAGE_PRESENT : 0);
732 				err = B_OK;
733 			}
734 		}
735 	}
736 
737 	// unmap the pg table from the indirect desc.
738 	sQueryDesc.type = DT_INVALID;
739 
740 	return err;
741 }
742 
743 
744 static status_t
745 query_tmap(vm_translation_map *map, addr_t va, addr_t *_physical, uint32 *_flags)
746 {
747 	page_table_entry *pt;
748 	page_indirect_entry *pi;
749 	page_directory_entry *pd;
750 	page_root_entry *pr = map->arch_data->rtdir_virt;
751 	addr_t pd_pg, pt_pg, pi_pg;
752 	status_t status;
753 	int32 index;
754 
755 	// default the flags to not present
756 	*_flags = 0;
757 	*_physical = 0;
758 
759 	index = VADDR_TO_PRENT(va);
760 	if (pr[index].type != DT_ROOT) {
761 		// no pagetable here
762 		return B_NO_ERROR;
763 	}
764 
765 	do {
766 		status = get_physical_page_tmap(PRE_TO_PA(pr[index]),
767 			&pd_pg, PHYSICAL_PAGE_NO_WAIT);
768 	} while (status < B_OK);
769 	pd = (page_directory_entry *)pd_pg;
770 	// we want the table at rindex, not at rindex%(tbl/page)
771 	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
772 
773 
774 	index = VADDR_TO_PDENT(va);
775 	if (pd[index].type != DT_DIR) {
776 		// no pagetable here
777 		put_physical_page_tmap(pd_pg);
778 		return B_NO_ERROR;
779 	}
780 
781 	do {
782 		status = get_physical_page_tmap(PDE_TO_PA(pd[index]),
783 			&pt_pg, PHYSICAL_PAGE_NO_WAIT);
784 	} while (status < B_OK);
785 	pt = (page_table_entry *)pt_pg;
786 	// we want the table at rindex, not at rindex%(tbl/page)
787 	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
788 
789 	index = VADDR_TO_PTENT(va);
790 
791 	// handle indirect descriptor
792 	if (pt[index].type == DT_INDIRECT) {
793 		pi = (page_indirect_entry *)pt;
794 		pi_pg = pt_pg;
795 		do {
796 			status = get_physical_page_tmap(PIE_TO_PA(pi[index]),
797 				&pt_pg, PHYSICAL_PAGE_NO_WAIT);
798 		} while (status < B_OK);
799 		pt = (page_table_entry *)pt_pg;
800 		// add offset from start of page
801 		pt += PIE_TO_PO(pi[index]) / sizeof(page_table_entry);
802 		// release the indirect table page
803 		put_physical_page_tmap(pi_pg);
804 	}
805 
806 	*_physical = PTE_TO_PA(pt[index]);
807 
808 	// read in the page state flags
809 	if (!pt[index].supervisor)
810 		*_flags |= (pt[index].write_protect ? 0 : B_WRITE_AREA) | B_READ_AREA;
811 
812 	*_flags |= (pt[index].write_protect ? 0 : B_KERNEL_WRITE_AREA)
813 		| B_KERNEL_READ_AREA
814 		| (pt[index].dirty ? PAGE_MODIFIED : 0)
815 		| (pt[index].accessed ? PAGE_ACCESSED : 0)
816 		| ((pt[index].type == DT_PAGE) ? PAGE_PRESENT : 0);
817 
818 	put_physical_page_tmap(pt_pg);
819 	put_physical_page_tmap(pd_pg);
820 
821 	TRACE(("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va));
822 
823 	return B_OK;
824 }
825 
826 
827 static addr_t
828 get_mapped_size_tmap(vm_translation_map *map)
829 {
830 	return map->map_count;
831 }
832 
833 
834 static status_t
835 protect_tmap(vm_translation_map *map, addr_t start, addr_t end, uint32 attributes)
836 {
837 	page_table_entry *pt;
838 	page_directory_entry *pd;
839 	page_root_entry *pr = map->arch_data->rtdir_virt;
840 	addr_t pd_pg, pt_pg;
841 	status_t status;
842 	int index;
843 
844 	start = ROUNDOWN(start, B_PAGE_SIZE);
845 	end = ROUNDUP(end, B_PAGE_SIZE);
846 
847 	TRACE(("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end, attributes));
848 
849 restart:
850 	if (start >= end)
851 		return B_OK;
852 
853 	index = VADDR_TO_PRENT(start);
854 	if (pr[index].type != DT_ROOT) {
855 		// no pagedir here, move the start up to access the next page table
856 		start = ROUNDUP(start + 1, B_PAGE_SIZE);
857 		goto restart;
858 	}
859 
860 	do {
861 		status = get_physical_page_tmap(PRE_TO_PA(pr[index]),
862 			&pd_pg, PHYSICAL_PAGE_NO_WAIT);
863 	} while (status < B_OK);
864 	pd = (page_directory_entry *)pd_pg;
865 	// we want the table at rindex, not at rindex%(tbl/page)
866 	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
867 
868 	index = VADDR_TO_PDENT(start);
869 	if (pd[index].type != DT_DIR) {
870 		// no pagetable here, move the start up to access the next page table
871 		start = ROUNDUP(start + 1, B_PAGE_SIZE);
872 		put_physical_page_tmap(pd_pg);
873 		goto restart;
874 	}
875 
876 	do {
877 		status = get_physical_page_tmap(PDE_TO_PA(pd[index]),
878 			&pt_pg, PHYSICAL_PAGE_NO_WAIT);
879 	} while (status < B_OK);
880 	pt = (page_table_entry *)pt_pg;
881 	// we want the table at rindex, not at rindex%(tbl/page)
882 	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
883 
884 	for (index = VADDR_TO_PTENT(start);
885 			(index < NUM_PAGEENT_PER_TBL) && (start < end);
886 			index++, start += B_PAGE_SIZE) {
887 		// XXX: handle indirect ?
888 		if (pt[index].type != DT_PAGE /*&& pt[index].type != DT_INDIRECT*/) {
889 			// page mapping not valid
890 			continue;
891 		}
892 
893 		TRACE(("protect_tmap: protect page 0x%lx\n", start));
894 
895 		pt[index].supervisor = (attributes & B_USER_PROTECTION) == 0;
896 		if ((attributes & B_USER_PROTECTION) != 0)
897 			pt[index].write_protect = (attributes & B_WRITE_AREA) == 0;
898 		else
899 			pt[index].write_protect = (attributes & B_KERNEL_WRITE_AREA) == 0;
900 
901 		if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
902 			map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = start;
903 
904 		map->arch_data->num_invalidate_pages++;
905 	}
906 
907 	put_physical_page_tmap(pt_pg);
908 	put_physical_page_tmap(pd_pg);
909 
910 	goto restart;
911 }
912 
913 
914 static status_t
915 clear_flags_tmap(vm_translation_map *map, addr_t va, uint32 flags)
916 {
917 	page_table_entry *pt;
918 	page_indirect_entry *pi;
919 	page_directory_entry *pd;
920 	page_root_entry *pr = map->arch_data->rtdir_virt;
921 	addr_t pd_pg, pt_pg, pi_pg;
922 	status_t status;
923 	int index;
924 	int tlb_flush = false;
925 
926 	index = VADDR_TO_PRENT(va);
927 	if (pr[index].type != DT_ROOT) {
928 		// no pagetable here
929 		return B_NO_ERROR;
930 	}
931 
932 	do {
933 		status = get_physical_page_tmap(PRE_TO_PA(pr[index]),
934 			&pd_pg, PHYSICAL_PAGE_NO_WAIT);
935 	} while (status < B_OK);
936 	pd = (page_directory_entry *)pd_pg;
937 	// we want the table at rindex, not at rindex%(tbl/page)
938 	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
939 
940 
941 	index = VADDR_TO_PDENT(va);
942 	if (pd[index].type != DT_DIR) {
943 		// no pagetable here
944 		put_physical_page_tmap(pd_pg);
945 		return B_NO_ERROR;
946 	}
947 
948 	do {
949 		status = get_physical_page_tmap(PDE_TO_PA(pd[index]),
950 			&pt_pg, PHYSICAL_PAGE_NO_WAIT);
951 	} while (status < B_OK);
952 	pt = (page_table_entry *)pt_pg;
953 	// we want the table at rindex, not at rindex%(tbl/page)
954 	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
955 
956 	index = VADDR_TO_PTENT(va);
957 
958 	// handle indirect descriptor
959 	if (pt[index].type == DT_INDIRECT) {
960 		pi = (page_indirect_entry *)pt;
961 		pi_pg = pt_pg;
962 		do {
963 			status = get_physical_page_tmap(PIE_TO_PA(pi[index]),
964 				&pt_pg, PHYSICAL_PAGE_NO_WAIT);
965 		} while (status < B_OK);
966 		pt = (page_table_entry *)pt_pg;
967 		// add offset from start of page
968 		pt += PIE_TO_PO(pi[index]) / sizeof(page_table_entry);
969 		// release the indirect table page
970 		put_physical_page_tmap(pi_pg);
971 	}
972 
973 	// clear out the flags we've been requested to clear
974 	if (flags & PAGE_MODIFIED) {
975 		pt[index].dirty = 0;
976 		tlb_flush = true;
977 	}
978 	if (flags & PAGE_ACCESSED) {
979 		pt[index].accessed = 0;
980 		tlb_flush = true;
981 	}
982 
983 	put_physical_page_tmap(pt_pg);
984 	put_physical_page_tmap(pd_pg);
985 
986 	if (tlb_flush) {
987 		if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
988 			map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = va;
989 
990 		map->arch_data->num_invalidate_pages++;
991 	}
992 
993 	return B_OK;
994 }
995 
996 
997 static void
998 flush_tmap(vm_translation_map *map)
999 {
1000 	cpu_status state;
1001 
1002 	if (map->arch_data->num_invalidate_pages <= 0)
1003 		return;
1004 
1005 	state = disable_interrupts();
1006 
1007 	if (map->arch_data->num_invalidate_pages > PAGE_INVALIDATE_CACHE_SIZE) {
1008 		// invalidate all pages
1009 		TRACE(("flush_tmap: %d pages to invalidate, invalidate all\n",
1010 			map->arch_data->num_invalidate_pages));
1011 
1012 		if (IS_KERNEL_MAP(map)) {
1013 			arch_cpu_global_TLB_invalidate();
1014 		} else {
1015 			arch_cpu_user_TLB_invalidate();
1016 		}
1017 	} else {
1018 		TRACE(("flush_tmap: %d pages to invalidate, invalidate list\n",
1019 			map->arch_data->num_invalidate_pages));
1020 
1021 		arch_cpu_invalidate_TLB_list(map->arch_data->pages_to_invalidate,
1022 			map->arch_data->num_invalidate_pages);
1023 	}
1024 	map->arch_data->num_invalidate_pages = 0;
1025 
1026 	restore_interrupts(state);
1027 }
1028 
1029 
1030 static status_t
1031 map_iospace_chunk(addr_t va, addr_t pa)
1032 {
1033 	int i;
1034 	page_table_entry *pt;
1035 	int state;
1036 
1037 	pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
1038 	va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
1039 	if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
1040 		panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);
1041 
1042 	pt = &iospace_pgtables[(va - sIOSpaceBase) / B_PAGE_SIZE];
1043 	for (i = 0; i < IOSPACE_CHUNK_SIZE / B_PAGE_SIZE; i++, pa += B_PAGE_SIZE) {
1044 		init_page_table_entry(&pt[i]);
1045 		pt[i].addr = TA_TO_PTEA(pa);
1046 		pt[i].supervisor = 1;
1047 		pt[i].write_protect = 0;
1048 		pt[i].type = DT_PAGE;
1049 		// 040 or 060 only
1050 #ifdef MMU_HAS_GLOBAL_PAGES
1051 		pt[i].global = 1;
1052 #endif
1053 	}
1054 
1055 	state = disable_interrupts();
1056 	arch_cpu_invalidate_TLB_range(va, va + (IOSPACE_CHUNK_SIZE - B_PAGE_SIZE));
1057 	//smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_RANGE,
1058 	//	va, va + (IOSPACE_CHUNK_SIZE - B_PAGE_SIZE), 0,
1059 	//	NULL, SMP_MSG_FLAG_SYNC);
1060 	restore_interrupts(state);
1061 
1062 	return B_OK;
1063 }
1064 
1065 
1066 static status_t
1067 get_physical_page_tmap(addr_t pa, addr_t *va, uint32 flags)
1068 {
1069 	return generic_get_physical_page(pa, va, flags);
1070 }
1071 
1072 
1073 static status_t
1074 put_physical_page_tmap(addr_t va)
1075 {
1076 	return generic_put_physical_page(va);
1077 }
1078 
1079 
1080 static vm_translation_map_ops tmap_ops = {
1081 	destroy_tmap,
1082 	lock_tmap,
1083 	unlock_tmap,
1084 	map_max_pages_need,
1085 	map_tmap,
1086 	unmap_tmap,
1087 	query_tmap,
1088 	query_tmap_interrupt,
1089 	get_mapped_size_tmap,
1090 	protect_tmap,
1091 	clear_flags_tmap,
1092 	flush_tmap,
1093 	get_physical_page_tmap,
1094 	put_physical_page_tmap
1095 };
1096 
1097 
1098 //	#pragma mark -
1099 //	VM API
1100 
1101 
1102 static status_t
1103 m68k_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
1104 {
1105 	if (map == NULL)
1106 		return B_BAD_VALUE;
1107 
1108 	TRACE(("vm_translation_map_create\n"));
1109 
1110 	// initialize the new object
1111 	map->ops = &tmap_ops;
1112 	map->map_count = 0;
1113 
1114 	recursive_lock_init(&map->lock, "translation map");
1115 
1116 	map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
1117 	if (map->arch_data == NULL) {
1118 		recursive_lock_destroy(&map->lock);
1119 		return B_NO_MEMORY;
1120 	}
1121 
1122 	map->arch_data->num_invalidate_pages = 0;
1123 
1124 	if (!kernel) {
1125 		// user
1126 		// allocate a rtdir
1127 		map->arch_data->rtdir_virt = (page_root_entry *)memalign(
1128 			SIZ_ROOTTBL, SIZ_ROOTTBL);
1129 		if (map->arch_data->rtdir_virt == NULL) {
1130 			free(map->arch_data);
1131 			recursive_lock_destroy(&map->lock);
1132 			return B_NO_MEMORY;
1133 		}
1134 		vm_get_page_mapping(vm_kernel_address_space_id(),
1135 			(addr_t)map->arch_data->rtdir_virt, (addr_t *)&map->arch_data->rtdir_phys);
1136 	} else {
1137 		// kernel
1138 		// we already know the kernel pgdir mapping
1139 		map->arch_data->rtdir_virt = sKernelVirtualPageRoot;
1140 		map->arch_data->rtdir_phys = sKernelPhysicalPageRoot;
1141 	}
1142 
1143 	// zero out the bottom portion of the new rtdir
1144 	memset(map->arch_data->rtdir_virt + FIRST_USER_PGROOT_ENT, 0,
1145 		NUM_USER_PGROOT_ENTS * sizeof(page_root_entry));
1146 
1147 	// insert this new map into the map list
1148 	{
1149 		int state = disable_interrupts();
1150 		acquire_spinlock(&tmap_list_lock);
1151 
1152 		// copy the top portion of the rtdir from the current one
1153 		memcpy(map->arch_data->rtdir_virt + FIRST_KERNEL_PGROOT_ENT,
1154 			sKernelVirtualPageRoot + FIRST_KERNEL_PGROOT_ENT,
1155 			NUM_KERNEL_PGROOT_ENTS * sizeof(page_root_entry));
1156 
1157 		map->next = tmap_list;
1158 		tmap_list = map;
1159 
1160 		release_spinlock(&tmap_list_lock);
1161 		restore_interrupts(state);
1162 	}
1163 
1164 	return B_OK;
1165 }
1166 
1167 
1168 static status_t
1169 m68k_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
1170 {
1171 	return B_OK;
1172 }
1173 
1174 
1175 static status_t
1176 m68k_vm_translation_map_init(kernel_args *args)
1177 {
1178 	status_t error;
1179 
1180 	TRACE(("vm_translation_map_init: entry\n"));
1181 #if 0
1182 	// page hole set up in stage2
1183 	page_hole = (page_table_entry *)args->arch_args.page_hole;
1184 	// calculate where the pgdir would be
1185 	page_hole_pgdir = (page_directory_entry *)(((unsigned int)args->arch_args.page_hole) + (B_PAGE_SIZE * 1024 - B_PAGE_SIZE));
1186 	// clear out the bottom 2 GB, unmap everything
1187 	memset(page_hole_pgdir + FIRST_USER_PGDIR_ENT, 0, sizeof(page_directory_entry) * NUM_USER_PGDIR_ENTS);
1188 #endif
1189 
1190 	sKernelPhysicalPageRoot = (page_root_entry *)args->arch_args.phys_pgroot;
1191 	sKernelVirtualPageRoot = (page_root_entry *)args->arch_args.vir_pgroot;
1192 
1193 	sQueryDesc.type = DT_INVALID;
1194 
1195 	B_INITIALIZE_SPINLOCK(&tmap_list_lock);
1196 	tmap_list = NULL;
1197 
1198 	// allocate some space to hold physical page mapping info
1199 	//XXX: check page count
1200 #warning M68K: XXXXXXXXXXXX pt + pd? pd = memalign ?
1201 	iospace_pgtables = (page_table_entry *)vm_allocate_early(args,
1202 		B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL * NUM_PAGETBL_PER_PAGE)), ~0L,
1203 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1204 
1205 	TRACE(("iospace_pgtables %p\n", iospace_pgtables));
1206 
1207 	// init physical page mapper
1208 	error = generic_vm_physical_page_mapper_init(args, map_iospace_chunk,
1209 		&sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
1210 	if (error != B_OK)
1211 		return error;
1212 
1213 	// initialize our data structures
1214 	memset(iospace_pgtables, 0, B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * 1024)));
1215 
1216 	TRACE(("mapping iospace_pgtables\n"));
1217 
1218 	// put the array of pgtables directly into the kernel pagedir
1219 	// these will be wired and kept mapped into virtual space to be easy to get to
1220 	{
1221 #warning M68K: XXXXXXXXXXXX
1222 		addr_t phys_pgtable;
1223 		addr_t virt_pgtable;
1224 		page_directory_entry *e;
1225 		int i;
1226 
1227 		virt_pgtable = (addr_t)iospace_pgtables;
1228 		for (i = 0; i < (IOSPACE_SIZE / (B_PAGE_SIZE * 1024)); i++, virt_pgtable += B_PAGE_SIZE) {
1229 			early_query(virt_pgtable, &phys_pgtable);
1230 			e = &page_hole_pgdir[(sIOSpaceBase / (B_PAGE_SIZE * 1024)) + i];
1231 			put_pgtable_in_pgdir(e, phys_pgtable, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1232 		}
1233 	}
1234 
1235 	TRACE(("vm_translation_map_init: done\n"));
1236 
1237 	return B_OK;
1238 }
1239 
1240 
1241 static status_t
1242 m68k_vm_translation_map_init_post_sem(kernel_args *args)
1243 {
1244 	return generic_vm_physical_page_mapper_init_post_sem(args);
1245 }
1246 
1247 
1248 static status_t
1249 m68k_vm_translation_map_init_post_area(kernel_args *args)
1250 {
1251 	// now that the vm is initialized, create a region that represents
1252 	// the page hole
1253 	void *temp;
1254 	status_t error;
1255 	area_id area;
1256 	addr_t queryPage;
1257 
1258 	TRACE(("vm_translation_map_init_post_area: entry\n"));
1259 
1260 	// unmap the page hole hack we were using before
1261 #warning M68K: FIXME
1262 	//sKernelVirtualPageRoot[1023].present = 0;
1263 	page_hole_pgdir = NULL;
1264 	page_hole = NULL;
1265 
1266 	temp = (void *)sKernelVirtualPageRoot;
1267 	area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, B_PAGE_SIZE,
1268 		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1269 	if (area < B_OK)
1270 		return area;
1271 
1272 	temp = (void *)iospace_pgtables;
1273 	area = create_area("iospace_pgtables", &temp, B_EXACT_ADDRESS,
1274 		B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * 1024)),
1275 		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1276 	if (area < B_OK)
1277 		return area;
1278 
1279 	error = generic_vm_physical_page_mapper_init_post_area(args);
1280 	if (error != B_OK)
1281 		return error;
1282 
1283 	// this area is used for query_tmap_interrupt()
1284 	// TODO: Note, this only works as long as all pages belong to the same
1285 	//	page table, which is not yet enforced (or even tested)!
1286 	// Note we don't support SMP which makes things simpler.
1287 
1288 	area = vm_create_null_area(vm_kernel_address_space_id(),
1289 		"interrupt query pages", (void **)&queryPage, B_ANY_ADDRESS,
1290 		B_PAGE_SIZE);
1291 	if (area < B_OK)
1292 		return area;
1293 
1294 	// insert the indirect descriptor in the tree so we can map the page we want from it.
1295 
1296 	{
1297 		page_directory_entry *pageDirEntry;
1298 		page_indirect_entry *pageTableEntry;
1299 		addr_t physicalPageDir, physicalPageTable;
1300 		addr_t physicalIndirectDesc;
1301 		int32 index;
1302 
1303 		// first get pa for the indirect descriptor
1304 
1305 		index = VADDR_TO_PRENT((addr_t)&sQueryDesc);
1306 		physicalPageDir = PRE_TO_PA(sKernelVirtualPageRoot[index]);
1307 
1308 		get_physical_page_tmap(physicalPageDir,
1309 			(addr_t *)&pageDirEntry, PHYSICAL_PAGE_NO_WAIT);
1310 
1311 		index = VADDR_TO_PDENT((addr_t)&sQueryDesc);
1312 		physicalPageTable = PDE_TO_PA(pageDirEntry[index]);
1313 
1314 		get_physical_page_tmap(physicalPageTable,
1315 			(addr_t *)&pageTableEntry, PHYSICAL_PAGE_NO_WAIT);
1316 
1317 		index = VADDR_TO_PTENT((addr_t)&sQueryDesc);
1318 
1319 		// pa of the page
1320 		physicalIndirectDesc = PTE_TO_PA(pageTableEntry[index]);
1321 		// add offset
1322 		physicalIndirectDesc += ((addr_t)&sQueryDesc) % B_PAGE_SIZE;
1323 
1324 		put_physical_page_tmap((addr_t)pageTableEntry);
1325 		put_physical_page_tmap((addr_t)pageDirEntry);
1326 
1327 		// then the va for the page table for the query page.
1328 
1329 		//sQueryPageTable = (page_indirect_entry *)(queryPage);
1330 
1331 		index = VADDR_TO_PRENT(queryPage);
1332 		physicalPageDir = PRE_TO_PA(sKernelVirtualPageRoot[index]);
1333 
1334 		get_physical_page_tmap(physicalPageDir,
1335 			(addr_t *)&pageDirEntry, PHYSICAL_PAGE_NO_WAIT);
1336 
1337 		index = VADDR_TO_PDENT(queryPage);
1338 		physicalPageTable = PDE_TO_PA(pageDirEntry[index]);
1339 
1340 		get_physical_page_tmap(physicalPageTable,
1341 			(addr_t *)&pageTableEntry, PHYSICAL_PAGE_NO_WAIT);
1342 
1343 		index = VADDR_TO_PTENT(queryPage);
1344 
1345 		put_page_indirect_entry_in_pgtable(&pageTableEntry[index], physicalIndirectDesc,
1346 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
1347 
1348 		put_physical_page_tmap((addr_t)pageTableEntry);
1349 		put_physical_page_tmap((addr_t)pageDirEntry);
1350 		//invalidate_TLB(sQueryPageTable);
1351 	}
1352 	// query_tmap_interrupt() checks sQueryPage for NULL, now it can use it
1353 	sQueryPage = queryPage;
1354 
1355 	TRACE(("vm_translation_map_init_post_area: done\n"));
1356 	return B_OK;
1357 }
1358 
1359 
1360 // XXX horrible back door to map a page quickly regardless of translation map object, etc.
1361 // used only during VM setup.
1362 // uses a 'page hole' set up in the stage 2 bootloader. The page hole is created by pointing one of
1363 // the pgdir entries back at itself, effectively mapping the contents of all of the 4MB of pagetables
1364 // into a 4 MB region. It's only used here, and is later unmapped.
1365 
1366 static status_t
1367 m68k_vm_translation_map_early_map(kernel_args *args, addr_t va, addr_t pa,
1368 	uint8 attributes, addr_t (*get_free_page)(kernel_args *))
1369 {
1370 	int index;
1371 
1372 	TRACE(("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va));
1373 
1374 	// check to see if a page table exists for this range
1375 	index = VADDR_TO_PDENT(va);
1376 	if (page_hole_pgdir[index].type == DT_PAGE) {
1377 		addr_t pgtable;
1378 		page_directory_entry *e;
1379 		// we need to allocate a pgtable
1380 		pgtable = get_free_page(args);
1381 		// pgtable is in pages, convert to physical address
1382 		pgtable *= B_PAGE_SIZE;
1383 
1384 		TRACE(("early_map: asked for free page for pgtable. 0x%lx\n", pgtable));
1385 
1386 		// put it in the pgdir
1387 		e = &page_hole_pgdir[index];
1388 		put_pgtable_in_pgdir(e, pgtable, attributes);
1389 
1390 		// zero it out in its new mapping
1391 		memset((unsigned int *)((unsigned int)page_hole + (va / B_PAGE_SIZE / 1024) * B_PAGE_SIZE), 0, B_PAGE_SIZE);
1392 	}
1393 
1394 	// now, fill in the pentry
1395 	put_page_table_entry_in_pgtable(page_hole + va / B_PAGE_SIZE, pa, attributes,
1396 		IS_KERNEL_ADDRESS(va));
1397 
1398 	arch_cpu_invalidate_TLB_range(va, va);
1399 
1400 	return B_OK;
1401 }
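

/* Indexing sketch (illustrative only, never compiled) for the page hole
 * described in the comment before m68k_vm_translation_map_early_map():
 * with one pgdir entry pointing back at the page directory itself, all page
 * tables become visible inside the hole, so the page_table_entry for a given
 * va is simply page_hole[va / B_PAGE_SIZE], which is exactly what
 * early_query() and the early map code above index. Hypothetical helper only.
 */
#if 0
static page_table_entry *
early_pte_for(addr_t va)
{
	return page_hole + va / B_PAGE_SIZE;
}
#endif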
1402 
1403