xref: /haiku/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp (revision 25a7b01d15612846f332751841da3579db313082)
1 /*
2  * Copyright 2007, Haiku Inc. All rights reserved.
3  * Distributed under the terms of the MIT License.
4  *
5  * Authors:
6  * 		François Revol <revol@free.fr>
7  *
8  * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
9  * Distributed under the terms of the MIT License.
10  *
11  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
12  * Distributed under the terms of the NewOS License.
13  */
14 
15 #ifndef ARCH_M68K_MMU_TYPE
16 #error This file is included from arch_*_mmu.cpp
17 #endif
18 
19 /*  (mmu_man) Implementation details on 68030 and others:
20 
21 	Unlike on x86, we can't switch the context to another team simply by
22 	setting a register to another page directory, since we only have one
23 	page table containing both kernel and user address mappings.
24 	The 030 supports an arbitrary layout of the page directory tree, including
25 	a 1-bit first level (a 2-entry top-level table) that would map kernel
26 	and user land in a single place. But the 040 and later only support a
27 	fixed 7/7/6 split for 4K pages.
28 
29 	Since 68k SMP hardware is rare enough that we don't want to support it,
30 	we can take some shortcuts.
31 
32 	As we don't want separate user and kernel spaces, we'll use a single
33 	table. With the 7/7/6 split the 2nd level would require 32KB of tables,
34 	which is small enough that we don't need the list hack from x86.
35 	XXX: we use the hack for now, check later
36 
37 	Since page directories/tables don't exactly fill a page, we pack more
38 	than one per page, allocate them all at once, and add them to the tree
39 	at the same time. So we guarantee that all higher-level entries, modulo
40 	the number of tables per page, are either all invalid or all present.
41  */
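
/*  (editor) Illustrative sketch of how the fixed 7/7/6 split divides a 32-bit
	virtual address with 4K pages. The shift amounts below follow from the
	layout described above and are illustrative only; the authoritative
	versions are the VADDR_TO_PRENT/VADDR_TO_PDENT/VADDR_TO_PTENT macros from
	the arch_mmu headers included below.

		// | 7 bits root | 7 bits dir | 6 bits page | 12 bits page offset |
		uint32 rootIndex = (va >> 25) & 0x7f;	// 128 root table entries
		uint32 dirIndex  = (va >> 18) & 0x7f;	// 128 entries per dir table
		uint32 pageIndex = (va >> 12) & 0x3f;	// 64 entries per page table
		uint32 offset    = va & (B_PAGE_SIZE - 1);

	Each individual table is smaller than B_PAGE_SIZE, which is why the code
	below packs NUM_DIRTBL_PER_PAGE directory tables (and NUM_PAGETBL_PER_PAGE
	page tables) into a single physical page and allocates them as a group.
 */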
42 
43 #include <KernelExport.h>
44 #include <kernel.h>
45 #include <heap.h>
46 #include <vm/vm.h>
47 #include <vm/vm_page.h>
48 #include <vm/vm_priv.h>
49 #include <vm/VMAddressSpace.h>
50 #include <int.h>
51 #include <boot/kernel_args.h>
52 #include <arch/vm_translation_map.h>
53 #include <arch/cpu.h>
54 #include <arch_mmu.h>
55 #include <stdlib.h>
56 
57 #include "generic_vm_physical_page_mapper.h"
58 #include "generic_vm_physical_page_ops.h"
59 
60 
61 #define TRACE_VM_TMAP
62 #ifdef TRACE_VM_TMAP
63 #	define TRACE(x) dprintf x
64 #else
65 #	define TRACE(x) ;
66 #endif
67 
68 // 16 MB of iospace
69 //#define IOSPACE_SIZE (4*1024*1024)
70 #define IOSPACE_SIZE (16*1024*1024)
71 // 256K = 2^6*4K
72 #define IOSPACE_CHUNK_SIZE (NUM_PAGEENT_PER_TBL*B_PAGE_SIZE)
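// i.e. one chunk covers exactly the virtual range mapped by a single page
// table (64 entries * 4K pages = 256K), so map_iospace_chunk() below fills
// one whole table per call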
73 
74 static page_table_entry *iospace_pgtables = NULL;
75 
76 #define PAGE_INVALIDATE_CACHE_SIZE 64
77 
78 // vm_translation object stuff
79 typedef struct vm_translation_map_arch_info {
80 	page_root_entry *rtdir_virt;
81 	page_root_entry *rtdir_phys;
82 	int num_invalidate_pages;
83 	addr_t pages_to_invalidate[PAGE_INVALIDATE_CACHE_SIZE];
84 } vm_translation_map_arch_info;
85 
86 #if 1//XXX:HOLE
87 static page_table_entry *page_hole = NULL;
88 static page_directory_entry *page_hole_pgdir = NULL;
89 #endif
90 static page_root_entry *sKernelPhysicalPageRoot = NULL;
91 static page_root_entry *sKernelVirtualPageRoot = NULL;
92 static addr_t sQueryPage = NULL;
93 //static page_table_entry *sQueryPageTable;
94 //static page_directory_entry *sQueryPageDir;
95 // MUST be aligned
96 static page_table_entry sQueryDesc __attribute__ (( aligned (4) ));
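// How the query page works (set up in m68k_vm_translation_map_init_post_area()
// and used by query_tmap_interrupt()): the page table entry backing sQueryPage
// is an indirect descriptor pointing at sQueryDesc, so the MMU uses sQueryDesc
// as the actual page descriptor. Rewriting sQueryDesc therefore remaps
// sQueryPage to an arbitrary physical page without having to walk or map any
// page table, which is what makes queries from interrupt context possible.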
97 
98 static vm_translation_map *tmap_list;
99 static spinlock tmap_list_lock;
100 
101 static addr_t sIOSpaceBase;
102 
103 #define CHATTY_TMAP 0
104 
105 #if 0
106 // use P*E_TO_* and TA_TO_P*EA !
107 #define ADDR_SHIFT(x) ((x)>>12)
108 #define ADDR_REVERSE_SHIFT(x) ((x)<<12)
109 #endif
110 
111 #define FIRST_USER_PGROOT_ENT    (VADDR_TO_PRENT(USER_BASE))
112 #define FIRST_USER_PGDIR_ENT    (VADDR_TO_PDENT(USER_BASE))
113 #define NUM_USER_PGROOT_ENTS     (VADDR_TO_PRENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64 * 128)))
114 #define NUM_USER_PGDIR_ENTS     (VADDR_TO_PDENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64)))
115 #define FIRST_KERNEL_PGROOT_ENT  (VADDR_TO_PRENT(KERNEL_BASE))
116 #define FIRST_KERNEL_PGDIR_ENT  (VADDR_TO_PDENT(KERNEL_BASE))
117 #define NUM_KERNEL_PGROOT_ENTS   (VADDR_TO_PRENT(KERNEL_SIZE))
118 #define NUM_KERNEL_PGDIR_ENTS   (VADDR_TO_PDENT(KERNEL_SIZE))
119 #define IS_KERNEL_MAP(map)		(map->arch_data->rtdir_phys == sKernelPhysicalPageRoot)
120 
121 static status_t early_query(addr_t va, addr_t *out_physical);
122 static status_t get_physical_page_tmap_internal(addr_t pa, addr_t *va, uint32 flags);
123 static status_t put_physical_page_tmap_internal(addr_t va);
124 
125 static void flush_tmap(vm_translation_map *map);
126 
127 
128 #warning M68K: RENAME
129 static void *
130 _m68k_translation_map_get_pgdir(vm_translation_map *map)
131 {
132 	return map->arch_data->rtdir_phys;
133 }
134 
135 
136 static inline void
137 init_page_root_entry(page_root_entry *entry)
138 {
139 	// DT_INVALID is 0
140 	*(page_root_entry_scalar *)entry = DFL_ROOTENT_VAL;
141 }
142 
143 
144 static inline void
145 update_page_root_entry(page_root_entry *entry, page_root_entry *with)
146 {
147 	// update page directory entry atomically
148 	*(page_root_entry_scalar *)entry = *(page_root_entry_scalar *)with;
149 }
150 
151 
152 static inline void
153 init_page_directory_entry(page_directory_entry *entry)
154 {
155 	*(page_directory_entry_scalar *)entry = DFL_DIRENT_VAL;
156 }
157 
158 
159 static inline void
160 update_page_directory_entry(page_directory_entry *entry, page_directory_entry *with)
161 {
162 	// update page directory entry atomically
163 	*(page_directory_entry_scalar *)entry = *(page_directory_entry_scalar *)with;
164 }
165 
166 
167 static inline void
168 init_page_table_entry(page_table_entry *entry)
169 {
170 	*(page_table_entry_scalar *)entry = DFL_PAGEENT_VAL;
171 }
172 
173 
174 static inline void
175 update_page_table_entry(page_table_entry *entry, page_table_entry *with)
176 {
177 	// update page table entry atomically
178 	// XXX: is it ?? (long desc?)
179 	*(page_table_entry_scalar *)entry = *(page_table_entry_scalar *)with;
180 }
181 
182 
183 static inline void
184 init_page_indirect_entry(page_indirect_entry *entry)
185 {
186 #warning M68K: is it correct ?
187 	*(page_indirect_entry_scalar *)entry = DFL_PAGEENT_VAL;
188 }
189 
190 
191 static inline void
192 update_page_indirect_entry(page_indirect_entry *entry, page_indirect_entry *with)
193 {
194 	// update page table entry atomically
195 	// XXX: is it ?? (long desc?)
196 	*(page_indirect_entry_scalar *)entry = *(page_indirect_entry_scalar *)with;
197 }
198 
199 
200 #warning M68K: allocate all kernel pgdirs at boot and remove this (also dont remove them anymore from unmap)
201 static void
202 _update_all_pgdirs(int index, page_root_entry e)
203 {
204 	vm_translation_map *entry;
205 	unsigned int state = disable_interrupts();
206 
207 	acquire_spinlock(&tmap_list_lock);
208 
209 	for(entry = tmap_list; entry != NULL; entry = entry->next)
210 		entry->arch_data->rtdir_virt[index] = e;
211 
212 	release_spinlock(&tmap_list_lock);
213 	restore_interrupts(state);
214 }
215 
216 
217 // This is used before the VM is fully up; it uses the
218 // transparent translation of the first 256MB
219 // as set up by the bootloader.
220 static status_t
221 early_query(addr_t va, addr_t *_physicalAddress)
222 {
223 	page_root_entry *pr = sKernelVirtualPageRoot;
224 	page_directory_entry *pd;
225 	page_indirect_entry *pi;
226 	page_table_entry *pt;
227 	addr_t pa;
228 	int32 index;
229 	status_t err = B_ERROR;	// no pagetable here
230 	TRACE(("%s(%p,)\n", __FUNCTION__, va));
231 
232 	index = VADDR_TO_PRENT(va);
233 	TRACE(("%s: pr[%d].type %d\n", __FUNCTION__, index, pr[index].type));
234 	if (pr && pr[index].type == DT_ROOT) {
235 		pa = PRE_TO_TA(pr[index]);
236 		// pa == va when in TT
237 		// and no need to fiddle with cache
238 		pd = (page_directory_entry *)pa;
239 
240 		index = VADDR_TO_PDENT(va);
241 		TRACE(("%s: pd[%d].type %d\n", __FUNCTION__, index,
242 				pd?(pd[index].type):-1));
243 		if (pd && pd[index].type == DT_DIR) {
244 			pa = PDE_TO_TA(pd[index]);
245 			pt = (page_table_entry *)pa;
246 
247 			index = VADDR_TO_PTENT(va);
248 			TRACE(("%s: pt[%d].type %d\n", __FUNCTION__, index,
249 					pt?(pt[index].type):-1));
250 			if (pt && pt[index].type == DT_INDIRECT) {
251 				pi = (page_indirect_entry *)pt;
252 				pa = PIE_TO_TA(pi[index]);
253 				pt = (page_table_entry *)pa;
254 				index = 0; // single descriptor
255 			}
256 
257 			if (pt && pt[index].type == DT_PAGE) {
258 				*_physicalAddress = PTE_TO_PA(pt[index]);
259 				// we should only be passed page va, but just in case.
260 				*_physicalAddress += va % B_PAGE_SIZE;
261 				err = B_OK;
262 			}
263 		}
264 	}
265 
266 	return err;
267 }
268 
269 
270 /*!	Acquires the map's recursive lock, and resets the invalidate pages counter
271 	in case it's the first locking recursion.
272 */
273 static status_t
274 lock_tmap(vm_translation_map *map)
275 {
276 	TRACE(("lock_tmap: map %p\n", map));
277 
278 	recursive_lock_lock(&map->lock);
279 	if (recursive_lock_get_recursion(&map->lock) == 1) {
280 		// we were the first one to grab the lock
281 		TRACE(("clearing invalidated page count\n"));
282 		map->arch_data->num_invalidate_pages = 0;
283 	}
284 
285 	return B_OK;
286 }
287 
288 
289 /*!	Unlocks the map and, if we're actually losing the recursive lock,
290 	flushes all pending changes of this map (i.e. flushes TLB caches as
291 	needed).
292 */
293 static status_t
294 unlock_tmap(vm_translation_map *map)
295 {
296 	TRACE(("unlock_tmap: map %p\n", map));
297 
298 	if (recursive_lock_get_recursion(&map->lock) == 1) {
299 		// we're about to release it for the last time
300 		flush_tmap(map);
301 	}
302 
303 	recursive_lock_unlock(&map->lock);
304 	return B_OK;
305 }
306 
307 
308 static void
309 destroy_tmap(vm_translation_map *map)
310 {
311 	int state;
312 	vm_translation_map *entry;
313 	vm_translation_map *last = NULL;
314 	unsigned int i, j;
315 
316 	if (map == NULL)
317 		return;
318 
319 	// remove it from the tmap list
320 	state = disable_interrupts();
321 	acquire_spinlock(&tmap_list_lock);
322 
323 	entry = tmap_list;
324 	while (entry != NULL) {
325 		if (entry == map) {
326 			if (last != NULL)
327 				last->next = entry->next;
328 			else
329 				tmap_list = entry->next;
330 
331 			break;
332 		}
333 		last = entry;
334 		entry = entry->next;
335 	}
336 
337 	release_spinlock(&tmap_list_lock);
338 	restore_interrupts(state);
339 
340 	if (map->arch_data->rtdir_virt != NULL) {
341 		// cycle through and free all of the user space pgtables
342 		// since the size of the tables doesn't match B_PAGE_SIZE,
343 		// we alloc several at once and, based on modulos,
344 		// make sure they are either all in the tree or none.
345 		for (i = VADDR_TO_PRENT(USER_BASE); i <= VADDR_TO_PRENT(USER_BASE + (USER_SIZE - 1)); i++) {
346 			addr_t pgdir_pn;
347 			page_directory_entry *pgdir;
348 			vm_page *dirpage;
349 
350 			if (map->arch_data->rtdir_virt[i].type == DT_INVALID)
351 				continue;
352 			if (map->arch_data->rtdir_virt[i].type != DT_ROOT) {
353 				panic("rtdir[%d]: buggy descriptor type", i);
354 				return;
355 			}
356 			// suboptimal (done 8 times)
357 			pgdir_pn = PRE_TO_PN(map->arch_data->rtdir_virt[i]);
358 			dirpage = vm_lookup_page(pgdir_pn);
359 			pgdir = &(((page_directory_entry *)dirpage)[i%NUM_DIRTBL_PER_PAGE]);
360 
361 			for (j = 0; j < NUM_DIRENT_PER_TBL; j += NUM_PAGETBL_PER_PAGE) {
362 				addr_t pgtbl_pn;
363 				page_table_entry *pgtbl;
364 				vm_page *page;
365 				if (pgdir[j].type == DT_INVALID)
366 					continue;
367 				if (pgdir[j].type != DT_DIR) {
368 					panic("rtdir[%d][%d]: buggy descriptor type", i, j);
369 					return;
370 				}
371 				pgtbl_pn = PDE_TO_PN(pgdir[j]);
372 				page = vm_lookup_page(pgtbl_pn);
373 				pgtbl = (page_table_entry *)page;
374 
375 				if (!page) {
376 					panic("destroy_tmap: didn't find pgtable page\n");
377 					return;
378 				}
379 				DEBUG_PAGE_ACCESS_START(page);
380 				vm_page_set_state(page, PAGE_STATE_FREE);
381 			}
382 			if (((i + 1) % NUM_DIRTBL_PER_PAGE) == 0) {
383 				DEBUG_PAGE_ACCESS_END(dirpage);
384 				vm_page_set_state(dirpage, PAGE_STATE_FREE);
385 			}
386 		}
387 		free(map->arch_data->rtdir_virt);
388 	}
389 
390 	free(map->arch_data);
391 	recursive_lock_destroy(&map->lock);
392 }
393 
394 
395 static void
396 put_pgdir_in_pgroot(page_root_entry *entry,
397 	addr_t pgdir_phys, uint32 attributes)
398 {
399 	page_root_entry dir;
400 	// put it in the pgdir
401 	init_page_root_entry(&dir);
402 	dir.addr = TA_TO_PREA(pgdir_phys);
403 
404 	// ToDo: we ignore the attributes of the page table - for compatibility
405 	//	with BeOS we allow having user accessible areas in the kernel address
406 	//	space. This is currently being used by some drivers, mainly for the
407 	//	frame buffer. Our current real time data implementation makes use of
408 	//	this fact, too.
409 	//	We might want to get rid of this possibility one day, especially if
410 	//	we intend to port it to a platform that does not support this.
411 	//dir.user = 1;
412 	//dir.rw = 1;
413 	dir.type = DT_ROOT;
414 	update_page_root_entry(entry, &dir);
415 }
416 
417 
418 static void
419 put_pgtable_in_pgdir(page_directory_entry *entry,
420 	addr_t pgtable_phys, uint32 attributes)
421 {
422 	page_directory_entry table;
423 	// put it in the pgdir
424 	init_page_directory_entry(&table);
425 	table.addr = TA_TO_PDEA(pgtable_phys);
426 
427 	// ToDo: we ignore the attributes of the page table - for compatibility
428 	//	with BeOS we allow having user accessible areas in the kernel address
429 	//	space. This is currently being used by some drivers, mainly for the
430 	//	frame buffer. Our current real time data implementation makes use of
431 	//	this fact, too.
432 	//	We might want to get rid of this possibility one day, especially if
433 	//	we intend to port it to a platform that does not support this.
434 	//table.user = 1;
435 	//table.rw = 1;
436 	table.type = DT_DIR;
437 	update_page_directory_entry(entry, &table);
438 }
439 
440 
441 static void
442 put_page_table_entry_in_pgtable(page_table_entry *entry,
443 	addr_t physicalAddress, uint32 attributes, bool globalPage)
444 {
445 	page_table_entry page;
446 	init_page_table_entry(&page);
447 
448 	page.addr = TA_TO_PTEA(physicalAddress);
449 
450 	// if the page is user accessible, it's automatically
451 	// accessible in kernel space, too (but with the same
452 	// protection)
453 	page.supervisor = (attributes & B_USER_PROTECTION) == 0;
454 	if (page.supervisor)
455 		page.write_protect = (attributes & B_KERNEL_WRITE_AREA) == 0;
456 	else
457 		page.write_protect = (attributes & B_WRITE_AREA) == 0;
458 	page.type = DT_PAGE;
459 
460 #ifdef PAGE_HAS_GLOBAL_BIT
461 	if (globalPage)
462 		page.global = 1;
463 #endif
464 
465 	// put it in the page table
466 	update_page_table_entry(entry, &page);
467 }
468 
469 
470 static void
471 put_page_indirect_entry_in_pgtable(page_indirect_entry *entry,
472 	addr_t physicalAddress, uint32 attributes, bool globalPage)
473 {
474 	page_indirect_entry page;
475 	init_page_indirect_entry(&page);
476 
477 	page.addr = TA_TO_PIEA(physicalAddress);
478 	page.type = DT_INDIRECT;
479 
480 	// there are usually no protection bits in an indirect descriptor.
481 
482 	// put it in the page table
483 	update_page_indirect_entry(entry, &page);
484 }
485 
486 
487 static size_t
488 map_max_pages_need(vm_translation_map */*map*/, addr_t start, addr_t end)
489 {
490 	size_t need;
491 	size_t pgdirs;
492 	// If start == 0, the actual base address is not yet known to the caller
493 	// and we shall assume the worst case.
494 	if (start == 0) {
495 #warning M68K: FIXME?
496 		start = (1023) * B_PAGE_SIZE;
497 		end += start;
498 	}
499 	pgdirs = VADDR_TO_PRENT(end) + 1 - VADDR_TO_PRENT(start);
500 	// how much for page directories
501 	need = (pgdirs + NUM_DIRTBL_PER_PAGE - 1) / NUM_DIRTBL_PER_PAGE;
502 	// and page tables themselves
503 	need += ((pgdirs * NUM_DIRENT_PER_TBL) + NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE;
504 
505 	// better rounding when only 1 pgdir
506 	// XXX: do better for other cases
507 	if (pgdirs == 1) {
508 		need = 1;
509 		need += (VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start) + NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE;
510 	}
511 
512 	return need;
513 }
514 
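// In other words, the worst case computed above is
//   ceil(pgdirs / NUM_DIRTBL_PER_PAGE)                          // dir tables
//   + ceil(pgdirs * NUM_DIRENT_PER_TBL / NUM_PAGETBL_PER_PAGE)  // page tables
// physical pages; the tighter per-pgdir bound is used when the whole range
// falls into a single pgdir.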
515 
516 static status_t
517 map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
518 {
519 	page_root_entry *pr;
520 	page_directory_entry *pd;
521 	page_table_entry *pt;
522 	addr_t pd_pg, pt_pg;
523 	unsigned int rindex, dindex, pindex;
524 	int err;
525 
526 	TRACE(("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va));
527 
528 /*
529 	dprintf("pgdir at 0x%x\n", pgdir);
530 	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
531 	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
532 	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
533 	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
534 	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
535 */
536 	pr = map->arch_data->rtdir_virt;
537 
538 	// check to see if a page directory exists for this range
539 	rindex = VADDR_TO_PRENT(va);
540 	if (pr[rindex].type != DT_ROOT) {
541 		addr_t pgdir;
542 		vm_page *page;
543 		unsigned int i;
544 
545 		// we need to allocate a pgtable
546 		page = vm_page_allocate_page(PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
547 
548 		DEBUG_PAGE_ACCESS_END(page);
549 
550 		pgdir = page->physical_page_number * B_PAGE_SIZE;
551 
552 		TRACE(("map_tmap: asked for free page for pgdir. 0x%lx\n", pgdir));
553 
554 		// for each pgdir on the allocated page:
555 		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
556 			unsigned aindex = rindex & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
557 			page_root_entry *apr = &pr[aindex + i];
558 
559 			// put in the pgdir
560 			put_pgdir_in_pgroot(apr, pgdir, attributes
561 				| (attributes & B_USER_PROTECTION ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
562 
563 			// update any other page directories, if it maps kernel space
564 			//XXX: suboptimal, should batch them
565 			if ((aindex+i) >= FIRST_KERNEL_PGDIR_ENT
566 				&& (aindex+i) < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS))
567 				_update_all_pgdirs((aindex+i), pr[aindex+i]);
568 
569 			pgdir += SIZ_DIRTBL;
570 		}
571 #warning M68K: really mean map_count++ ??
572 		map->map_count++;
573 	}
574 	// now, fill in the pentry
575 	do {
576 		err = get_physical_page_tmap_internal(PRE_TO_PA(pr[rindex]),
577 				&pd_pg, PHYSICAL_PAGE_DONT_WAIT);
578 	} while (err < 0);
579 	pd = (page_directory_entry *)pd_pg;
580 	// we want the table at rindex, not at rindex%(tbl/page)
581 	pd += (rindex % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
582 
583 	// check to see if a page table exists for this range
584 	dindex = VADDR_TO_PDENT(va);
585 	if (pd[dindex].type != DT_DIR) {
586 		addr_t pgtable;
587 		vm_page *page;
588 		unsigned int i;
589 
590 		// we need to allocate a pgtable
591 		page = vm_page_allocate_page(PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
592 
593 		DEBUG_PAGE_ACCESS_END(page);
594 
595 		pgtable = page->physical_page_number * B_PAGE_SIZE;
596 
597 		TRACE(("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable));
598 
599 		// for each pgtable on the allocated page:
600 		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
601 			unsigned aindex = dindex & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
602 			page_directory_entry *apd = &pd[aindex + i];
603 
604 			// put in the pgdir
605 			put_pgtable_in_pgdir(apd, pgtable, attributes
606 				| (attributes & B_USER_PROTECTION ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
607 
608 			// no need to update other page directories for kernel space;
609 			// the root-level entries already point to us.
610 
611 			pgtable += SIZ_PAGETBL;
612 		}
613 
614 #warning M68K: really mean map_count++ ??
615 		map->map_count++;
616 	}
617 	// now, fill in the pentry
618 	do {
619 		err = get_physical_page_tmap_internal(PDE_TO_PA(pd[dindex]),
620 				&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
621 	} while (err < 0);
622 	pt = (page_table_entry *)pt_pg;
623 	// we want the table at rindex, not at rindex%(tbl/page)
624 	pt += (dindex % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
625 
626 	pindex = VADDR_TO_PTENT(va);
627 
628 	put_page_table_entry_in_pgtable(&pt[pindex], pa, attributes,
629 		IS_KERNEL_MAP(map));
630 
631 	put_physical_page_tmap_internal(pt_pg);
632 	put_physical_page_tmap_internal(pd_pg);
633 
634 	if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
635 		map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = va;
636 
637 	map->arch_data->num_invalidate_pages++;
638 
639 	map->map_count++;
640 
641 	return 0;
642 }
643 
644 
645 static status_t
646 unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
647 {
648 	page_table_entry *pt;
649 	page_directory_entry *pd;
650 	page_root_entry *pr = map->arch_data->rtdir_virt;
651 	addr_t pd_pg, pt_pg;
652 	status_t status;
653 	int index;
654 
655 	start = ROUNDDOWN(start, B_PAGE_SIZE);
656 	end = ROUNDUP(end, B_PAGE_SIZE);
657 
658 	TRACE(("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end));
659 
660 restart:
661 	if (start >= end)
662 		return B_OK;
663 
664 	index = VADDR_TO_PRENT(start);
665 	if (pr[index].type != DT_ROOT) {
666 		// no pagedir here, move the start up to access the next page table
667 		start = ROUNDUP(start + 1, B_PAGE_SIZE);
668 		goto restart;
669 	}
670 
671 	do {
672 		status = get_physical_page_tmap_internal(PRE_TO_PA(pr[index]),
673 			&pd_pg, PHYSICAL_PAGE_DONT_WAIT);
674 	} while (status < B_OK);
675 	pd = (page_directory_entry *)pd_pg;
676 	// we want the table at rindex, not at rindex%(tbl/page)
677 	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
678 
679 	index = VADDR_TO_PDENT(start);
680 	if (pd[index].type != DT_DIR) {
681 		// no pagetable here, move the start up to access the next page table
682 		start = ROUNDUP(start + 1, B_PAGE_SIZE);
683 		put_physical_page_tmap_internal(pd_pg);
684 		goto restart;
685 	}
686 
687 	do {
688 		status = get_physical_page_tmap_internal(PDE_TO_PA(pd[index]),
689 			&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
690 	} while (status < B_OK);
691 	pt = (page_table_entry *)pt_pg;
692 	// we want the table at rindex, not at rindex%(tbl/page)
693 	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
694 
695 	for (index = VADDR_TO_PTENT(start);
696 			(index < NUM_PAGEENT_PER_TBL) && (start < end);
697 			index++, start += B_PAGE_SIZE) {
698 		if (pt[index].type != DT_PAGE && pt[index].type != DT_INDIRECT) {
699 			// page mapping not valid
700 			continue;
701 		}
702 
703 		TRACE(("unmap_tmap: removing page 0x%lx\n", start));
704 
705 		pt[index].type = DT_INVALID;
706 		map->map_count--;
707 
708 		if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
709 			map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = start;
710 
711 		map->arch_data->num_invalidate_pages++;
712 	}
713 
714 	put_physical_page_tmap_internal(pt_pg);
715 	put_physical_page_tmap_internal(pd_pg);
716 
717 	goto restart;
718 }
719 
720 // XXX: 040 should be able to do that with PTEST (but not 030 or 060)
721 static status_t
722 query_tmap_interrupt(vm_translation_map *map, addr_t va, addr_t *_physical,
723 	uint32 *_flags)
724 {
725 	page_root_entry *pr = map->arch_data->rtdir_virt;
726 	page_directory_entry *pd;
727 	page_indirect_entry *pi;
728 	page_table_entry *pt;
729 	addr_t physicalPageTable;
730 	int32 index;
731 	status_t err = B_ERROR;	// no pagetable here
732 
733 	if (sQueryPage == NULL)
734 		return err; // not yet initialized !?
735 
736 	index = VADDR_TO_PRENT(va);
737 	if (pr && pr[index].type == DT_ROOT) {
738 		put_page_table_entry_in_pgtable(&sQueryDesc, PRE_TO_TA(pr[index]), B_KERNEL_READ_AREA, false);
739 		arch_cpu_invalidate_TLB_range(sQueryPage, sQueryPage);
740 		pd = (page_directory_entry *)sQueryPage;
741 
742 		index = VADDR_TO_PDENT(va);
743 		if (pd && pd[index].type == DT_DIR) {
744 			put_page_table_entry_in_pgtable(&sQueryDesc, PDE_TO_TA(pd[index]), B_KERNEL_READ_AREA, false);
745 			arch_cpu_invalidate_TLB_range(sQueryPage, sQueryPage);
746 			pt = (page_table_entry *)sQueryPage;
747 
748 			index = VADDR_TO_PTENT(va);
749 			if (pt && pt[index].type == DT_INDIRECT) {
750 				pi = (page_indirect_entry *)pt;
751 				put_page_table_entry_in_pgtable(&sQueryDesc, PIE_TO_TA(pi[index]), B_KERNEL_READ_AREA, false);
752 				arch_cpu_invalidate_TLB_range(sQueryPage, sQueryPage);
753 				pt = (page_table_entry *)sQueryPage;
754 				index = 0; // single descriptor
755 			}
756 
757 			if (pt /*&& pt[index].type == DT_PAGE*/) {
758 				*_physical = PTE_TO_PA(pt[index]);
759 				// we should only be passed page va, but just in case.
760 				*_physical += va % B_PAGE_SIZE;
761 				*_flags |= ((pt[index].write_protect ? 0 : B_KERNEL_WRITE_AREA) | B_KERNEL_READ_AREA)
762 						| (pt[index].dirty ? PAGE_MODIFIED : 0)
763 						| (pt[index].accessed ? PAGE_ACCESSED : 0)
764 						| ((pt[index].type == DT_PAGE) ? PAGE_PRESENT : 0);
765 				err = B_OK;
766 			}
767 		}
768 	}
769 
770 	// unmap the pg table from the indirect desc.
771 	sQueryDesc.type = DT_INVALID;
772 
773 	return err;
774 }
775 
776 
777 static status_t
778 query_tmap(vm_translation_map *map, addr_t va, addr_t *_physical, uint32 *_flags)
779 {
780 	page_table_entry *pt;
781 	page_indirect_entry *pi;
782 	page_directory_entry *pd;
783 	page_directory_entry *pr = map->arch_data->rtdir_virt;
784 	addr_t pd_pg, pt_pg, pi_pg;
785 	status_t status;
786 	int32 index;
787 
788 	// default the flags to not present
789 	*_flags = 0;
790 	*_physical = 0;
791 
792 	index = VADDR_TO_PRENT(va);
793 	if (pr[index].type != DT_ROOT) {
794 		// no pagetable here
795 		return B_NO_ERROR;
796 	}
797 
798 	do {
799 		status = get_physical_page_tmap_internal(PRE_TO_PA(pr[index]),
800 			&pd_pg, PHYSICAL_PAGE_DONT_WAIT);
801 	} while (status < B_OK);
802 	pd = (page_directory_entry *)pd_pg;
803 	// we want the table at rindex, not at rindex%(tbl/page)
804 	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
805 
806 
807 	index = VADDR_TO_PDENT(va);
808 	if (pd[index].type != DT_DIR) {
809 		// no pagetable here
810 		put_physical_page_tmap_internal(pd_pg);
811 		return B_NO_ERROR;
812 	}
813 
814 	do {
815 		status = get_physical_page_tmap_internal(PDE_TO_PA(pd[index]),
816 			&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
817 	} while (status < B_OK);
818 	pt = (page_table_entry *)pt_pg;
819 	// we want the table at rindex, not at rindex%(tbl/page)
820 	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
821 
822 	index = VADDR_TO_PTENT(va);
823 
824 	// handle indirect descriptor
825 	if (pt[index].type == DT_INDIRECT) {
826 		pi = (page_indirect_entry *)pt;
827 		pi_pg = pt_pg;
828 		do {
829 			status = get_physical_page_tmap_internal(PIE_TO_PA(pi[index]),
830 				&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
831 		} while (status < B_OK);
832 		pt = (page_table_entry *)pt_pg;
833 		// add offset from start of page
834 		pt += PIE_TO_PO(pi[index]) / sizeof(page_table_entry);
835 		// release the indirect table page
836 		put_physical_page_tmap_internal(pi_pg);
837 	}
838 
839 	*_physical = PTE_TO_PA(pt[index]);
840 
841 	// read in the page state flags
842 	if (!pt[index].supervisor)
843 		*_flags |= (pt[index].write_protect ? 0 : B_WRITE_AREA) | B_READ_AREA;
844 
845 	*_flags |= (pt[index].write_protect ? 0 : B_KERNEL_WRITE_AREA)
846 		| B_KERNEL_READ_AREA
847 		| (pt[index].dirty ? PAGE_MODIFIED : 0)
848 		| (pt[index].accessed ? PAGE_ACCESSED : 0)
849 		| ((pt[index].type == DT_PAGE) ? PAGE_PRESENT : 0);
850 
851 	put_physical_page_tmap_internal(pt_pg);
852 	put_physical_page_tmap_internal(pd_pg);
853 
854 	TRACE(("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va));
855 
856 	return B_OK;
857 }
858 
859 
860 static addr_t
861 get_mapped_size_tmap(vm_translation_map *map)
862 {
863 	return map->map_count;
864 }
865 
866 
867 static status_t
868 protect_tmap(vm_translation_map *map, addr_t start, addr_t end, uint32 attributes)
869 {
870 	page_table_entry *pt;
871 	page_directory_entry *pd;
872 	page_root_entry *pr = map->arch_data->rtdir_virt;
873 	addr_t pd_pg, pt_pg;
874 	status_t status;
875 	int index;
876 
877 	start = ROUNDDOWN(start, B_PAGE_SIZE);
878 	end = ROUNDUP(end, B_PAGE_SIZE);
879 
880 	TRACE(("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end, attributes));
881 
882 restart:
883 	if (start >= end)
884 		return B_OK;
885 
886 	index = VADDR_TO_PRENT(start);
887 	if (pr[index].type != DT_ROOT) {
888 		// no pagedir here, move the start up to access the next page table
889 		start = ROUNDUP(start + 1, B_PAGE_SIZE);
890 		goto restart;
891 	}
892 
893 	do {
894 		status = get_physical_page_tmap_internal(PRE_TO_PA(pr[index]),
895 			&pd_pg, PHYSICAL_PAGE_DONT_WAIT);
896 	} while (status < B_OK);
897 	pd = (page_directory_entry *)pd_pg;
898 	// we want the table at rindex, not at rindex%(tbl/page)
899 	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
900 
901 	index = VADDR_TO_PDENT(start);
902 	if (pd[index].type != DT_DIR) {
903 		// no pagetable here, move the start up to access the next page table
904 		start = ROUNDUP(start + 1, B_PAGE_SIZE);
905 		put_physical_page_tmap_internal(pd_pg);
906 		goto restart;
907 	}
908 
909 	do {
910 		status = get_physical_page_tmap_internal(PDE_TO_PA(pd[index]),
911 			&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
912 	} while (status < B_OK);
913 	pt = (page_table_entry *)pt_pg;
914 	// we want the table at rindex, not at rindex%(tbl/page)
915 	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
916 
917 	for (index = VADDR_TO_PTENT(start);
918 			(index < NUM_PAGEENT_PER_TBL) && (start < end);
919 			index++, start += B_PAGE_SIZE) {
920 		// XXX: handle indirect ?
921 		if (pt[index].type != DT_PAGE /*&& pt[index].type != DT_INDIRECT*/) {
922 			// page mapping not valid
923 			continue;
924 		}
925 
926 		TRACE(("protect_tmap: protect page 0x%lx\n", start));
927 
928 		pt[index].supervisor = (attributes & B_USER_PROTECTION) == 0;
929 		if ((attributes & B_USER_PROTECTION) != 0)
930 			pt[index].write_protect = (attributes & B_WRITE_AREA) == 0;
931 		else
932 			pt[index].write_protect = (attributes & B_KERNEL_WRITE_AREA) == 0;
933 
934 		if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
935 			map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = start;
936 
937 		map->arch_data->num_invalidate_pages++;
938 	}
939 
940 	put_physical_page_tmap_internal(pt_pg);
941 	put_physical_page_tmap_internal(pd_pg);
942 
943 	goto restart;
944 }
945 
946 
947 static status_t
948 clear_flags_tmap(vm_translation_map *map, addr_t va, uint32 flags)
949 {
950 	page_table_entry *pt;
951 	page_indirect_entry *pi;
952 	page_directory_entry *pd;
953 	page_root_entry *pr = map->arch_data->rtdir_virt;
954 	addr_t pd_pg, pt_pg, pi_pg;
955 	status_t status;
956 	int index;
957 	int tlb_flush = false;
958 
959 	index = VADDR_TO_PRENT(va);
960 	if (pr[index].type != DT_ROOT) {
961 		// no pagetable here
962 		return B_NO_ERROR;
963 	}
964 
965 	do {
966 		status = get_physical_page_tmap_internal(PRE_TO_PA(pr[index]),
967 			&pd_pg, PHYSICAL_PAGE_DONT_WAIT);
968 	} while (status < B_OK);
969 	pd = (page_directory_entry *)pd_pg;
970 	// we want the table at rindex, not at rindex%(tbl/page)
971 	pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;
972 
973 
974 	index = VADDR_TO_PDENT(va);
975 	if (pd[index].type != DT_DIR) {
976 		// no pagetable here
977 		put_physical_page_tmap_internal(pd_pg);
978 		return B_NO_ERROR;
979 	}
980 
981 	do {
982 		status = get_physical_page_tmap_internal(PDE_TO_PA(pd[index]),
983 			&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
984 	} while (status < B_OK);
985 	pt = (page_table_entry *)pt_pg;
986 	// we want the table at rindex, not at rindex%(tbl/page)
987 	pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;
988 
989 	index = VADDR_TO_PTENT(va);
990 
991 	// handle indirect descriptor
992 	if (pt[index].type == DT_INDIRECT) {
993 		pi = (page_indirect_entry *)pt;
994 		pi_pg = pt_pg;
995 		do {
996 			status = get_physical_page_tmap_internal(PIE_TO_PA(pi[index]),
997 				&pt_pg, PHYSICAL_PAGE_DONT_WAIT);
998 		} while (status < B_OK);
999 		pt = (page_table_entry *)pt_pg;
1000 		// add offset from start of page
1001 		pt += PIE_TO_PO(pi[index]) / sizeof(page_table_entry);
1002 		// release the indirect table page
1003 		put_physical_page_tmap_internal(pi_pg);
1004 	}
1005 
1006 	// clear out the flags we've been requested to clear
1007 	if (flags & PAGE_MODIFIED) {
1008 		pt[index].dirty = 0;
1009 		tlb_flush = true;
1010 	}
1011 	if (flags & PAGE_ACCESSED) {
1012 		pt[index].accessed = 0;
1013 		tlb_flush = true;
1014 	}
1015 
1016 	put_physical_page_tmap_internal(pt_pg);
1017 	put_physical_page_tmap_internal(pd_pg);
1018 
1019 	if (tlb_flush) {
1020 		if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
1021 			map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = va;
1022 
1023 		map->arch_data->num_invalidate_pages++;
1024 	}
1025 
1026 	return B_OK;
1027 }
1028 
1029 
1030 static void
1031 flush_tmap(vm_translation_map *map)
1032 {
1033 	cpu_status state;
1034 
1035 	if (map->arch_data->num_invalidate_pages <= 0)
1036 		return;
1037 
1038 	state = disable_interrupts();
1039 
1040 	if (map->arch_data->num_invalidate_pages > PAGE_INVALIDATE_CACHE_SIZE) {
1041 		// invalidate all pages
1042 		TRACE(("flush_tmap: %d pages to invalidate, invalidate all\n",
1043 			map->arch_data->num_invalidate_pages));
1044 
1045 		if (IS_KERNEL_MAP(map)) {
1046 			arch_cpu_global_TLB_invalidate();
1047 		} else {
1048 			arch_cpu_user_TLB_invalidate();
1049 		}
1050 	} else {
1051 		TRACE(("flush_tmap: %d pages to invalidate, invalidate list\n",
1052 			map->arch_data->num_invalidate_pages));
1053 
1054 		arch_cpu_invalidate_TLB_list(map->arch_data->pages_to_invalidate,
1055 			map->arch_data->num_invalidate_pages);
1056 	}
1057 	map->arch_data->num_invalidate_pages = 0;
1058 
1059 	restore_interrupts(state);
1060 }
1061 
1062 
1063 static status_t
1064 map_iospace_chunk(addr_t va, addr_t pa, uint32 flags)
1065 {
1066 	int i;
1067 	page_table_entry *pt;
1068 	int state;
1069 
1070 	pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
1071 	va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
1072 	if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
1073 		panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);
1074 
1075 	pt = &iospace_pgtables[(va - sIOSpaceBase) / B_PAGE_SIZE];
1076 	for (i = 0; i < NUM_PAGEENT_PER_TBL; i++, pa += B_PAGE_SIZE) {
1077 		init_page_table_entry(&pt[i]);
1078 		pt[i].addr = TA_TO_PTEA(pa);
1079 		pt[i].supervisor = 1;
1080 		pt[i].write_protect = 0;
1081 		pt[i].type = DT_PAGE;
1082 		//XXX: not cachable ?
1083 		// 040 or 060 only
1084 #ifdef MMU_HAS_GLOBAL_PAGES
1085 		pt[i].global = 1;
1086 #endif
1087 	}
1088 
1089 	state = disable_interrupts();
1090 	arch_cpu_invalidate_TLB_range(va, va + (IOSPACE_CHUNK_SIZE - B_PAGE_SIZE));
1091 	//smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_RANGE,
1092 	//	va, va + (IOSPACE_CHUNK_SIZE - B_PAGE_SIZE), 0,
1093 	//	NULL, SMP_MSG_FLAG_SYNC);
1094 	restore_interrupts(state);
1095 
1096 	return B_OK;
1097 }
1098 
1099 
1100 static status_t
1101 get_physical_page_tmap_internal(addr_t pa, addr_t *va, uint32 flags)
1102 {
1103 	return generic_get_physical_page(pa, va, flags);
1104 }
1105 
1106 
1107 static status_t
1108 put_physical_page_tmap_internal(addr_t va)
1109 {
1110 	return generic_put_physical_page(va);
1111 }
1112 
1113 
1114 static status_t
1115 get_physical_page_tmap(addr_t physicalAddress, addr_t *_virtualAddress,
1116 	void **handle)
1117 {
1118 	return generic_get_physical_page(physicalAddress, _virtualAddress, 0);
1119 }
1120 
1121 
1122 static status_t
1123 put_physical_page_tmap(addr_t virtualAddress, void *handle)
1124 {
1125 	return generic_put_physical_page(virtualAddress);
1126 }
1127 
1128 
1129 static vm_translation_map_ops tmap_ops = {
1130 	destroy_tmap,
1131 	lock_tmap,
1132 	unlock_tmap,
1133 	map_max_pages_need,
1134 	map_tmap,
1135 	unmap_tmap,
1136 	query_tmap,
1137 	query_tmap_interrupt,
1138 	get_mapped_size_tmap,
1139 	protect_tmap,
1140 	clear_flags_tmap,
1141 	flush_tmap,
1142 	get_physical_page_tmap,
1143 	put_physical_page_tmap,
1144 	get_physical_page_tmap,	// *_current_cpu()
1145 	put_physical_page_tmap,	// *_current_cpu()
1146 	get_physical_page_tmap,	// *_debug()
1147 	put_physical_page_tmap,	// *_debug()
1148 		// TODO: Replace the *_current_cpu() and *_debug() versions!
1149 
1150 	generic_vm_memset_physical,
1151 	generic_vm_memcpy_from_physical,
1152 	generic_vm_memcpy_to_physical,
1153 	generic_vm_memcpy_physical_page
1154 		// TODO: Verify that this is safe to use!
1155 };
1156 
1157 
1158 //	#pragma mark -
1159 //	VM API
1160 
1161 
1162 static status_t
1163 m68k_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
1164 {
1165 	if (map == NULL)
1166 		return B_BAD_VALUE;
1167 
1168 	TRACE(("vm_translation_map_create\n"));
1169 
1170 	// initialize the new object
1171 	map->ops = &tmap_ops;
1172 	map->map_count = 0;
1173 
1174 	recursive_lock_init(&map->lock, "translation map");
1175 
1176 	map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
1177 	if (map->arch_data == NULL) {
1178 		recursive_lock_destroy(&map->lock);
1179 		return B_NO_MEMORY;
1180 	}
1181 
1182 	map->arch_data->num_invalidate_pages = 0;
1183 
1184 	if (!kernel) {
1185 		// user
1186 		// allocate a rtdir
1187 		map->arch_data->rtdir_virt = (page_root_entry *)memalign(
1188 			SIZ_ROOTTBL, SIZ_ROOTTBL);
1189 		if (map->arch_data->rtdir_virt == NULL) {
1190 			free(map->arch_data);
1191 			recursive_lock_destroy(&map->lock);
1192 			return B_NO_MEMORY;
1193 		}
1194 		vm_get_page_mapping(VMAddressSpace::KernelID(),
1195 			(addr_t)map->arch_data->rtdir_virt, (addr_t *)&map->arch_data->rtdir_phys);
1196 	} else {
1197 		// kernel
1198 		// we already know the kernel pgdir mapping
1199 		map->arch_data->rtdir_virt = sKernelVirtualPageRoot;
1200 		map->arch_data->rtdir_phys = sKernelPhysicalPageRoot;
1201 	}
1202 
1203 	// zero out the bottom portion of the new rtdir
1204 	memset(map->arch_data->rtdir_virt + FIRST_USER_PGROOT_ENT, 0,
1205 		NUM_USER_PGROOT_ENTS * sizeof(page_root_entry));
1206 
1207 	// insert this new map into the map list
1208 	{
1209 		int state = disable_interrupts();
1210 		acquire_spinlock(&tmap_list_lock);
1211 
1212 		// copy the top portion of the rtdir from the current one
1213 		memcpy(map->arch_data->rtdir_virt + FIRST_KERNEL_PGROOT_ENT,
1214 			sKernelVirtualPageRoot + FIRST_KERNEL_PGROOT_ENT,
1215 			NUM_KERNEL_PGROOT_ENTS * sizeof(page_root_entry));
1216 
1217 		map->next = tmap_list;
1218 		tmap_list = map;
1219 
1220 		release_spinlock(&tmap_list_lock);
1221 		restore_interrupts(state);
1222 	}
1223 
1224 	return B_OK;
1225 }
1226 
1227 
1228 static status_t
1229 m68k_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
1230 {
1231 	return B_OK;
1232 }
1233 
1234 
1235 static status_t
1236 m68k_vm_translation_map_init(kernel_args *args)
1237 {
1238 	status_t error;
1239 
1240 	TRACE(("vm_translation_map_init: entry\n"));
1241 #if 0//XXX:HOLE
1242 	// page hole set up in stage2
1243 	page_hole = (page_table_entry *)args->arch_args.page_hole;
1244 	// calculate where the pgdir would be
1245 	page_hole_pgdir = (page_directory_entry *)(((unsigned int)args->arch_args.page_hole) + (B_PAGE_SIZE * 1024 - B_PAGE_SIZE));
1246 	// clear out the bottom 2 GB, unmap everything
1247 	memset(page_hole_pgdir + FIRST_USER_PGDIR_ENT, 0, sizeof(page_directory_entry) * NUM_USER_PGDIR_ENTS);
1248 #endif
1249 
1250 	sKernelPhysicalPageRoot = (page_root_entry *)args->arch_args.phys_pgroot;
1251 	sKernelVirtualPageRoot = (page_root_entry *)args->arch_args.vir_pgroot;
1252 
1253 	sQueryDesc.type = DT_INVALID;
1254 
1255 	B_INITIALIZE_SPINLOCK(&tmap_list_lock);
1256 	tmap_list = NULL;
1257 
1258 	// allocate some space to hold physical page mapping info
1259 	//XXX: check page count
1260 	// we already have all page directories allocated by the bootloader,
1261 	// we only need page tables
1262 
1263 	iospace_pgtables = (page_table_entry *)vm_allocate_early(args,
1264 		B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL * NUM_PAGETBL_PER_PAGE)), ~0L,
1265 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
1266 
1267 	TRACE(("iospace_pgtables %p\n", iospace_pgtables));
1268 
1269 	// init physical page mapper
1270 	error = generic_vm_physical_page_mapper_init(args, map_iospace_chunk,
1271 		&sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
1272 	if (error != B_OK)
1273 		return error;
1274 	TRACE(("iospace at %p\n", sIOSpaceBase));
1275 	// initialize our data structures
1276 	memset(iospace_pgtables, 0, B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL * NUM_PAGETBL_PER_PAGE)));
1277 
1278 	TRACE(("mapping iospace_pgtables\n"));
1279 
1280 	// put the array of pgtables directly into the kernel pagedir
1281 	// these will be wired and kept mapped into virtual space to be
1282 	// easy to get to.
1283 	// note the bootloader allocates all page directories for us
1284 	// as a contiguous block.
1285 	// we also still have transparent translation enabled, va==pa.
1286 	{
1287 		addr_t phys_pgtable;
1288 		addr_t virt_pgtable;
1289 		page_root_entry *pr = sKernelVirtualPageRoot;
1290 		page_directory_entry *pd;
1291 		page_directory_entry *e;
1292 		int index;
1293 		int i;
1294 
1295 		virt_pgtable = (addr_t)iospace_pgtables;
1296 
1297 		for (i = 0; i < (IOSPACE_SIZE / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL));
1298 			 i++, virt_pgtable += SIZ_PAGETBL) {
1299 			// early_query handles non-page-aligned addresses
1300 			early_query(virt_pgtable, &phys_pgtable);
1301 			index = VADDR_TO_PRENT(sIOSpaceBase) + i / NUM_DIRENT_PER_TBL;
1302 			pd = (page_directory_entry *)PRE_TO_TA(pr[index]);
1303 			e = &pd[(VADDR_TO_PDENT(sIOSpaceBase) + i) % NUM_DIRENT_PER_TBL];
1304 			put_pgtable_in_pgdir(e, phys_pgtable,
1305 				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1306 		}
1307 	}
1308 
1309 	TRACE(("vm_translation_map_init: done\n"));
1310 
1311 	return B_OK;
1312 }
1313 
1314 
1315 static status_t
1316 m68k_vm_translation_map_init_post_sem(kernel_args *args)
1317 {
1318 	return generic_vm_physical_page_mapper_init_post_sem(args);
1319 }
1320 
1321 
1322 static status_t
1323 m68k_vm_translation_map_init_post_area(kernel_args *args)
1324 {
1325 	// now that the vm is initialized, create a region that represents
1326 	// the page hole
1327 	void *temp;
1328 	status_t error;
1329 	area_id area;
1330 	addr_t queryPage;
1331 
1332 	TRACE(("vm_translation_map_init_post_area: entry\n"));
1333 
1334 	// unmap the page hole hack we were using before
1335 #warning M68K: FIXME
1336 	//sKernelVirtualPageRoot[1023].present = 0;
1337 #if 0
1338 	page_hole_pgdir = NULL;
1339 	page_hole = NULL;
1340 #endif
1341 
1342 	temp = (void *)sKernelVirtualPageRoot;
1343 	area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, B_PAGE_SIZE,
1344 		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1345 	if (area < B_OK)
1346 		return area;
1347 
1348 	temp = (void *)iospace_pgtables;
1349 	area = create_area("iospace_pgtables", &temp, B_EXACT_ADDRESS,
1350 		B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * 1024)),
1351 		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1352 	if (area < B_OK)
1353 		return area;
1354 
1355 	error = generic_vm_physical_page_mapper_init_post_area(args);
1356 	if (error != B_OK)
1357 		return error;
1358 
1359 	// this area is used for query_tmap_interrupt()
1360 	// TODO: Note, this only works as long as all pages belong to the same
1361 	//	page table, which is not yet enforced (or even tested)!
1362 	// Note we don't support SMP which makes things simpler.
1363 
1364 	area = vm_create_null_area(VMAddressSpace::KernelID(),
1365 		"interrupt query pages", (void **)&queryPage, B_ANY_ADDRESS,
1366 		B_PAGE_SIZE, 0);
1367 	if (area < B_OK)
1368 		return area;
1369 
1370 	// insert the indirect descriptor in the tree so we can map the page we want from it.
1371 
1372 	{
1373 		page_directory_entry *pageDirEntry;
1374 		page_indirect_entry *pageTableEntry;
1375 		addr_t physicalPageDir, physicalPageTable;
1376 		addr_t physicalIndirectDesc;
1377 		int32 index;
1378 
1379 		// first get pa for the indirect descriptor
1380 
1381 		index = VADDR_TO_PRENT((addr_t)&sQueryDesc);
1382 		physicalPageDir = PRE_TO_PA(sKernelVirtualPageRoot[index]);
1383 
1384 		get_physical_page_tmap_internal(physicalPageDir,
1385 			(addr_t *)&pageDirEntry, PHYSICAL_PAGE_DONT_WAIT);
1386 
1387 		index = VADDR_TO_PDENT((addr_t)&sQueryDesc);
1388 		physicalPageTable = PDE_TO_PA(pageDirEntry[index]);
1389 
1390 		get_physical_page_tmap_internal(physicalPageTable,
1391 			(addr_t *)&pageTableEntry, PHYSICAL_PAGE_DONT_WAIT);
1392 
1393 		index = VADDR_TO_PTENT((addr_t)&sQueryDesc);
1394 
1395 		// pa of the page
1396 		physicalIndirectDesc = PTE_TO_PA(pageTableEntry[index]);
1397 		// add offset
1398 		physicalIndirectDesc += ((addr_t)&sQueryDesc) % B_PAGE_SIZE;
1399 
1400 		put_physical_page_tmap_internal((addr_t)pageTableEntry);
1401 		put_physical_page_tmap_internal((addr_t)pageDirEntry);
1402 
1403 		// then the va for the page table for the query page.
1404 
1405 		//sQueryPageTable = (page_indirect_entry *)(queryPage);
1406 
1407 		index = VADDR_TO_PRENT(queryPage);
1408 		physicalPageDir = PRE_TO_PA(sKernelVirtualPageRoot[index]);
1409 
1410 		get_physical_page_tmap_internal(physicalPageDir,
1411 			(addr_t *)&pageDirEntry, PHYSICAL_PAGE_DONT_WAIT);
1412 
1413 		index = VADDR_TO_PDENT(queryPage);
1414 		physicalPageTable = PDE_TO_PA(pageDirEntry[index]);
1415 
1416 		get_physical_page_tmap_internal(physicalPageTable,
1417 			(addr_t *)&pageTableEntry, PHYSICAL_PAGE_DONT_WAIT);
1418 
1419 		index = VADDR_TO_PTENT(queryPage);
1420 
1421 		put_page_indirect_entry_in_pgtable(&pageTableEntry[index], physicalIndirectDesc,
1422 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
1423 
1424 		put_physical_page_tmap_internal((addr_t)pageTableEntry);
1425 		put_physical_page_tmap_internal((addr_t)pageDirEntry);
1426 		//invalidate_TLB(sQueryPageTable);
1427 	}
1428 	// query_tmap_interrupt checks for NULL; now it can use it
1429 	sQueryPage = queryPage;
1430 
1431 	TRACE(("vm_translation_map_init_post_area: done\n"));
1432 	return B_OK;
1433 }
1434 
1435 
1436 // almost directly taken from boot mmu code
1437 // x86:
1438 // XXX horrible back door to map a page quickly regardless of translation map object, etc.
1439 // used only during VM setup.
1440 // uses a 'page hole' set up in the stage 2 bootloader. The page hole is created by pointing one of
1441 // the pgdir entries back at itself, effectively mapping the contents of all of the 4MB of pagetables
1442 // into a 4 MB region. It's only used here, and is later unmapped.
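// On m68k no such page hole is needed: the early_map function below relies on
// the bootloader's transparent translation still being enabled (pa == va), so
// the root, directory, and page tables can be written directly.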
1443 
1444 static status_t
1445 m68k_vm_translation_map_early_map(kernel_args *args, addr_t va, addr_t pa,
1446 	uint8 attributes, addr_t (*get_free_page)(kernel_args *))
1447 {
1448 	page_root_entry *pr = (page_root_entry *)sKernelPhysicalPageRoot;
1449 	page_directory_entry *pd;
1450 	page_table_entry *pt;
1451 	addr_t tbl;
1452 	uint32 index;
1453 	uint32 i;
1454 	TRACE(("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va));
1455 
1456 	// everything is much simpler here because pa == va
1457 	// thanks to transparent translation, which hasn't been disabled yet
1458 
1459 	index = VADDR_TO_PRENT(va);
1460 	if (pr[index].type != DT_ROOT) {
1461 		unsigned aindex = index & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
1462 		TRACE(("missing page root entry %d ai %d\n", index, aindex));
1463 		tbl = get_free_page(args) * B_PAGE_SIZE;
1464 		if (!tbl)
1465 			return ENOMEM;
1466 		TRACE(("early_map: asked for free page for pgdir. 0x%lx\n", tbl));
1467 		// zero-out
1468 		memset((void *)tbl, 0, B_PAGE_SIZE);
1469 		// for each pgdir on the allocated page:
1470 		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
1471 			put_pgdir_in_pgroot(&pr[aindex + i], tbl, attributes);
1472 			//TRACE(("inserting tbl @ %p as %08x pr[%d] %08x\n", tbl, TA_TO_PREA(tbl), aindex + i, *(uint32 *)apr));
1473 			// clear the table
1474 			//TRACE(("clearing table[%d]\n", i));
1475 			pd = (page_directory_entry *)tbl;
1476 			for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
1477 				*(page_directory_entry_scalar *)(&pd[j]) = DFL_DIRENT_VAL;
1478 			tbl += SIZ_DIRTBL;
1479 		}
1480 	}
1481 	pd = (page_directory_entry *)PRE_TO_TA(pr[index]);
1482 
1483 	index = VADDR_TO_PDENT(va);
1484 	if (pd[index].type != DT_DIR) {
1485 		unsigned aindex = index & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
1486 		TRACE(("missing page dir entry %d ai %d\n", index, aindex));
1487 		tbl = get_free_page(args) * B_PAGE_SIZE;
1488 		if (!tbl)
1489 			return ENOMEM;
1490 		TRACE(("early_map: asked for free page for pgtable. 0x%lx\n", tbl));
1491 		// zero-out
1492 		memset((void *)tbl, 0, B_PAGE_SIZE);
1493 		// for each pgtable on the allocated page:
1494 		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
1495 			put_pgtable_in_pgdir(&pd[aindex + i], tbl, attributes);
1496 			// clear the table
1497 			//TRACE(("clearing table[%d]\n", i));
1498 			pt = (page_table_entry *)tbl;
1499 			for (int32 j = 0; j < NUM_PAGEENT_PER_TBL; j++)
1500 				*(page_table_entry_scalar *)(&pt[j]) = DFL_PAGEENT_VAL;
1501 			tbl += SIZ_PAGETBL;
1502 		}
1503 	}
1504 	pt = (page_table_entry *)PDE_TO_TA(pd[index]);
1505 
1506 	index = VADDR_TO_PTENT(va);
1507 	put_page_table_entry_in_pgtable(&pt[index], pa, attributes,
1508 		IS_KERNEL_ADDRESS(va));
1509 
1510 	arch_cpu_invalidate_TLB_range(va, va);
1511 
1512 	return B_OK;
1513 }
1514 
1515 
1516 static bool
1517 m68k_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
1518 	uint32 protection)
1519 {
1520 	// TODO: Implement!
1521 	return false;
1522 }
1523