/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*	(bonefish) Some explanatory words on how address translation is implemented
	for the 32 bit PPC architecture.

	I use the address type nomenclature as used in the PPC architecture
	specs, i.e.
	- effective address: An address as used by program instructions, i.e.
	  that's what elsewhere (e.g. in the VM implementation) is called
	  virtual address.
	- virtual address: An intermediate address computed from the effective
	  address via the segment registers.
	- physical address: An address referring to physical storage.

	The hardware translates an effective address to a physical address using
	either of two mechanisms: 1) Block Address Translation (BAT) or
	2) segment + page translation. The first mechanism does this directly
	using two sets (for data/instructions) of special purpose registers.
	The latter mechanism is of more relevance here, though:

	effective address (32 bit):	     [ 0 ESID  3 | 4  PIX 19 | 20 Byte 31 ]
								           |           |            |
							     (segment registers)   |            |
									       |           |            |
	virtual address (52 bit):   [ 0      VSID 23 | 24 PIX 39 | 40 Byte 51 ]
	                            [ 0             VPN       39 | 40 Byte 51 ]
								                 |                  |
										   (page table)             |
											     |                  |
	physical address (32 bit):       [ 0        PPN       19 | 20 Byte 31 ]


	ESID: Effective Segment ID
	VSID: Virtual Segment ID
	PIX:  Page Index
	VPN:  Virtual Page Number
	PPN:  Physical Page Number

	Unlike on x86, we can't switch the context to another team by simply
	pointing a register at another page directory, since we only have one
	page table containing both kernel and user address mappings. Instead we
	map the effective address spaces of the kernel and *all* teams into
	non-intersecting ranges of the virtual address space (which fortunately
	is 20 bits wider), and use the segment registers to select the section of
	the virtual address space for the current team. Half of the 16 segment
	registers (8 - 15) map the kernel addresses, so they remain unchanged.

	The range of the virtual address space a team's effective address space
	is mapped to is defined by its PPCVMTranslationMap::fVSIDBase,
	which is the first of the 8 successive VSID values used for the team.

	Which fVSIDBase values are already taken is tracked by the set bits in
	the bitmap sVSIDBaseBitmap.


	TODO:
	* If we want to continue to use the OF services, we would need to add
	  its address mappings to the kernel space. Unfortunately some stuff
	  (especially RAM) is mapped in an address range outside the kernel
	  address space. We probably need to map those into each team's address
	  space as kernel read/write areas.
	* The current locking scheme is insufficient. The page table is a resource
	  shared by all teams. We need to synchronize access to it. Probably via a
	  spinlock.
 */
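
/*	An illustrative walk-through of the segment + page mechanism (a sketch
	added for clarity; the concrete address is an assumption, not taken from
	the original comment): consider the kernel map (fVSIDBase 0) and the
	effective address 0x80001234.
	- ESID = 0x80001234 >> 28 = 8, so segment register 8 supplies the VSID,
	  i.e. VADDR_TO_VSID(0, 0x80001234) = 0 + 8 = 8.
	- PIX = (0x80001234 >> 12) & 0xffff = 0x0001, byte offset = 0x234.
	- VSID and PIX are hashed to select a page_table_entry_group, whose
	  entries are compared against the VSID and the abbreviated page index
	  (the upper 6 bits of PIX); the matching entry yields the PPN, and the
	  physical address is PPN * B_PAGE_SIZE + 0x234.
*/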

#include <arch/vm_translation_map.h>

#include <stdlib.h>

#include <KernelExport.h>

#include <arch/cpu.h>
#include <arch_mmu.h>
#include <boot/kernel_args.h>
#include <int.h>
#include <kernel.h>
#include <slab/Slab.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include <util/AutoLock.h>

#include "generic_vm_physical_page_mapper.h"
#include "generic_vm_physical_page_ops.h"
#include "GenericVMPhysicalPageMapper.h"


static struct page_table_entry_group *sPageTable;
static size_t sPageTableSize;
static uint32 sPageTableHashMask;
static area_id sPageTableArea;

// 64 MB of iospace
#define IOSPACE_SIZE (64 * 1024 * 1024)
// We only have small (4 KB) pages. The only reason for choosing a larger
// chunk size is to limit the memory wasted, since the generic page mapper
// allocates its structures per physical/virtual chunk.
// TODO: Implement a page mapper more suitable for small pages!
#define IOSPACE_CHUNK_SIZE (16 * B_PAGE_SIZE)
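
// Illustrative arithmetic (an added note, assuming the usual 4 KB
// B_PAGE_SIZE): a chunk is 16 * 4096 bytes = 64 KB, so the 64 MB of iospace
// is managed in 1024 chunks, i.e. 1024 sets of page mapper structures.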

static addr_t sIOSpaceBase;

static GenericVMPhysicalPageMapper sPhysicalPageMapper;

// The VSID is a 24 bit number. The lower three bits are defined by the
// (effective) segment number, which leaves us with a 21 bit space of
// VSID bases (= 2 * 1024 * 1024). Note that we only manage MAX_VSID_BASES
// of them, so that the bitmap below fits into a single page.
#define MAX_VSID_BASES (PAGE_SIZE * 8)
static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)];
static spinlock sVSIDBaseBitmapLock;

#define VSID_BASE_SHIFT 3
#define VADDR_TO_VSID(vsidBase, vaddr) ((vsidBase) + ((vaddr) >> 28))
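
// Example (added for illustration, with an arbitrary base value): a team
// whose fVSIDBase is 0x10 owns the VSIDs 0x10 through 0x17. The effective
// address 0x30001234 lies in segment 3, so
// VADDR_TO_VSID(0x10, 0x30001234) = 0x10 + 3 = 0x13.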


struct PPCVMTranslationMap : VMTranslationMap {
								PPCVMTranslationMap();
	virtual						~PPCVMTranslationMap();

			status_t			Init(bool kernel);

	inline	int					VSIDBase() const	{ return fVSIDBase; }

			page_table_entry*	LookupPageTableEntry(addr_t virtualAddress);
			bool				RemovePageTableEntry(addr_t virtualAddress);

	virtual	bool				Lock();
	virtual	void				Unlock();

	virtual	addr_t				MappedSize() const;
	virtual	size_t				MaxPagesNeededToMap(addr_t start,
									addr_t end) const;

	virtual	status_t			Map(addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint32 attributes, uint32 memoryType,
									vm_page_reservation* reservation);
	virtual	status_t			Unmap(addr_t start, addr_t end);

	virtual	status_t			UnmapPage(VMArea* area, addr_t address,
									bool updatePageQueue);

	virtual	status_t			Query(addr_t virtualAddress,
									phys_addr_t* _physicalAddress,
									uint32* _flags);
	virtual	status_t			QueryInterrupt(addr_t virtualAddress,
									phys_addr_t* _physicalAddress,
									uint32* _flags);

	virtual	status_t			Protect(addr_t base, addr_t top,
									uint32 attributes, uint32 memoryType);
	virtual	status_t			ClearFlags(addr_t virtualAddress,
									uint32 flags);

	virtual	bool				ClearAccessedAndModified(
									VMArea* area, addr_t address,
									bool unmapIfUnaccessed,
									bool& _modified);

	virtual	void				Flush();

protected:
			int					fVSIDBase;
};


void
ppc_translation_map_change_asid(VMTranslationMap *map)
{
// this code depends on the kernel being at 0x80000000, fix if we change that
#if KERNEL_BASE != 0x80000000
#error fix me
#endif
	int vsidBase = static_cast<PPCVMTranslationMap*>(map)->VSIDBase();

	// Only the user segment registers (0-7) need to be reloaded; 8-15 map
	// the kernel address space and are the same for all teams. "mtsr"
	// requires a register operand, hence the "r" constraint.
	isync();	// synchronize context
	asm("mtsr	0,%0" : : "r"(vsidBase));
	asm("mtsr	1,%0" : : "r"(vsidBase + 1));
	asm("mtsr	2,%0" : : "r"(vsidBase + 2));
	asm("mtsr	3,%0" : : "r"(vsidBase + 3));
	asm("mtsr	4,%0" : : "r"(vsidBase + 4));
	asm("mtsr	5,%0" : : "r"(vsidBase + 5));
	asm("mtsr	6,%0" : : "r"(vsidBase + 6));
	asm("mtsr	7,%0" : : "r"(vsidBase + 7));
	isync();	// synchronize context
}


static void
fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
	addr_t virtualAddress, phys_addr_t physicalAddress, uint8 protection,
	bool secondaryHash)
{
	// lower 32 bits - set at once
	entry->physical_page_number = physicalAddress / B_PAGE_SIZE;
	entry->_reserved0 = 0;
	entry->referenced = false;
	entry->changed = false;
	entry->write_through = false;
	entry->caching_inhibited = false;
	entry->memory_coherent = false;
	entry->guarded = false;
	entry->_reserved1 = 0;
	entry->page_protection = protection & 0x3;
	eieio();
		// we need to make sure that the lower 32 bits have already been
		// written when the entry becomes valid

	// upper 32 bits
	entry->virtual_segment_id = virtualSegmentID;
	entry->secondary_hash = secondaryHash;
	entry->abbr_page_index = (virtualAddress >> 22) & 0x3f;
	entry->valid = true;

	ppc_sync();
}


page_table_entry *
PPCVMTranslationMap::LookupPageTableEntry(addr_t virtualAddress)
{
	// look up the VSID based on the virtual address
	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);

//	dprintf("vm_translation_map.lookup_page_table_entry: vsid %ld, va 0x%lx\n", virtualSegmentID, virtualAddress);

	// Search for the page table entry using the primary hash value

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == false
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			return entry;
	}

	// didn't find it, try the secondary hash value

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == true
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			return entry;
	}

	return NULL;
}


bool
PPCVMTranslationMap::RemovePageTableEntry(addr_t virtualAddress)
{
	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
	if (entry == NULL)
		return false;

	// invalidate the entry, then flush it from the TLB on all CPUs
	entry->valid = 0;
	ppc_sync();
	tlbie(virtualAddress);
	eieio();
	tlbsync();
	ppc_sync();

	return true;
}


static status_t
map_iospace_chunk(addr_t va, phys_addr_t pa, uint32 flags)
{
	pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
	va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
	if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
		panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);

	// map the pages
	return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
}


// #pragma mark -


PPCVMTranslationMap::PPCVMTranslationMap()
{
}


PPCVMTranslationMap::~PPCVMTranslationMap()
{
	if (fMapCount > 0) {
		panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n",
			this, fMapCount);
	}

	// mark the vsid base not in use
	int baseBit = fVSIDBase >> VSID_BASE_SHIFT;
	atomic_and((vint32 *)&sVSIDBaseBitmap[baseBit / 32],
			~(1 << (baseBit % 32)));
}


status_t
PPCVMTranslationMap::Init(bool kernel)
{
	cpu_status state = disable_interrupts();
	acquire_spinlock(&sVSIDBaseBitmapLock);

	// allocate a VSID base for this one
	if (kernel) {
		// The boot loader has set up the segment registers for identical
		// mapping. Two VSID bases are reserved for the kernel: 0 and 8. The
		// latter one for mapping the kernel address space (0x80000000...), the
		// former one for the lower addresses required by the Open Firmware
		// services.
		fVSIDBase = 0;
		sVSIDBaseBitmap[0] |= 0x3;
	} else {
		int i = 0;

		while (i < MAX_VSID_BASES) {
			if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
				i += 32;
				continue;
			}
			if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
				// we found it
				sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
				break;
			}
			i++;
		}
		if (i >= MAX_VSID_BASES)
			panic("vm_translation_map_create: out of VSID bases\n");
		fVSIDBase = i << VSID_BASE_SHIFT;
	}

	release_spinlock(&sVSIDBaseBitmapLock);
	restore_interrupts(state);

	return B_OK;
}


bool
PPCVMTranslationMap::Lock()
{
	recursive_lock_lock(&fLock);
	return true;
}


void
PPCVMTranslationMap::Unlock()
{
	recursive_lock_unlock(&fLock);
}


size_t
PPCVMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// We use a fixed hash page table; mapping doesn't consume any
	// additional pages.
	return 0;
}


status_t
PPCVMTranslationMap::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
	uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
{
// TODO: Support memory types!
	// look up the VSID based on the virtual address
	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);
	uint32 protection = 0;

	// ToDo: check this
	// all kernel mappings are R/W to supervisor code
	if (attributes & (B_READ_AREA | B_WRITE_AREA)) {
		protection = (attributes & B_WRITE_AREA)
			? PTE_READ_WRITE : PTE_READ_ONLY;
	}

	//dprintf("vm_translation_map.map_tmap: vsid %d, pa 0x%lx, va 0x%lx\n", vsid, pa, va);

	// Search for a free page table slot using the primary hash value

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->valid)
			continue;

		fill_page_table_entry(entry, virtualSegmentID, virtualAddress,
			physicalAddress, protection, false);
		fMapCount++;
		return B_OK;
	}

	// Didn't find one, try the secondary hash value

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->valid)
			continue;

		// Note: Entries in the secondary group must have their
		// secondary_hash flag set, or lookups will never find them.
		fill_page_table_entry(entry, virtualSegmentID, virtualAddress,
			physicalAddress, protection, true);
		fMapCount++;
		return B_OK;
	}

	panic("vm_translation_map.map_tmap: hash table full\n");
	return B_ERROR;
}


status_t
PPCVMTranslationMap::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	end = ROUNDUP(end, B_PAGE_SIZE);

//	dprintf("vm_translation_map.unmap_tmap: start 0x%lx, end 0x%lx\n", start, end);

	while (start < end) {
		if (RemovePageTableEntry(start))
			fMapCount--;

		start += B_PAGE_SIZE;
	}

	return B_OK;
}


status_t
PPCVMTranslationMap::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	RecursiveLocker locker(fLock);

	if (area->cache_type == CACHE_TYPE_DEVICE) {
		if (!RemovePageTableEntry(address))
			return B_ENTRY_NOT_FOUND;

		fMapCount--;
		return B_OK;
	}

	page_table_entry* entry = LookupPageTableEntry(address);
	if (entry == NULL)
		return B_ENTRY_NOT_FOUND;

	page_num_t pageNumber = entry->physical_page_number;
	bool accessed = entry->referenced;
	bool modified = entry->changed;

	RemovePageTableEntry(address);

	fMapCount--;

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, pageNumber, accessed, modified, updatePageQueue);

	return B_OK;
}


status_t
PPCVMTranslationMap::Query(addr_t va, phys_addr_t *_outPhysical,
	uint32 *_outFlags)
{
	page_table_entry *entry;

	// default the flags to not present
	*_outFlags = 0;
	*_outPhysical = 0;

	entry = LookupPageTableEntry(va);
	if (entry == NULL)
		return B_NO_ERROR;

	// ToDo: check this!
	if (IS_KERNEL_ADDRESS(va)) {
		*_outFlags |= B_KERNEL_READ_AREA
			| (entry->page_protection == PTE_READ_ONLY
				? 0 : B_KERNEL_WRITE_AREA);
	} else {
		*_outFlags |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_READ_AREA
			| (entry->page_protection == PTE_READ_ONLY ? 0 : B_WRITE_AREA);
	}

	*_outFlags |= entry->changed ? PAGE_MODIFIED : 0;
	*_outFlags |= entry->referenced ? PAGE_ACCESSED : 0;
	*_outFlags |= entry->valid ? PAGE_PRESENT : 0;

	*_outPhysical = entry->physical_page_number * B_PAGE_SIZE;

	return B_OK;
}


status_t
PPCVMTranslationMap::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	return PPCVMTranslationMap::Query(virtualAddress, _physicalAddress, _flags);
}


addr_t
PPCVMTranslationMap::MappedSize() const
{
	return fMapCount;
}


status_t
PPCVMTranslationMap::Protect(addr_t base, addr_t top, uint32 attributes,
	uint32 memoryType)
{
	// XXX finish
	return B_ERROR;
}


status_t
PPCVMTranslationMap::ClearFlags(addr_t virtualAddress, uint32 flags)
{
	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
	if (entry == NULL)
		return B_NO_ERROR;

	bool modified = false;

	// clear the bits
	if ((flags & PAGE_MODIFIED) != 0 && entry->changed) {
		entry->changed = false;
		modified = true;
	}
	if ((flags & PAGE_ACCESSED) != 0 && entry->referenced) {
		entry->referenced = false;
		modified = true;
	}

	// synchronize
	if (modified) {
		tlbie(virtualAddress);
		eieio();
		tlbsync();
		ppc_sync();
	}

	return B_OK;
}


bool
PPCVMTranslationMap::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	// TODO: Implement for real! ATM this is just an approximation using
	// Query(), ClearFlags(), and UnmapPage(). See below!

	RecursiveLocker locker(fLock);

	uint32 flags;
	phys_addr_t physicalAddress;
	if (Query(address, &physicalAddress, &flags) != B_OK
		|| (flags & PAGE_PRESENT) == 0) {
		return false;
	}

	_modified = (flags & PAGE_MODIFIED) != 0;

	if ((flags & (PAGE_ACCESSED | PAGE_MODIFIED)) != 0)
		ClearFlags(address, flags & (PAGE_ACCESSED | PAGE_MODIFIED));

	if ((flags & PAGE_ACCESSED) != 0)
		return true;

	if (!unmapIfUnaccessed)
		return false;

	locker.Unlock();

	UnmapPage(area, address, false);
		// TODO: Obvious race condition: Between querying and unmapping the
		// page could have been accessed. We try to compensate by considering
		// vm_page::{accessed,modified} (which would have been updated by
		// UnmapPage()) below, but that doesn't quite match the required
		// semantics of the method.

	vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
	if (page == NULL)
		return false;

	_modified |= page->modified;

	return page->accessed;
}


void
PPCVMTranslationMap::Flush()
{
// TODO: arch_cpu_global_TLB_invalidate() is extremely expensive and doesn't
// even cut it here. We are supposed to invalidate all TLB entries for this
// map on all CPUs. We should loop over the virtual pages and invoke tlbie
// instead (which marks the entry invalid on all CPUs).
	arch_cpu_global_TLB_invalidate();
}


static status_t
get_physical_page_tmap(phys_addr_t physicalAddress, addr_t *_virtualAddress,
	void **handle)
{
	return generic_get_physical_page(physicalAddress, _virtualAddress, 0);
}


static status_t
put_physical_page_tmap(addr_t virtualAddress, void *handle)
{
	return generic_put_physical_page(virtualAddress);
}


//  #pragma mark -
//  VM API


status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	PPCVMTranslationMap* map = new(std::nothrow) PPCVMTranslationMap;
	if (map == NULL)
		return B_NO_MEMORY;

	status_t error = map->Init(kernel);
	if (error != B_OK) {
		delete map;
		return error;
	}

	*_map = map;
	return B_OK;
}


status_t
arch_vm_translation_map_init(kernel_args *args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	sPageTable = (page_table_entry_group *)args->arch_args.page_table.start;
	sPageTableSize = args->arch_args.page_table.size;
	sPageTableHashMask = sPageTableSize / sizeof(page_table_entry_group) - 1;

	// init physical page mapper
	status_t error = generic_vm_physical_page_mapper_init(args,
		map_iospace_chunk, &sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
	if (error != B_OK)
		return error;

	new(&sPhysicalPageMapper) GenericVMPhysicalPageMapper;

	*_physicalPageMapper = &sPhysicalPageMapper;
	return B_OK;
}


status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
	// If the page table doesn't lie within the kernel address space, we
	// remap it.
	if (!IS_KERNEL_ADDRESS(sPageTable)) {
		addr_t newAddress = (addr_t)sPageTable;
		status_t error = ppc_remap_address_range(&newAddress, sPageTableSize,
			false);
		if (error != B_OK) {
			panic("arch_vm_translation_map_init_post_area(): Failed to remap "
				"the page table!");
			return error;
		}

		// set the new page table address
		addr_t oldVirtualBase = (addr_t)(sPageTable);
		sPageTable = (page_table_entry_group*)newAddress;

		// unmap the old pages
		ppc_unmap_address_range(oldVirtualBase, sPageTableSize);

// TODO: We should probably map the page table via BAT. It is relatively large,
// and due to being a hash table the access patterns might look sporadic, which
// certainly isn't to the liking of the TLB.
	}

	// create an area to cover the page table
	sPageTableArea = create_area("page_table", (void **)&sPageTable,
		B_EXACT_ADDRESS, sPageTableSize, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (sPageTableArea < B_OK)
		return sPageTableArea;

	// init physical page mapper
	status_t error = generic_vm_physical_page_mapper_init_post_area(args);
	if (error != B_OK)
		return error;

	return B_OK;
}


status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
	// init physical page mapper
	return generic_vm_physical_page_mapper_init_post_sem(args);
}


/**	Directly maps a page without having knowledge of any kernel structures.
 *	Used only during VM setup.
 *	It currently ignores the "attributes" parameter and sets all pages
 *	read/write.
 */

status_t
arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress,
	phys_addr_t physicalAddress, uint8 attributes,
	phys_addr_t (*get_free_page)(kernel_args *))
{
	uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		(uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		// 8 entries in a group
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, PTE_READ_WRITE, false);
		return B_OK;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, PTE_READ_WRITE, true);
		return B_OK;
	}

	return B_ERROR;
}


// XXX currently assumes this translation map is active

status_t
arch_vm_translation_map_early_query(addr_t va, phys_addr_t *out_physical)
{
	//PANIC_UNIMPLEMENTED();
	panic("arch_vm_translation_map_early_query(): not yet implemented\n");
	return B_OK;
}


// #pragma mark -


status_t
ppc_map_address_range(addr_t virtualAddress, phys_addr_t physicalAddress,
	size_t size)
{
	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
	virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);
	physicalAddress = ROUNDDOWN(physicalAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();
	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());

	vm_page_reservation reservation;
	vm_page_reserve_pages(&reservation, 0, VM_PRIORITY_USER);
		// We don't need any pages for mapping.

	// map the pages
	for (; virtualAddress < virtualEnd;
		 virtualAddress += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
		status_t error = map->Map(virtualAddress, physicalAddress,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, &reservation);
		if (error != B_OK)
			return error;
	}

	return B_OK;
}


void
ppc_unmap_address_range(addr_t virtualAddress, size_t size)
{
	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
	virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();

	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());
	for (; virtualAddress < virtualEnd; virtualAddress += B_PAGE_SIZE)
		map->RemovePageTableEntry(virtualAddress);
}


status_t
ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
{
	addr_t virtualAddress = ROUNDDOWN(*_virtualAddress, B_PAGE_SIZE);
	size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();

	// reserve space in the address space
	void *newAddress = NULL;
	status_t error = vm_reserve_address_range(addressSpace->ID(), &newAddress,
		B_ANY_KERNEL_ADDRESS, size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (error != B_OK)
		return error;

	// get the area's first physical page
	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());
	page_table_entry *entry = map->LookupPageTableEntry(virtualAddress);
	if (!entry)
		return B_ERROR;
	phys_addr_t physicalBase = (phys_addr_t)entry->physical_page_number << 12;

	// map the pages
	error = ppc_map_address_range((addr_t)newAddress, physicalBase, size);
	if (error != B_OK)
		return error;

	*_virtualAddress = (addr_t)newAddress;

	// unmap the old pages
	if (unmap)
		ppc_unmap_address_range(virtualAddress, size);

	return B_OK;
}


bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	uint32 protection)
{
	// TODO: Implement!
	return false;
}