/*
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*	(bonefish) Some explanatory words on how address translation is implemented
	for the 32 bit PPC architecture.

	I use the address type nomenclature as used in the PPC architecture
	specs, i.e.
	- effective address: An address as used by program instructions, i.e.
	  that's what elsewhere (e.g. in the VM implementation) is called
	  virtual address.
	- virtual address: An intermediate address computed from the effective
	  address via the segment registers.
	- physical address: An address referring to physical storage.

	The hardware translates an effective address to a physical address using
	either of two mechanisms: 1) Block Address Translation (BAT) or
	2) segment + page translation. The first mechanism does this directly
	using two sets (for data/instructions) of special purpose registers.
	The latter mechanism is of more relevance here, though:

	effective address (32 bit):	     [ 0 ESID  3 | 4  PIX 19 | 20 Byte 31 ]
								           |           |            |
							     (segment registers)   |            |
									       |           |            |
	virtual address (52 bit):   [ 0      VSID 23 | 24 PIX 39 | 40 Byte 51 ]
	                            [ 0             VPN       39 | 40 Byte 51 ]
								                 |                  |
										   (page table)             |
											     |                  |
	physical address (32 bit):       [ 0        PPN       19 | 20 Byte 31 ]


	ESID: Effective Segment ID
	VSID: Virtual Segment ID
	PIX:  Page Index
	VPN:  Virtual Page Number
	PPN:  Physical Page Number

	Unlike on x86, we can't switch the context to another team simply by
	setting a register to another page directory, since we only have one
	page table containing both kernel and user address mappings. Instead we
	map the effective address space of kernel and *all* teams
	non-intersectingly into the virtual address space (which fortunately is
	20 bits wider), and use the segment registers to select the section of
	the virtual address space for the current team. Half of the 16 segment
	registers (8 - 15) map the kernel addresses, so they remain unchanged.

	The range of the virtual address space a team's effective address space
	is mapped to is defined by its PPCVMTranslationMap::fVSIDBase,
	which is the first of the 8 successive VSID values used for the team.

	Which fVSIDBase values are already taken is defined by the set bits in
	the bitmap sVSIDBaseBitmap.


	TODO:
	* If we want to continue to use the OF services, we would need to add
	  their address mappings to the kernel space. Unfortunately some stuff
	  (especially RAM) is mapped in an address range outside the kernel
	  address space. We probably need to map those into each team's address
	  space as kernel read/write areas.
	* The current locking scheme is insufficient. The page table is a resource
	  shared by all teams. We need to synchronize access to it. Probably via a
	  spinlock.
 */
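
/*	Worked example (illustrative only, based on the scheme above): for the
	kernel address space (fVSIDBase 0), the effective address 0x80001234
	decomposes into ESID 8, page index 0x0001 and byte offset 0x234. Segment
	register 8 supplies VSID 8 (= fVSIDBase + ESID), so the 52 bit virtual
	address is (VSID 8, PIX 0x0001, byte 0x234); the page table then maps
	that virtual page number to a physical page number.
 */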

#include <KernelExport.h>
#include <kernel.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <int.h>
#include <boot/kernel_args.h>
#include <arch/vm_translation_map.h>
#include <arch/cpu.h>
#include <arch_mmu.h>
#include <stdlib.h>
#include <new>
	// for placement new and std::nothrow used below

#include "generic_vm_physical_page_mapper.h"
#include "generic_vm_physical_page_ops.h"
#include "GenericVMPhysicalPageMapper.h"


static struct page_table_entry_group *sPageTable;
static size_t sPageTableSize;
static uint32 sPageTableHashMask;
static area_id sPageTableArea;

// 64 MB of iospace
#define IOSPACE_SIZE (64*1024*1024)
// We only have small (4 KB) pages. The only reason for choosing a greater
// chunk size is to keep the waste of memory limited, since the generic page
// mapper allocates structures per physical/virtual chunk.
// TODO: Implement a page mapper more suitable for small pages!
#define IOSPACE_CHUNK_SIZE (16 * B_PAGE_SIZE)
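// For illustration: with the values above, the generic mapper has to manage
// IOSPACE_SIZE / IOSPACE_CHUNK_SIZE = 64 MB / 64 KB = 1024 per-chunk
// structures, each chunk covering 16 pages of 4 KB.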

static addr_t sIOSpaceBase;

static GenericVMPhysicalPageMapper sPhysicalPageMapper;

// The VSID is a 24 bit number. The lower three bits are defined by the
// (effective) segment number, which leaves us with a 21 bit space of
// VSID bases (= 2 * 1024 * 1024). Only MAX_VSID_BASES of them are actually
// administered, which keeps sVSIDBaseBitmap at exactly one page.
#define MAX_VSID_BASES (PAGE_SIZE * 8)
static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)];
static spinlock sVSIDBaseBitmapLock;

#define VSID_BASE_SHIFT 3
#define VADDR_TO_VSID(vsidBase, vaddr) (vsidBase + ((vaddr) >> 28))
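

// A minimal sketch (for illustration only, not used by the code below) of how
// an effective address is split according to the scheme described in the
// header comment, and how VADDR_TO_VSID() selects one of a team's 8 VSIDs.
// The helper name and signature are made up for this example.
static inline void
illustrate_effective_address_split(int vsidBase, addr_t effectiveAddress)
{
	uint32 esid = effectiveAddress >> 28;
		// selects one of the 16 segment registers
	uint32 pageIndex = (effectiveAddress >> 12) & 0xffff;
		// 16 bit page index within the segment
	uint32 byteOffset = effectiveAddress & (B_PAGE_SIZE - 1);
		// offset within the 4 KB page
	uint32 vsid = VADDR_TO_VSID(vsidBase, effectiveAddress);
		// == vsidBase + esid; the (VSID, page index) pair is what the page
		// table hash is computed from

	dprintf("ea %#lx -> esid %lu, vsid %lu, page index %#lx, offset %#lx\n",
		effectiveAddress, esid, vsid, pageIndex, byteOffset);
}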


struct PPCVMTranslationMap : VMTranslationMap {
								PPCVMTranslationMap();
	virtual						~PPCVMTranslationMap();

			status_t			Init(bool kernel);

	inline	int					VSIDBase() const	{ return fVSIDBase; }

			page_table_entry*	LookupPageTableEntry(addr_t virtualAddress);
			bool				RemovePageTableEntry(addr_t virtualAddress);

	virtual	status_t			InitPostSem();

	virtual	status_t			Lock();
	virtual	status_t			Unlock();

	virtual	addr_t				MappedSize() const;
	virtual	size_t				MaxPagesNeededToMap(addr_t start,
									addr_t end) const;

	virtual	status_t			Map(addr_t virtualAddress,
									addr_t physicalAddress,
									uint32 attributes);
	virtual	status_t			Unmap(addr_t start, addr_t end);

	virtual	status_t			Query(addr_t virtualAddress,
									addr_t* _physicalAddress,
									uint32* _flags);
	virtual	status_t			QueryInterrupt(addr_t virtualAddress,
									addr_t* _physicalAddress,
									uint32* _flags);

	virtual	status_t			Protect(addr_t base, addr_t top,
									uint32 attributes);
	virtual	status_t			ClearFlags(addr_t virtualAddress,
									uint32 flags);

	virtual	void				Flush();

protected:
			int					fVSIDBase;
};


void
ppc_translation_map_change_asid(VMTranslationMap *map)
{
// this code depends on the kernel being at 0x80000000, fix if we change that
#if KERNEL_BASE != 0x80000000
#error fix me
#endif
	int vsidBase = static_cast<PPCVMTranslationMap*>(map)->VSIDBase();

	isync();	// synchronize context
	asm("mtsr	0,%0" : : "g"(vsidBase));
	asm("mtsr	1,%0" : : "g"(vsidBase + 1));
	asm("mtsr	2,%0" : : "g"(vsidBase + 2));
	asm("mtsr	3,%0" : : "g"(vsidBase + 3));
	asm("mtsr	4,%0" : : "g"(vsidBase + 4));
	asm("mtsr	5,%0" : : "g"(vsidBase + 5));
	asm("mtsr	6,%0" : : "g"(vsidBase + 6));
	asm("mtsr	7,%0" : : "g"(vsidBase + 7));
	isync();	// synchronize context
}


static void
fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
	addr_t virtualAddress, addr_t physicalAddress, uint8 protection,
	bool secondaryHash)
{
	// lower 32 bit - set at once
	entry->physical_page_number = physicalAddress / B_PAGE_SIZE;
	entry->_reserved0 = 0;
	entry->referenced = false;
	entry->changed = false;
	entry->write_through = false;
	entry->caching_inhibited = false;
	entry->memory_coherent = false;
	entry->guarded = false;
	entry->_reserved1 = 0;
	entry->page_protection = protection & 0x3;
	eieio();
		// we need to make sure that the lower 32 bit were
		// already written when the entry becomes valid

	// upper 32 bit
	entry->virtual_segment_id = virtualSegmentID;
	entry->secondary_hash = secondaryHash;
	entry->abbr_page_index = (virtualAddress >> 22) & 0x3f;
	entry->valid = true;

	ppc_sync();
}


page_table_entry *
PPCVMTranslationMap::LookupPageTableEntry(addr_t virtualAddress)
{
	// lookup the vsid based off the va
	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);

//	dprintf("vm_translation_map.lookup_page_table_entry: vsid %ld, va 0x%lx\n", virtualSegmentID, virtualAddress);

	// Search for the page table entry using the primary hash value

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == false
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			return entry;
	}

	// Didn't find it, try the secondary hash value

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == true
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			return entry;
	}

	return NULL;
}


bool
PPCVMTranslationMap::RemovePageTableEntry(addr_t virtualAddress)
{
	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
	if (entry) {
		entry->valid = 0;
		ppc_sync();
		tlbie(virtualAddress);
		eieio();
		tlbsync();
		ppc_sync();
	}

	return entry;
}


static status_t
map_iospace_chunk(addr_t va, addr_t pa, uint32 flags)
{
	pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
	va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
	if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
		panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);

	// map the pages
	return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
}


// #pragma mark -


PPCVMTranslationMap::PPCVMTranslationMap()
{
}


PPCVMTranslationMap::~PPCVMTranslationMap()
{
	if (fMapCount > 0) {
		panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n",
			this, fMapCount);
	}

	// mark the vsid base not in use
	int baseBit = fVSIDBase >> VSID_BASE_SHIFT;
	atomic_and((vint32 *)&sVSIDBaseBitmap[baseBit / 32],
			~(1 << (baseBit % 32)));
}


status_t
PPCVMTranslationMap::Init(bool kernel)
{
	cpu_status state = disable_interrupts();
	acquire_spinlock(&sVSIDBaseBitmapLock);

	// allocate a VSID base for this one
	if (kernel) {
		// The boot loader has set up the segment registers for identical
		// mapping. Two VSID bases are reserved for the kernel: 0 and 8. The
		// latter one for mapping the kernel address space (0x80000000...), the
		// former one for the lower addresses required by the Open Firmware
		// services.
		fVSIDBase = 0;
		sVSIDBaseBitmap[0] |= 0x3;
	} else {
		int i = 0;

		while (i < MAX_VSID_BASES) {
			if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
				i += 32;
				continue;
			}
			if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
				// we found it
				sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
				break;
			}
			i++;
		}
		if (i >= MAX_VSID_BASES)
			panic("vm_translation_map_create: out of VSID bases\n");
		fVSIDBase = i << VSID_BASE_SHIFT;
	}

	release_spinlock(&sVSIDBaseBitmapLock);
	restore_interrupts(state);

	return B_OK;
}


status_t
PPCVMTranslationMap::InitPostSem()
{
	return B_OK;
}


status_t
PPCVMTranslationMap::Lock()
{
	recursive_lock_lock(&fLock);
	return 0;
}


status_t
PPCVMTranslationMap::Unlock()
{
	recursive_lock_unlock(&fLock);
	return 0;
}


size_t
PPCVMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	return 0;
}


status_t
PPCVMTranslationMap::Map(addr_t virtualAddress, addr_t physicalAddress,
	uint32 attributes)
{
	// lookup the vsid based off the va
	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);
	uint32 protection = 0;

	// ToDo: check this
	// all kernel mappings are R/W to supervisor code
	if (attributes & (B_READ_AREA | B_WRITE_AREA))
		protection = (attributes & B_WRITE_AREA) ? PTE_READ_WRITE : PTE_READ_ONLY;

	//dprintf("vm_translation_map.map_tmap: vsid %d, pa 0x%lx, va 0x%lx\n", vsid, pa, va);

	// Search for a free page table slot using the primary hash value

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->valid)
			continue;

		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
			protection, false);
		fMapCount++;
		return B_OK;
	}

	// Didn't find one, try the secondary hash value

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->valid)
			continue;

		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
			protection, true);
			// entries in the secondary group must be flagged as such, or they
			// won't be found again (cf. LookupPageTableEntry())
		fMapCount++;
		return B_OK;
	}

	panic("vm_translation_map.map_tmap: hash table full\n");
	return B_ERROR;
}


status_t
PPCVMTranslationMap::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	end = ROUNDUP(end, B_PAGE_SIZE);

//	dprintf("vm_translation_map.unmap_tmap: start 0x%lx, end 0x%lx\n", start, end);

	while (start < end) {
		if (RemovePageTableEntry(start))
			fMapCount--;

		start += B_PAGE_SIZE;
	}

	return B_OK;
}


status_t
PPCVMTranslationMap::Query(addr_t va, addr_t *_outPhysical, uint32 *_outFlags)
{
	page_table_entry *entry;

	// default the flags to not present
	*_outFlags = 0;
	*_outPhysical = 0;

	entry = LookupPageTableEntry(va);
	if (entry == NULL)
		return B_NO_ERROR;

	// ToDo: check this!
	if (IS_KERNEL_ADDRESS(va)) {
		*_outFlags |= B_KERNEL_READ_AREA
			| (entry->page_protection == PTE_READ_ONLY ? 0 : B_KERNEL_WRITE_AREA);
	} else {
		*_outFlags |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_READ_AREA
			| (entry->page_protection == PTE_READ_ONLY ? 0 : B_WRITE_AREA);
	}

	*_outFlags |= entry->changed ? PAGE_MODIFIED : 0;
	*_outFlags |= entry->referenced ? PAGE_ACCESSED : 0;
	*_outFlags |= entry->valid ? PAGE_PRESENT : 0;

	*_outPhysical = entry->physical_page_number * B_PAGE_SIZE;

	return B_OK;
}
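

// The class declaration above also lists QueryInterrupt(), but no definition
// appears in this listing. A minimal sketch (assumption): since the hash table
// lookup neither blocks nor allocates, it should be safe to simply forward to
// Query() here.
status_t
PPCVMTranslationMap::QueryInterrupt(addr_t virtualAddress,
	addr_t *_physicalAddress, uint32 *_flags)
{
	return Query(virtualAddress, _physicalAddress, _flags);
}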


addr_t
PPCVMTranslationMap::MappedSize() const
{
	return fMapCount;
}


status_t
PPCVMTranslationMap::Protect(addr_t base, addr_t top, uint32 attributes)
{
	// XXX finish
	return B_ERROR;
}


status_t
PPCVMTranslationMap::ClearFlags(addr_t virtualAddress, uint32 flags)
{
	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
	if (entry == NULL)
		return B_NO_ERROR;

	bool modified = false;

	// clear the bits
	if (flags & PAGE_MODIFIED && entry->changed) {
		entry->changed = false;
		modified = true;
	}
	if (flags & PAGE_ACCESSED && entry->referenced) {
		entry->referenced = false;
		modified = true;
	}

	// synchronize
	if (modified) {
		tlbie(virtualAddress);
		eieio();
		tlbsync();
		ppc_sync();
	}

	return B_OK;
}


void
PPCVMTranslationMap::Flush()
{
// TODO: arch_cpu_global_TLB_invalidate() is extremely expensive and doesn't
// even cut it here. We are supposed to invalidate all TLB entries for this
// map on all CPUs. We should loop over the virtual pages and invoke tlbie
// instead (which marks the entry invalid on all CPUs).
	arch_cpu_global_TLB_invalidate();
}
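

// A minimal sketch (for illustration only, not wired in anywhere) of the
// per-page invalidation the TODO in Flush() describes, for a caller that
// knows which virtual range was touched. The helper name is made up.
static inline void
flush_range_sketch(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	end = ROUNDUP(end, B_PAGE_SIZE);

	for (addr_t virtualAddress = start; virtualAddress < end;
			virtualAddress += B_PAGE_SIZE) {
		tlbie(virtualAddress);
			// broadcast invalidation of this page's TLB entries
	}
	eieio();
	tlbsync();
	ppc_sync();
}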


static status_t
get_physical_page_tmap(addr_t physicalAddress, addr_t *_virtualAddress,
	void **handle)
{
	return generic_get_physical_page(physicalAddress, _virtualAddress, 0);
}


static status_t
put_physical_page_tmap(addr_t virtualAddress, void *handle)
{
	return generic_put_physical_page(virtualAddress);
}


//  #pragma mark -
//  VM API


status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	PPCVMTranslationMap* map = new(std::nothrow) PPCVMTranslationMap;
	if (map == NULL)
		return B_NO_MEMORY;

	status_t error = map->Init(kernel);
	if (error != B_OK) {
		delete map;
		return error;
	}

	*_map = map;
	return B_OK;
}


status_t
arch_vm_translation_map_init(kernel_args *args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	sPageTable = (page_table_entry_group *)args->arch_args.page_table.start;
	sPageTableSize = args->arch_args.page_table.size;
	sPageTableHashMask = sPageTableSize / sizeof(page_table_entry_group) - 1;

	// init physical page mapper
	status_t error = generic_vm_physical_page_mapper_init(args,
		map_iospace_chunk, &sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
	if (error != B_OK)
		return error;

	new(&sPhysicalPageMapper) GenericVMPhysicalPageMapper;

	*_physicalPageMapper = &sPhysicalPageMapper;
	return B_OK;
}


status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
	// If the page table doesn't lie within the kernel address space, we
	// remap it.
	if (!IS_KERNEL_ADDRESS(sPageTable)) {
		addr_t newAddress = (addr_t)sPageTable;
		status_t error = ppc_remap_address_range(&newAddress, sPageTableSize,
			false);
		if (error != B_OK) {
			panic("arch_vm_translation_map_init_post_area(): Failed to remap "
				"the page table!");
			return error;
		}

		// set the new page table address
		addr_t oldVirtualBase = (addr_t)(sPageTable);
		sPageTable = (page_table_entry_group*)newAddress;

		// unmap the old pages
		ppc_unmap_address_range(oldVirtualBase, sPageTableSize);

// TODO: We should probably map the page table via BAT. It is relatively large,
// and due to being a hash table the access patterns might look sporadic, which
// certainly isn't to the liking of the TLB.
	}

	// create an area to cover the page table
	sPageTableArea = create_area("page_table", (void **)&sPageTable, B_EXACT_ADDRESS,
		sPageTableSize, B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

	// init physical page mapper
	status_t error = generic_vm_physical_page_mapper_init_post_area(args);
	if (error != B_OK)
		return error;

	return B_OK;
}


status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
	// init physical page mapper
	return generic_vm_physical_page_mapper_init_post_sem(args);
}


/**	Directly maps a page without having knowledge of any kernel structures.
 *	Used only during VM setup.
 *	It currently ignores the "attributes" parameter and sets all pages
 *	read/write.
 */

status_t
arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress,
	addr_t physicalAddress, uint8 attributes,
	addr_t (*get_free_page)(kernel_args *))
{
	uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		(uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		// 8 entries in a group
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, PTE_READ_WRITE, false);
		return B_OK;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, PTE_READ_WRITE, true);
		return B_OK;
	}

	return B_ERROR;
}


// XXX currently assumes this translation map is active

status_t
arch_vm_translation_map_early_query(addr_t va, addr_t *out_physical)
{
	//PANIC_UNIMPLEMENTED();
	panic("arch_vm_translation_map_early_query(): not yet implemented\n");
	return B_OK;
}


// #pragma mark -


status_t
ppc_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
	size_t size)
{
	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
	virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);
	physicalAddress = ROUNDDOWN(physicalAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();
	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());

	// map the pages
	for (; virtualAddress < virtualEnd;
		 virtualAddress += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
		status_t error = map->Map(virtualAddress, physicalAddress,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		if (error != B_OK)
			return error;
	}

	return B_OK;
}


void
ppc_unmap_address_range(addr_t virtualAddress, size_t size)
{
	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
	virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();

	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());
	for (; virtualAddress < virtualEnd; virtualAddress += B_PAGE_SIZE)
		map->RemovePageTableEntry(virtualAddress);
}


status_t
ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
{
	addr_t virtualAddress = ROUNDDOWN(*_virtualAddress, B_PAGE_SIZE);
	size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();

	// reserve space in the address space
	void *newAddress = NULL;
	status_t error = vm_reserve_address_range(addressSpace->ID(), &newAddress,
		B_ANY_KERNEL_ADDRESS, size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (error != B_OK)
		return error;

	// get the area's first physical page
	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());
	page_table_entry *entry = map->LookupPageTableEntry(virtualAddress);
	if (!entry)
		return B_ERROR;
	addr_t physicalBase = entry->physical_page_number << 12;

	// map the pages
	error = ppc_map_address_range((addr_t)newAddress, physicalBase, size);
	if (error != B_OK)
		return error;

	*_virtualAddress = (addr_t)newAddress;

	// unmap the old pages
	if (unmap)
		ppc_unmap_address_range(virtualAddress, size);

	return B_OK;
}


bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	uint32 protection)
{
	// TODO: Implement!
	return false;
}