xref: /haiku/src/system/kernel/arch/ppc/arch_vm_translation_map.cpp (revision 302f62604763c95777d6d04cca456e876f471c4f)
1 /*
2  * Copyright 2003-2006, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*	(bonefish) Some explanatory words on how address translation is implemented
10 	for the 32 bit PPC architecture.
11 
12 	I use the address type nomenclature as used in the PPC architecture
13 	specs, i.e.
14 	- effective address: An address as used by program instructions, i.e.
15 	  that's what elsewhere (e.g. in the VM implementation) is called
16 	  virtual address.
17 	- virtual address: An intermediate address computed from the effective
18 	  address via the segment registers.
19 	- physical address: An address referring to physical storage.
20 
21 	The hardware translates an effective address to a physical address using
22 	either of two mechanisms: 1) Block Address Translation (BAT) or
23 	2) segment + page translation. The former does this directly, using two
24 	sets (one for data, one for instructions) of special purpose registers.
25 	The latter mechanism is of more relevance here, though:
26 
27 	effective address (32 bit):	     [ 0 ESID  3 | 4  PIX 19 | 20 Byte 31 ]
28 								           |           |            |
29 							     (segment registers)   |            |
30 									       |           |            |
31 	virtual address (52 bit):   [ 0      VSID 23 | 24 PIX 39 | 40 Byte 51 ]
32 	                            [ 0             VPN       39 | 40 Byte 51 ]
33 								                 |                  |
34 										   (page table)             |
35 											     |                  |
36 	physical address (32 bit):       [ 0        PPN       19 | 20 Byte 31 ]
37 
38 
39 	ESID: Effective Segment ID
40 	VSID: Virtual Segment ID
41 	PIX:  Page Index
42 	VPN:  Virtual Page Number
43 	PPN:  Physical Page Number
44 
45 
46 	Unlike on x86, we can't switch the context to another team simply by
47 	pointing a register at another page directory, since we only have one
48 	page table containing both kernel and user address mappings. Instead we
49 	map the effective address spaces of the kernel and *all* teams into
50 	non-intersecting ranges of the virtual address space (which fortunately
51 	is 20 bits wider), and use the segment registers to select the current
52 	team's range (see the sketch following this comment). Half of the 16 segment
53 	registers (8 - 15) map the kernel addresses, so they remain unchanged.
54 
55 	The range of the virtual address space a team's effective address space
56 	is mapped to is defined by its vm_translation_map_arch_info::vsid_base,
57 	which is the first of the 8 successive VSID values used for the team.
58 
59 	Which vsid_base values are already taken is defined by the set bits in
60 	the bitmap sVSIDBaseBitmap.
61 
62 
63 	TODO:
64 	* If we want to continue to use the OF services, we would need to add
65 	  its address mappings to the kernel space. Unfortunately some stuff
66 	  (especially RAM) is mapped in an address range outside the kernel
67 	  address space. We probably need to map those into each team's address
68 	  space as kernel read/write areas.
69 	* The current locking scheme is insufficient. The page table is a resource
70 	  shared by all teams. We need to synchronize access to it. Probably via a
71 	  spinlock.
72  */
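
/*	A minimal illustrative sketch (not part of the implementation): how the
	VSID for an effective address is derived under the scheme described
	above. The helper name is hypothetical; the real code uses the
	VADDR_TO_VSID() macro defined further below.

		static inline uint32
		sketch_vsid(int vsidBase, addr_t effectiveAddress)
		{
			// bits 0-3 of the effective address select one of the 16
			// segment registers; the team's vsid_base plus that segment
			// number yields the 24 bit VSID
			return (uint32)vsidBase + (effectiveAddress >> 28);
		}

	For example, a team with vsid_base 16 and effective address 0x30001234:
	segment number 3, thus VSID 19. The page index (bits 4-19) and byte
	offset (bits 20-31) are carried over unchanged into the virtual address.
*/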
73 
74 #include <KernelExport.h>
75 #include <kernel.h>
76 #include <vm.h>
77 #include <vm_address_space.h>
78 #include <vm_priv.h>
79 #include <int.h>
80 #include <boot/kernel_args.h>
81 #include <arch/vm_translation_map.h>
82 #include <arch/cpu.h>
83 #include <arch_mmu.h>
84 #include <stdlib.h>
85 
86 #include "generic_vm_physical_page_mapper.h"
87 
88 static struct page_table_entry_group *sPageTable;
89 static size_t sPageTableSize;
90 static uint32 sPageTableHashMask;
91 static area_id sPageTableArea;
92 
93 
94 // 64 MB of iospace
95 #define IOSPACE_SIZE (64*1024*1024)
96 // We only have small (4 KB) pages. The only reason for choosing a greater
97 // chunk size is to limit the waste of memory, since the generic page mapper
98 // allocates its bookkeeping structures per physical/virtual chunk.
99 // TODO: Implement a page mapper more suitable for small pages!
100 #define IOSPACE_CHUNK_SIZE (16 * B_PAGE_SIZE)
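// Worked numbers (illustrative only): with 4 KB pages and the values above,
// the iospace consists of IOSPACE_SIZE / IOSPACE_CHUNK_SIZE = 1024 chunks
// instead of 16384 individually tracked pages, so the generic page mapper's
// per-chunk bookkeeping stays correspondingly small.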
101 
102 static addr_t sIOSpaceBase;
103 
104 
105 // The VSID is a 24 bit number. The lower three bits are defined by the
106 // (effective) segment number, which leaves us with a 21 bit space of VSID
107 // bases (= 2 * 1024 * 1024), of which we only track one page worth of bits.
108 #define MAX_VSID_BASES (PAGE_SIZE * 8)
109 static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)];
110 static spinlock sVSIDBaseBitmapLock;
111 
112 #define VSID_BASE_SHIFT 3
113 #define VADDR_TO_VSID(map, vaddr) \
114 	((map)->arch_data->vsid_base + ((vaddr) >> 28))
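
// Worked example (illustrative only): with a 4 KB page size, MAX_VSID_BASES
// is 32768, tracked in the one-page bitmap sVSIDBaseBitmap above. A team that
// was assigned bitmap bit 5 gets vsid_base 5 << VSID_BASE_SHIFT == 40 and
// thus owns VSIDs 40 through 47; VADDR_TO_VSID() then maps that team's
// effective address 0x20000000 (segment 2) to VSID 42.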
115 
116 // vm_translation object stuff
117 typedef struct vm_translation_map_arch_info {
118 	int vsid_base;	// used VSIDs are vsid_base ... vsid_base + 7
119 } vm_translation_map_arch_info;
120 
121 
122 void
123 ppc_translation_map_change_asid(vm_translation_map *map)
124 {
125 // this code depends on the kernel being at 0x80000000, fix if we change that
126 #if KERNEL_BASE != 0x80000000
127 #error fix me
128 #endif
129 	int vsidBase = map->arch_data->vsid_base;
130 
131 	isync();	// synchronize context
132 	asm("mtsr	0,%0" : : "g"(vsidBase));
133 	asm("mtsr	1,%0" : : "g"(vsidBase + 1));
134 	asm("mtsr	2,%0" : : "g"(vsidBase + 2));
135 	asm("mtsr	3,%0" : : "g"(vsidBase + 3));
136 	asm("mtsr	4,%0" : : "g"(vsidBase + 4));
137 	asm("mtsr	5,%0" : : "g"(vsidBase + 5));
138 	asm("mtsr	6,%0" : : "g"(vsidBase + 6));
139 	asm("mtsr	7,%0" : : "g"(vsidBase + 7));
140 	isync();	// synchronize context
141 }
142 
143 
144 static status_t
145 lock_tmap(vm_translation_map *map)
146 {
147 	recursive_lock_lock(&map->lock);
148 	return 0;
149 }
150 
151 
152 static status_t
153 unlock_tmap(vm_translation_map *map)
154 {
155 	recursive_lock_unlock(&map->lock);
156 	return 0;
157 }
158 
159 
160 static void
161 destroy_tmap(vm_translation_map *map)
162 {
163 	if (map->map_count > 0) {
164 		panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n",
165 			map, map->map_count);
166 	}
167 
168 	// mark the vsid base not in use
169 	int baseBit = map->arch_data->vsid_base >> VSID_BASE_SHIFT;
170 	atomic_and((vint32 *)&sVSIDBaseBitmap[baseBit / 32],
171 			~(1 << (baseBit % 32)));
172 
173 	free(map->arch_data);
174 	recursive_lock_destroy(&map->lock);
175 }
176 
177 
178 static void
179 fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
180 	addr_t virtualAddress, addr_t physicalAddress, uint8 protection,
181 	bool secondaryHash)
182 {
183 	// lower 32 bit - set at once
184 	entry->physical_page_number = physicalAddress / B_PAGE_SIZE;
185 	entry->_reserved0 = 0;
186 	entry->referenced = false;
187 	entry->changed = false;
188 	entry->write_through = false;
189 	entry->caching_inhibited = false;
190 	entry->memory_coherent = false;
191 	entry->guarded = false;
192 	entry->_reserved1 = 0;
193 	entry->page_protection = protection & 0x3;
194 	eieio();
195 		// we need to make sure that the lower 32 bit were
196 		// already written when the entry becomes valid
197 
198 	// upper 32 bit
199 	entry->virtual_segment_id = virtualSegmentID;
200 	entry->secondary_hash = secondaryHash;
201 	entry->abbr_page_index = (virtualAddress >> 22) & 0x3f;
202 	entry->valid = true;
203 
204 	ppc_sync();
205 }
206 
207 
208 static status_t
209 map_tmap(vm_translation_map *map, addr_t virtualAddress, addr_t physicalAddress, uint32 attributes)
210 {
211 	// lookup the vsid based off the va
212 	uint32 virtualSegmentID = VADDR_TO_VSID(map, virtualAddress);
213 	uint32 protection = 0;
214 
215 	// ToDo: check this
216 	// all kernel mappings are R/W to supervisor code
217 	if (attributes & (B_READ_AREA | B_WRITE_AREA))
218 		protection = (attributes & B_WRITE_AREA) ? PTE_READ_WRITE : PTE_READ_ONLY;
219 
220 	//dprintf("vm_translation_map.map_tmap: vsid %d, pa 0x%lx, va 0x%lx\n", vsid, pa, va);
221 
222 	// Search for a free page table slot using the primary hash value
223 
224 	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
225 	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
226 
227 	for (int i = 0; i < 8; i++) {
228 		page_table_entry *entry = &group->entry[i];
229 
230 		if (entry->valid)
231 			continue;
232 
233 		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
234 			protection, false);
235 		map->map_count++;
236 		return B_OK;
237 	}
238 
239 	// Didn't find one, try the secondary hash value
240 
241 	hash = page_table_entry::SecondaryHash(hash);
242 	group = &sPageTable[hash & sPageTableHashMask];
243 
244 	for (int i = 0; i < 8; i++) {
245 		page_table_entry *entry = &group->entry[i];
246 
247 		if (entry->valid)
248 			continue;
249 
250 		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
251 			protection, true);
252 		map->map_count++;
253 		return B_OK;
254 	}
255 
256 	panic("vm_translation_map.map_tmap: hash table full\n");
257 	return B_ERROR;
258 }
259 
260 
261 static page_table_entry *
262 lookup_page_table_entry(vm_translation_map *map, addr_t virtualAddress)
263 {
264 	// lookup the vsid based off the va
265 	uint32 virtualSegmentID = VADDR_TO_VSID(map, virtualAddress);
266 
267 //	dprintf("vm_translation_map.lookup_page_table_entry: vsid %d, va 0x%lx\n", vsid, va);
268 
269 
270 	// Search for the page table entry using the primary hash value
271 
272 	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
273 	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
274 
275 	for (int i = 0; i < 8; i++) {
276 		page_table_entry *entry = &group->entry[i];
277 
278 		if (entry->virtual_segment_id == virtualSegmentID
279 			&& entry->secondary_hash == false
280 			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
281 			return entry;
282 	}
283 
284 	// Didn't find it, try the secondary hash value
285 
286 	hash = page_table_entry::SecondaryHash(hash);
287 	group = &sPageTable[hash & sPageTableHashMask];
288 
289 	for (int i = 0; i < 8; i++) {
290 		page_table_entry *entry = &group->entry[i];
291 
292 		if (entry->virtual_segment_id == virtualSegmentID
293 			&& entry->secondary_hash == true
294 			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
295 			return entry;
296 	}
297 
298 	return NULL;
299 }
300 
301 
302 static bool
303 remove_page_table_entry(vm_translation_map *map, addr_t virtualAddress)
304 {
305 	page_table_entry *entry = lookup_page_table_entry(map, virtualAddress);
306 	if (entry) {
307 		entry->valid = 0;
308 		ppc_sync();
309 		tlbie(virtualAddress);
310 		eieio();
311 		tlbsync();
312 		ppc_sync();
313 	}
314 
315 	return entry != NULL;
316 }
317 
318 
319 static status_t
320 unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
321 {
322 	page_table_entry *entry;
323 
324 	start = ROUNDOWN(start, B_PAGE_SIZE);
325 	end = ROUNDUP(end, B_PAGE_SIZE);
326 
327 //	dprintf("vm_translation_map.unmap_tmap: start 0x%lx, end 0x%lx\n", start, end);
328 
329 	while (start < end) {
330 		if (remove_page_table_entry(map, start))
331 			map->map_count--;
332 
333 		start += B_PAGE_SIZE;
334 	}
335 
336 	return B_OK;
337 }
338 
339 
340 static status_t
341 query_tmap_interrupt(vm_translation_map *map, addr_t va, addr_t *_outPhysical)
342 {
343 	page_table_entry *entry;
344 
345 	// default to no translation found
346 	*_outPhysical = 0;
347 
348 	entry = lookup_page_table_entry(map, va);
349 	if (entry == NULL)
350 		return B_ERROR;
351 
352 	*_outPhysical = entry->physical_page_number * B_PAGE_SIZE;
353 	return B_OK;
354 }
355 
356 
357 static status_t
358 query_tmap(vm_translation_map *map, addr_t va, addr_t *_outPhysical, uint32 *_outFlags)
359 {
360 	page_table_entry *entry;
361 
362 	// default the flags to not present
363 	*_outFlags = 0;
364 	*_outPhysical = 0;
365 
366 	entry = lookup_page_table_entry(map, va);
367 	if (entry == NULL)
368 		return B_NO_ERROR;
369 
370 	// ToDo: check this!
371 	if (IS_KERNEL_ADDRESS(va))
372 		*_outFlags |= B_KERNEL_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_KERNEL_WRITE_AREA);
373 	else
374 		*_outFlags |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_WRITE_AREA);
375 
376 	*_outFlags |= entry->changed ? PAGE_MODIFIED : 0;
377 	*_outFlags |= entry->referenced ? PAGE_ACCESSED : 0;
378 	*_outFlags |= entry->valid ? PAGE_PRESENT : 0;
379 
380 	*_outPhysical = entry->physical_page_number * B_PAGE_SIZE;
381 
382 	return B_OK;
383 }
384 
385 
386 static status_t
387 map_iospace_chunk(addr_t va, addr_t pa)
388 {
389 	pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
390 	va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
391 	if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
392 		panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);
393 
394 	// map the pages
395 	return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
396 }
397 
398 
399 static addr_t
400 get_mapped_size_tmap(vm_translation_map *map)
401 {
402 	return map->map_count;
403 }
404 
405 
406 static status_t
407 protect_tmap(vm_translation_map *map, addr_t base, addr_t top, uint32 attributes)
408 {
409 	// XXX finish
410 	return B_ERROR;
411 }
412 
413 
414 static status_t
415 clear_flags_tmap(vm_translation_map *map, addr_t virtualAddress, uint32 flags)
416 {
417 	page_table_entry *entry = lookup_page_table_entry(map, virtualAddress);
418 	if (entry == NULL)
419 		return B_NO_ERROR;
420 
421 	bool modified = false;
422 
423 	// clear the bits
424 	if (flags & PAGE_MODIFIED && entry->changed) {
425 		entry->changed = false;
426 		modified = true;
427 	}
428 	if (flags & PAGE_ACCESSED && entry->referenced) {
429 		entry->referenced = false;
430 		modified = true;
431 	}
432 
433 	// synchronize
434 	if (modified) {
435 		tlbie(virtualAddress);
436 		eieio();
437 		tlbsync();
438 		ppc_sync();
439 	}
440 
441 	return B_OK;
442 }
443 
444 
445 static void
446 flush_tmap(vm_translation_map *map)
447 {
448 // TODO: arch_cpu_global_TLB_invalidate() is extremely expensive and doesn't
449 // even cut it here. We are supposed to invalidate all TLB entries for this
450 // map on all CPUs. We should loop over the virtual pages and invoke tlbie
451 // instead (which marks the entry invalid on all CPUs).
452 	arch_cpu_global_TLB_invalidate();
453 }
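
/*	A minimal sketch of the per-page invalidation the TODO above describes,
	assuming the caller knows the affected range (hypothetical helper, not
	used anywhere in this file):

		static void
		sketch_invalidate_range(addr_t start, addr_t end)
		{
			start = ROUNDOWN(start, B_PAGE_SIZE);
			for (addr_t va = start; va < end; va += B_PAGE_SIZE)
				tlbie(va);	// marks the entry invalid on all CPUs
			eieio();
			tlbsync();
			ppc_sync();
		}
*/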
454 
455 
456 static status_t
457 get_physical_page_tmap(addr_t pa, addr_t *va, uint32 flags)
458 {
459 	return generic_get_physical_page(pa, va, flags);
460 }
461 
462 
463 static status_t
464 put_physical_page_tmap(addr_t va)
465 {
466 	return generic_put_physical_page(va);
467 }
468 
469 
470 static vm_translation_map_ops tmap_ops = {
471 	destroy_tmap,
472 	lock_tmap,
473 	unlock_tmap,
474 	map_tmap,
475 	unmap_tmap,
476 	query_tmap,
477 	query_tmap_interrupt,
478 	get_mapped_size_tmap,
479 	protect_tmap,
480 	clear_flags_tmap,
481 	flush_tmap,
482 	get_physical_page_tmap,
483 	put_physical_page_tmap
484 };
485 
486 
487 //  #pragma mark -
488 //  VM API
489 
490 
491 status_t
492 arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
493 {
494 	// initialize the new object
495 	map->ops = &tmap_ops;
496 	map->map_count = 0;
497 
498 	if (!kernel) {
499 		// During the boot process no semaphores are available yet, so we
500 		// only create the translation map lock here when we're initializing
501 		// a user translation map.
502 		// arch_vm_translation_map_init_kernel_map_post_sem() is used to
503 		// complete the kernel translation map.
504 		if (recursive_lock_init(&map->lock, "translation map") < B_OK)
505 			return map->lock.sem;
506 	}
507 
508 	map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
509 	if (map->arch_data == NULL) {
510 		if (!kernel)
511 			recursive_lock_destroy(&map->lock);
512 		return B_NO_MEMORY;
513 	}
514 
515 	cpu_status state = disable_interrupts();
516 	acquire_spinlock(&sVSIDBaseBitmapLock);
517 
518 	// allocate a VSID base for this one
519 	if (kernel) {
520 		// The boot loader has set up the segment registers for identical
521 		// mapping. Two VSID bases are reserved for the kernel: 0 and 8. The
522 		// latter one for mapping the kernel address space (0x80000000...), the
523 		// former one for the lower addresses required by the Open Firmware
524 		// services.
525 		map->arch_data->vsid_base = 0;
526 		sVSIDBaseBitmap[0] |= 0x3;
527 	} else {
528 		int i = 0;
529 
530 		while (i < MAX_VSID_BASES) {
531 			if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
532 				i += 32;
533 				continue;
534 			}
535 			if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
536 				// we found it
537 				sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
538 				break;
539 			}
540 			i++;
541 		}
542 		if (i >= MAX_VSID_BASES)
543 			panic("vm_translation_map_create: out of VSID bases\n");
544 		map->arch_data->vsid_base = i << VSID_BASE_SHIFT;
545 	}
546 
547 	release_spinlock(&sVSIDBaseBitmapLock);
548 	restore_interrupts(state);
549 
550 	return B_OK;
551 }
552 
553 
554 status_t
555 arch_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
556 {
557 	if (recursive_lock_init(&map->lock, "translation map") < B_OK)
558 		return map->lock.sem;
559 
560 	return B_OK;
561 }
562 
563 
564 status_t
565 arch_vm_translation_map_init(kernel_args *args)
566 {
567 	sPageTable = (page_table_entry_group *)args->arch_args.page_table.start;
568 	sPageTableSize = args->arch_args.page_table.size;
569 	sPageTableHashMask = sPageTableSize / sizeof(page_table_entry_group) - 1;
570 
571 	// init physical page mapper
572 	status_t error = generic_vm_physical_page_mapper_init(args,
573 		map_iospace_chunk, &sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
574 	if (error != B_OK)
575 		return error;
576 
577 	return B_OK;
578 }
579 
580 
581 status_t
582 arch_vm_translation_map_init_post_area(kernel_args *args)
583 {
584 	// If the page table doesn't lie within the kernel address space, we
585 	// remap it.
586 	if (!IS_KERNEL_ADDRESS(sPageTable)) {
587 		addr_t newAddress = (addr_t)sPageTable;
588 		status_t error = ppc_remap_address_range(&newAddress, sPageTableSize,
589 			false);
590 		if (error != B_OK) {
591 			panic("arch_vm_translation_map_init_post_area(): Failed to remap "
592 				"the page table!");
593 			return error;
594 		}
595 
596 		// set the new page table address
597 		addr_t oldVirtualBase = (addr_t)(sPageTable);
598 		sPageTable = (page_table_entry_group*)newAddress;
599 
600 		// unmap the old pages
601 		ppc_unmap_address_range(oldVirtualBase, sPageTableSize);
602 
603 // TODO: We should probably map the page table via BAT. It is relatively large,
604 // and due to being a hash table the access patterns might look sporadic, which
605 // certainly isn't to the liking of the TLB.
606 	}
607 
608 	// create an area to cover the page table
609 	sPageTableArea = create_area("page_table", (void **)&sPageTable, B_EXACT_ADDRESS,
610 		sPageTableSize, B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
611 
612 	// init physical page mapper
613 	status_t error = generic_vm_physical_page_mapper_init_post_area(args);
614 	if (error != B_OK)
615 		return error;
616 
617 	return B_OK;
618 }
619 
620 
621 status_t
622 arch_vm_translation_map_init_post_sem(kernel_args *args)
623 {
624 	// init physical page mapper
625 	return generic_vm_physical_page_mapper_init_post_sem(args);
626 }
627 
628 
629 /**	Directly maps a page without having knowledge of any kernel structures.
630  *	Used only during VM setup.
631  *	It currently ignores the "attributes" parameter and sets all pages
632  *	read/write.
633  */
634 
635 status_t
636 arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress, addr_t physicalAddress,
637 	uint8 attributes, addr_t (*get_free_page)(kernel_args *))
638 {
639 	uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;
640 
641 	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, (uint32)virtualAddress);
642 	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
643 
644 	for (int32 i = 0; i < 8; i++) {
645 		// 8 entries in a group
646 		if (group->entry[i].valid)
647 			continue;
648 
649 		fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, PTE_READ_WRITE, false);
650 		return B_OK;
651 	}
652 
653 	hash = page_table_entry::SecondaryHash(hash);
654 	group = &sPageTable[hash & sPageTableHashMask];
655 
656 	for (int32 i = 0; i < 8; i++) {
657 		if (group->entry[i].valid)
658 			continue;
659 
660 		fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, PTE_READ_WRITE, true);
661 		return B_OK;
662 	}
663 
664 	return B_ERROR;
665 }
666 
667 
668 // XXX currently assumes this translation map is active
669 
670 status_t
671 arch_vm_translation_map_early_query(addr_t va, addr_t *out_physical)
672 {
673 	//PANIC_UNIMPLEMENTED();
674 	panic("arch_vm_translation_map_early_query(): not yet implemented\n");
675 	return B_OK;
676 }
677 
678 
679 // #pragma mark -
680 
681 
682 status_t
683 ppc_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
684 	size_t size)
685 {
686 	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
687 	virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE);
688 	physicalAddress = ROUNDOWN(physicalAddress, B_PAGE_SIZE);
689 
690 	vm_address_space *addressSpace = vm_kernel_address_space();
691 
692 	// map the pages
693 	for (; virtualAddress < virtualEnd;
694 		 virtualAddress += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
695 		status_t error = map_tmap(&addressSpace->translation_map,
696 			virtualAddress, physicalAddress,
697 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
698 		if (error != B_OK)
699 			return error;
700 	}
701 
702 	return B_OK;
703 }
704 
705 
706 void
707 ppc_unmap_address_range(addr_t virtualAddress, size_t size)
708 {
709 	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
710 	virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE);
711 
712 	vm_address_space *addressSpace = vm_kernel_address_space();
713 
714 	for (; virtualAddress < virtualEnd; virtualAddress += B_PAGE_SIZE)
715 		remove_page_table_entry(&addressSpace->translation_map, virtualAddress);
716 }
717 
718 
719 status_t
720 ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
721 {
722 	addr_t virtualAddress = ROUNDOWN(*_virtualAddress, B_PAGE_SIZE);
723 	size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);
724 
725 	vm_address_space *addressSpace = vm_kernel_address_space();
726 
727 	// reserve space in the address space
728 	void *newAddress = NULL;
729 	status_t error = vm_reserve_address_range(addressSpace->id, &newAddress,
730 		B_ANY_KERNEL_ADDRESS, size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
731 	if (error != B_OK)
732 		return error;
733 
734 	// get the area's first physical page
735 	page_table_entry *entry = lookup_page_table_entry(
736 		&addressSpace->translation_map, virtualAddress);
737 	if (!entry)
738 		return B_ERROR;
739 	addr_t physicalBase = entry->physical_page_number << 12;
740 
741 	// map the pages
742 	error = ppc_map_address_range((addr_t)newAddress, physicalBase, size);
743 	if (error != B_OK)
744 		return error;
745 
746 	*_virtualAddress = (addr_t)newAddress;
747 
748 	// unmap the old pages
749 	if (unmap)
750 		ppc_unmap_address_range(virtualAddress, size);
751 
752 	return B_OK;
753 }
754 
755