xref: /haiku/src/system/kernel/arch/ppc/arch_vm_translation_map.cpp (revision a4ef4a49150f118d47324242917a596a3f8f8bd5)
1 /*
2  * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*	(bonefish) Some explanatory words on how address translation is implemented
10 	for the 32 bit PPC architecture.
11 
12 	I use the address type nomenclature as used in the PPC architecture
13 	specs, i.e.
14 	- effective address: An address as used by program instructions, i.e.
15 	  that's what elsewhere (e.g. in the VM implementation) is called
16 	  virtual address.
17 	- virtual address: An intermediate address computed from the effective
18 	  address via the segment registers.
19 	- physical address: An address referring to physical storage.
20 
21 	The hardware translates an effective address to a physical address using
22 	either of two mechanisms: 1) Block Address Translation (BAT) or
23 	2) segment + page translation. The first mechanism does this directly
24 	using two sets (for data/instructions) of special purpose registers.
25 	The latter mechanism is of more relevance here, though:
26 
27 	effective address (32 bit):	     [ 0 ESID  3 | 4  PIX 19 | 20 Byte 31 ]
28 								           |           |            |
29 							     (segment registers)   |            |
30 									       |           |            |
31 	virtual address (52 bit):   [ 0      VSID 23 | 24 PIX 39 | 40 Byte 51 ]
32 	                            [ 0             VPN       39 | 40 Byte 51 ]
33 								                 |                  |
34 										   (page table)             |
35 											     |                  |
36 	physical address (32 bit):       [ 0        PPN       19 | 20 Byte 31 ]
37 
38 
39 	ESID: Effective Segment ID
40 	VSID: Virtual Segment ID
41 	PIX:  Page Index
42 	VPN:  Virtual Page Number
43 	PPN:  Physical Page Number
44 
45 
46 	Unlike on x86, we can't switch the context to another team simply by
47 	setting a register to another page directory, since we have only one
48 	page table containing both kernel and user address mappings. Instead we
49 	map the effective address space of kernel and *all* teams
50 	non-intersectingly into the virtual address space (which fortunately is
51 	20 bits wider), and use the segment registers to select the section of
52 	the virtual address space for the current team. Half of the 16 segment
53 	registers (8 - 15) map the kernel addresses, so they remain unchanged.
54 
55 	The range of the virtual address space a team's effective address space
56 	is mapped to is defined by its vm_translation_map_arch_info::vsid_base,
57 	which is the first of the 8 successive VSID values used for the team.
58 
59 	Which vsid_base values are already taken is defined by the set bits in
60 	the bitmap sVSIDBaseBitmap.
61 
62 
63 	TODO:
64 	* If we want to continue to use the OF services, we would need to add
65 	  its address mappings to the kernel space. Unfortunately some stuff
66 	  (especially RAM) is mapped in an address range outside the kernel
67 	  address space. We probably need to map those ranges into each team's
68 	  address space as kernel read/write areas.
69 	* The current locking scheme is insufficient. The page table is a resource
70 	  shared by all teams. We need to synchronize access to it. Probably via a
71 	  spinlock.
72  */
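
// To make the scheme described above concrete, here is a small sketch (not
// compiled; it assumes the standard 32 bit PPC OEA hash functions that
// page_table_entry::PrimaryHash()/SecondaryHash() are expected to implement,
// and uses illustrative names only) of how an effective address is turned
// into a page table entry group index:
#if 0
static uint32
example_page_table_group_index(uint32 vsidBase, addr_t effectiveAddress,
	uint32 pageTableHashMask, bool secondary)
{
	// the top 4 bits of the effective address select one of the 16 segment
	// registers; for this map they hold vsidBase + segment number
	// (cf. VADDR_TO_VSID() below)
	uint32 vsid = vsidBase + (effectiveAddress >> 28);

	// page index: bits 4-19 of the effective address
	uint32 pageIndex = (effectiveAddress >> 12) & 0xffff;

	// primary hash: the low 19 bits of the VSID XORed with the page index;
	// the secondary hash is its one's complement
	uint32 hash = (vsid & 0x7ffff) ^ pageIndex;
	if (secondary)
		hash = ~hash;

	// masked to the page table size, the hash selects a group of 8 entries
	return hash & pageTableHashMask;
}
#endif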
73 
74 #include <KernelExport.h>
75 #include <kernel.h>
76 #include <vm.h>
77 #include <vm_address_space.h>
78 #include <vm_priv.h>
79 #include <int.h>
80 #include <boot/kernel_args.h>
81 #include <arch/vm_translation_map.h>
82 #include <arch/cpu.h>
83 #include <arch_mmu.h>
84 #include <stdlib.h>
85 
86 #include "generic_vm_physical_page_mapper.h"
87 #include "generic_vm_physical_page_ops.h"
88 
89 
90 static struct page_table_entry_group *sPageTable;
91 static size_t sPageTableSize;
92 static uint32 sPageTableHashMask;
93 static area_id sPageTableArea;
94 
95 
96 // 64 MB of iospace
97 #define IOSPACE_SIZE (64*1024*1024)
98 // We only have small (4 KB) pages. The only reason for choosing a greater
99 // chunk size is to limit the amount of memory wasted, since the generic
100 // page mapper allocates structures per physical/virtual chunk.
101 // TODO: Implement a page mapper more suitable for small pages!
102 #define IOSPACE_CHUNK_SIZE (16 * B_PAGE_SIZE)
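// With these values the generic page mapper has to track
// IOSPACE_SIZE / IOSPACE_CHUNK_SIZE = 64 MB / 64 KB = 1024 chunks.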
103 
104 static addr_t sIOSpaceBase;
105 
106 
107 // The VSID is a 24 bit number. The lower three bits are defined by the
108 // (effective) segment number, which leaves us with a 21 bit space of
109 // VSID bases (= 2 * 1024 * 1024).
110 #define MAX_VSID_BASES (PAGE_SIZE * 8)
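// Note that only the first MAX_VSID_BASES (= 32768 with 4 KB pages) of those
// bases are actually handed out, which keeps sVSIDBaseBitmap below at 4 KB.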
111 static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)];
112 static spinlock sVSIDBaseBitmapLock;
113 
114 #define VSID_BASE_SHIFT 3
115 #define VADDR_TO_VSID(map, vaddr) \
116 	((map)->arch_data->vsid_base + ((vaddr) >> 28))
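// Example: for a team with vsid_base 0x10, the effective address 0x30001000
// lies in segment 3, so VADDR_TO_VSID() yields VSID 0x13.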
117 
118 // vm_translation_map object stuff
119 typedef struct vm_translation_map_arch_info {
120 	int vsid_base;	// used VSIDs are vsid_base ... vsid_base + 7
121 } vm_translation_map_arch_info;
122 
123 
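// Switches in the given team's user address space by reloading segment
// registers 0-7 with its VSIDs. The kernel segment registers (8-15) are
// never touched here; as explained in the comment at the top of this file,
// they always map the kernel addresses.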
124 void
125 ppc_translation_map_change_asid(vm_translation_map *map)
126 {
127 // this code depends on the kernel being at 0x80000000, fix if we change that
128 #if KERNEL_BASE != 0x80000000
129 #error fix me
130 #endif
131 	int vsidBase = map->arch_data->vsid_base;
132 
133 	isync();	// synchronize context
134 	asm("mtsr	0,%0" : : "r"(vsidBase));
135 	asm("mtsr	1,%0" : : "r"(vsidBase + 1));
136 	asm("mtsr	2,%0" : : "r"(vsidBase + 2));
137 	asm("mtsr	3,%0" : : "r"(vsidBase + 3));
138 	asm("mtsr	4,%0" : : "r"(vsidBase + 4));
139 	asm("mtsr	5,%0" : : "r"(vsidBase + 5));
140 	asm("mtsr	6,%0" : : "r"(vsidBase + 6));
141 	asm("mtsr	7,%0" : : "r"(vsidBase + 7));
142 	isync();	// synchronize context
143 }
144 
145 
146 static status_t
147 lock_tmap(vm_translation_map *map)
148 {
149 	recursive_lock_lock(&map->lock);
150 	return B_OK;
151 }
152 
153 
154 static status_t
155 unlock_tmap(vm_translation_map *map)
156 {
157 	recursive_lock_unlock(&map->lock);
158 	return B_OK;
159 }
160 
161 
162 static void
163 destroy_tmap(vm_translation_map *map)
164 {
165 	if (map->map_count > 0) {
166 		panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n",
167 			map, map->map_count);
168 	}
169 
170 	// mark the vsid base not in use
171 	int baseBit = map->arch_data->vsid_base >> VSID_BASE_SHIFT;
172 	atomic_and((vint32 *)&sVSIDBaseBitmap[baseBit / 32],
173 			~(1 << (baseBit % 32)));
174 
175 	free(map->arch_data);
176 	recursive_lock_destroy(&map->lock);
177 }
178 
179 
180 static void
181 fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
182 	addr_t virtualAddress, addr_t physicalAddress, uint8 protection,
183 	bool secondaryHash)
184 {
185 	// lower 32 bit - set at once
186 	entry->physical_page_number = physicalAddress / B_PAGE_SIZE;
187 	entry->_reserved0 = 0;
188 	entry->referenced = false;
189 	entry->changed = false;
190 	entry->write_through = false;
191 	entry->caching_inhibited = false;
192 	entry->memory_coherent = false;
193 	entry->guarded = false;
194 	entry->_reserved1 = 0;
195 	entry->page_protection = protection & 0x3;
196 	eieio();
197 		// make sure that the lower 32 bits have been written
198 		// before the entry is made valid
199 
200 	// upper 32 bit
201 	entry->virtual_segment_id = virtualSegmentID;
202 	entry->secondary_hash = secondaryHash;
203 	entry->abbr_page_index = (virtualAddress >> 22) & 0x3f;
204 	entry->valid = true;
205 
206 	ppc_sync();
207 }
208 
209 
210 static size_t
211 map_max_pages_need(vm_translation_map *map, addr_t start, addr_t end)
212 {
213 	return 0;
214 }
215 
216 
217 static status_t
218 map_tmap(vm_translation_map *map, addr_t virtualAddress, addr_t physicalAddress, uint32 attributes)
219 {
220 	// lookup the vsid based off the va
221 	uint32 virtualSegmentID = VADDR_TO_VSID(map, virtualAddress);
222 	uint32 protection = 0;
223 
224 	// ToDo: check this
225 	// all kernel mappings are R/W to supervisor code
226 	if (attributes & (B_READ_AREA | B_WRITE_AREA))
227 		protection = (attributes & B_WRITE_AREA) ? PTE_READ_WRITE : PTE_READ_ONLY;
228 
229 	//dprintf("vm_translation_map.map_tmap: vsid %d, pa 0x%lx, va 0x%lx\n", vsid, pa, va);
230 
231 	// Search for a free page table slot using the primary hash value
232 
233 	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
234 	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
235 
236 	for (int i = 0; i < 8; i++) {
237 		page_table_entry *entry = &group->entry[i];
238 
239 		if (entry->valid)
240 			continue;
241 
242 		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
243 			protection, false);
244 		map->map_count++;
245 		return B_OK;
246 	}
247 
248 	// Didn't find one; try the secondary hash value
249 
250 	hash = page_table_entry::SecondaryHash(hash);
251 	group = &sPageTable[hash & sPageTableHashMask];
252 
253 	for (int i = 0; i < 8; i++) {
254 		page_table_entry *entry = &group->entry[i];
255 
256 		if (entry->valid)
257 			continue;
258 
259 		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
260 			protection, true);
261 		map->map_count++;
262 		return B_OK;
263 	}
264 
265 	panic("vm_translation_map.map_tmap: hash table full\n");
266 	return B_ERROR;
267 }
268 
269 
270 static page_table_entry *
271 lookup_page_table_entry(vm_translation_map *map, addr_t virtualAddress)
272 {
273 	// lookup the vsid based off the va
274 	uint32 virtualSegmentID = VADDR_TO_VSID(map, virtualAddress);
275 
276 //	dprintf("vm_translation_map.lookup_page_table_entry: vsid %d, va 0x%lx\n", vsid, va);
277 
278 
279 	// Search for the page table entry using the primary hash value
280 
281 	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
282 	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
283 
284 	for (int i = 0; i < 8; i++) {
285 		page_table_entry *entry = &group->entry[i];
286 
287 		if (entry->virtual_segment_id == virtualSegmentID
288 			&& entry->secondary_hash == false
289 			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
290 			return entry;
291 	}
292 
293 	// Didn't find it; try the secondary hash value
294 
295 	hash = page_table_entry::SecondaryHash(hash);
296 	group = &sPageTable[hash & sPageTableHashMask];
297 
298 	for (int i = 0; i < 8; i++) {
299 		page_table_entry *entry = &group->entry[i];
300 
301 		if (entry->virtual_segment_id == virtualSegmentID
302 			&& entry->secondary_hash == true
303 			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
304 			return entry;
305 	}
306 
307 	return NULL;
308 }
309 
310 
311 static bool
312 remove_page_table_entry(vm_translation_map *map, addr_t virtualAddress)
313 {
314 	page_table_entry *entry = lookup_page_table_entry(map, virtualAddress);
315 	if (entry) {
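		// Invalidate the entry, then flush the TLB entry for this address;
		// the eieio/tlbsync/sync sequence orders the invalidation with
		// respect to the other processors.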
316 		entry->valid = 0;
317 		ppc_sync();
318 		tlbie(virtualAddress);
319 		eieio();
320 		tlbsync();
321 		ppc_sync();
322 	}
323 
324 	return entry != NULL;
325 }
326 
327 
328 static status_t
329 unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
330 {
331 	page_table_entry *entry;
332 
333 	start = ROUNDOWN(start, B_PAGE_SIZE);
334 	end = ROUNDUP(end, B_PAGE_SIZE);
335 
336 //	dprintf("vm_translation_map.unmap_tmap: start 0x%lx, end 0x%lx\n", start, end);
337 
338 	while (start < end) {
339 		if (remove_page_table_entry(map, start))
340 			map->map_count--;
341 
342 		start += B_PAGE_SIZE;
343 	}
344 
345 	return B_OK;
346 }
347 
348 
349 static status_t
350 query_tmap(vm_translation_map *map, addr_t va, addr_t *_outPhysical, uint32 *_outFlags)
351 {
352 	page_table_entry *entry;
353 
354 	// default the flags to not present
355 	*_outFlags = 0;
356 	*_outPhysical = 0;
357 
358 	entry = lookup_page_table_entry(map, va);
359 	if (entry == NULL)
360 		return B_NO_ERROR;
361 
362 	// ToDo: check this!
363 	if (IS_KERNEL_ADDRESS(va))
364 		*_outFlags |= B_KERNEL_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_KERNEL_WRITE_AREA);
365 	else
366 		*_outFlags |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_WRITE_AREA);
367 
368 	*_outFlags |= entry->changed ? PAGE_MODIFIED : 0;
369 	*_outFlags |= entry->referenced ? PAGE_ACCESSED : 0;
370 	*_outFlags |= entry->valid ? PAGE_PRESENT : 0;
371 
372 	*_outPhysical = entry->physical_page_number * B_PAGE_SIZE;
373 
374 	return B_OK;
375 }
376 
377 
378 static status_t
379 map_iospace_chunk(addr_t va, addr_t pa, uint32 flags)
380 {
381 	pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
382 	va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
383 	if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
384 		panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);
385 
386 	// map the pages
387 	return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
388 }
389 
390 
391 static addr_t
392 get_mapped_size_tmap(vm_translation_map *map)
393 {
394 	return map->map_count;
395 }
396 
397 
398 static status_t
399 protect_tmap(vm_translation_map *map, addr_t base, addr_t top, uint32 attributes)
400 {
401 	// XXX finish
402 	return B_ERROR;
403 }
404 
405 
406 static status_t
407 clear_flags_tmap(vm_translation_map *map, addr_t virtualAddress, uint32 flags)
408 {
409 	page_table_entry *entry = lookup_page_table_entry(map, virtualAddress);
410 	if (entry == NULL)
411 		return B_NO_ERROR;
412 
413 	bool modified = false;
414 
415 	// clear the bits
416 	if (flags & PAGE_MODIFIED && entry->changed) {
417 		entry->changed = false;
418 		modified = true;
419 	}
420 	if (flags & PAGE_ACCESSED && entry->referenced) {
421 		entry->referenced = false;
422 		modified = true;
423 	}
424 
425 	// synchronize
426 	if (modified) {
427 		tlbie(virtualAddress);
428 		eieio();
429 		tlbsync();
430 		ppc_sync();
431 	}
432 
433 	return B_OK;
434 }
435 
436 
437 static void
438 flush_tmap(vm_translation_map *map)
439 {
440 // TODO: arch_cpu_global_TLB_invalidate() is extremely expensive and doesn't
441 // even cut it here. We are supposed to invalidate all TLB entries for this
442 // map on all CPUs. We should loop over the virtual pages and invoke tlbie
443 // instead (which marks the entry invalid on all CPUs).
444 	arch_cpu_global_TLB_invalidate();
445 }
446 
447 
448 static status_t
449 get_physical_page_tmap(addr_t physicalAddress, addr_t *_virtualAddress,
450 	void **handle)
451 {
452 	return generic_get_physical_page(physicalAddress, _virtualAddress, 0);
453 }
454 
455 
456 static status_t
457 put_physical_page_tmap(addr_t virtualAddress, void *handle)
458 {
459 	return generic_put_physical_page(virtualAddress);
460 }
461 
462 
463 static vm_translation_map_ops tmap_ops = {
464 	destroy_tmap,
465 	lock_tmap,
466 	unlock_tmap,
467 	map_max_pages_need,
468 	map_tmap,
469 	unmap_tmap,
470 	query_tmap,
471 	query_tmap,	// *_interrupt()
472 	get_mapped_size_tmap,
473 	protect_tmap,
474 	clear_flags_tmap,
475 	flush_tmap,
476 	get_physical_page_tmap,
477 	put_physical_page_tmap,
478 	get_physical_page_tmap,	// *_current_cpu()
479 	put_physical_page_tmap,	// *_current_cpu()
480 	get_physical_page_tmap,	// *_debug()
481 	put_physical_page_tmap,	// *_debug()
482 		// TODO: Replace the *_current_cpu() and *_debug() versions!
483 
484 	generic_vm_memset_physical,
485 	generic_vm_memcpy_from_physical,
486 	generic_vm_memcpy_to_physical,
487 	generic_vm_memcpy_physical_page
488 		// TODO: Verify that this is safe to use!
489 };
490 
491 
492 //  #pragma mark -
493 //  VM API
494 
495 
496 status_t
497 arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
498 {
499 	// initialize the new object
500 	map->ops = &tmap_ops;
501 	map->map_count = 0;
502 
503 	recursive_lock_init(&map->lock, "translation map");
504 
505 	map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
506 	if (map->arch_data == NULL) {
507 		if (!kernel)
508 			recursive_lock_destroy(&map->lock);
509 		return B_NO_MEMORY;
510 	}
511 
512 	cpu_status state = disable_interrupts();
513 	acquire_spinlock(&sVSIDBaseBitmapLock);
514 
515 	// allocate a VSID base for this one
516 	if (kernel) {
517 		// The boot loader has set up the segment registers for an identity
518 		// mapping. Two VSID bases are reserved for the kernel: 0 and 8. The
519 		// latter is used for mapping the kernel address space (0x80000000...),
520 		// the former for the lower addresses required by the Open Firmware
521 		// services.
522 		map->arch_data->vsid_base = 0;
523 		sVSIDBaseBitmap[0] |= 0x3;
524 	} else {
525 		int i = 0;
526 
527 		while (i < MAX_VSID_BASES) {
528 			if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
529 				i += 32;
530 				continue;
531 			}
532 			if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
533 				// we found it
534 				sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
535 				break;
536 			}
537 			i++;
538 		}
539 		if (i >= MAX_VSID_BASES)
540 			panic("vm_translation_map_create: out of VSID bases\n");
541 		map->arch_data->vsid_base = i << VSID_BASE_SHIFT;
542 	}
543 
544 	release_spinlock(&sVSIDBaseBitmapLock);
545 	restore_interrupts(state);
546 
547 	return B_OK;
548 }
549 
550 
551 status_t
552 arch_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
553 {
554 	return B_OK;
555 }
556 
557 
558 status_t
559 arch_vm_translation_map_init(kernel_args *args)
560 {
561 	sPageTable = (page_table_entry_group *)args->arch_args.page_table.start;
562 	sPageTableSize = args->arch_args.page_table.size;
563 	sPageTableHashMask = sPageTableSize / sizeof(page_table_entry_group) - 1;
564 
565 	// init physical page mapper
566 	status_t error = generic_vm_physical_page_mapper_init(args,
567 		map_iospace_chunk, &sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
568 	if (error != B_OK)
569 		return error;
570 
571 	return B_OK;
572 }
573 
574 
575 status_t
576 arch_vm_translation_map_init_post_area(kernel_args *args)
577 {
578 	// If the page table doesn't lie within the kernel address space, we
579 	// remap it.
580 	if (!IS_KERNEL_ADDRESS(sPageTable)) {
581 		addr_t newAddress = (addr_t)sPageTable;
582 		status_t error = ppc_remap_address_range(&newAddress, sPageTableSize,
583 			false);
584 		if (error != B_OK) {
585 			panic("arch_vm_translation_map_init_post_area(): Failed to remap "
586 				"the page table!");
587 			return error;
588 		}
589 
590 		// set the new page table address
591 		addr_t oldVirtualBase = (addr_t)(sPageTable);
592 		sPageTable = (page_table_entry_group*)newAddress;
593 
594 		// unmap the old pages
595 		ppc_unmap_address_range(oldVirtualBase, sPageTableSize);
596 
597 // TODO: We should probably map the page table via BAT. It is relatively large,
598 // and due to being a hash table the access patterns might look sporadic, which
599 // certainly isn't to the liking of the TLB.
600 	}
601 
602 	// create an area to cover the page table
603 	sPageTableArea = create_area("page_table", (void **)&sPageTable, B_EXACT_ADDRESS,
604 		sPageTableSize, B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
605 
606 	// init physical page mapper
607 	status_t error = generic_vm_physical_page_mapper_init_post_area(args);
608 	if (error != B_OK)
609 		return error;
610 
611 	return B_OK;
612 }
613 
614 
615 status_t
616 arch_vm_translation_map_init_post_sem(kernel_args *args)
617 {
618 	// init physical page mapper
619 	return generic_vm_physical_page_mapper_init_post_sem(args);
620 }
621 
622 
623 /**	Directly maps a page without having knowledge of any kernel structures.
624  *	Used only during VM setup.
625  *	It currently ignores the "attributes" parameter and sets all pages
626  *	read/write.
627  */
628 
629 status_t
630 arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress, addr_t physicalAddress,
631 	uint8 attributes, addr_t (*get_free_page)(kernel_args *))
632 {
633 	uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;
634 
635 	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, (uint32)virtualAddress);
636 	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
637 
638 	for (int32 i = 0; i < 8; i++) {
639 		// 8 entries in a group
640 		if (group->entry[i].valid)
641 			continue;
642 
643 		fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, PTE_READ_WRITE, false);
644 		return B_OK;
645 	}
646 
647 	hash = page_table_entry::SecondaryHash(hash);
648 	group = &sPageTable[hash & sPageTableHashMask];
649 
650 	for (int32 i = 0; i < 8; i++) {
651 		if (group->entry[i].valid)
652 			continue;
653 
654 		fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, PTE_READ_WRITE, true);
655 		return B_OK;
656 	}
657 
658 	return B_ERROR;
659 }
660 
661 
662 // XXX currently assumes this translation map is active
663 
664 status_t
665 arch_vm_translation_map_early_query(addr_t va, addr_t *out_physical)
666 {
667 	//PANIC_UNIMPLEMENTED();
668 	panic("arch_vm_translation_map_early_query(): not yet implemented\n");
669 	return B_OK;
670 }
671 
672 
673 // #pragma mark -
674 
675 
676 status_t
677 ppc_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
678 	size_t size)
679 {
680 	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
681 	virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE);
682 	physicalAddress = ROUNDOWN(physicalAddress, B_PAGE_SIZE);
683 
684 	vm_address_space *addressSpace = vm_kernel_address_space();
685 
686 	// map the pages
687 	for (; virtualAddress < virtualEnd;
688 		 virtualAddress += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
689 		status_t error = map_tmap(&addressSpace->translation_map,
690 			virtualAddress, physicalAddress,
691 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
692 		if (error != B_OK)
693 			return error;
694 	}
695 
696 	return B_OK;
697 }
698 
699 
700 void
701 ppc_unmap_address_range(addr_t virtualAddress, size_t size)
702 {
703 	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
704 	virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE);
705 
706 	vm_address_space *addressSpace = vm_kernel_address_space();
707 
708 	for (; virtualAddress < virtualEnd; virtualAddress += B_PAGE_SIZE)
709 		remove_page_table_entry(&addressSpace->translation_map, virtualAddress);
710 }
711 
712 
713 status_t
714 ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
715 {
716 	addr_t virtualAddress = ROUNDOWN(*_virtualAddress, B_PAGE_SIZE);
717 	size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);
718 
719 	vm_address_space *addressSpace = vm_kernel_address_space();
720 
721 	// reserve space in the address space
722 	void *newAddress = NULL;
723 	status_t error = vm_reserve_address_range(addressSpace->id, &newAddress,
724 		B_ANY_KERNEL_ADDRESS, size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
725 	if (error != B_OK)
726 		return error;
727 
728 	// get the area's first physical page
729 	page_table_entry *entry = lookup_page_table_entry(
730 		&addressSpace->translation_map, virtualAddress);
731 	if (!entry)
732 		return B_ERROR;
733 	addr_t physicalBase = entry->physical_page_number << 12;
734 
735 	// map the pages
736 	error = ppc_map_address_range((addr_t)newAddress, physicalBase, size);
737 	if (error != B_OK)
738 		return error;
739 
740 	*_virtualAddress = (addr_t)newAddress;
741 
742 	// unmap the old pages
743 	if (unmap)
744 		ppc_unmap_address_range(virtualAddress, size);
745 
746 	return B_OK;
747 }
748 
749