xref: /haiku/src/system/kernel/arch/ppc/arch_vm_translation_map.cpp (revision 1acbe440b8dd798953bec31d18ee589aa3f71b73)
1 /*
2  * Copyright 2003-2006, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*	(bonefish) Some explanatory words on how address translation is implemented
10 	for the 32 bit PPC architecture.
11 
12 	I use the address type nomenclature as used in the PPC architecture
13 	specs, i.e.
14 	- effective address: An address as used by program instructions, i.e.
15 	  that's what elsewhere (e.g. in the VM implementation) is called
16 	  virtual address.
17 	- virtual address: An intermediate address computed from the effective
18 	  address via the segment registers.
19 	- physical address: An address referring to physical storage.
20 
21 	The hardware translates an effective address to a physical address using
22 	either of two mechanisms: 1) Block Address Translation (BAT) or
23 	2) segment + page translation. The first mechanism does this directly
24 	using two sets (for data/instructions) of special purpose registers.
25 	The latter mechanism is of more relevance here, though:
26 
27 	effective address (32 bit):	     [ 0 ESID  3 | 4  PIX 19 | 20 Byte 31 ]
28 								           |           |            |
29 							     (segment registers)   |            |
30 									       |           |            |
31 	virtual address (52 bit):   [ 0      VSID 23 | 24 PIX 39 | 40 Byte 51 ]
32 	                            [ 0             VPN       39 | 40 Byte 51 ]
33 								                 |                  |
34 										   (page table)             |
35 											     |                  |
36 	physical address (32 bit):       [ 0        PPN       19 | 20 Byte 31 ]
37 
38 
39 	ESID: Effective Segment ID
40 	VSID: Virtual Segment ID
41 	PIX:  Page Index
42 	VPN:  Virtual Page Number
43 	PPN:  Physical Page Number
44 
45 
46 	Unlike on x86, we can't switch the context to another team simply by
47 	pointing a register at another page directory, since we only have one
48 	page table containing both kernel and user address mappings. Instead we
49 	map the effective address space of kernel and *all* teams
50 	non-intersectingly into the virtual address space (which fortunately is
51 	20 bits wider), and use the segment registers to select the section of
52 	the virtual address space for the current team. Half of the 16 segment
53 	registers (8 - 15) map the kernel addresses, so they remain unchanged.
54 
55 	The range of the virtual address space a team's effective address space
56 	is mapped to is defined by its vm_translation_map_arch_info::vsid_base,
57 	which is the first of the 8 successive VSID values used for the team.
58 
59 	Which vsid_base values are already taken is defined by the set bits in
60 	the bitmap sVSIDBaseBitmap.
61 
62 
63 	TODO:
64 	* If we want to continue to use the OF services, we would need to add
65 	  its address mappings to the kernel space. Unfortunately some stuff
66 	  (especially RAM) is mapped in an address range outside the kernel
67 	  address space. We probably need to map those into each team's address
68 	  space as kernel read/write areas.
69 	* The current locking scheme is insufficient. The page table is a resource
70 	  shared by all teams. We need to synchronize access to it. Probably via a
71 	  spinlock.
72  */
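
/*	A worked example (illustrative only, values chosen for this comment):
	assume a user team whose vm_translation_map_arch_info::vsid_base is 16
	and the effective address 0x00200000. Then

	- ESID        = 0x00200000 >> 28            = 0
	- VSID        = vsid_base + ESID            = 16
	- page index  = (0x00200000 >> 12) & 0xffff = 0x200
	- byte offset = 0x00200000 & 0xfff          = 0

	so the 52 bit virtual address is VSID:PIX:Byte = (16 << 28) | 0x00200000
	= 0x100200000. The page table hash is computed from VSID and page index
	and selects the entry group that finally yields the physical page number.
*/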
73 
74 #include <KernelExport.h>
75 #include <kernel.h>
76 #include <vm.h>
77 #include <vm_address_space.h>
78 #include <vm_priv.h>
79 #include <int.h>
80 #include <boot/kernel_args.h>
81 #include <arch/vm_translation_map.h>
82 #include <arch/cpu.h>
83 #include <arch_mmu.h>
84 #include <stdlib.h>
85 
86 #include "generic_vm_physical_page_mapper.h"
87 
88 static struct page_table_entry_group *sPageTable;
89 static size_t sPageTableSize;
90 static uint32 sPageTableHashMask;
91 static area_id sPageTableArea;
92 
93 
94 // 64 MB of iospace
95 #define IOSPACE_SIZE (64*1024*1024)
96 // We only have small (4 KB) pages. The only reason for choosing a greater
97 // chunk size is to limit the waste of memory, since the generic page mapper
98 // allocates structures per physical/virtual chunk.
99 // TODO: Implement a page mapper more suitable for small pages!
100 #define IOSPACE_CHUNK_SIZE (16 * B_PAGE_SIZE)
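// With these values the generic mapper manages IOSPACE_SIZE / IOSPACE_CHUNK_SIZE
// = 64 MB / 64 KB = 1024 chunks of 16 pages each.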
101 
102 static addr_t sIOSpaceBase;
103 
104 
105 // The VSID is a 24 bit number. The lower three bits are defined by the
106 // (effective) segment number, which leaves us with a 21 bit space of
107 // VSID bases (= 2 * 1024 * 1024). Of these we only manage the first
// PAGE_SIZE * 8 (32768), so that the allocation bitmap fits in a single page.
108 #define MAX_VSID_BASES (PAGE_SIZE * 8)
109 static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)];
110 static spinlock sVSIDBaseBitmapLock;
111 
112 #define VSID_BASE_SHIFT 3
113 #define VADDR_TO_VSID(map, vaddr) \
114 	((map)->arch_data->vsid_base + ((vaddr) >> 28))
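// For example, for the kernel map (vsid_base 0) and the kernel address
// 0x80001234 this yields 0 + (0x80001234 >> 28) == 8.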
115 
116 // vm_translation object stuff
117 typedef struct vm_translation_map_arch_info {
118 	int vsid_base;	// used VSIDs are vsid_base ... vsid_base + 7
119 } vm_translation_map_arch_info;
120 
121 
122 void
123 ppc_translation_map_change_asid(vm_translation_map *map)
124 {
125 // this code depends on the kernel being at 0x80000000, fix if we change that
126 #if KERNEL_BASE != 0x80000000
127 #error fix me
128 #endif
129 	int vsidBase = map->arch_data->vsid_base;
130 
131 	isync();	// synchronize context
132 	asm("mtsr	0,%0" : : "g"(vsidBase));
133 	asm("mtsr	1,%0" : : "g"(vsidBase + 1));
134 	asm("mtsr	2,%0" : : "g"(vsidBase + 2));
135 	asm("mtsr	3,%0" : : "g"(vsidBase + 3));
136 	asm("mtsr	4,%0" : : "g"(vsidBase + 4));
137 	asm("mtsr	5,%0" : : "g"(vsidBase + 5));
138 	asm("mtsr	6,%0" : : "g"(vsidBase + 6));
139 	asm("mtsr	7,%0" : : "g"(vsidBase + 7));
140 	isync();	// synchronize context
141 }
142 
143 
144 static status_t
145 lock_tmap(vm_translation_map *map)
146 {
147 	recursive_lock_lock(&map->lock);
148 	return B_OK;
149 }
150 
151 
152 static status_t
153 unlock_tmap(vm_translation_map *map)
154 {
155 	recursive_lock_unlock(&map->lock);
156 	return B_OK;
157 }
158 
159 
160 static void
161 destroy_tmap(vm_translation_map *map)
162 {
163 	if (map->map_count > 0) {
164 		panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n",
165 			map, map->map_count);
166 	}
167 
168 	// mark the vsid base not in use
169 	int baseBit = map->arch_data->vsid_base >> VSID_BASE_SHIFT;
170 	atomic_and((vint32 *)&sVSIDBaseBitmap[baseBit / 32],
171 			~(1 << (baseBit % 32)));
172 
173 	free(map->arch_data);
174 	recursive_lock_destroy(&map->lock);
175 }
176 
177 
178 static void
179 fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
180 	addr_t virtualAddress, addr_t physicalAddress, uint8 protection,
181 	bool secondaryHash)
182 {
183 	// lower 32 bit - set at once
184 	entry->physical_page_number = physicalAddress / B_PAGE_SIZE;
185 	entry->_reserved0 = 0;
186 	entry->referenced = false;
187 	entry->changed = false;
188 	entry->write_through = false;
189 	entry->caching_inhibited = false;
190 	entry->memory_coherent = false;
191 	entry->guarded = false;
192 	entry->_reserved1 = 0;
193 	entry->page_protection = protection & 0x3;
194 	eieio();
195 		// we need to make sure that the lower 32 bits have already
196 		// been written by the time the entry becomes valid
197 
198 	// upper 32 bit
199 	entry->virtual_segment_id = virtualSegmentID;
200 	entry->secondary_hash = secondaryHash;
201 	entry->abbr_page_index = (virtualAddress >> 22) & 0x3f;
202 	entry->valid = true;
203 
204 	ppc_sync();
205 }
206 
207 
208 static status_t
209 map_tmap(vm_translation_map *map, addr_t virtualAddress, addr_t physicalAddress, uint32 attributes)
210 {
211 	// lookup the vsid based off the va
212 	uint32 virtualSegmentID = VADDR_TO_VSID(map, virtualAddress);
213 	uint32 protection = 0;
214 
215 	// ToDo: check this
216 	// all kernel mappings are R/W to supervisor code
217 	if (attributes & (B_READ_AREA | B_WRITE_AREA))
218 		protection = (attributes & B_WRITE_AREA) ? PTE_READ_WRITE : PTE_READ_ONLY;
219 
220 	//dprintf("vm_translation_map.map_tmap: vsid %d, pa 0x%lx, va 0x%lx\n", vsid, pa, va);
221 
222 	// Search for a free page table slot using the primary hash value
223 
224 	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
225 	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
226 
227 	for (int i = 0; i < 8; i++) {
228 		page_table_entry *entry = &group->entry[i];
229 
230 		if (entry->valid)
231 			continue;
232 
233 		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
234 			protection, false);
235 		map->map_count++;
236 		return B_OK;
237 	}
238 
239 	// Didn't find one, try the secondary hash value
240 
241 	hash = page_table_entry::SecondaryHash(hash);
242 	group = &sPageTable[hash & sPageTableHashMask];
243 
244 	for (int i = 0; i < 8; i++) {
245 		page_table_entry *entry = &group->entry[i];
246 
247 		if (entry->valid)
248 			continue;
249 
250 		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
251 			protection, true);
252 		map->map_count++;
253 		return B_OK;
254 	}
255 
256 	panic("vm_translation_map.map_tmap: hash table full\n");
257 	return B_ERROR;
258 }
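
// For reference, assuming page_table_entry::PrimaryHash()/SecondaryHash()
// follow the usual PPC OEA convention (see arch_mmu.h for the authoritative
// definitions): the primary hash is
//     (virtualSegmentID & 0x7ffff) ^ ((virtualAddress >> 12) & 0xffff)
// and the secondary hash is the one's complement of the primary hash. Masked
// with sPageTableHashMask, the hash selects one of the 8-entry groups that
// are searched above.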
259 
260 
261 static page_table_entry *
262 lookup_page_table_entry(vm_translation_map *map, addr_t virtualAddress)
263 {
264 	// lookup the vsid based off the va
265 	uint32 virtualSegmentID = VADDR_TO_VSID(map, virtualAddress);
266 
267 //	dprintf("vm_translation_map.lookup_page_table_entry: vsid %d, va 0x%lx\n", vsid, va);
268 
269 
270 	// Search for the page table entry using the primary hash value
271 
272 	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
273 	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
274 
275 	for (int i = 0; i < 8; i++) {
276 		page_table_entry *entry = &group->entry[i];
277 
278 		if (entry->virtual_segment_id == virtualSegmentID
279 			&& entry->secondary_hash == false
280 			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
281 			return entry;
282 	}
283 
284 	// Didn't find it, try the secondary hash value
285 
286 	hash = page_table_entry::SecondaryHash(hash);
287 	group = &sPageTable[hash & sPageTableHashMask];
288 
289 	for (int i = 0; i < 8; i++) {
290 		page_table_entry *entry = &group->entry[i];
291 
292 		if (entry->virtual_segment_id == virtualSegmentID
293 			&& entry->secondary_hash == true
294 			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
295 			return entry;
296 	}
297 
298 	return NULL;
299 }
300 
301 
302 static bool
303 remove_page_table_entry(vm_translation_map *map, addr_t virtualAddress)
304 {
305 	page_table_entry *entry = lookup_page_table_entry(map, virtualAddress);
306 	if (entry) {
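		// Clear the valid bit, sync so the page table update is performed,
		// invalidate any TLB entry for this effective address via tlbie, then
		// eieio/tlbsync/sync to wait until the invalidation has taken effect
		// on all processors.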
307 		entry->valid = 0;
308 		ppc_sync();
309 		tlbie(virtualAddress);
310 		eieio();
311 		tlbsync();
312 		ppc_sync();
313 	}
314 
315 	return entry != NULL;
316 }
317 
318 
319 static status_t
320 unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
321 {
322 	page_table_entry *entry;
323 
324 	start = ROUNDOWN(start, B_PAGE_SIZE);
325 	end = ROUNDUP(end, B_PAGE_SIZE);
326 
327 //	dprintf("vm_translation_map.unmap_tmap: start 0x%lx, end 0x%lx\n", start, end);
328 
329 	while (start < end) {
330 		if (remove_page_table_entry(map, start))
331 			map->map_count--;
332 
333 		start += B_PAGE_SIZE;
334 	}
335 
336 	return B_OK;
337 }
338 
339 
340 static status_t
341 query_tmap(vm_translation_map *map, addr_t va, addr_t *_outPhysical, uint32 *_outFlags)
342 {
343 	page_table_entry *entry;
344 
345 	// default the flags to not present
346 	*_outFlags = 0;
347 	*_outPhysical = 0;
348 
349 	entry = lookup_page_table_entry(map, va);
350 	if (entry == NULL)
351 		return B_NO_ERROR;
352 
353 	// ToDo: check this!
354 	if (IS_KERNEL_ADDRESS(va))
355 		*_outFlags |= B_KERNEL_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_KERNEL_WRITE_AREA);
356 	else
357 		*_outFlags |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_WRITE_AREA);
358 
359 	*_outFlags |= entry->changed ? PAGE_MODIFIED : 0;
360 	*_outFlags |= entry->referenced ? PAGE_ACCESSED : 0;
361 	*_outFlags |= entry->valid ? PAGE_PRESENT : 0;
362 
363 	*_outPhysical = entry->physical_page_number * B_PAGE_SIZE;
364 
365 	return B_OK;
366 }
367 
368 
369 static status_t
370 map_iospace_chunk(addr_t va, addr_t pa)
371 {
372 	pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
373 	va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
374 	if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
375 		panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);
376 
377 	// map the pages
378 	return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
379 }
380 
381 
382 static addr_t
383 get_mapped_size_tmap(vm_translation_map *map)
384 {
385 	return map->map_count;
386 }
387 
388 
389 static status_t
390 protect_tmap(vm_translation_map *map, addr_t base, addr_t top, uint32 attributes)
391 {
392 	// XXX finish
393 	return B_ERROR;
394 }
395 
396 
397 static status_t
398 clear_flags_tmap(vm_translation_map *map, addr_t virtualAddress, uint32 flags)
399 {
400 	page_table_entry *entry = lookup_page_table_entry(map, virtualAddress);
401 	if (entry == NULL)
402 		return B_NO_ERROR;
403 
404 	bool modified = false;
405 
406 	// clear the bits
407 	if (flags & PAGE_MODIFIED && entry->changed) {
408 		entry->changed = false;
409 		modified = true;
410 	}
411 	if (flags & PAGE_ACCESSED && entry->referenced) {
412 		entry->referenced = false;
413 		modified = true;
414 	}
415 
416 	// synchronize
417 	if (modified) {
418 		tlbie(virtualAddress);
419 		eieio();
420 		tlbsync();
421 		ppc_sync();
422 	}
423 
424 	return B_OK;
425 }
426 
427 
428 static void
429 flush_tmap(vm_translation_map *map)
430 {
431 // TODO: arch_cpu_global_TLB_invalidate() is extremely expensive and doesn't
432 // even cut it here. We are supposed to invalidate all TLB entries for this
433 // map on all CPUs. We should loop over the virtual pages and invoke tlbie
434 // instead (which marks the entry invalid on all CPUs).
435 	arch_cpu_global_TLB_invalidate();
436 }
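
// A minimal sketch of the per-page invalidation the TODO above suggests,
// assuming the caller knows which virtual range needs to be flushed (the
// helper below is hypothetical and not part of the current interface):
#if 0
static void
flush_range(addr_t start, addr_t end)
{
	start = ROUNDOWN(start, B_PAGE_SIZE);
	end = ROUNDUP(end, B_PAGE_SIZE);

	// tlbie invalidates the entry for one effective address on all CPUs,
	// so this avoids throwing away every TLB entry the way
	// arch_cpu_global_TLB_invalidate() does
	for (addr_t virtualAddress = start; virtualAddress < end;
			virtualAddress += B_PAGE_SIZE)
		tlbie(virtualAddress);

	eieio();
	tlbsync();
	ppc_sync();
}
#endif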
437 
438 
439 static status_t
440 get_physical_page_tmap(addr_t pa, addr_t *va, uint32 flags)
441 {
442 	return generic_get_physical_page(pa, va, flags);
443 }
444 
445 
446 static status_t
447 put_physical_page_tmap(addr_t va)
448 {
449 	return generic_put_physical_page(va);
450 }
451 
452 
453 static vm_translation_map_ops tmap_ops = {
454 	destroy_tmap,
455 	lock_tmap,
456 	unlock_tmap,
457 	map_tmap,
458 	unmap_tmap,
459 	query_tmap,
460 	query_tmap,
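	// query_tmap is intentionally listed twice: the same implementation
	// presumably serves both the query and the query_interrupt hooks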
461 	get_mapped_size_tmap,
462 	protect_tmap,
463 	clear_flags_tmap,
464 	flush_tmap,
465 	get_physical_page_tmap,
466 	put_physical_page_tmap
467 };
468 
469 
470 //  #pragma mark -
471 //  VM API
472 
473 
474 status_t
475 arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
476 {
477 	// initialize the new object
478 	map->ops = &tmap_ops;
479 	map->map_count = 0;
480 
481 	if (!kernel) {
482 		// During the boot process, there are no semaphores available at this
483 		// point, so we only try to create the translation map lock if we're
484 		// initializing a user translation map.
485 		// vm_translation_map_init_kernel_map_post_sem() is used to complete
486 		// the kernel translation map.
487 		if (recursive_lock_init(&map->lock, "translation map") < B_OK)
488 			return map->lock.sem;
489 	}
490 
491 	map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
492 	if (map->arch_data == NULL) {
493 		if (!kernel)
494 			recursive_lock_destroy(&map->lock);
495 		return B_NO_MEMORY;
496 	}
497 
498 	cpu_status state = disable_interrupts();
499 	acquire_spinlock(&sVSIDBaseBitmapLock);
500 
501 	// allocate a VSID base for this one
502 	if (kernel) {
503 		// The boot loader has set up the segment registers for identical
504 		// mapping. Two VSID bases are reserved for the kernel: 0 and 8. The
505 		// latter is used for mapping the kernel address space (0x80000000...),
506 		// the former for the lower addresses required by the Open Firmware
507 		// services.
508 		map->arch_data->vsid_base = 0;
509 		sVSIDBaseBitmap[0] |= 0x3;
510 	} else {
511 		int i = 0;
512 
513 		while (i < MAX_VSID_BASES) {
514 			if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
515 				i += 32;
516 				continue;
517 			}
518 			if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
519 				// we found it
520 				sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
521 				break;
522 			}
523 			i++;
524 		}
525 		if (i >= MAX_VSID_BASES)
526 			panic("vm_translation_map_create: out of VSID bases\n");
527 		map->arch_data->vsid_base = i << VSID_BASE_SHIFT;
528 	}
529 
530 	release_spinlock(&sVSIDBaseBitmapLock);
531 	restore_interrupts(state);
532 
533 	return B_OK;
534 }
535 
536 
537 status_t
538 arch_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
539 {
540 	if (recursive_lock_init(&map->lock, "translation map") < B_OK)
541 		return map->lock.sem;
542 
543 	return B_OK;
544 }
545 
546 
547 status_t
548 arch_vm_translation_map_init(kernel_args *args)
549 {
550 	sPageTable = (page_table_entry_group *)args->arch_args.page_table.start;
551 	sPageTableSize = args->arch_args.page_table.size;
552 	sPageTableHashMask = sPageTableSize / sizeof(page_table_entry_group) - 1;
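	// e.g. with a 64 KB page table (1024 groups of eight 8 byte entries each)
	// the hash mask comes out as 1023 (0x3ff)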
553 
554 	// init physical page mapper
555 	status_t error = generic_vm_physical_page_mapper_init(args,
556 		map_iospace_chunk, &sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
557 	if (error != B_OK)
558 		return error;
559 
560 	return B_OK;
561 }
562 
563 
564 status_t
565 arch_vm_translation_map_init_post_area(kernel_args *args)
566 {
567 	// If the page table doesn't lie within the kernel address space, we
568 	// remap it.
569 	if (!IS_KERNEL_ADDRESS(sPageTable)) {
570 		addr_t newAddress = (addr_t)sPageTable;
571 		status_t error = ppc_remap_address_range(&newAddress, sPageTableSize,
572 			false);
573 		if (error != B_OK) {
574 			panic("arch_vm_translation_map_init_post_area(): Failed to remap "
575 				"the page table!");
576 			return error;
577 		}
578 
579 		// set the new page table address
580 		addr_t oldVirtualBase = (addr_t)(sPageTable);
581 		sPageTable = (page_table_entry_group*)newAddress;
582 
583 		// unmap the old pages
584 		ppc_unmap_address_range(oldVirtualBase, sPageTableSize);
585 
586 // TODO: We should probably map the page table via BAT. It is relatively large,
587 // and due to being a hash table the access patterns might look sporadic, which
588 // certainly isn't to the liking of the TLB.
589 	}
590 
591 	// create an area to cover the page table
592 	sPageTableArea = create_area("page_table", (void **)&sPageTable, B_EXACT_ADDRESS,
593 		sPageTableSize, B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
594 
595 	// init physical page mapper
596 	status_t error = generic_vm_physical_page_mapper_init_post_area(args);
597 	if (error != B_OK)
598 		return error;
599 
600 	return B_OK;
601 }
602 
603 
604 status_t
605 arch_vm_translation_map_init_post_sem(kernel_args *args)
606 {
607 	// init physical page mapper
608 	return generic_vm_physical_page_mapper_init_post_sem(args);
609 }
610 
611 
612 /**	Directly maps a page without having knowledge of any kernel structures.
613  *	Used only during VM setup.
614  *	It currently ignores the "attributes" parameter and sets all pages
615  *	read/write.
616  */
617 
618 status_t
619 arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress, addr_t physicalAddress,
620 	uint8 attributes, addr_t (*get_free_page)(kernel_args *))
621 {
622 	uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;
623 
624 	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, (uint32)virtualAddress);
625 	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
626 
627 	for (int32 i = 0; i < 8; i++) {
628 		// 8 entries in a group
629 		if (group->entry[i].valid)
630 			continue;
631 
632 		fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, PTE_READ_WRITE, false);
633 		return B_OK;
634 	}
635 
636 	hash = page_table_entry::SecondaryHash(hash);
637 	group = &sPageTable[hash & sPageTableHashMask];
638 
639 	for (int32 i = 0; i < 8; i++) {
640 		if (group->entry[i].valid)
641 			continue;
642 
643 		fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, PTE_READ_WRITE, true);
644 		return B_OK;
645 	}
646 
647 	return B_ERROR;
648 }
649 
650 
651 // XXX currently assumes this translation map is active
652 
653 status_t
654 arch_vm_translation_map_early_query(addr_t va, addr_t *out_physical)
655 {
656 	//PANIC_UNIMPLEMENTED();
657 	panic("arch_vm_translation_map_early_query(): not yet implemented\n");
658 	return B_OK;
659 }
660 
661 
662 // #pragma mark -
663 
664 
665 status_t
666 ppc_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
667 	size_t size)
668 {
669 	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
670 	virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE);
671 	physicalAddress = ROUNDOWN(physicalAddress, B_PAGE_SIZE);
672 
673 	vm_address_space *addressSpace = vm_kernel_address_space();
674 
675 	// map the pages
676 	for (; virtualAddress < virtualEnd;
677 		 virtualAddress += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
678 		status_t error = map_tmap(&addressSpace->translation_map,
679 			virtualAddress, physicalAddress,
680 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
681 		if (error != B_OK)
682 			return error;
683 	}
684 
685 	return B_OK;
686 }
687 
688 
689 void
690 ppc_unmap_address_range(addr_t virtualAddress, size_t size)
691 {
692 	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
693 	virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE);
694 
695 	vm_address_space *addressSpace = vm_kernel_address_space();
696 
697 	for (; virtualAddress < virtualEnd; virtualAddress += B_PAGE_SIZE)
698 		remove_page_table_entry(&addressSpace->translation_map, virtualAddress);
699 }
700 
701 
702 status_t
703 ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
704 {
705 	addr_t virtualAddress = ROUNDOWN(*_virtualAddress, B_PAGE_SIZE);
706 	size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);
707 
708 	vm_address_space *addressSpace = vm_kernel_address_space();
709 
710 	// reserve space in the address space
711 	void *newAddress = NULL;
712 	status_t error = vm_reserve_address_range(addressSpace->id, &newAddress,
713 		B_ANY_KERNEL_ADDRESS, size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
714 	if (error != B_OK)
715 		return error;
716 
717 	// get the area's first physical page
718 	page_table_entry *entry = lookup_page_table_entry(
719 		&addressSpace->translation_map, virtualAddress);
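	// TODO: if the lookup fails, the address range reserved above is never
	// un-reserved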
720 	if (!entry)
721 		return B_ERROR;
722 	addr_t physicalBase = entry->physical_page_number << 12;
723 
724 	// map the pages
725 	error = ppc_map_address_range((addr_t)newAddress, physicalBase, size);
726 	if (error != B_OK)
727 		return error;
728 
729 	*_virtualAddress = (addr_t)newAddress;
730 
731 	// unmap the old pages
732 	if (unmap)
733 		ppc_unmap_address_range(virtualAddress, size);
734 
735 	return B_OK;
736 }
737 
738