xref: /haiku/src/system/boot/platform/openfirmware/arch/ppc/mmu.cpp (revision 1345706a9ff6ad0dc041339a02d4259998b0765d)
/*
 * Copyright 2003-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include <platform_arch.h>
#include <boot/addr_range.h>
#include <boot/kernel_args.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <boot/stdio.h>
#include <platform/openfirmware/openfirmware.h>
#include <arch_cpu.h>
#include <arch_mmu.h>
#include <kernel.h>

#include <OS.h>


// set protection to WIMGNPP: -----PP
// PP:	00 - no access
//		01 - read only
//		10 - read/write
//		11 - read only
#define PAGE_READ_ONLY	0x01
#define PAGE_READ_WRITE	0x02
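// The remaining mode bits hold the WIMG storage attributes (W = write-
// through, I = caching-inhibited, M = memory-coherent, G = guarded) in
// bits 6-3 - see fill_page_table_entry() and translate_callback() for
// how they are packed and unpacked.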

// NULL is actually a possible physical address...
//#define PHYSINVAL ((void *)-1)
#define PHYSINVAL NULL

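// the 16 segment descriptors that will be loaded into the segment
// registers (one per 256 MB of the 4 GB address space), the hashed
// page table, and the mask that turns a hash value into a valid
// group index into that table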
segment_descriptor sSegments[16];
page_table_entry_group *sPageTable;
uint32 sPageTableHashMask;


// begin and end of the boot loader
extern "C" uint8 __text_begin;
extern "C" uint8 _end;


#if 0
static status_t
insert_virtual_range_to_keep(void *start, uint32 size)
{
	return insert_memory_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
		gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, start, size);
}
#endif


static status_t
remove_virtual_range_to_keep(void *start, uint32 size)
{
	return remove_address_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
		&gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, (addr_t)start, size);
}


static status_t
find_physical_memory_ranges(size_t &total)
{
	int memory, package;
	printf("checking for memory...\n");
	if (of_getprop(gChosen, "memory", &memory, sizeof(int)) == OF_FAILED)
		return B_ERROR;
	package = of_instance_to_package(memory);

	total = 0;

	struct of_region regions[64];
	int count;
	count = of_getprop(package, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		count = of_getprop(memory, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		return B_ERROR;
	count /= sizeof(of_region);

	for (int32 i = 0; i < count; i++) {
		if (regions[i].size <= 0) {
			printf("%ld: empty region\n", i);
			continue;
		}
		printf("%ld: base = %p, size = %lu\n", i, regions[i].base,
			regions[i].size);

		total += regions[i].size;

		if (insert_physical_memory_range((addr_t)regions[i].base,
				regions[i].size) != B_OK) {
			printf("cannot map physical memory range (num ranges = %lu)!\n",
				gKernelArgs.num_physical_memory_ranges);
			return B_ERROR;
		}
	}

	return B_OK;
}


static bool
is_virtual_allocated(void *address, size_t size)
{
	addr_t foundBase;
	return !get_free_address_range(gKernelArgs.virtual_allocated_range,
			gKernelArgs.num_virtual_allocated_ranges, (addr_t)address, size,
			&foundBase)
		|| foundBase != (addr_t)address;
}


static bool
is_physical_allocated(void *address, size_t size)
{
	phys_addr_t foundBase;
	return !get_free_physical_address_range(
			gKernelArgs.physical_allocated_range,
			gKernelArgs.num_physical_allocated_ranges, (addr_t)address, size,
			&foundBase)
		|| foundBase != (addr_t)address;
}


static bool
is_physical_memory(void *address, size_t size)
{
	return is_physical_address_range_covered(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges, (addr_t)address, size);
}


static bool
is_physical_memory(void *address)
{
	return is_physical_memory(address, 1);
}


static void
fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
	void *virtualAddress, void *physicalAddress, uint8 mode, bool secondaryHash)
{
	// lower 32 bits - set at once
	((uint32 *)entry)[1]
		= (((uint32)physicalAddress / B_PAGE_SIZE) << 12) | mode;
	/*entry->physical_page_number = (uint32)physicalAddress / B_PAGE_SIZE;
	entry->_reserved0 = 0;
	entry->referenced = false;
	entry->changed = false;
	entry->write_through = (mode >> 6) & 1;
	entry->caching_inhibited = (mode >> 5) & 1;
	entry->memory_coherent = (mode >> 4) & 1;
	entry->guarded = (mode >> 3) & 1;
	entry->_reserved1 = 0;
	entry->page_protection = mode & 0x3;*/
	eieio();
		// we need to make sure that the lower 32 bits have already
		// been written when the entry becomes valid

	// upper 32 bits
	entry->virtual_segment_id = virtualSegmentID;
	entry->secondary_hash = secondaryHash;
	entry->abbr_page_index = ((uint32)virtualAddress >> 22) & 0x3f;
	entry->valid = true;
}


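// PowerPC (OEA) hashed page table: the top four bits of a virtual
// address select one of the 16 segments, and the segment's virtual
// segment ID, hashed with the page index, selects a group of eight
// page table entries. If the primary group is full, a secondary hash
// yields one alternative group - map_page() tries both before giving up.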
static void
map_page(void *virtualAddress, void *physicalAddress, uint8 mode)
{
	uint32 virtualSegmentID
		= sSegments[addr_t(virtualAddress) >> 28].virtual_segment_id;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		(uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		// 8 entries in a group
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, mode, false);
		//printf("map: va = %p -> %p, mode = %d, hash = %lu\n", virtualAddress, physicalAddress, mode, hash);
		return;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, mode, true);
		//printf("map: va = %p -> %p, mode = %d, second hash = %lu\n", virtualAddress, physicalAddress, mode, hash);
		return;
	}

	panic("out of page table entries! (you would think this could not happen "
		"in a boot loader...)\n");
}


static void
map_range(void *virtualAddress, void *physicalAddress, size_t size, uint8 mode)
{
	for (uint32 offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page((void *)(uint32(virtualAddress) + offset),
			(void *)(uint32(physicalAddress) + offset), mode);
	}
}


static status_t
find_allocated_ranges(void *oldPageTable, void *pageTable,
	page_table_entry_group **_physicalPageTable, void **_exceptionHandlers)
{
	// we have to preserve the mappings the OpenFirmware has established
	// if we want to continue to use its services after we've
	// taken over (we will probably need fewer translations once
	// we have proper driver support for the target hardware)
	int mmu;
	if (of_getprop(gChosen, "mmu", &mmu, sizeof(int)) == OF_FAILED) {
		puts("no OF mmu");
		return B_ERROR;
	}
	mmu = of_instance_to_package(mmu);

	struct translation_map {
		void	*virtual_address;
		int		length;
		void	*physical_address;
		int		mode;
	} translations[64];

	int length = of_getprop(mmu, "translations", &translations,
		sizeof(translations));
	if (length == OF_FAILED) {
		puts("no OF translations");
		return B_ERROR;
	}
	length = length / sizeof(struct translation_map);
	uint32 total = 0;
	printf("found %d translations\n", length);

	for (int i = 0; i < length; i++) {
		struct translation_map *map = &translations[i];
		bool keepRange = true;
		//printf("%i: map: %p, length %d -> physical: %p, mode %d\n", i, map->virtual_address, map->length, map->physical_address, map->mode);

		// insert range in physical allocated, if it points to physical memory

		if (is_physical_memory(map->physical_address)
			&& insert_physical_allocated_range((addr_t)map->physical_address,
					map->length) != B_OK) {
			printf("cannot map physical allocated range (num ranges = %lu)!\n",
				gKernelArgs.num_physical_allocated_ranges);
			return B_ERROR;
		}

		if (map->virtual_address == pageTable) {
			puts("found page table!");
			*_physicalPageTable
				= (page_table_entry_group *)map->physical_address;
			keepRange = false;
				// we keep it explicitly anyway
		}
		if ((addr_t)map->physical_address <= 0x100
			&& (addr_t)map->physical_address + map->length >= 0x1000) {
			puts("found exception handlers!");
			*_exceptionHandlers = map->virtual_address;
			keepRange = false;
				// we keep it explicitly anyway
		}
		if (map->virtual_address == oldPageTable)
			keepRange = false;

		// insert range in virtual allocated

		if (insert_virtual_allocated_range((addr_t)map->virtual_address,
				map->length) != B_OK) {
			printf("cannot map virtual allocated range (num ranges = %lu)!\n",
				gKernelArgs.num_virtual_allocated_ranges);
		}

		// map range into the page table

		map_range(map->virtual_address, map->physical_address, map->length,
			map->mode);

		// insert range in virtual ranges to keep

// TODO: ATM keeping the ranges doesn't make much sense. The OF usually identity
// maps stuff, which means that RAM will most likely be mapped < 2 GB, which we
// cannot preserve, since that doesn't lie in the kernel address space. Mappings
// >= 2 GB are probably memory mapped hardware registers or the frame buffer
// (i.e. non-RAM), which we don't handle correctly ATM.
#if 0
		if (keepRange) {
			if (insert_virtual_range_to_keep(map->virtual_address,
					map->length) != B_OK) {
				printf("cannot map virtual range to keep (num ranges = %lu)!\n",
					gKernelArgs.num_virtual_allocated_ranges);
			}
		}
#endif

		total += map->length;
	}
	//printf("total mapped: %lu\n", total);

	// remove the boot loader code from the virtual ranges to keep in the
	// kernel
	if (remove_virtual_range_to_keep(&__text_begin, &_end - &__text_begin)
			!= B_OK) {
		printf("find_allocated_ranges(): Failed to remove boot loader range "
			"from virtual ranges to keep.\n");
	}

	return B_OK;
}


/*!	Computes the recommended minimal page table size as
	described in table 7-22 of the PowerPC "Programming
	Environment for 32-Bit Microprocessors".
	The page table size ranges from 64 kB (for 8 MB RAM)
	to 32 MB (for 4 GB RAM).
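	For example, 512 MB of RAM (2^29 bytes) yields a page table
	of 2^(29 - 7) bytes, i.e. 4 MB.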
*/
static size_t
suggested_page_table_size(size_t total)
{
	uint32 max = 23;
		// 2^23 == 8 MB

	while (max < 32) {
		if (total <= (1UL << max))
			break;

		max++;
	}

	return 1UL << (max - 7);
		// 2^(23 - 7) == 64 kB
}


static void *
find_physical_memory_range(size_t size)
{
	for (uint32 i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
		if (gKernelArgs.physical_memory_range[i].size >= size)
			return (void *)gKernelArgs.physical_memory_range[i].start;
	}
	return PHYSINVAL;
}


static void *
find_free_physical_range(size_t size)
{
	// just do a simple linear search at the end of the allocated
	// ranges (dumb memory allocation)
	if (gKernelArgs.num_physical_allocated_ranges == 0) {
		if (gKernelArgs.num_physical_memory_ranges == 0)
			return PHYSINVAL;

		return find_physical_memory_range(size);
	}

	for (uint32 i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
		void *address = (void *)(gKernelArgs.physical_allocated_range[i].start
			+ gKernelArgs.physical_allocated_range[i].size);
		if (!is_physical_allocated(address, size)
			&& is_physical_memory(address, size))
			return address;
	}
	return PHYSINVAL;
}


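// Returns "base" itself if it is free, otherwise the lowest free range
// of "size" bytes at or above "base", otherwise the first free range
// found at all - or NULL, if the virtual address space is exhausted.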
static void *
find_free_virtual_range(void *base, size_t size)
{
	if (base && !is_virtual_allocated(base, size))
		return base;

	void *firstFound = NULL;
	void *firstBaseFound = NULL;
	for (uint32 i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
		void *address = (void *)(gKernelArgs.virtual_allocated_range[i].start
			+ gKernelArgs.virtual_allocated_range[i].size);
		if (!is_virtual_allocated(address, size)) {
			if (!base)
				return address;

			if (firstFound == NULL)
				firstFound = address;
			if (address >= base
				&& (firstBaseFound == NULL || address < firstBaseFound)) {
				firstBaseFound = address;
			}
		}
	}
	return (firstBaseFound ? firstBaseFound : firstFound);
}


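// Allocates "size" bytes of physical memory and maps them at a free
// virtual address - preferably at "_virtualAddress", mandatorily so if
// "exactAddress" is set. Returns the virtual address, or NULL on failure.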
extern "C" void *
arch_mmu_allocate(void *_virtualAddress, size_t size, uint8 _protection,
	bool exactAddress)
{
	// we only know page sizes
	size = ROUNDUP(size, B_PAGE_SIZE);

	uint8 protection = 0;
	if (_protection & B_WRITE_AREA)
		protection = PAGE_READ_WRITE;
	else
		protection = PAGE_READ_ONLY;

	// If no address is given, use KERNEL_BASE as the base address, since
	// that avoids trouble in the kernel when we decide to keep the region.
	void *virtualAddress = _virtualAddress;
	if (!virtualAddress)
		virtualAddress = (void*)KERNEL_BASE;

	// find a free virtual address range large enough to hold "size"
	virtualAddress = find_free_virtual_range(virtualAddress, size);
	if (virtualAddress == NULL)
		return NULL;

	// fail if an exact address was requested, but is not free
	if (exactAddress && _virtualAddress && virtualAddress != _virtualAddress) {
		dprintf("arch_mmu_allocate(): exact address requested, but virtual "
			"range (base: %p, size: %lu) is not free.\n",
			_virtualAddress, size);
		return NULL;
	}

	// We have a free virtual range for the allocation, now
	// look for free physical memory as well (we assume that
	// a) there is enough memory, and b) failing is fatal, so
	// that we don't have to optimize for these cases :)

	void *physicalAddress = find_free_physical_range(size);
	if (physicalAddress == PHYSINVAL) {
		dprintf("arch_mmu_allocate(base: %p, size: %lu) no free physical "
			"address\n", virtualAddress, size);
		return NULL;
	}

	// everything went fine, so let's mark the space as used

	printf("mmu_alloc: va %p, pa %p, size %lu\n", virtualAddress,
		physicalAddress, size);
	insert_virtual_allocated_range((addr_t)virtualAddress, size);
	insert_physical_allocated_range((addr_t)physicalAddress, size);

	map_range(virtualAddress, physicalAddress, size, protection);

	return virtualAddress;
}


extern "C" status_t
arch_mmu_free(void *address, size_t size)
{
	// TODO: implement freeing a region!
	return B_OK;
}


static inline void
invalidate_tlb(void)
{
	//asm volatile("tlbia");
		// "tlbia" is obviously not available on every CPU...

	// Note: this flushes the whole 4 GB address space - it
	//		would probably be a good idea to do less here

	addr_t address = 0;
	for (uint32 i = 0; i < 0x100000; i++) {
		asm volatile("tlbie %0" : : "r" (address));
		address += B_PAGE_SIZE;
	}
	tlbsync();
}


//	#pragma mark - OpenFirmware callbacks and public API


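// Invoked when the OpenFirmware wants to map a range itself: we record
// the range as allocated and enter it into our page table on its behalf.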
static int
map_callback(struct of_arguments *args)
{
	void *physicalAddress = (void *)args->Argument(0);
	void *virtualAddress = (void *)args->Argument(1);
	int length = args->Argument(2);
	int mode = args->Argument(3);
	int &error = args->ReturnValue(0);

	// insert range in physical allocated if needed

	if (is_physical_memory(physicalAddress)
		&& insert_physical_allocated_range((addr_t)physicalAddress, length)
			!= B_OK) {
		error = -1;
		return OF_FAILED;
	}

	// insert range in virtual allocated

	if (insert_virtual_allocated_range((addr_t)virtualAddress, length)
			!= B_OK) {
		error = -2;
		return OF_FAILED;
	}

	// map range into the page table

	map_range(virtualAddress, physicalAddress, length, mode);

	return B_OK;
}


static int
unmap_callback(struct of_arguments *args)
{
/*	void *address = (void *)args->Argument(0);
	int length = args->Argument(1);
	int &error = args->ReturnValue(0);
*/
	// TODO: to be implemented

	return OF_FAILED;
}


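// Invoked when the OpenFirmware wants a virtual address translated:
// walk the primary and the secondary hash group exactly the way
// map_page() filled them, and return the physical address and the
// WIMG/PP mode bits of the entry found.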
static int
translate_callback(struct of_arguments *args)
{
	addr_t virtualAddress = (addr_t)args->Argument(0);
	int &error = args->ReturnValue(0);
	int &physicalAddress = args->ReturnValue(1);
	int &mode = args->ReturnValue(2);

	// Find page table entry for this address

	uint32 virtualSegmentID
		= sSegments[addr_t(virtualAddress) >> 28].virtual_segment_id;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		(uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
	page_table_entry *entry = NULL;

	for (int32 i = 0; i < 8; i++) {
		entry = &group->entry[i];

		if (entry->valid
			&& entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == false
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			goto success;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		entry = &group->entry[i];

		if (entry->valid
			&& entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == true
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			goto success;
	}

	// could not find the translation
	error = B_ENTRY_NOT_FOUND;
	return OF_FAILED;

success:
	// we found the entry in question
	physicalAddress = (int)(entry->physical_page_number * B_PAGE_SIZE);
	mode = (entry->write_through << 6)		// WIMGxPP
		| (entry->caching_inhibited << 5)
		| (entry->memory_coherent << 4)
		| (entry->guarded << 3)
		| entry->page_protection;
	error = B_OK;

	return B_OK;
}


static int
alloc_real_mem_callback(struct of_arguments *args)
{
/*	addr_t minAddress = (addr_t)args->Argument(0);
	addr_t maxAddress = (addr_t)args->Argument(1);
	int length = args->Argument(2);
	int mode = args->Argument(3);
	int &error = args->ReturnValue(0);
	int &physicalAddress = args->ReturnValue(1);
*/
	// TODO: to be implemented

	return OF_FAILED;
}


/*!	Dispatches the callback to the responsible function */
static int
callback(struct of_arguments *args)
{
	const char *name = args->name;
	printf("CALLBACK: %s\n", name);

	if (!strcmp(name, "map"))
		return map_callback(args);
	else if (!strcmp(name, "unmap"))
		return unmap_callback(args);
	else if (!strcmp(name, "translate"))
		return translate_callback(args);
	else if (!strcmp(name, "alloc-real-mem"))
		return alloc_real_mem_callback(args);

	return OF_FAILED;
}


extern "C" status_t
arch_set_callback(void)
{
	// set the OpenFirmware callbacks - it will ask us for memory after that
	// instead of maintaining it itself

	void *oldCallback = NULL;
	if (of_call_client_function("set-callback", 1, 1, &callback, &oldCallback)
			== OF_FAILED) {
		puts("set-callback failed!");
		return B_ERROR;
	}
	//printf("old callback = %p\n", oldCallback);

	return B_OK;
}


extern "C" status_t
arch_mmu_init(void)
{
	// get map of physical memory (fill in kernel_args structure)

	size_t total;
	if (find_physical_memory_ranges(total) != B_OK) {
		puts("could not find physical memory ranges!");
		return B_ERROR;
	}
	printf("total physical memory = %lu MB\n", total / (1024*1024));

	// get OpenFirmware's current page table

	page_table_entry_group *oldTable;
	page_table_entry_group *table;
	size_t tableSize;
	ppc_get_page_table(&table, &tableSize);

	oldTable = table;

	bool realMode = false;
	// TODO: read these values out of the OF settings
	addr_t realBase = 0;
	addr_t realSize = 0x400000;

	// can we just keep the page table?
	size_t suggestedTableSize = suggested_page_table_size(total);
	printf("suggested page table size = %lu\n", suggestedTableSize);
	if (tableSize < suggestedTableSize) {
		// nah, we need a new one!
		printf("need new page table, size = %lu!\n", suggestedTableSize);
		table = (page_table_entry_group *)of_claim(NULL, suggestedTableSize,
			suggestedTableSize);
			// KERNEL_BASE would be better as virtual address, but
			// at least with Apple's OpenFirmware, it makes no
			// difference - we will have to remap it later
		if (table == (void *)OF_FAILED) {
			panic("Could not allocate new page table (size = %lu)!\n",
				suggestedTableSize);
			return B_NO_MEMORY;
		}
		if (table == NULL) {
			// work-around for the broken Pegasos OpenFirmware
			puts("broken OpenFirmware detected (claim doesn't work).");
			realMode = true;

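			// choose the first multiple of the page table size at or
			// beyond realBase + realSize * 3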
			addr_t tableBase = 0;
			for (int32 i = 0; tableBase < realBase + realSize * 3; i++) {
				tableBase = suggestedTableSize * i;
			}

			table = (page_table_entry_group *)tableBase;
		}

		printf("new table at: %p\n", table);
		sPageTable = table;
		tableSize = suggestedTableSize;
	} else {
		// TODO: we could check if the page table is much too large
		//	and create a smaller one in this case (in order to save
		//	memory).
		sPageTable = table;
	}

	sPageTableHashMask = tableSize / sizeof(page_table_entry_group) - 1;
	if (sPageTable != oldTable)
		memset(sPageTable, 0, tableSize);

	// turn off address translation via the page table/segment mechanism,
	// identity map the first 256 MB (where our code/data reside)

	printf("MSR: %p\n", (void *)get_msr());

#if 0
	block_address_translation bat;

	bat.length = BAT_LENGTH_256MB;
	bat.kernel_valid = true;
	bat.memory_coherent = true;
	bat.protection = BAT_READ_WRITE;

	set_ibat0(&bat);
	set_dbat0(&bat);
	isync();
#endif

	// initialize segment descriptors, but don't set the registers
	// until we're about to take over the page table - we're mapping
	// pages into our table using these values

	for (int32 i = 0; i < 16; i++)
		sSegments[i].virtual_segment_id = i;

	// find already allocated ranges of physical memory
	// and the virtual address space

	page_table_entry_group *physicalTable = NULL;
	void *exceptionHandlers = (void *)-1;
	if (find_allocated_ranges(oldTable, table, &physicalTable,
			&exceptionHandlers) != B_OK) {
		puts("find_allocated_ranges() failed!");
		//return B_ERROR;
	}

#if 0
	block_address_translation bats[8];
	getibats(bats);
	for (int32 i = 0; i < 8; i++) {
		printf("page index %u, length %u, ppn %u\n", bats[i].page_index,
			bats[i].length, bats[i].physical_block_number);
	}
#endif

	if (physicalTable == NULL) {
		puts("arch_mmu_init(): Didn't find physical address of page table!");
		if (!realMode)
			return B_ERROR;

		// Pegasos work-around
		//map_range((void *)realBase, (void *)realBase, realSize * 2, PAGE_READ_WRITE);
		//map_range((void *)(total - realSize), (void *)(total - realSize), realSize, PAGE_READ_WRITE);
		//map_range((void *)table, (void *)table, tableSize, PAGE_READ_WRITE);
		insert_physical_allocated_range(realBase, realSize * 2);
		insert_virtual_allocated_range(realBase, realSize * 2);
		insert_physical_allocated_range(total - realSize, realSize);
		insert_virtual_allocated_range(total - realSize, realSize);
		insert_physical_allocated_range((addr_t)table, tableSize);
		insert_virtual_allocated_range((addr_t)table, tableSize);

		// QEMU OpenHackware work-around
		insert_physical_allocated_range(0x05800000, 0x06000000 - 0x05800000);
		insert_virtual_allocated_range(0x05800000, 0x06000000 - 0x05800000);

		physicalTable = table;
	}

	if (exceptionHandlers == (void *)-1) {
		// TODO: create mapping for the exception handlers
		puts("no mapping for the exception handlers!");
	}

	// Set the Open Firmware memory callback. From now on the Open Firmware
	// will ask us for memory.
	arch_set_callback();

	// set up new page table and turn on translation again

	for (int32 i = 0; i < 16; i++) {
		ppc_set_segment_register((void *)(i * 0x10000000), sSegments[i]);
			// one segment describes 256 MB of memory
	}

	ppc_set_page_table(physicalTable, tableSize);
	invalidate_tlb();

	if (!realMode) {
		// clear BATs
		reset_ibats();
		reset_dbats();
		ppc_sync();
		isync();
	}

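	// enable instruction and data address translation (MSR[IR] and
	// MSR[DR]) - from here on, every access goes through our page table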
	set_msr(MSR_MACHINE_CHECK_ENABLED | MSR_FP_AVAILABLE
		| MSR_INST_ADDRESS_TRANSLATION | MSR_DATA_ADDRESS_TRANSLATION);

	// set kernel args

	printf("virt_allocated: %lu\n", gKernelArgs.num_virtual_allocated_ranges);
	printf("phys_allocated: %lu\n", gKernelArgs.num_physical_allocated_ranges);
	printf("phys_memory: %lu\n", gKernelArgs.num_physical_memory_ranges);

	gKernelArgs.arch_args.page_table.start = (addr_t)sPageTable;
	gKernelArgs.arch_args.page_table.size = tableSize;

	gKernelArgs.arch_args.exception_handlers.start = (addr_t)exceptionHandlers;
	gKernelArgs.arch_args.exception_handlers.size = B_PAGE_SIZE;

	return B_OK;
}