/*
 * Copyright 2003-2009, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2010-2011, Haiku, Inc. All Rights Reserved.
 * All rights reserved. Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Axel Dörfler, axeld@pinc-software.de.
 *		Alexander von Gluck, kallisti5@unixzen.com
 */


#include <OS.h>

#include <platform_arch.h>
#include <boot/addr_range.h>
#include <boot/kernel_args.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <boot/stdio.h>
#include <platform/openfirmware/openfirmware.h>
#include <arch_cpu.h>
#include <arch_mmu.h>
#include <kernel.h>

#include "support.h"


// set protection to WIMGNPP: -----PP
// PP:	00 - no access
//		01 - read only
//		10 - read/write
//		11 - read only
#define PAGE_READ_ONLY	0x01
#define PAGE_READ_WRITE	0x02
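// The full mode byte additionally carries the WIMG storage attributes
// above the protection bits, as decoded in translate_callback() below:
// write-through (bit 6), caching-inhibited (bit 5), memory-coherent
// (bit 4), and guarded (bit 3).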

// NULL is actually a possible physical address...
//#define PHYSINVAL ((void *)-1)
#define PHYSINVAL NULL

//#define TRACE_MMU
#ifdef TRACE_MMU
#   define TRACE(x...) dprintf(x)
#else
#   define TRACE(x...) ;
#endif


segment_descriptor sSegments[16];
page_table_entry_group *sPageTable;
uint32 sPageTableHashMask;


// begin and end of the boot loader
extern "C" uint8 __text_begin;
extern "C" uint8 _end;


static status_t
insert_virtual_range_to_keep(void *start, uint32 size)
{
	return insert_address_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
		&gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, (addr_t)start, size);
}


static status_t
remove_virtual_range_to_keep(void *start, uint32 size)
{
	return remove_address_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
		&gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, (addr_t)start, size);
}


static status_t
find_physical_memory_ranges(size_t &total)
{
	int memory;
	dprintf("checking for memory...\n");
	if (of_getprop(gChosen, "memory", &memory, sizeof(int)) == OF_FAILED)
		return B_ERROR;
	int package = of_instance_to_package(memory);

	total = 0;

	// Memory base addresses are provided in 32 or 64 bit flavors.
	// #address-cells and #size-cells match the number of 32-bit 'cells'
	// representing the length of the base address and size fields.
	int root = of_finddevice("/");
	int32 regAddressCells = of_address_cells(root);
	int32 regSizeCells = of_size_cells(root);
	if (regAddressCells == OF_FAILED || regSizeCells == OF_FAILED) {
		dprintf("finding base/size length counts failed, assume 32-bit.\n");
		regAddressCells = 1;
		regSizeCells = 1;
	}
	// NOTE: Size Cells of 2 is possible in theory... but I haven't seen it yet.
	if (regAddressCells > 2 || regSizeCells > 1) {
		panic("%s: Unsupported OpenFirmware cell count detected.\n"
			"Address Cells: %" B_PRId32 "; Size Cells: %" B_PRId32
			" (CPU > 64bit?).\n", __func__, regAddressCells, regSizeCells);
		return B_ERROR;
	}

	// On 64-bit PowerPC systems (G5), the memory base addresses are larger:
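	// each "reg" entry is assumed to consist of two address cells (high
	// word first) followed by a single 32-bit size cell, matching
	// of_region<uint64> with its 32-bit size field.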
	if (regAddressCells == 2) {
		struct of_region<uint64> regions[64];
		int count = of_getprop(package, "reg", regions, sizeof(regions));
		if (count == OF_FAILED)
			count = of_getprop(memory, "reg", regions, sizeof(regions));
		if (count == OF_FAILED)
			return B_ERROR;
		count /= sizeof(regions[0]);

		for (int32 i = 0; i < count; i++) {
			if (regions[i].size == 0) {
				dprintf("%" B_PRId32 ": empty region\n", i);
				continue;
			}
			dprintf("%" B_PRId32 ": base = %" B_PRIu64 ", "
				"size = %" B_PRIu32 "\n", i, regions[i].base, regions[i].size);

			total += regions[i].size;

			if (insert_physical_memory_range((addr_t)regions[i].base,
					regions[i].size) != B_OK) {
				dprintf("cannot map physical memory range "
					"(num ranges = %" B_PRIu32 ")!\n",
					gKernelArgs.num_physical_memory_ranges);
				return B_ERROR;
			}
		}
		return B_OK;
	}

	// Otherwise, on 32-bit PowerPC systems (G3, G4), the base address is
	// a single 32-bit cell
	struct of_region<uint32> regions[64];
	int count = of_getprop(package, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		count = of_getprop(memory, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		return B_ERROR;
	count /= sizeof(regions[0]);

	for (int32 i = 0; i < count; i++) {
		if (regions[i].size == 0) {
			dprintf("%" B_PRId32 ": empty region\n", i);
			continue;
		}
		dprintf("%" B_PRId32 ": base = %" B_PRIu32 ", "
			"size = %" B_PRIu32 "\n", i, regions[i].base, regions[i].size);

		total += regions[i].size;

		if (insert_physical_memory_range((addr_t)regions[i].base,
				regions[i].size) != B_OK) {
			dprintf("cannot map physical memory range "
				"(num ranges = %" B_PRIu32 ")!\n",
				gKernelArgs.num_physical_memory_ranges);
			return B_ERROR;
		}
	}

	return B_OK;
}


static bool
is_virtual_allocated(void *address, size_t size)
{
	addr_t foundBase;
	return !get_free_address_range(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges, (addr_t)address, size,
		&foundBase) || foundBase != (addr_t)address;
}


static bool
is_physical_allocated(void *address, size_t size)
{
	phys_addr_t foundBase;
	return !get_free_physical_address_range(
		gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges, (addr_t)address, size,
		&foundBase) || foundBase != (addr_t)address;
}


static bool
is_physical_memory(void *address, size_t size)
{
	return is_physical_address_range_covered(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges, (addr_t)address, size);
}


static bool
is_physical_memory(void *address)
{
	return is_physical_memory(address, 1);
}


static void
fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
	void *virtualAddress, void *physicalAddress, uint8 mode, bool secondaryHash)
{
	// lower 32 bit - set at once
	((uint32 *)entry)[1]
		= (((uint32)physicalAddress / B_PAGE_SIZE) << 12) | mode;
	/*entry->physical_page_number = (uint32)physicalAddress / B_PAGE_SIZE;
	entry->_reserved0 = 0;
	entry->referenced = false;
	entry->changed = false;
	entry->write_through = (mode >> 6) & 1;
	entry->caching_inhibited = (mode >> 5) & 1;
	entry->memory_coherent = (mode >> 4) & 1;
	entry->guarded = (mode >> 3) & 1;
	entry->_reserved1 = 0;
	entry->page_protection = mode & 0x3;*/
	eieio();
		// we need to make sure that the lower 32 bits have already
		// been written when the entry becomes valid

	// upper 32 bit
	entry->virtual_segment_id = virtualSegmentID;
	entry->secondary_hash = secondaryHash;
	entry->abbr_page_index = ((uint32)virtualAddress >> 22) & 0x3f;
	entry->valid = true;
}


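/*!	Maps a single page by inserting an entry into the hashed page table.
	A hash bucket (PTE group) holds eight entries; if the primary group
	is full, the entry is placed in the group selected by the secondary
	hash, with its secondary_hash flag set so that lookups know which
	hash function located it.
*/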
static void
map_page(void *virtualAddress, void *physicalAddress, uint8 mode)
{
	uint32 virtualSegmentID
		= sSegments[addr_t(virtualAddress) >> 28].virtual_segment_id;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		(uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		// 8 entries in a group
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, mode, false);
		//TRACE("map: va = %p -> %p, mode = %d, hash = %lu\n",
		//	virtualAddress, physicalAddress, mode, hash);
		return;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, mode, true);
		//TRACE("map: va = %p -> %p, mode = %d, second hash = %lu\n",
		//	virtualAddress, physicalAddress, mode, hash);
		return;
	}

	panic("%s: out of page table entries!\n", __func__);
}


static void
map_range(void *virtualAddress, void *physicalAddress, size_t size, uint8 mode)
{
	for (uint32 offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page((void *)(uint32(virtualAddress) + offset),
			(void *)(uint32(physicalAddress) + offset), mode);
	}
}


static status_t
find_allocated_ranges(void *oldPageTable, void *pageTable,
	page_table_entry_group **_physicalPageTable, void **_exceptionHandlers)
{
	// We have to preserve the OpenFirmware established mappings
	// if we want to continue to use its services after we've
	// taken over (we will probably need fewer translations once
	// we have proper driver support for the target hardware).
	int mmu;
	if (of_getprop(gChosen, "mmu", &mmu, sizeof(int)) == OF_FAILED) {
		dprintf("%s: Error: no OpenFirmware mmu\n", __func__);
		return B_ERROR;
	}
	mmu = of_instance_to_package(mmu);

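	// Each entry of the "translations" property is expected to follow the
	// layout below (virtual address, length, physical address, mode), as
	// described by the Open Firmware PowerPC binding.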
	struct translation_map {
		void	*virtual_address;
		int		length;
		void	*physical_address;
		int		mode;
	} translations[64];

	int length = of_getprop(mmu, "translations", &translations,
		sizeof(translations));
	if (length == OF_FAILED) {
		dprintf("Error: no OF translations.\n");
		return B_ERROR;
	}
	length = length / sizeof(struct translation_map);
	uint32 total = 0;
	dprintf("found %d translations\n", length);

	for (int i = 0; i < length; i++) {
		struct translation_map *map = &translations[i];
		bool keepRange = true;
		TRACE("%i: map: %p, length %d -> physical: %p, mode %d\n", i,
			map->virtual_address, map->length,
			map->physical_address, map->mode);

		// insert range in physical allocated, if it points to physical memory

		if (is_physical_memory(map->physical_address)
			&& insert_physical_allocated_range((addr_t)map->physical_address,
				map->length) != B_OK) {
			dprintf("cannot map physical allocated range "
				"(num ranges = %" B_PRIu32 ")!\n",
				gKernelArgs.num_physical_allocated_ranges);
			return B_ERROR;
		}

		if (map->virtual_address == pageTable) {
			dprintf("%i: found page table at va %p\n", i,
				map->virtual_address);
			*_physicalPageTable
				= (page_table_entry_group *)map->physical_address;
			keepRange = false;
				// we keep it explicitly anyway
		}
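		// The PowerPC exception vectors live at the start of physical
		// memory, beginning with the system reset vector at 0x100; a
		// translation covering 0x100-0x1000 therefore maps the handlers.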
		if ((addr_t)map->physical_address <= 0x100
			&& (addr_t)map->physical_address + map->length >= 0x1000) {
			dprintf("%i: found exception handlers at va %p\n", i,
				map->virtual_address);
			*_exceptionHandlers = map->virtual_address;
			keepRange = false;
				// we keep it explicitly anyway
		}
		if (map->virtual_address == oldPageTable)
			keepRange = false;

		// insert range in virtual allocated

		if (insert_virtual_allocated_range((addr_t)map->virtual_address,
				map->length) != B_OK) {
			dprintf("cannot map virtual allocated range "
				"(num ranges = %" B_PRIu32 ")!\n",
				gKernelArgs.num_virtual_allocated_ranges);
		}

		// map range into the page table

		map_range(map->virtual_address, map->physical_address, map->length,
			map->mode);

		// insert range in virtual ranges to keep

		if (keepRange) {
			TRACE("%i: keeping free range starting at va %p\n", i,
				map->virtual_address);

			if (insert_virtual_range_to_keep(map->virtual_address,
					map->length) != B_OK) {
				dprintf("cannot map virtual range to keep "
					"(num ranges = %" B_PRIu32 ")\n",
					gKernelArgs.arch_args.num_virtual_ranges_to_keep);
			}
		}

		total += map->length;
	}
	dprintf("total size kept: %" B_PRIu32 "\n", total);

	// remove the boot loader code from the virtual ranges to keep in the
	// kernel
	if (remove_virtual_range_to_keep(&__text_begin, &_end - &__text_begin)
			!= B_OK) {
		dprintf("%s: Failed to remove boot loader range "
			"from virtual ranges to keep.\n", __func__);
	}

	return B_OK;
}


/*!	Computes the recommended minimal page table size as
	described in table 7-22 of the PowerPC "Programming
	Environments Manual for 32-Bit Implementations of the
	PowerPC Architecture".
	The page table size ranges from 64 kB (for 8 MB RAM)
	to 32 MB (for 4 GB RAM).
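	For example, 256 MB of RAM (2^28 bytes) results in a 2 MB (2^21 byte)
	page table - 1/128 of the memory size.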
*/
static size_t
suggested_page_table_size(size_t total)
{
	uint32 max = 23;
		// 2^23 == 8 MB

	while (max < 32) {
		if (total <= (1UL << max))
			break;

		max++;
	}

	return 1UL << (max - 7);
		// 2^(23 - 7) == 64 kB
}


static void *
find_physical_memory_range(size_t size)
{
	for (uint32 i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
		if (gKernelArgs.physical_memory_range[i].size >= size)
			return (void *)gKernelArgs.physical_memory_range[i].start;
	}
	return PHYSINVAL;
}


static void *
find_free_physical_range(size_t size)
{
	// just do a simple linear search at the end of the allocated
	// ranges (dumb memory allocation)
	if (gKernelArgs.num_physical_allocated_ranges == 0) {
		if (gKernelArgs.num_physical_memory_ranges == 0)
			return PHYSINVAL;

		return find_physical_memory_range(size);
	}

	for (uint32 i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
		void *address = (void *)(gKernelArgs.physical_allocated_range[i].start
			+ gKernelArgs.physical_allocated_range[i].size);
		if (!is_physical_allocated(address, size)
			&& is_physical_memory(address, size))
			return address;
	}
	return PHYSINVAL;
}


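/*!	Returns a free virtual range of at least \a size bytes: \a base itself
	if that range is still free, otherwise the lowest candidate at or above
	\a base (candidates are the ends of the already allocated ranges), or,
	failing that, the first free candidate found.
*/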
static void *
find_free_virtual_range(void *base, size_t size)
{
	if (base && !is_virtual_allocated(base, size))
		return base;

	void *firstFound = NULL;
	void *firstBaseFound = NULL;
	for (uint32 i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
		void *address = (void *)(gKernelArgs.virtual_allocated_range[i].start
			+ gKernelArgs.virtual_allocated_range[i].size);
		if (!is_virtual_allocated(address, size)) {
			if (!base)
				return address;

			if (firstFound == NULL)
				firstFound = address;
			if (address >= base
				&& (firstBaseFound == NULL || address < firstBaseFound)) {
				firstBaseFound = address;
			}
		}
	}
	return (firstBaseFound ? firstBaseFound : firstFound);
}


extern "C" void *
arch_mmu_allocate(void *_virtualAddress, size_t size, uint8 _protection,
	bool exactAddress)
{
	// we only know page sizes
	size = ROUNDUP(size, B_PAGE_SIZE);

	uint8 protection = 0;
	if (_protection & B_WRITE_AREA)
		protection = PAGE_READ_WRITE;
	else
		protection = PAGE_READ_ONLY;

	// If no address is given, use KERNEL_BASE as the base address, since
	// that avoids trouble in the kernel when we decide to keep the region.
	void *virtualAddress = _virtualAddress;
	if (!virtualAddress)
		virtualAddress = (void*)KERNEL_BASE;

	// find a free address large enough to hold "size"
	virtualAddress = find_free_virtual_range(virtualAddress, size);
	if (virtualAddress == NULL)
		return NULL;

	// fail if the exact address was requested, but is not free
	if (exactAddress && _virtualAddress && virtualAddress != _virtualAddress) {
		dprintf("arch_mmu_allocate(): exact address requested, but virtual "
			"range (base: %p, size: %" B_PRIuSIZE ") is not free.\n",
			_virtualAddress, size);
		return NULL;
	}

	// We have a free virtual range for the allocation, now
	// have a look for free physical memory as well (we assume
	// that a) there is enough memory, and b) failing is fatal
	// so that we don't have to optimize for these cases :)

	void *physicalAddress = find_free_physical_range(size);
	if (physicalAddress == PHYSINVAL) {
		dprintf("arch_mmu_allocate(base: %p, size: %" B_PRIuSIZE ") "
			"no free physical address\n", virtualAddress, size);
		return NULL;
	}

	// everything went fine, so let's mark the space as used

	dprintf("mmu_alloc: va %p, pa %p, size %" B_PRIuSIZE "\n", virtualAddress,
		physicalAddress, size);
	insert_virtual_allocated_range((addr_t)virtualAddress, size);
	insert_physical_allocated_range((addr_t)physicalAddress, size);

	map_range(virtualAddress, physicalAddress, size, protection);

	return virtualAddress;
}


extern "C" status_t
arch_mmu_free(void *address, size_t size)
{
	// TODO: implement freeing a region!
	return B_OK;
}


static inline void
invalidate_tlb(void)
{
	//asm volatile("tlbia");
		// "tlbia" is obviously not available on every CPU...

	// Note: this flushes the whole 4 GB address space - it
	//		would probably be a good idea to do less here

	addr_t address = 0;
	for (uint32 i = 0; i < 0x100000; i++) {
		asm volatile("tlbie %0" : : "r" (address));
		address += B_PAGE_SIZE;
	}
	tlbsync();
}


//	#pragma mark - OpenFirmware callbacks and public API


static int
map_callback(struct of_arguments *args)
{
	void *physicalAddress = (void *)args->Argument(0);
	void *virtualAddress = (void *)args->Argument(1);
	int length = args->Argument(2);
	int mode = args->Argument(3);
	int &error = args->ReturnValue(0);

	// insert range in physical allocated if needed

	if (is_physical_memory(physicalAddress)
		&& insert_physical_allocated_range((addr_t)physicalAddress, length)
			!= B_OK) {
		error = -1;
		return OF_FAILED;
	}

	// insert range in virtual allocated

	if (insert_virtual_allocated_range((addr_t)virtualAddress, length)
			!= B_OK) {
		error = -2;
		return OF_FAILED;
	}

	// map range into the page table

	map_range(virtualAddress, physicalAddress, length, mode);

	return B_OK;
}


static int
unmap_callback(struct of_arguments *args)
{
/*	void *address = (void *)args->Argument(0);
	int length = args->Argument(1);
	int &error = args->ReturnValue(0);
*/
	// TODO: to be implemented

	return OF_FAILED;
}


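/*!	Looks up the translation for \a virtualAddress on behalf of Open
	Firmware, walking the page table with the same primary/secondary hash
	scheme as map_page(), and returns the physical address plus the
	WIMG/PP mode bits of the matching entry.
*/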
static int
translate_callback(struct of_arguments *args)
{
	addr_t virtualAddress = (addr_t)args->Argument(0);
	int &error = args->ReturnValue(0);
	int &physicalAddress = args->ReturnValue(1);
	int &mode = args->ReturnValue(2);

	// Find page table entry for this address

	uint32 virtualSegmentID
		= sSegments[addr_t(virtualAddress) >> 28].virtual_segment_id;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		(uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
	page_table_entry *entry = NULL;

	for (int32 i = 0; i < 8; i++) {
		entry = &group->entry[i];

		if (entry->valid
			&& entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == false
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			goto success;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		entry = &group->entry[i];

		if (entry->valid
			&& entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == true
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			goto success;
	}

	// could not find the translation
	error = B_ENTRY_NOT_FOUND;
	return OF_FAILED;

success:
	// we found the entry in question
	physicalAddress = (int)(entry->physical_page_number * B_PAGE_SIZE);
	mode = (entry->write_through << 6)		// WIMGxPP
		| (entry->caching_inhibited << 5)
		| (entry->memory_coherent << 4)
		| (entry->guarded << 3)
		| entry->page_protection;
	error = B_OK;

	return B_OK;
}


static int
alloc_real_mem_callback(struct of_arguments *args)
{
/*	addr_t minAddress = (addr_t)args->Argument(0);
	addr_t maxAddress = (addr_t)args->Argument(1);
	int length = args->Argument(2);
	int mode = args->Argument(3);
	int &error = args->ReturnValue(0);
	int &physicalAddress = args->ReturnValue(1);
*/
	// TODO: to be implemented

	return OF_FAILED;
}


/*!	Dispatches the callback to the responsible function */
static int
callback(struct of_arguments *args)
{
	const char *name = args->name;
	TRACE("OF CALLBACK: %s\n", name);

	if (!strcmp(name, "map"))
		return map_callback(args);
	else if (!strcmp(name, "unmap"))
		return unmap_callback(args);
	else if (!strcmp(name, "translate"))
		return translate_callback(args);
	else if (!strcmp(name, "alloc-real-mem"))
		return alloc_real_mem_callback(args);

	return OF_FAILED;
}


extern "C" status_t
arch_set_callback(void)
{
	// set OpenFirmware callbacks - it will ask us for memory after that
	// instead of maintaining it itself

	void *oldCallback = NULL;
	if (of_call_client_function("set-callback", 1, 1, &callback, &oldCallback)
			== OF_FAILED) {
		dprintf("Error: OpenFirmware set-callback failed\n");
		return B_ERROR;
	}
	TRACE("old callback = %p; new callback = %p\n", oldCallback, callback);

	return B_OK;
}


extern "C" status_t
arch_mmu_init(void)
{
	// get map of physical memory (fill in kernel_args structure)

	size_t total;
	if (find_physical_memory_ranges(total) != B_OK) {
		dprintf("Error: could not find physical memory ranges!\n");
		return B_ERROR;
	}
	dprintf("total physical memory = %" B_PRIuSIZE "MB\n",
		total / (1024 * 1024));

	// get OpenFirmware's current page table

	page_table_entry_group *oldTable;
	page_table_entry_group *table;
	size_t tableSize;
	ppc_get_page_table(&table, &tableSize);
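		// on classic PowerPC, the page table's location and size are kept
		// in the SDR1 register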

	oldTable = table;

	bool realMode = false;

	// TODO: read these values out of the OF settings
	// NOTE: I've only ever seen -1 (0xffffffff) for these values in
	//       OpenFirmware... even after loading the boot loader -- Alex
	addr_t realBase = 0;
	addr_t realSize = 0x400000;

	// can we just keep the page table?
	size_t suggestedTableSize = suggested_page_table_size(total);
	dprintf("suggested page table size = %" B_PRIuSIZE "\n",
		suggestedTableSize);
	if (tableSize < suggestedTableSize) {
		// nah, we need a new one!
		dprintf("need new page table, size = %" B_PRIuSIZE "!\n",
			suggestedTableSize);
		table = (page_table_entry_group *)of_claim(NULL, suggestedTableSize,
			suggestedTableSize);
			// KERNEL_BASE would be better as virtual address, but
			// at least with Apple's OpenFirmware, it makes no
			// difference - we will have to remap it later
		if (table == (void *)OF_FAILED) {
			panic("Could not allocate new page table "
				"(size = %" B_PRIuSIZE ")!!\n", suggestedTableSize);
			return B_NO_MEMORY;
		}
		if (table == NULL) {
			// work-around for the broken Pegasos OpenFirmware
			dprintf("broken OpenFirmware detected (claim doesn't work)\n");
			realMode = true;

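			// find the first multiple of the table size at or above
			// realBase + realSize * 3 - the hashed page table must be
			// aligned to a multiple of its size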
			addr_t tableBase = 0;
			for (int32 i = 0; tableBase < realBase + realSize * 3; i++) {
				tableBase = suggestedTableSize * i;
			}

			table = (page_table_entry_group *)tableBase;
		}

		dprintf("new table at: %p\n", table);
		sPageTable = table;
		tableSize = suggestedTableSize;
	} else {
		// TODO: we could check if the page table is much too large
		//	and create a smaller one in this case (in order to save
		//	memory).
		sPageTable = table;
	}

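	// The page table consists of a power-of-two number of 64 byte entry
	// groups (8 entries of 8 bytes each), so the group count minus one
	// can serve as the hash mask.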
	sPageTableHashMask = tableSize / sizeof(page_table_entry_group) - 1;
	if (sPageTable != oldTable)
		memset(sPageTable, 0, tableSize);

	// turn off address translation via the page table/segment mechanism,
	// identity map the first 256 MB (where our code/data reside)

	dprintf("MSR: %p\n", (void *)get_msr());

	#if 0
	block_address_translation bat;

	bat.length = BAT_LENGTH_256MB;
	bat.kernel_valid = true;
	bat.memory_coherent = true;
	bat.protection = BAT_READ_WRITE;

	set_ibat0(&bat);
	set_dbat0(&bat);
	isync();
	#endif

	// initialize segment descriptors, but don't set the registers
	// until we're about to take over the page table - we're mapping
	// pages into our table using these values

	for (int32 i = 0; i < 16; i++)
		sSegments[i].virtual_segment_id = i;

	// find already allocated ranges of physical memory
	// and the virtual address space

	page_table_entry_group *physicalTable = NULL;
	void *exceptionHandlers = (void *)-1;
	if (find_allocated_ranges(oldTable, table, &physicalTable,
			&exceptionHandlers) != B_OK) {
		dprintf("Error: find_allocated_ranges() failed\n");
		return B_ERROR;
	}

#if 0
	block_address_translation bats[8];
	getibats(bats);
	for (int32 i = 0; i < 8; i++) {
		printf("page index %u, length %u, ppn %u\n", bats[i].page_index,
			bats[i].length, bats[i].physical_block_number);
	}
#endif

	if (physicalTable == NULL) {
		dprintf("%s: Didn't find physical address of page table\n", __func__);
		if (!realMode)
			return B_ERROR;

		// Pegasos work-around
		#if 0
		map_range((void *)realBase, (void *)realBase,
			realSize * 2, PAGE_READ_WRITE);
		map_range((void *)(total - realSize), (void *)(total - realSize),
			realSize, PAGE_READ_WRITE);
		map_range((void *)table, (void *)table, tableSize, PAGE_READ_WRITE);
		#endif
		insert_physical_allocated_range(realBase, realSize * 2);
		insert_virtual_allocated_range(realBase, realSize * 2);
		insert_physical_allocated_range(total - realSize, realSize);
		insert_virtual_allocated_range(total - realSize, realSize);
		insert_physical_allocated_range((addr_t)table, tableSize);
		insert_virtual_allocated_range((addr_t)table, tableSize);

		// QEMU OpenHackware work-around
		insert_physical_allocated_range(0x05800000, 0x06000000 - 0x05800000);
		insert_virtual_allocated_range(0x05800000, 0x06000000 - 0x05800000);

		physicalTable = table;
	}

	if (exceptionHandlers == (void *)-1) {
		// TODO: create mapping for the exception handlers
		dprintf("Error: no mapping for the exception handlers!\n");
	}

	// Set the Open Firmware memory callback. From now on the Open Firmware
	// will ask us for memory.
	arch_set_callback();

	// set up new page table and turn on translation again

	for (int32 i = 0; i < 16; i++) {
		ppc_set_segment_register((void *)(i * 0x10000000), sSegments[i]);
			// one segment describes 256 MB of memory
	}

	ppc_set_page_table(physicalTable, tableSize);
	invalidate_tlb();

	if (!realMode) {
		// clear BATs
		reset_ibats();
		reset_dbats();
		ppc_sync();
		isync();
	}

	set_msr(MSR_MACHINE_CHECK_ENABLED | MSR_FP_AVAILABLE
		| MSR_INST_ADDRESS_TRANSLATION | MSR_DATA_ADDRESS_TRANSLATION);

	// set kernel args

	dprintf("virt_allocated: %" B_PRIu32 "\n",
		gKernelArgs.num_virtual_allocated_ranges);
	dprintf("phys_allocated: %" B_PRIu32 "\n",
		gKernelArgs.num_physical_allocated_ranges);
	dprintf("phys_memory: %" B_PRIu32 "\n",
		gKernelArgs.num_physical_memory_ranges);

	gKernelArgs.arch_args.page_table.start = (addr_t)sPageTable;
	gKernelArgs.arch_args.page_table.size = tableSize;

	gKernelArgs.arch_args.exception_handlers.start = (addr_t)exceptionHandlers;
	gKernelArgs.arch_args.exception_handlers.size = B_PAGE_SIZE;

	return B_OK;
}