xref: /haiku/src/system/boot/platform/openfirmware/arch/ppc/mmu.cpp (revision b671e9bbdbd10268a042b4f4cc4317ccd03d105e)
/*
 * Copyright 2003-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include <platform_arch.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <boot/stdio.h>
#include <platform/openfirmware/openfirmware.h>
#include <arch_cpu.h>
#include <arch_mmu.h>
#include <kernel.h>

#include <OS.h>


// set protection to WIMGNPP: -----PP
// PP:	00 - no access
//		01 - read only
//		10 - read/write
//		11 - read only
#define PAGE_READ_ONLY	0x01
#define PAGE_READ_WRITE	0x02

// NULL is actually a possible physical address...
//#define PHYSINVAL ((void *)-1)
#define PHYSINVAL NULL

segment_descriptor sSegments[16];
page_table_entry_group *sPageTable;
uint32 sPageTableHashMask;


// begin and end of the boot loader
extern "C" uint8 __text_begin;
extern "C" uint8 _end;

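/*!	Removes the range at \a index from the \a ranges array by moving the
	following entries down one slot and decrementing \a numRanges.
*/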
static void
remove_range_index(addr_range *ranges, uint32 &numRanges, uint32 index)
{
	if (index + 1 == numRanges) {
		// remove last range
		numRanges--;
		return;
	}

	memmove(&ranges[index], &ranges[index + 1],
		sizeof(addr_range) * (numRanges - 1 - index));
	numRanges--;
}

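/*!	Inserts the given range into the \a ranges array, page-aligning its start
	and end first. If the new range touches or overlaps an existing entry,
	that entry is extended instead, and any other entries that now touch or
	overlap the grown range are merged into it. Returns B_ENTRY_NOT_FOUND if
	a new entry would be required but \a maxRanges has already been reached.
*/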
static status_t
insert_memory_range(addr_range *ranges, uint32 &numRanges, uint32 maxRanges,
	const void *_start, uint32 _size)
{
	addr_t start = ROUNDDOWN(addr_t(_start), B_PAGE_SIZE);
	addr_t end = ROUNDUP(addr_t(_start) + _size, B_PAGE_SIZE);
	addr_t size = end - start;
	if (size == 0)
		return B_OK;

	for (uint32 i = 0; i < numRanges; i++) {
		addr_t rangeStart = ranges[i].start;
		addr_t rangeEnd = rangeStart + ranges[i].size;

		if (end < rangeStart || start > rangeEnd) {
			// ranges don't intersect or touch each other
			continue;
		}
		if (start >= rangeStart && end <= rangeEnd) {
			// range is already completely covered
			return B_OK;
		}

		if (start < rangeStart) {
			// prepend to the existing range
			ranges[i].start = start;
			ranges[i].size += rangeStart - start;
		}
		if (end > ranges[i].start + ranges[i].size) {
			// append to the existing range
			ranges[i].size = end - ranges[i].start;
		}

		// join ranges if possible

		for (uint32 j = 0; j < numRanges; j++) {
			if (i == j)
				continue;

			rangeStart = ranges[i].start;
			rangeEnd = rangeStart + ranges[i].size;
			addr_t joinStart = ranges[j].start;
			addr_t joinEnd = joinStart + ranges[j].size;

			if (rangeStart <= joinEnd && joinEnd <= rangeEnd) {
				// join range that used to be before the current one, or
				// the one that's now entirely included by the current one
				if (joinStart < rangeStart) {
					ranges[i].size += rangeStart - joinStart;
					ranges[i].start = joinStart;
				}

				remove_range_index(ranges, numRanges, j--);
			} else if (joinStart <= rangeEnd && joinEnd > rangeEnd) {
				// join range that used to be after the current one
				ranges[i].size += joinEnd - rangeEnd;

				remove_range_index(ranges, numRanges, j--);
			}
		}
		return B_OK;
	}

	// no range matched, we need to create a new one

	if (numRanges >= maxRanges)
		return B_ENTRY_NOT_FOUND;

	ranges[numRanges].start = (addr_t)start;
	ranges[numRanges].size = size;
	numRanges++;

	return B_OK;
}

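/*!	Removes the given (page-aligned) range from the \a ranges array. Depending
	on how it intersects an existing entry, that entry is dropped, trimmed at
	its head or tail, or split in two; in the split case the remaining tail is
	re-added via insert_memory_range().
*/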
static status_t
remove_memory_range(addr_range *ranges, uint32 &numRanges, uint32 maxRanges,
	const void *_start, uint32 _size)
{
	addr_t start = ROUNDDOWN(addr_t(_start), B_PAGE_SIZE);
	addr_t end = ROUNDUP(addr_t(_start) + _size, B_PAGE_SIZE);

	for (uint32 i = 0; i < numRanges; i++) {
		addr_t rangeStart = ranges[i].start;
		addr_t rangeEnd = rangeStart + ranges[i].size;

		if (start <= rangeStart) {
			if (end <= rangeStart) {
				// no intersection
			} else if (end >= rangeEnd) {
				// remove the complete range
				remove_range_index(ranges, numRanges, i);
				i--;
			} else {
				// remove the head of the range
				ranges[i].start = end;
				ranges[i].size = rangeEnd - end;
			}
		} else if (end >= rangeEnd) {
			if (start < rangeEnd) {
				// remove the tail
				ranges[i].size = start - rangeStart;
			}	// else: no intersection
		} else {
			// rangeStart < start < end < rangeEnd
			// The ugly case: We have to remove something from the middle of
			// the range. We keep the head of the range and insert its tail
			// as a new range.
			ranges[i].size = start - rangeStart;
			return insert_memory_range(ranges, numRanges, maxRanges,
				(void*)end, rangeEnd - end);
		}
	}

	return B_OK;
}


static status_t
insert_physical_memory_range(void *start, uint32 size)
{
	return insert_memory_range(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges, MAX_PHYSICAL_MEMORY_RANGE,
		start, size);
}


static status_t
insert_physical_allocated_range(void *start, uint32 size)
{
	return insert_memory_range(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges, MAX_PHYSICAL_ALLOCATED_RANGE,
		start, size);
}


static status_t
insert_virtual_allocated_range(void *start, uint32 size)
{
	return insert_memory_range(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges, MAX_VIRTUAL_ALLOCATED_RANGE,
		start, size);
}


static status_t
insert_virtual_range_to_keep(void *start, uint32 size)
{
	return insert_memory_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
		gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, start, size);
}


static status_t
remove_virtual_range_to_keep(void *start, uint32 size)
{
	return remove_memory_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
		gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, start, size);
}

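/*!	Queries Open Firmware for the installed physical memory: it reads the
	"memory" instance handle from the /chosen node (gChosen), fetches the
	node's "reg" property (an array of base/size pairs), and inserts every
	non-empty region into gKernelArgs.physical_memory_range. \a total is set
	to the accumulated size of all regions.
*/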
static status_t
find_physical_memory_ranges(size_t &total)
{
	int memory, package;
	printf("checking for memory...\n");
	if (of_getprop(gChosen, "memory", &memory, sizeof(int)) == OF_FAILED)
		return B_ERROR;
	package = of_instance_to_package(memory);

	total = 0;

	struct of_region regions[64];
	int count;
	count = of_getprop(package, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		count = of_getprop(memory, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		return B_ERROR;
	count /= sizeof(of_region);

	for (int32 i = 0; i < count; i++) {
		if (regions[i].size <= 0) {
			printf("%ld: empty region\n", i);
			continue;
		}
		printf("%ld: base = %p, size = %lu\n", i, regions[i].base,
			regions[i].size);

		total += regions[i].size;

		if (insert_physical_memory_range(regions[i].base, regions[i].size)
				!= B_OK) {
			printf("cannot map physical memory range (num ranges = %lu)!\n",
				gKernelArgs.num_physical_memory_ranges);
			return B_ERROR;
		}
	}

	return B_OK;
}


static bool
is_in_range(addr_range *ranges, uint32 numRanges, void *address, size_t size)
{
	// Note: This function returns whether any single allocated range
	// completely contains the given range. If the given range crosses
	// allocated range boundaries, but is nevertheless covered completely, the
	// function returns false. But since the range management code joins
	// touching ranges, this should never happen.
	addr_t start = (addr_t)address;
	addr_t end = start + size;

	for (uint32 i = 0; i < numRanges; i++) {
		addr_t rangeStart = ranges[i].start;
		addr_t rangeEnd = rangeStart + ranges[i].size;

		if ((start >= rangeStart && start < rangeEnd)
			|| (end >= rangeStart && end < rangeEnd))
			return true;
	}
	return false;
}


static bool
intersects_ranges(addr_range *ranges, uint32 numRanges, void *address,
	size_t size)
{
	addr_t start = (addr_t)address;
	addr_t end = start + size;

	for (uint32 i = 0; i < numRanges; i++) {
		addr_t rangeStart = ranges[i].start;
		addr_t rangeEnd = rangeStart + ranges[i].size;

		if ((start >= rangeStart && start < rangeEnd)
			|| (rangeStart >= start && rangeStart < end)) {
			return true;
		}
	}
	return false;
}


static bool
is_virtual_allocated(void *address, size_t size)
{
	return intersects_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges, address, size);
}


static bool
is_physical_allocated(void *address, size_t size)
{
	return intersects_ranges(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges, address, size);
}


static bool
is_physical_memory(void *address, size_t size)
{
	return is_in_range(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges, address, size);
}


static bool
is_physical_memory(void *address)
{
	return is_physical_memory(address, 0);
}

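/*!	Fills in a single page table entry (PTE) of the classic 32-bit PowerPC
	hashed page table. The lower word (physical page number plus the WIMG/PP
	mode bits) is written first, and an eieio() makes sure it is visible
	before the upper word marks the entry valid.
*/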
static void
fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
	void *virtualAddress, void *physicalAddress, uint8 mode, bool secondaryHash)
{
	// lower 32 bits - set at once
	((uint32 *)entry)[1]
		= (((uint32)physicalAddress / B_PAGE_SIZE) << 12) | mode;
	/*entry->physical_page_number = (uint32)physicalAddress / B_PAGE_SIZE;
	entry->_reserved0 = 0;
	entry->referenced = false;
	entry->changed = false;
	entry->write_through = (mode >> 6) & 1;
	entry->caching_inhibited = (mode >> 5) & 1;
	entry->memory_coherent = (mode >> 4) & 1;
	entry->guarded = (mode >> 3) & 1;
	entry->_reserved1 = 0;
	entry->page_protection = mode & 0x3;*/
	eieio();
		// we need to make sure that the lower 32 bits have already
		// been written when the entry becomes valid

	// upper 32 bits
	entry->virtual_segment_id = virtualSegmentID;
	entry->secondary_hash = secondaryHash;
	entry->abbr_page_index = ((uint32)virtualAddress >> 22) & 0x3f;
	entry->valid = true;
}

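/*!	Enters a virtual -> physical translation for a single page into the page
	table. The PTE group is selected via the primary hash of the segment's
	virtual segment ID and the virtual address; if all 8 entries of that group
	are in use, the group selected by the secondary hash is tried. Running out
	of entries in both groups is fatal.
*/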
static void
map_page(void *virtualAddress, void *physicalAddress, uint8 mode)
{
	uint32 virtualSegmentID
		= sSegments[addr_t(virtualAddress) >> 28].virtual_segment_id;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		(uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		// 8 entries in a group
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, mode, false);
		//printf("map: va = %p -> %p, mode = %d, hash = %lu\n", virtualAddress, physicalAddress, mode, hash);
		return;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, mode, true);
		//printf("map: va = %p -> %p, mode = %d, second hash = %lu\n", virtualAddress, physicalAddress, mode, hash);
		return;
	}

	panic("out of page table entries! (you would think this could not happen "
		"in a boot loader...)\n");
}


static void
map_range(void *virtualAddress, void *physicalAddress, size_t size, uint8 mode)
{
	for (uint32 offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page((void *)(uint32(virtualAddress) + offset),
			(void *)(uint32(physicalAddress) + offset), mode);
	}
}

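/*!	Reads the "translations" property of Open Firmware's "mmu" package to find
	the mappings OF has already established. Each translation is recorded as
	physically (if it refers to physical memory) and virtually allocated,
	copied into our own page table, and, unless it covers the old page table,
	our new page table, or the exception handler area, also remembered as a
	virtual range the kernel has to keep. The physical location of the page
	table and the virtual address of the exception handlers are reported back
	via the out parameters, if found.
*/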
static status_t
find_allocated_ranges(void *oldPageTable, void *pageTable,
	page_table_entry_group **_physicalPageTable, void **_exceptionHandlers)
{
	// We have to preserve the mappings OpenFirmware has established if we
	// want to continue to use its services after we've taken over (we will
	// probably need fewer translations once we have proper driver support
	// for the target hardware).
	int mmu;
	if (of_getprop(gChosen, "mmu", &mmu, sizeof(int)) == OF_FAILED) {
		puts("no OF mmu");
		return B_ERROR;
	}
	mmu = of_instance_to_package(mmu);

	struct translation_map {
		void	*virtual_address;
		int		length;
		void	*physical_address;
		int		mode;
	} translations[64];

	int length = of_getprop(mmu, "translations", &translations,
		sizeof(translations));
	if (length == OF_FAILED) {
		puts("no OF translations");
		return B_ERROR;
	}
	length = length / sizeof(struct translation_map);
	uint32 total = 0;
	printf("found %d translations\n", length);

	for (int i = 0; i < length; i++) {
		struct translation_map *map = &translations[i];
		bool keepRange = true;
		//printf("%i: map: %p, length %d -> physical: %p, mode %d\n", i, map->virtual_address, map->length, map->physical_address, map->mode);

		// insert range in physical allocated, if it points to physical memory

		if (is_physical_memory(map->physical_address)
			&& insert_physical_allocated_range(map->physical_address,
					map->length) != B_OK) {
			printf("cannot map physical allocated range (num ranges = %lu)!\n",
				gKernelArgs.num_physical_allocated_ranges);
			return B_ERROR;
		}

		if (map->virtual_address == pageTable) {
			puts("found page table!");
			*_physicalPageTable
				= (page_table_entry_group *)map->physical_address;
			keepRange = false;	// we keep it explicitly anyway
		}
		if ((addr_t)map->physical_address <= 0x100
			&& (addr_t)map->physical_address + map->length >= 0x1000) {
			puts("found exception handlers!");
			*_exceptionHandlers = map->virtual_address;
			keepRange = false;	// we keep it explicitly anyway
		}
		if (map->virtual_address == oldPageTable)
			keepRange = false;

		// insert range in virtual allocated

		if (insert_virtual_allocated_range(map->virtual_address,
				map->length) != B_OK) {
			printf("cannot map virtual allocated range (num ranges = %lu)!\n",
				gKernelArgs.num_virtual_allocated_ranges);
		}

		// map range into the page table

		map_range(map->virtual_address, map->physical_address, map->length,
			map->mode);

		// insert range in virtual ranges to keep

		if (keepRange) {
			if (insert_virtual_range_to_keep(map->virtual_address,
					map->length) != B_OK) {
				printf("cannot map virtual range to keep (num ranges = %lu)!\n",
					gKernelArgs.arch_args.num_virtual_ranges_to_keep);
			}
		}

		total += map->length;
	}
	//printf("total mapped: %lu\n", total);

	// remove the boot loader code from the virtual ranges to keep in the
	// kernel
	if (remove_virtual_range_to_keep(&__text_begin, &_end - &__text_begin)
			!= B_OK) {
		printf("find_allocated_ranges(): Failed to remove boot loader range "
			"from virtual ranges to keep.\n");
	}

	return B_OK;
}

/*!	Computes the recommended minimal page table size as
	described in table 7-22 of the PowerPC "Programming
	Environment for 32-Bit Microprocessors".
	The page table size ranges from 64 kB (for 8 MB RAM)
	to 32 MB (for 4 GB RAM).
*/
static size_t
suggested_page_table_size(size_t total)
{
	uint32 max = 23;
		// 2^23 == 8 MB

	while (max < 32) {
		if (total <= (1UL << max))
			break;

		max++;
	}

	return 1UL << (max - 7);
		// 2^(23 - 7) == 64 kB
}


static void *
find_physical_memory_range(size_t size)
{
	for (uint32 i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
		if (gKernelArgs.physical_memory_range[i].size > size)
			return (void *)gKernelArgs.physical_memory_range[i].start;
	}
	return PHYSINVAL;
}

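/*!	Finds a free physical range of the given size. Only addresses directly
	behind the already allocated ranges are considered (or the start of a
	physical memory range, if nothing has been allocated yet). Returns
	PHYSINVAL if no suitable range is found.
*/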
static void *
find_free_physical_range(size_t size)
{
	// just do a simple linear search at the end of the allocated
	// ranges (dumb memory allocation)
	if (gKernelArgs.num_physical_allocated_ranges == 0) {
		if (gKernelArgs.num_physical_memory_ranges == 0)
			return PHYSINVAL;

		return find_physical_memory_range(size);
	}

	for (uint32 i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
		void *address = (void *)(gKernelArgs.physical_allocated_range[i].start
			+ gKernelArgs.physical_allocated_range[i].size);
		if (!is_physical_allocated(address, size)
			&& is_physical_memory(address, size))
			return address;
	}
	return PHYSINVAL;
}

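/*!	Finds an unallocated virtual range of the given size. If \a base is given
	and free, it is returned directly; otherwise the candidates are the ends
	of the already allocated ranges, preferring the lowest candidate at or
	above \a base and falling back to the first free candidate found.
*/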
static void *
find_free_virtual_range(void *base, size_t size)
{
	if (base && !is_virtual_allocated(base, size))
		return base;

	void *firstFound = NULL;
	void *firstBaseFound = NULL;
	for (uint32 i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
		void *address = (void *)(gKernelArgs.virtual_allocated_range[i].start
			+ gKernelArgs.virtual_allocated_range[i].size);
		if (!is_virtual_allocated(address, size)) {
			if (!base)
				return address;

			if (firstFound == NULL)
				firstFound = address;
			if (address >= base
				&& (firstBaseFound == NULL || address < firstBaseFound)) {
				firstBaseFound = address;
			}
		}
	}
	return (firstBaseFound ? firstBaseFound : firstFound);
}

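/*!	Allocates \a size bytes (rounded up to whole pages) of virtual and
	physical memory and maps them with the requested protection. If
	\a _virtualAddress is NULL, the search starts at KERNEL_BASE; with
	\a exactAddress the call fails unless exactly the requested virtual
	range is free.
*/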
extern "C" void *
arch_mmu_allocate(void *_virtualAddress, size_t size, uint8 _protection,
	bool exactAddress)
{
	// we only know page sizes
	size = ROUNDUP(size, B_PAGE_SIZE);

	uint8 protection = 0;
	if (_protection & B_WRITE_AREA)
		protection = PAGE_READ_WRITE;
	else
		protection = PAGE_READ_ONLY;

	// If no address is given, use KERNEL_BASE as the base address, since
	// that avoids trouble in the kernel when we decide to keep the region.
	void *virtualAddress = _virtualAddress;
	if (!virtualAddress)
		virtualAddress = (void*)KERNEL_BASE;

	// find a free address large enough to hold "size"
	virtualAddress = find_free_virtual_range(virtualAddress, size);
	if (virtualAddress == NULL)
		return NULL;

	// fail if the exact address was requested, but is not free
	if (exactAddress && _virtualAddress && virtualAddress != _virtualAddress) {
		dprintf("arch_mmu_allocate(): exact address requested, but virtual "
			"range (base: %p, size: %lu) is not free.\n",
			_virtualAddress, size);
		return NULL;
	}

	// We have a free virtual range for the allocation, now
	// have a look for free physical memory as well (we assume
	// that a) there is enough memory, and b) failing is fatal
	// so that we don't have to optimize for these cases :)

	void *physicalAddress = find_free_physical_range(size);
	if (physicalAddress == PHYSINVAL) {
		dprintf("arch_mmu_allocate(base: %p, size: %lu) no free physical "
			"address\n", virtualAddress, size);
		return NULL;
	}

	// everything went fine, so let's mark the space as used

	printf("mmu_alloc: va %p, pa %p, size %u\n", virtualAddress,
		physicalAddress, size);
	insert_virtual_allocated_range(virtualAddress, size);
	insert_physical_allocated_range(physicalAddress, size);

	map_range(virtualAddress, physicalAddress, size, protection);

	return virtualAddress;
}


extern "C" status_t
arch_mmu_free(void *address, size_t size)
{
	// TODO: implement freeing a region!
	return B_OK;
}

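// Invalidates the TLB by issuing a "tlbie" for every page of the 4 GB
// address space, followed by a tlbsync. This is slow, but it does not rely
// on the optional "tlbia" instruction.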
static inline void
invalidate_tlb(void)
{
	//asm volatile("tlbia");
		// "tlbia" is obviously not available on every CPU...

	// Note: this flushes the whole 4 GB address space - it
	//		would probably be a good idea to do less here

	addr_t address = 0;
	for (uint32 i = 0; i < 0x100000; i++) {
		asm volatile("tlbie %0" : : "r" (address));
		address += B_PAGE_SIZE;
	}
	tlbsync();
}


//	#pragma mark - OpenFirmware callbacks and public API

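/*!	Handles the Open Firmware "map" callback: records the requested range as
	physically (if it refers to physical memory) and virtually allocated, and
	enters the translation into our page table.
*/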
static int
map_callback(struct of_arguments *args)
{
	void *physicalAddress = (void *)args->Argument(0);
	void *virtualAddress = (void *)args->Argument(1);
	int length = args->Argument(2);
	int mode = args->Argument(3);
	int &error = args->ReturnValue(0);

	// insert range in physical allocated if needed

	if (is_physical_memory(physicalAddress)
		&& insert_physical_allocated_range(physicalAddress, length) != B_OK) {
		error = -1;
		return OF_FAILED;
	}

	// insert range in virtual allocated

	if (insert_virtual_allocated_range(virtualAddress, length) != B_OK) {
		error = -2;
		return OF_FAILED;
	}

	// map range into the page table

	map_range(virtualAddress, physicalAddress, length, mode);

	return B_OK;
}


static int
unmap_callback(struct of_arguments *args)
{
/*	void *address = (void *)args->Argument(0);
	int length = args->Argument(1);
	int &error = args->ReturnValue(0);
*/
	// TODO: to be implemented

	return OF_FAILED;
}

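/*!	Handles the Open Firmware "translate" callback: looks up the page table
	entry for the given virtual address (first via the primary, then via the
	secondary hash) and returns the physical address and WIMG/PP mode bits.
*/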
static int
translate_callback(struct of_arguments *args)
{
	addr_t virtualAddress = (addr_t)args->Argument(0);
	int &error = args->ReturnValue(0);
	int &physicalAddress = args->ReturnValue(1);
	int &mode = args->ReturnValue(2);

	// Find page table entry for this address

	uint32 virtualSegmentID
		= sSegments[addr_t(virtualAddress) >> 28].virtual_segment_id;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		(uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
	page_table_entry *entry = NULL;

	for (int32 i = 0; i < 8; i++) {
		entry = &group->entry[i];

		if (entry->valid
			&& entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == false
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			goto success;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		entry = &group->entry[i];

		if (entry->valid
			&& entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == true
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			goto success;
	}

	// could not find the translation
	error = B_ENTRY_NOT_FOUND;
	return OF_FAILED;

success:
	// we found the entry in question
	physicalAddress = (int)(entry->physical_page_number * B_PAGE_SIZE);
	mode = (entry->write_through << 6)		// WIMGxPP
		| (entry->caching_inhibited << 5)
		| (entry->memory_coherent << 4)
		| (entry->guarded << 3)
		| entry->page_protection;
	error = B_OK;

	return B_OK;
}


static int
alloc_real_mem_callback(struct of_arguments *args)
{
/*	addr_t minAddress = (addr_t)args->Argument(0);
	addr_t maxAddress = (addr_t)args->Argument(1);
	int length = args->Argument(2);
	int mode = args->Argument(3);
	int &error = args->ReturnValue(0);
	int &physicalAddress = args->ReturnValue(1);
*/
	// ToDo: to be implemented

	return OF_FAILED;
}


/** Dispatches the callback to the responsible function */

static int
callback(struct of_arguments *args)
{
	const char *name = args->name;
printf("CALLBACK: %s\n", name);

	if (!strcmp(name, "map"))
		return map_callback(args);
	else if (!strcmp(name, "unmap"))
		return unmap_callback(args);
	else if (!strcmp(name, "translate"))
		return translate_callback(args);
	else if (!strcmp(name, "alloc-real-mem"))
		return alloc_real_mem_callback(args);

	return OF_FAILED;
}


extern "C" status_t
arch_set_callback(void)
{
	// set OpenFirmware callbacks - it will ask us for memory after that
	// instead of maintaining it itself

	void *oldCallback = NULL;
	if (of_call_client_function("set-callback", 1, 1, &callback, &oldCallback)
			== OF_FAILED) {
		puts("set-callback failed!");
		return B_ERROR;
	}
	//printf("old callback = %p\n", oldCallback);

	return B_OK;
}

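/*!	Sets up memory management for the boot loader: collects the physical
	memory ranges, allocates (or reuses) a hashed page table of suitable size,
	takes over Open Firmware's existing translations, installs the memory
	callback so that OF asks us for memory from now on, programs the segment
	registers and the page table, flushes the TLB, and finally re-enables
	address translation via the MSR. The results are recorded in gKernelArgs
	for the kernel.
*/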
extern "C" status_t
arch_mmu_init(void)
{
	// get map of physical memory (fill in kernel_args structure)

	size_t total;
	if (find_physical_memory_ranges(total) != B_OK) {
		puts("could not find physical memory ranges!");
		return B_ERROR;
	}
	printf("total physical memory = %u MB\n", total / (1024*1024));

	// get OpenFirmware's current page table

	page_table_entry_group *oldTable;
	page_table_entry_group *table;
	size_t tableSize;
	ppc_get_page_table(&table, &tableSize);

	oldTable = table;

	bool realMode = false;
	// TODO: read these values out of the OF settings
	addr_t realBase = 0;
	addr_t realSize = 0x400000;

	// can we just keep the page table?
	size_t suggestedTableSize = suggested_page_table_size(total);
	printf("suggested page table size = %u\n", suggestedTableSize);
	if (tableSize < suggestedTableSize) {
		// nah, we need a new one!
		printf("need new page table, size = %u!\n", suggestedTableSize);
		table = (page_table_entry_group *)of_claim(NULL, suggestedTableSize,
			suggestedTableSize);
			// KERNEL_BASE would be better as virtual address, but
			// at least with Apple's OpenFirmware, it makes no
			// difference - we will have to remap it later
		if (table == (void *)OF_FAILED) {
			panic("Could not allocate new page table (size = %ld)!!\n",
				suggestedTableSize);
			return B_NO_MEMORY;
		}
		if (table == NULL) {
			// work-around for the broken Pegasos OpenFirmware
			puts("broken OpenFirmware detected (claim doesn't work).");
			realMode = true;

			addr_t tableBase = 0;
			for (int32 i = 0; tableBase < realBase + realSize * 3; i++) {
				tableBase = suggestedTableSize * i;
			}

			table = (page_table_entry_group *)tableBase;
		}

		printf("new table at: %p\n", table);
		sPageTable = table;
		tableSize = suggestedTableSize;
	} else {
		// ToDo: we could check if the page table is much too large
		//	and create a smaller one in this case (in order to save
		//	memory).
		sPageTable = table;
	}

	sPageTableHashMask = tableSize / sizeof(page_table_entry_group) - 1;
	if (sPageTable != oldTable)
		memset(sPageTable, 0, tableSize);

	// turn off address translation via the page table/segment mechanism,
	// identity map the first 256 MB (where our code/data reside)

	printf("MSR: %p\n", (void *)get_msr());

#if 0
	block_address_translation bat;

	bat.length = BAT_LENGTH_256MB;
	bat.kernel_valid = true;
	bat.memory_coherent = true;
	bat.protection = BAT_READ_WRITE;

	set_ibat0(&bat);
	set_dbat0(&bat);
	isync();
#endif

	// initialize segment descriptors, but don't set the registers
	// until we're about to take over the page table - we're mapping
	// pages into our table using these values

	for (int32 i = 0; i < 16; i++)
		sSegments[i].virtual_segment_id = i;

	// find already allocated ranges of physical memory
	// and the virtual address space

	page_table_entry_group *physicalTable = NULL;
	void *exceptionHandlers = (void *)-1;
	if (find_allocated_ranges(oldTable, table, &physicalTable,
			&exceptionHandlers) != B_OK) {
		puts("find_allocated_ranges() failed!");
		//return B_ERROR;
	}

#if 0
	block_address_translation bats[8];
	getibats(bats);
	for (int32 i = 0; i < 8; i++) {
		printf("page index %u, length %u, ppn %u\n", bats[i].page_index,
			bats[i].length, bats[i].physical_block_number);
	}
#endif

	if (physicalTable == NULL) {
		puts("arch_mmu_init(): Didn't find physical address of page table!");
		if (!realMode)
			return B_ERROR;

		// Pegasos work-around
		//map_range((void *)realBase, (void *)realBase, realSize * 2, PAGE_READ_WRITE);
		//map_range((void *)(total - realSize), (void *)(total - realSize), realSize, PAGE_READ_WRITE);
		//map_range((void *)table, (void *)table, tableSize, PAGE_READ_WRITE);
		insert_physical_allocated_range((void *)realBase, realSize * 2);
		insert_virtual_allocated_range((void *)realBase, realSize * 2);
		insert_physical_allocated_range((void *)(total - realSize), realSize);
		insert_virtual_allocated_range((void *)(total - realSize), realSize);
		insert_physical_allocated_range((void *)table, tableSize);
		insert_virtual_allocated_range((void *)table, tableSize);

		// QEMU OpenHackware work-around
		insert_physical_allocated_range((void *)0x05800000, 0x06000000 - 0x05800000);
		insert_virtual_allocated_range((void *)0x05800000, 0x06000000 - 0x05800000);

		physicalTable = table;
	}

	if (exceptionHandlers == (void *)-1) {
		// TODO: create mapping for the exception handlers
		puts("no mapping for the exception handlers!");
	}

	// Set the Open Firmware memory callback. From now on the Open Firmware
	// will ask us for memory.
	arch_set_callback();

	// set up new page table and turn on translation again

	for (int32 i = 0; i < 16; i++) {
		ppc_set_segment_register((void *)(i * 0x10000000), sSegments[i]);
			// one segment describes 256 MB of memory
	}

	ppc_set_page_table(physicalTable, tableSize);
	invalidate_tlb();

	if (!realMode) {
		// clear BATs
		reset_ibats();
		reset_dbats();
		ppc_sync();
		isync();
	}

	set_msr(MSR_MACHINE_CHECK_ENABLED | MSR_FP_AVAILABLE
		| MSR_INST_ADDRESS_TRANSLATION | MSR_DATA_ADDRESS_TRANSLATION);

	// set kernel args

	printf("virt_allocated: %lu\n", gKernelArgs.num_virtual_allocated_ranges);
	printf("phys_allocated: %lu\n", gKernelArgs.num_physical_allocated_ranges);
	printf("phys_memory: %lu\n", gKernelArgs.num_physical_memory_ranges);

	gKernelArgs.arch_args.page_table.start = (addr_t)sPageTable;
	gKernelArgs.arch_args.page_table.size = tableSize;

	gKernelArgs.arch_args.exception_handlers.start = (addr_t)exceptionHandlers;
	gKernelArgs.arch_args.exception_handlers.size = B_PAGE_SIZE;

	return B_OK;
}