/*
 * Copyright 2003-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 */


#include <platform_arch.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <boot/stdio.h>
#include <platform/openfirmware/openfirmware.h>
#include <arch_cpu.h>
#include <arch_mmu.h>
#include <kernel.h>

#include <OS.h>


// set protection to WIMGNPP: -----PP
// PP:	00 - no access
//		01 - read only
//		10 - read/write
//		11 - read only
#define PAGE_READ_ONLY	0x01
#define PAGE_READ_WRITE	0x02
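
// For reference: the WIMG half of the mode byte, as decoded by the
// (commented-out) field assignments in fill_page_table_entry() below, is
//	bit 6 - W: write-through
//	bit 5 - I: caching inhibited
//	bit 4 - M: memory coherent
//	bit 3 - G: guarded
// We only ever pass the PP bits ourselves; full WIMG modes come in via the
// OpenFirmware translations in find_allocated_ranges().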

// NULL is actually a possible physical address...
//#define PHYSINVAL ((void *)-1)
#define PHYSINVAL NULL

segment_descriptor sSegments[16];
page_table_entry_group *sPageTable;
uint32 sPageTableHashMask;


// begin and end of the boot loader
extern "C" uint8 __text_begin;
extern "C" uint8 _end;


static void
remove_range_index(addr_range *ranges, uint32 &numRanges, uint32 index)
{
	if (index + 1 == numRanges) {
		// remove last range
		numRanges--;
		return;
	}

	memmove(&ranges[index], &ranges[index + 1], sizeof(addr_range) * (numRanges - 1 - index));
	numRanges--;
}


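// Inserts the given range into the given page-aligned range array, merging it
// with any existing entries it overlaps or touches, so that the array never
// ends up containing adjacent or overlapping ranges.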
static status_t
insert_memory_range(addr_range *ranges, uint32 &numRanges, uint32 maxRanges,
	const void *_start, uint32 _size)
{
	addr_t start = ROUNDOWN(addr_t(_start), B_PAGE_SIZE);
	addr_t end = ROUNDUP(addr_t(_start) + _size, B_PAGE_SIZE);
	addr_t size = end - start;
	if (size == 0)
		return B_OK;

	for (uint32 i = 0; i < numRanges; i++) {
		addr_t rangeStart = ranges[i].start;
		addr_t rangeEnd = rangeStart + ranges[i].size;

		if (end < rangeStart || start > rangeEnd) {
			// ranges don't intersect or touch each other
			continue;
		}
		if (start >= rangeStart && end <= rangeEnd) {
			// range is already completely covered
			return B_OK;
		}

		if (start < rangeStart) {
			// prepend to the existing range
			ranges[i].start = start;
			ranges[i].size += rangeStart - start;
		}
		if (end > ranges[i].start + ranges[i].size) {
			// append to the existing range
			ranges[i].size = end - ranges[i].start;
		}

		// join ranges if possible

		for (uint32 j = 0; j < numRanges; j++) {
			if (i == j)
				continue;

			rangeStart = ranges[i].start;
			rangeEnd = rangeStart + ranges[i].size;
			addr_t joinStart = ranges[j].start;
			addr_t joinEnd = joinStart + ranges[j].size;

			if (rangeStart <= joinEnd && joinEnd <= rangeEnd) {
				// join range that used to be before the current one, or
				// the one that's now entirely included by the current one
				if (joinStart < rangeStart) {
					ranges[i].size += rangeStart - joinStart;
					ranges[i].start = joinStart;
				}

				remove_range_index(ranges, numRanges, j--);
			} else if (joinStart <= rangeEnd && joinEnd > rangeEnd) {
				// join range that used to be after the current one
				ranges[i].size += joinEnd - rangeEnd;

				remove_range_index(ranges, numRanges, j--);
			}
		}
		return B_OK;
	}

	// no range matched, we need to create a new one

	if (numRanges >= maxRanges)
		return B_ENTRY_NOT_FOUND;

	ranges[numRanges].start = (addr_t)start;
	ranges[numRanges].size = size;
	numRanges++;

	return B_OK;
}

static status_t
remove_memory_range(addr_range *ranges, uint32 &numRanges, uint32 maxRanges,
	const void *_start, uint32 _size)
{
	addr_t start = ROUNDOWN(addr_t(_start), B_PAGE_SIZE);
	addr_t end = ROUNDUP(addr_t(_start) + _size, B_PAGE_SIZE);

	for (uint32 i = 0; i < numRanges; i++) {
		addr_t rangeStart = ranges[i].start;
		addr_t rangeEnd = rangeStart + ranges[i].size;

		if (start <= rangeStart) {
			if (end <= rangeStart) {
				// no intersection
			} else if (end >= rangeEnd) {
				// remove the complete range
				remove_range_index(ranges, numRanges, i);
				i--;
			} else {
				// remove the head of the range
				ranges[i].start = end;
				ranges[i].size = rangeEnd - end;
			}
		} else if (end >= rangeEnd) {
			if (start < rangeEnd) {
				// remove the tail
				ranges[i].size = start - rangeStart;
			}	// else: no intersection
		} else {
			// rangeStart < start < end < rangeEnd
			// The ugly case: We have to remove something from the middle of
			// the range. We keep the head of the range and insert its tail
			// as a new range.
			ranges[i].size = start - rangeStart;
			return insert_memory_range(ranges, numRanges, maxRanges,
				(void*)end, rangeEnd - end);
		}
	}

	return B_OK;
}


static status_t
insert_physical_memory_range(void *start, uint32 size)
{
	return insert_memory_range(gKernelArgs.physical_memory_range,
				gKernelArgs.num_physical_memory_ranges, MAX_PHYSICAL_MEMORY_RANGE,
				start, size);
}


static status_t
insert_physical_allocated_range(void *start, uint32 size)
{
	return insert_memory_range(gKernelArgs.physical_allocated_range,
				gKernelArgs.num_physical_allocated_ranges, MAX_PHYSICAL_ALLOCATED_RANGE,
				start, size);
}


static status_t
insert_virtual_allocated_range(void *start, uint32 size)
{
	return insert_memory_range(gKernelArgs.virtual_allocated_range,
				gKernelArgs.num_virtual_allocated_ranges, MAX_VIRTUAL_ALLOCATED_RANGE,
				start, size);
}


static status_t
insert_virtual_range_to_keep(void *start, uint32 size)
{
	return insert_memory_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
		gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, start, size);
}


static status_t
remove_virtual_range_to_keep(void *start, uint32 size)
{
	return remove_memory_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
		gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, start, size);
}


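// Reads the installed memory ranges from the "reg" property of the
// OpenFirmware memory node and records them in
// gKernelArgs.physical_memory_range; the total amount found is returned in
// "total".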
static status_t
find_physical_memory_ranges(size_t &total)
{
	int memory, package;
	printf("checking for memory...\n");
	if (of_getprop(gChosen, "memory", &memory, sizeof(int)) == OF_FAILED)
		return B_ERROR;
	package = of_instance_to_package(memory);

	total = 0;

	struct of_region regions[64];
	int count;
	count = of_getprop(package, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		count = of_getprop(memory, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		return B_ERROR;
	count /= sizeof(of_region);

	for (int32 i = 0; i < count; i++) {
		if (regions[i].size <= 0) {
			printf("%ld: empty region\n", i);
			continue;
		}
		printf("%ld: base = %p, size = %lu\n", i, regions[i].base, regions[i].size);

		total += regions[i].size;

		if (insert_physical_memory_range(regions[i].base, regions[i].size) < B_OK) {
			printf("cannot map physical memory range (num ranges = %lu)!\n", gKernelArgs.num_physical_memory_ranges);
			return B_ERROR;
		}
	}

	return B_OK;
}


static bool
is_in_range(addr_range *ranges, uint32 numRanges, void *address, size_t size)
{
	// Note: This function returns whether any single allocated range
	// completely contains the given range. If the given range crosses
	// allocated range boundaries, but is nevertheless covered completely, the
	// function returns false. But since the range management code joins
	// touching ranges, this should never happen.
	addr_t start = (addr_t)address;
	addr_t end = start + size;

	for (uint32 i = 0; i < numRanges; i++) {
		addr_t rangeStart = ranges[i].start;
		addr_t rangeEnd = rangeStart + ranges[i].size;

		if (start >= rangeStart && end <= rangeEnd)
			return true;
	}
	return false;
}


static bool
intersects_ranges(addr_range *ranges, uint32 numRanges, void *address,
	size_t size)
{
	addr_t start = (addr_t)address;
	addr_t end = start + size;

	for (uint32 i = 0; i < numRanges; i++) {
		addr_t rangeStart = ranges[i].start;
		addr_t rangeEnd = rangeStart + ranges[i].size;

		if ((start >= rangeStart && start < rangeEnd)
			|| (rangeStart >= start && rangeStart < end)) {
			return true;
		}
	}
	return false;
}


static bool
is_virtual_allocated(void *address, size_t size)
{
	return intersects_ranges(gKernelArgs.virtual_allocated_range,
				gKernelArgs.num_virtual_allocated_ranges,
				address, size);
}


static bool
is_physical_allocated(void *address, size_t size)
{
	return intersects_ranges(gKernelArgs.physical_allocated_range,
				gKernelArgs.num_physical_allocated_ranges,
				address, size);
}


static bool
is_physical_memory(void *address, size_t size)
{
	return is_in_range(gKernelArgs.physical_memory_range,
				gKernelArgs.num_physical_memory_ranges,
				address, size);
}


static bool
is_physical_memory(void *address)
{
	return is_physical_memory(address, 0);
}


static void
fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID, void *virtualAddress, void *physicalAddress, uint8 mode, bool secondaryHash)
{
	// lower 32 bits - set at once
	((uint32 *)entry)[1] = (((uint32)physicalAddress / B_PAGE_SIZE) << 12) | mode;
	/*entry->physical_page_number = (uint32)physicalAddress / B_PAGE_SIZE;
	entry->_reserved0 = 0;
	entry->referenced = false;
	entry->changed = false;
	entry->write_through = (mode >> 6) & 1;
	entry->caching_inhibited = (mode >> 5) & 1;
	entry->memory_coherent = (mode >> 4) & 1;
	entry->guarded = (mode >> 3) & 1;
	entry->_reserved1 = 0;
	entry->page_protection = mode & 0x3;*/
	eieio();
		// we need to make sure that the lower 32 bits have already been
		// written when the entry becomes valid

	// upper 32 bits
	entry->virtual_segment_id = virtualSegmentID;
	entry->secondary_hash = secondaryHash;
	entry->abbr_page_index = ((uint32)virtualAddress >> 22) & 0x3f;
	entry->valid = true;
}


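// Enters one page translation into the hashed page table. Each group
// (page_table_entry_group) holds 8 entries; if the group selected by the
// primary hash is full, the group selected by the secondary hash is tried.
// (Per the classic PowerPC hashed page table scheme, the primary hash is the
// low-order 19 bits of the VSID XORed with the page index, and the secondary
// hash is the one's complement of the primary one - see
// page_table_entry::PrimaryHash() and SecondaryHash().)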
static void
map_page(void *virtualAddress, void *physicalAddress, uint8 mode)
{
	uint32 virtualSegmentID = sSegments[addr_t(virtualAddress) >> 28].virtual_segment_id;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, (uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		// 8 entries in a group
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, mode, false);
		//printf("map: va = %p -> %p, mode = %d, hash = %lu\n", virtualAddress, physicalAddress, mode, hash);
		return;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, mode, true);
		//printf("map: va = %p -> %p, mode = %d, second hash = %lu\n", virtualAddress, physicalAddress, mode, hash);
		return;
	}

	panic("out of page table entries! (you would think this could not happen in a boot loader...)\n");
}


static void
map_range(void *virtualAddress, void *physicalAddress, size_t size, uint8 mode)
{
	for (uint32 offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page((void *)(uint32(virtualAddress) + offset),
			(void *)(uint32(physicalAddress) + offset), mode);
	}
}


static status_t
find_allocated_ranges(void *oldPageTable, void *pageTable,
	page_table_entry_group **_physicalPageTable, void **_exceptionHandlers)
{
	// we have to preserve the OpenFirmware established mappings
	// if we want to continue to use its services after we've
	// taken over (we will probably need fewer translations once
	// we have proper driver support for the target hardware).
	int mmu;
	if (of_getprop(gChosen, "mmu", &mmu, sizeof(int)) == OF_FAILED) {
		puts("no OF mmu");
		return B_ERROR;
	}
	mmu = of_instance_to_package(mmu);

	struct translation_map {
		void	*virtual_address;
		int		length;
		void	*physical_address;
		int		mode;
	} translations[64];
	int length = of_getprop(mmu, "translations", &translations, sizeof(translations));
	if (length == OF_FAILED) {
		puts("no OF translations");
		return B_ERROR;
	}
	length = length / sizeof(struct translation_map);
	uint32 total = 0;
	printf("found %d translations\n", length);

	for (int i = 0; i < length; i++) {
		struct translation_map *map = &translations[i];
		bool keepRange = true;
		//printf("%i: map: %p, length %d -> physical: %p, mode %d\n", i, map->virtual_address, map->length, map->physical_address, map->mode);

		// insert range in physical allocated, if it points to physical memory

		if (is_physical_memory(map->physical_address)
			&& insert_physical_allocated_range(map->physical_address,
					map->length) < B_OK) {
			printf("cannot map physical allocated range (num ranges = %lu)!\n", gKernelArgs.num_physical_allocated_ranges);
			return B_ERROR;
		}

		if (map->virtual_address == pageTable) {
			puts("found page table!");
			*_physicalPageTable = (page_table_entry_group *)map->physical_address;
			keepRange = false;	// we keep it explicitly anyway
		}
		if ((addr_t)map->physical_address <= 0x100
			&& (addr_t)map->physical_address + map->length >= 0x1000) {
			puts("found exception handlers!");
			*_exceptionHandlers = map->virtual_address;
			keepRange = false;	// we keep it explicitly anyway
		}
		if (map->virtual_address == oldPageTable)
			keepRange = false;

		// insert range in virtual allocated

		if (insert_virtual_allocated_range(map->virtual_address,
				map->length) < B_OK) {
			printf("cannot map virtual allocated range (num ranges = %lu)!\n", gKernelArgs.num_virtual_allocated_ranges);
		}

		// map range into the page table

		map_range(map->virtual_address, map->physical_address, map->length, map->mode);

		// insert range in virtual ranges to keep

		if (keepRange) {
			if (insert_virtual_range_to_keep(map->virtual_address,
					map->length) < B_OK) {
				printf("cannot map virtual range to keep (num ranges = %lu)!\n",
					gKernelArgs.arch_args.num_virtual_ranges_to_keep);
			}
		}

		total += map->length;
	}
	//printf("total mapped: %lu\n", total);

	// remove the boot loader code from the virtual ranges to keep in the
	// kernel
	if (remove_virtual_range_to_keep(&__text_begin, &_end - &__text_begin)
			!= B_OK) {
		printf("find_allocated_ranges(): Failed to remove boot loader range "
			"from virtual ranges to keep.\n");
	}

	return B_OK;
}


/** Computes the recommended minimal page table size as
 *	described in table 7-22 of the PowerPC "Programming
 *	Environment for 32-Bit Microprocessors".
 *	The page table size ranges from 64 kB (for 8 MB RAM)
 *	to 32 MB (for 4 GB RAM).
 */

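// For example, 64 MB of RAM stops the loop below at max = 26 and yields a
// 2^19 == 512 kB page table, while anything above 2 GB runs the loop up to
// max = 32 and yields the 32 MB maximum.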
static size_t
suggested_page_table_size(size_t total)
{
	uint32 max = 23;
		// 2^23 == 8 MB

	while (max < 32) {
		if (total <= (1UL << max))
			break;

		max++;
	}

	return 1UL << (max - 7);
		// 2^(23 - 7) == 64 kB
}


static void *
find_physical_memory_range(size_t size)
{
	for (uint32 i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
		if (gKernelArgs.physical_memory_range[i].size > size)
			return (void *)gKernelArgs.physical_memory_range[i].start;
	}
	return PHYSINVAL;
}


static void *
find_free_physical_range(size_t size)
{
	// just do a simple linear search at the end of the allocated
	// ranges (dumb memory allocation)
	if (gKernelArgs.num_physical_allocated_ranges == 0) {
		if (gKernelArgs.num_physical_memory_ranges == 0)
			return PHYSINVAL;

		return find_physical_memory_range(size);
	}

	for (uint32 i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
		void *address = (void *)(gKernelArgs.physical_allocated_range[i].start + gKernelArgs.physical_allocated_range[i].size);
		if (!is_physical_allocated(address, size) && is_physical_memory(address, size))
			return address;
	}
	return PHYSINVAL;
}


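// Finds a free virtual range of the given size by checking the end of each
// allocated range. If "base" is given, the lowest free candidate at or above
// it is preferred; if there is none, the first free candidate found is
// returned instead.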
static void *
find_free_virtual_range(void *base, size_t size)
{
	if (base && !is_virtual_allocated(base, size))
		return base;

	void *firstFound = NULL;
	void *firstBaseFound = NULL;
	for (uint32 i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
		void *address = (void *)(gKernelArgs.virtual_allocated_range[i].start + gKernelArgs.virtual_allocated_range[i].size);
		if (!is_virtual_allocated(address, size)) {
			if (!base)
				return address;

			if (firstFound == NULL)
				firstFound = address;
			if (address >= base
				&& (firstBaseFound == NULL || address < firstBaseFound)) {
				firstBaseFound = address;
			}
		}
	}
	return (firstBaseFound ? firstBaseFound : firstFound);
}


extern "C" void *
arch_mmu_allocate(void *_virtualAddress, size_t size, uint8 _protection,
	bool exactAddress)
{
	// we only know page sizes
	size = ROUNDUP(size, B_PAGE_SIZE);

	uint8 protection = 0;
	if (_protection & B_WRITE_AREA)
		protection = PAGE_READ_WRITE;
	else
		protection = PAGE_READ_ONLY;

	// If no address is given, use KERNEL_BASE as the base address, since
	// that avoids trouble in the kernel, when we decide to keep the region.
	void *virtualAddress = _virtualAddress;
	if (!virtualAddress)
		virtualAddress = (void*)KERNEL_BASE;

	// find a free address large enough to hold "size"
	virtualAddress = find_free_virtual_range(virtualAddress, size);
	if (virtualAddress == NULL)
		return NULL;

	// fail if the exact address was requested, but is not free
	if (exactAddress && _virtualAddress && virtualAddress != _virtualAddress) {
		dprintf("arch_mmu_allocate(): exact address requested, but virtual "
			"range (base: %p, size: %lu) is not free.\n",
			_virtualAddress, size);
		return NULL;
	}

	// we have a free virtual range for the allocation, now
	// have a look for free physical memory as well (we assume
	// that a) there is enough memory, and b) failing is fatal
	// so that we don't have to optimize for these cases :)

	void *physicalAddress = find_free_physical_range(size);
	if (physicalAddress == PHYSINVAL) {
		dprintf("arch_mmu_allocate(base: %p, size: %lu) no free physical "
			"address\n", virtualAddress, size);
		return NULL;
	}

	// everything went fine, so let's mark the space as used

	printf("mmu_alloc: va %p, pa %p, size %lu\n", virtualAddress, physicalAddress, size);
	insert_virtual_allocated_range(virtualAddress, size);
	insert_physical_allocated_range(physicalAddress, size);

	map_range(virtualAddress, physicalAddress, size, protection);

	return virtualAddress;
}


extern "C" status_t
arch_mmu_free(void *address, size_t size)
{
	// ToDo: implement freeing a region!
	return B_OK;
}


static inline void
invalidate_tlb(void)
{
	//asm volatile("tlbia");
		// "tlbia" is obviously not available on every CPU...

	// Note: this flushes the whole 4 GB address space - it
	//		would probably be a good idea to do less here

	addr_t address = 0;
	for (uint32 i = 0; i < 0x100000; i++) {
		asm volatile("tlbie %0" : : "r" (address));
		address += B_PAGE_SIZE;
	}
	tlbsync();
}


//	#pragma mark -
//	OpenFirmware callbacks and public API


static int
map_callback(struct of_arguments *args)
{
	void *physicalAddress = (void *)args->Argument(0);
	void *virtualAddress = (void *)args->Argument(1);
	int length = args->Argument(2);
	int mode = args->Argument(3);
	int &error = args->ReturnValue(0);

	// insert range in physical allocated if needed

	if (is_physical_memory(physicalAddress)
		&& insert_physical_allocated_range(physicalAddress, length) < B_OK) {
		error = -1;
		return OF_FAILED;
	}

	// insert range in virtual allocated

	if (insert_virtual_allocated_range(virtualAddress, length) < B_OK) {
		error = -2;
		return OF_FAILED;
	}

	// map range into the page table

	map_range(virtualAddress, physicalAddress, length, mode);

	return B_OK;
}


static int
unmap_callback(struct of_arguments *args)
{
/*	void *address = (void *)args->Argument(0);
	int length = args->Argument(1);
	int &error = args->ReturnValue(0);
*/
	// ToDo: to be implemented

	return OF_FAILED;
}


static int
translate_callback(struct of_arguments *args)
{
	addr_t virtualAddress = (addr_t)args->Argument(0);
	int &error = args->ReturnValue(0);
	int &physicalAddress = args->ReturnValue(1);
	int &mode = args->ReturnValue(2);

	// Find page table entry for this address

	uint32 virtualSegmentID = sSegments[addr_t(virtualAddress) >> 28].virtual_segment_id;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, (uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
	page_table_entry *entry = NULL;

	for (int32 i = 0; i < 8; i++) {
		entry = &group->entry[i];

		if (entry->valid
			&& entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == false
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			goto success;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		entry = &group->entry[i];

		if (entry->valid
			&& entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == true
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			goto success;
	}

	// could not find the translation
	error = B_ENTRY_NOT_FOUND;
	return OF_FAILED;

success:
	// we found the entry in question
	physicalAddress = (int)(entry->physical_page_number * B_PAGE_SIZE);
	mode = (entry->write_through << 6)		// WIMGxPP
			| (entry->caching_inhibited << 5)
			| (entry->memory_coherent << 4)
			| (entry->guarded << 3)
			| entry->page_protection;
	error = B_OK;

	return B_OK;
}


static int
alloc_real_mem_callback(struct of_arguments *args)
{
/*	addr_t minAddress = (addr_t)args->Argument(0);
	addr_t maxAddress = (addr_t)args->Argument(1);
	int length = args->Argument(2);
	int mode = args->Argument(3);
	int &error = args->ReturnValue(0);
	int &physicalAddress = args->ReturnValue(1);
*/
	// ToDo: to be implemented

	return OF_FAILED;
}


/** Dispatches the callback to the responsible function */

static int
callback(struct of_arguments *args)
{
	const char *name = args->name;
printf("CALLBACK: %s\n", name);

	if (!strcmp(name, "map"))
		return map_callback(args);
	else if (!strcmp(name, "unmap"))
		return unmap_callback(args);
	else if (!strcmp(name, "translate"))
		return translate_callback(args);
	else if (!strcmp(name, "alloc-real-mem"))
		return alloc_real_mem_callback(args);

	return OF_FAILED;
}


extern "C" status_t
arch_set_callback(void)
{
	// set OpenFirmware callbacks - it will ask us for memory after that
	// instead of maintaining it itself

	void *oldCallback = NULL;
	if (of_call_client_function("set-callback", 1, 1, &callback, &oldCallback)
			== OF_FAILED) {
		puts("set-callback failed!");
		return B_ERROR;
	}
	//printf("old callback = %p\n", old);

	return B_OK;
}


extern "C" status_t
arch_mmu_init(void)
{
	// get map of physical memory (fill in kernel_args structure)

	size_t total;
	if (find_physical_memory_ranges(total) < B_OK) {
		puts("could not find physical memory ranges!");
		return B_ERROR;
	}
	printf("total physical memory = %lu MB\n", total / (1024*1024));

	// get OpenFirmware's current page table

	page_table_entry_group *oldTable;
	page_table_entry_group *table;
	size_t tableSize;
	ppc_get_page_table(&table, &tableSize);

	oldTable = table;

	bool realMode = false;
	// TODO: read these values out of the OF settings
	addr_t realBase = 0;
	addr_t realSize = 0x400000;

	// can we just keep the page table?
	size_t suggestedTableSize = suggested_page_table_size(total);
	printf("suggested page table size = %lu\n", suggestedTableSize);
	if (tableSize < suggestedTableSize) {
		// nah, we need a new one!
		printf("need new page table, size = %lu!\n", suggestedTableSize);
		table = (page_table_entry_group *)of_claim(NULL, suggestedTableSize, suggestedTableSize);
			// KERNEL_BASE would be better as virtual address, but
			// at least with Apple's OpenFirmware, it makes no
			// difference - we will have to remap it later
		if (table == (void *)OF_FAILED) {
			panic("Could not allocate new page table (size = %lu)!!\n", suggestedTableSize);
			return B_NO_MEMORY;
		}
		if (table == NULL) {
			// work-around for the broken Pegasos OpenFirmware
			puts("broken OpenFirmware detected (claim doesn't work).");
			realMode = true;

			addr_t tableBase = 0;
			for (int32 i = 0; tableBase < realBase + realSize * 3; i++) {
				tableBase = suggestedTableSize * i;
			}

			table = (page_table_entry_group *)tableBase;
		}

		printf("new table at: %p\n", table);
		sPageTable = table;
		tableSize = suggestedTableSize;
	} else {
		// ToDo: we could check if the page table is much too large
		//	and create a smaller one in this case (in order to save
		//	memory).
		sPageTable = table;
	}

	sPageTableHashMask = tableSize / sizeof(page_table_entry_group) - 1;
	if (sPageTable != oldTable)
		memset(sPageTable, 0, tableSize);

	// turn off address translation via the page table/segment mechanism,
	// identity map the first 256 MB (where our code/data reside)

	printf("MSR: %p\n", (void *)get_msr());

#if 0
	block_address_translation bat;

	bat.length = BAT_LENGTH_256MB;
	bat.kernel_valid = true;
	bat.memory_coherent = true;
	bat.protection = BAT_READ_WRITE;

	set_ibat0(&bat);
	set_dbat0(&bat);
	isync();
puts("2");
#endif

	// initialize segment descriptors, but don't set the registers
	// until we're about to take over the page table - we're mapping
	// pages into our table using these values

	for (int32 i = 0; i < 16; i++)
		sSegments[i].virtual_segment_id = i;
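	// (The segment number doubles as the VSID here, so the hashes computed
	// in map_page() match what the hardware computes once these segment
	// registers are actually loaded further below.)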

	// find already allocated ranges of physical memory
	// and the virtual address space

	page_table_entry_group *physicalTable = NULL;
	void *exceptionHandlers = (void *)-1;
	if (find_allocated_ranges(oldTable, table, &physicalTable,
			&exceptionHandlers) < B_OK) {
		puts("find_allocated_ranges() failed!");
		//return B_ERROR;
	}

#if 0
	block_address_translation bats[8];
	getibats(bats);
	for (int32 i = 0; i < 8; i++)
		printf("page index %u, length %u, ppn %u\n", bats[i].page_index, bats[i].length, bats[i].physical_block_number);
#endif

	if (physicalTable == NULL) {
		puts("arch_mmu_init(): Didn't find physical address of page table!");
		if (!realMode)
			return B_ERROR;

		// Pegasos work-around
		//map_range((void *)realBase, (void *)realBase, realSize * 2, PAGE_READ_WRITE);
		//map_range((void *)(total - realSize), (void *)(total - realSize), realSize, PAGE_READ_WRITE);
		//map_range((void *)table, (void *)table, tableSize, PAGE_READ_WRITE);
		insert_physical_allocated_range((void *)realBase, realSize * 2);
		insert_virtual_allocated_range((void *)realBase, realSize * 2);
		insert_physical_allocated_range((void *)(total - realSize), realSize);
		insert_virtual_allocated_range((void *)(total - realSize), realSize);
		insert_physical_allocated_range((void *)table, tableSize);
		insert_virtual_allocated_range((void *)table, tableSize);

		// QEMU OpenHackware work-around
		insert_physical_allocated_range((void *)0x05800000, 0x06000000 - 0x05800000);
		insert_virtual_allocated_range((void *)0x05800000, 0x06000000 - 0x05800000);

		physicalTable = table;
	}

	if (exceptionHandlers == (void *)-1) {
		// ToDo: create mapping for the exception handlers
		puts("no mapping for the exception handlers!");
	}

	// Set the Open Firmware memory callback. From now on the Open Firmware
	// will ask us for memory.
	arch_set_callback();

	// set up new page table and turn on translation again

	for (int32 i = 0; i < 16; i++) {
		ppc_set_segment_register((void *)(i * 0x10000000), sSegments[i]);
			// one segment describes 256 MB of memory
	}

	ppc_set_page_table(physicalTable, tableSize);
	invalidate_tlb();

	if (!realMode) {
		// clear BATs
		reset_ibats();
		reset_dbats();
	}

	set_msr(MSR_MACHINE_CHECK_ENABLED | MSR_FP_AVAILABLE
			| MSR_INST_ADDRESS_TRANSLATION
			| MSR_DATA_ADDRESS_TRANSLATION);

	// set kernel args

	printf("virt_allocated: %lu\n", gKernelArgs.num_virtual_allocated_ranges);
	printf("phys_allocated: %lu\n", gKernelArgs.num_physical_allocated_ranges);
	printf("phys_memory: %lu\n", gKernelArgs.num_physical_memory_ranges);

	gKernelArgs.arch_args.page_table.start = (addr_t)sPageTable;
	gKernelArgs.arch_args.page_table.size = tableSize;

	gKernelArgs.arch_args.exception_handlers.start = (addr_t)exceptionHandlers;
	gKernelArgs.arch_args.exception_handlers.size = B_PAGE_SIZE;

	return B_OK;
}