/*
 * Copyright 2003-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 */


#include <platform_arch.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <boot/stdio.h>
#include <platform/openfirmware/openfirmware.h>
#include <arch_cpu.h>
#include <arch_mmu.h>
#include <kernel.h>

#include <OS.h>


// set protection to WIMGNPP: -----PP
// PP:	00 - no access
//		01 - read only
//		10 - read/write
//		11 - read only
#define PAGE_READ_ONLY	0x01
#define PAGE_READ_WRITE	0x02


segment_descriptor sSegments[16];
page_table_entry_group *sPageTable;
uint32 sPageTableHashMask;


// begin and end of the boot loader
extern "C" uint8 __text_begin;
extern "C" uint8 _end;


static void
remove_range_index(addr_range *ranges, uint32 &numRanges, uint32 index)
{
	if (index + 1 == numRanges) {
		// remove last range
		numRanges--;
		return;
	}

	memmove(&ranges[index], &ranges[index + 1],
		sizeof(addr_range) * (numRanges - 1 - index));
	numRanges--;
}


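/**	Inserts the given range (rounded out to whole pages) into the range
 *	array, merging it with any existing ranges it overlaps or touches.
 *	Returns B_ENTRY_NOT_FOUND if a new entry would be needed but the array
 *	is already full.
 */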
static status_t
insert_memory_range(addr_range *ranges, uint32 &numRanges, uint32 maxRanges,
	const void *_start, uint32 _size)
{
	addr_t start = ROUNDOWN(addr_t(_start), B_PAGE_SIZE);
	addr_t end = ROUNDUP(addr_t(_start) + _size, B_PAGE_SIZE);
	addr_t size = end - start;
	if (size == 0)
		return B_OK;

	for (uint32 i = 0; i < numRanges; i++) {
		addr_t rangeStart = ranges[i].start;
		addr_t rangeEnd = rangeStart + ranges[i].size;

		if (end < rangeStart || start > rangeEnd) {
			// ranges don't intersect or touch each other
			continue;
		}
		if (start >= rangeStart && end <= rangeEnd) {
			// range is already completely covered
			return B_OK;
		}

		if (start < rangeStart) {
			// prepend to the existing range
			ranges[i].start = start;
			ranges[i].size += rangeStart - start;
		}
		if (end > ranges[i].start + ranges[i].size) {
			// append to the existing range
			ranges[i].size = end - ranges[i].start;
		}

		// join ranges if possible

		for (uint32 j = 0; j < numRanges; j++) {
			if (i == j)
				continue;

			rangeStart = ranges[i].start;
			rangeEnd = rangeStart + ranges[i].size;
			addr_t joinStart = ranges[j].start;
			addr_t joinEnd = joinStart + ranges[j].size;

			if (rangeStart <= joinEnd && joinEnd <= rangeEnd) {
				// join range that used to be before the current one, or
				// the one that's now entirely included by the current one
				if (joinStart < rangeStart) {
					ranges[i].size += rangeStart - joinStart;
					ranges[i].start = joinStart;
				}

				remove_range_index(ranges, numRanges, j--);
			} else if (joinStart <= rangeEnd && joinEnd > rangeEnd) {
				// join range that used to be after the current one
				ranges[i].size += joinEnd - rangeEnd;

				remove_range_index(ranges, numRanges, j--);
			}
		}
		return B_OK;
	}

	// no range matched, we need to create a new one

	if (numRanges >= maxRanges)
		return B_ENTRY_NOT_FOUND;

	ranges[numRanges].start = start;
	ranges[numRanges].size = size;
	numRanges++;

	return B_OK;
}


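/**	Removes the given range (rounded out to whole pages) from the range
 *	array, trimming or splitting any ranges that overlap it.
 */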
static status_t
remove_memory_range(addr_range *ranges, uint32 &numRanges, uint32 maxRanges,
	const void *_start, uint32 _size)
{
	addr_t start = ROUNDOWN(addr_t(_start), B_PAGE_SIZE);
	addr_t end = ROUNDUP(addr_t(_start) + _size, B_PAGE_SIZE);

	for (uint32 i = 0; i < numRanges; i++) {
		addr_t rangeStart = ranges[i].start;
		addr_t rangeEnd = rangeStart + ranges[i].size;

		if (start <= rangeStart) {
			if (end <= rangeStart) {
				// no intersection
			} else if (end >= rangeEnd) {
				// remove the complete range
				remove_range_index(ranges, numRanges, i);
				i--;
			} else {
				// remove the head of the range
				ranges[i].start = end;
				ranges[i].size = rangeEnd - end;
			}
		} else if (end >= rangeEnd) {
			if (start < rangeEnd) {
				// remove the tail
				ranges[i].size = start - rangeStart;
			}	// else: no intersection
		} else {
			// rangeStart < start < end < rangeEnd
			// The ugly case: We have to remove something from the middle of
			// the range. We keep the head of the range and insert its tail
			// as a new range.
			ranges[i].size = start - rangeStart;
			return insert_memory_range(ranges, numRanges, maxRanges,
				(void*)end, rangeEnd - end);
		}
	}

	return B_OK;
}


static status_t
insert_physical_memory_range(void *start, uint32 size)
{
	return insert_memory_range(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges, MAX_PHYSICAL_MEMORY_RANGE,
		start, size);
}


static status_t
insert_physical_allocated_range(void *start, uint32 size)
{
	return insert_memory_range(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges,
		MAX_PHYSICAL_ALLOCATED_RANGE, start, size);
}


static status_t
insert_virtual_allocated_range(void *start, uint32 size)
{
	return insert_memory_range(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges,
		MAX_VIRTUAL_ALLOCATED_RANGE, start, size);
}


static status_t
insert_virtual_range_to_keep(void *start, uint32 size)
{
	return insert_memory_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
		gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, start, size);
}


static status_t
remove_virtual_range_to_keep(void *start, uint32 size)
{
	return remove_memory_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
		gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, start, size);
}


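/**	Reads the "reg" property of the OpenFirmware memory node and records
 *	each region in gKernelArgs.physical_memory_range; "total" is set to the
 *	sum of all region sizes.
 */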
static status_t
find_physical_memory_ranges(size_t &total)
{
	int memory;
	if (of_getprop(gChosen, "memory", &memory, sizeof(int)) == OF_FAILED)
		return B_ERROR;
	memory = of_instance_to_package(memory);

	total = 0;

	struct of_region regions[64];
	int count = of_getprop(memory, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		return B_ERROR;
	count /= sizeof(of_region);

	for (int32 i = 0; i < count; i++) {
		if (regions[i].size <= 0) {
			printf("%ld: empty region\n", i);
			continue;
		}
		printf("%ld: base = %p, size = %lu\n", i, regions[i].base,
			regions[i].size);

		total += regions[i].size;

		if (insert_physical_memory_range(regions[i].base, regions[i].size)
				< B_OK) {
			printf("cannot map physical memory range (num ranges = %lu)!\n",
				gKernelArgs.num_physical_memory_ranges);
			return B_ERROR;
		}
	}

	return B_OK;
}


static bool
is_in_range(addr_range *ranges, uint32 numRanges, void *address, size_t size)
{
	// Note: This function returns whether any single allocated range
	// completely contains the given range. If the given range crosses
	// allocated range boundaries, but is nevertheless covered completely, the
	// function returns false. But since the range management code joins
	// touching ranges, this should never happen.
	addr_t start = (addr_t)address;
	addr_t end = start + size;

	for (uint32 i = 0; i < numRanges; i++) {
		addr_t rangeStart = ranges[i].start;
		addr_t rangeEnd = rangeStart + ranges[i].size;

		if (start >= rangeStart && end <= rangeEnd)
			return true;
	}
	return false;
}


static bool
intersects_ranges(addr_range *ranges, uint32 numRanges, void *address,
	size_t size)
{
	addr_t start = (addr_t)address;
	addr_t end = start + size;

	for (uint32 i = 0; i < numRanges; i++) {
		addr_t rangeStart = ranges[i].start;
		addr_t rangeEnd = rangeStart + ranges[i].size;

		if ((start >= rangeStart && start < rangeEnd)
			|| (rangeStart >= start && rangeStart < end)) {
			return true;
		}
	}
	return false;
}


static bool
is_virtual_allocated(void *address, size_t size)
{
	return intersects_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges, address, size);
}


static bool
is_physical_allocated(void *address, size_t size)
{
	return intersects_ranges(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges, address, size);
}


static bool
is_physical_memory(void *address, size_t size)
{
	return is_in_range(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges, address, size);
}


static bool
is_physical_memory(void *address)
{
	return is_physical_memory(address, 0);
}


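/**	Fills in one page table entry: the lower word (physical page number and
 *	WIMG/PP mode bits) is written first, followed by an eieio barrier, so
 *	that the entry only becomes valid once it is complete.
 */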
static void
fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
	void *virtualAddress, void *physicalAddress, uint8 mode,
	bool secondaryHash)
{
	// lower 32 bit - set at once
	((uint32 *)entry)[1]
		= (((uint32)physicalAddress / B_PAGE_SIZE) << 12) | mode;
	/*entry->physical_page_number = (uint32)physicalAddress / B_PAGE_SIZE;
	entry->_reserved0 = 0;
	entry->referenced = false;
	entry->changed = false;
	entry->write_through = (mode >> 6) & 1;
	entry->caching_inhibited = (mode >> 5) & 1;
	entry->memory_coherent = (mode >> 4) & 1;
	entry->guarded = (mode >> 3) & 1;
	entry->_reserved1 = 0;
	entry->page_protection = mode & 0x3;*/
	eieio();
		// we need to make sure that the lower 32 bit were
		// already written when the entry becomes valid

	// upper 32 bit
	entry->virtual_segment_id = virtualSegmentID;
	entry->secondary_hash = secondaryHash;
	entry->abbr_page_index = ((uint32)virtualAddress >> 22) & 0x3f;
	entry->valid = true;
}


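/**	Enters a single page mapping into the hashed page table. The primary
 *	hash group is tried first; if all eight of its slots are in use, the
 *	secondary hash group is used instead.
 */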
static void
map_page(void *virtualAddress, void *physicalAddress, uint8 mode)
{
	uint32 virtualSegmentID
		= sSegments[addr_t(virtualAddress) >> 28].virtual_segment_id;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		(uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		// 8 entries in a group
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, mode, false);
		//printf("map: va = %p -> %p, mode = %d, hash = %lu\n",
		//	virtualAddress, physicalAddress, mode, hash);
		return;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, mode, true);
		//printf("map: va = %p -> %p, mode = %d, second hash = %lu\n",
		//	virtualAddress, physicalAddress, mode, hash);
		return;
	}

	panic("out of page table entries! (you would think this could not happen "
		"in a boot loader...)\n");
}


static void
map_range(void *virtualAddress, void *physicalAddress, size_t size, uint8 mode)
{
	for (uint32 offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page((void *)(uint32(virtualAddress) + offset),
			(void *)(uint32(physicalAddress) + offset), mode);
	}
}


static status_t
find_allocated_ranges(void *oldPageTable, void *pageTable,
	page_table_entry_group **_physicalPageTable, void **_exceptionHandlers)
{
	// We have to preserve the OpenFirmware-established mappings if we want
	// to continue to use its services after we've taken over (we will
	// probably need fewer translations once we have proper driver support
	// for the target hardware).
	int mmu;
	if (of_getprop(gChosen, "mmu", &mmu, sizeof(int)) == OF_FAILED) {
		puts("no OF mmu");
		return B_ERROR;
	}
	mmu = of_instance_to_package(mmu);

	struct translation_map {
		void	*virtual_address;
		int		length;
		void	*physical_address;
		int		mode;
	} translations[64];
	int length = of_getprop(mmu, "translations", &translations,
		sizeof(translations));
	if (length == OF_FAILED) {
		puts("no OF translations");
		return B_ERROR;
	}
	length = length / sizeof(struct translation_map);
	uint32 total = 0;
	printf("found %d translations\n", length);

	for (int i = 0; i < length; i++) {
		struct translation_map *map = &translations[i];
		bool keepRange = true;
		//printf("%i: map: %p, length %d -> physical: %p, mode %d\n", i,
		//	map->virtual_address, map->length, map->physical_address,
		//	map->mode);

		// insert range in physical allocated, if it points to physical memory

		if (is_physical_memory(map->physical_address)
			&& insert_physical_allocated_range(map->physical_address,
					map->length) < B_OK) {
			printf("cannot map physical allocated range (num ranges = %lu)!\n",
				gKernelArgs.num_physical_allocated_ranges);
			return B_ERROR;
		}

		if (map->virtual_address == pageTable) {
			puts("found page table!");
			*_physicalPageTable = (page_table_entry_group *)map->physical_address;
			keepRange = false;	// we keep it explicitly anyway
		}
		if ((addr_t)map->physical_address <= 0x100
			&& (addr_t)map->physical_address + map->length >= 0x1000) {
			puts("found exception handlers!");
			*_exceptionHandlers = map->virtual_address;
			keepRange = false;	// we keep it explicitly anyway
		}
		if (map->virtual_address == oldPageTable)
			keepRange = false;

		// insert range in virtual allocated

		if (insert_virtual_allocated_range(map->virtual_address,
				map->length) < B_OK) {
			printf("cannot map virtual allocated range (num ranges = %lu)!\n",
				gKernelArgs.num_virtual_allocated_ranges);
		}

		// map range into the page table

		map_range(map->virtual_address, map->physical_address, map->length,
			map->mode);

		// insert range in virtual ranges to keep

		if (keepRange) {
			if (insert_virtual_range_to_keep(map->virtual_address,
					map->length) < B_OK) {
				printf("cannot map virtual range to keep (num ranges = %lu)!\n",
					gKernelArgs.arch_args.num_virtual_ranges_to_keep);
			}
		}

		total += map->length;
	}
	//printf("total mapped: %lu\n", total);

	// remove the boot loader code from the virtual ranges to keep in the
	// kernel
	if (remove_virtual_range_to_keep(&__text_begin, &_end - &__text_begin)
			!= B_OK) {
		printf("find_allocated_ranges(): Failed to remove boot loader range "
			"from virtual ranges to keep.\n");
	}

	return B_OK;
}


/** Computes the recommended minimal page table size as
 *	described in table 7-22 of the PowerPC "Programming
 *	Environment for 32-Bit Microprocessors".
 *	The page table size ranges from 64 kB (for 8 MB RAM)
 *	to 32 MB (for 4 GB RAM).
 */
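// For example, with 256 MB (2^28 bytes) of RAM the loop below stops at
// max = 28, so the suggested page table size is 2^(28 - 7) = 2 MB.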

static size_t
suggested_page_table_size(size_t total)
{
	uint32 max = 23;
		// 2^23 == 8 MB

	while (max < 32) {
		if (total <= (1UL << max))
			break;

		max++;
	}

	return 1UL << (max - 7);
		// 2^(23 - 7) == 64 kB
}


static void *
find_physical_memory_range(size_t size)
{
	for (uint32 i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
		if (gKernelArgs.physical_memory_range[i].size > size)
			return (void *)gKernelArgs.physical_memory_range[i].start;
	}
	return NULL;
}


static void *
find_free_physical_range(size_t size)
{
	// just do a simple linear search at the end of the allocated
	// ranges (dumb memory allocation)
	if (gKernelArgs.num_physical_allocated_ranges == 0) {
		if (gKernelArgs.num_physical_memory_ranges == 0)
			return NULL;

		return find_physical_memory_range(size);
	}

	for (uint32 i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
		void *address
			= (void *)(gKernelArgs.physical_allocated_range[i].start
				+ gKernelArgs.physical_allocated_range[i].size);
		if (!is_physical_allocated(address, size)
			&& is_physical_memory(address, size))
			return address;
	}
	return NULL;
}


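/**	Finds a free virtual range of the given size, looking at the ends of the
 *	already allocated ranges. If "base" is given and free, it is returned
 *	directly; otherwise the lowest free candidate at or above "base" is
 *	preferred, falling back to the first free candidate found.
 */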
static void *
find_free_virtual_range(void *base, size_t size)
{
	if (base && !is_virtual_allocated(base, size))
		return base;

	void *firstFound = NULL;
	void *firstBaseFound = NULL;
	for (uint32 i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
		void *address
			= (void *)(gKernelArgs.virtual_allocated_range[i].start
				+ gKernelArgs.virtual_allocated_range[i].size);
		if (!is_virtual_allocated(address, size)) {
			if (!base)
				return address;

			if (firstFound == NULL)
				firstFound = address;
			if (address >= base
				&& (firstBaseFound == NULL || address < firstBaseFound)) {
				firstBaseFound = address;
			}
		}
	}
	return (firstBaseFound ? firstBaseFound : firstFound);
}


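/**	Allocates and maps a virtual range of "size" bytes (rounded up to whole
 *	pages). If no virtual address is given, the range is placed at or above
 *	KERNEL_BASE; the backing physical pages are taken from the first free
 *	spot behind the already allocated physical ranges.
 */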
577 extern "C" void *
578 arch_mmu_allocate(void *_virtualAddress, size_t size, uint8 _protection,
579 	bool exactAddress)
580 {
581 	// we only know page sizes
582 	size = ROUNDUP(size, B_PAGE_SIZE);
583 
584 	uint8 protection = 0;
585 	if (_protection & B_WRITE_AREA)
586 		protection = PAGE_READ_WRITE;
587 	else
588 		protection = PAGE_READ_ONLY;
589 
590 	// If no address is given, use the KERNEL_BASE as base address, since
591 	// that avoids trouble in the kernel, when we decide to keep the region.
592 	void *virtualAddress = _virtualAddress;
593 	if (!virtualAddress)
594 		virtualAddress = (void*)KERNEL_BASE;
595 
596 	// find free address large enough to hold "size"
597 	virtualAddress = find_free_virtual_range(virtualAddress, size);
598 	if (virtualAddress == NULL)
599 		return NULL;
600 
601 	// fail if the exact address was requested, but is not free
602 	if (exactAddress && _virtualAddress && virtualAddress != _virtualAddress) {
603 		dprintf("arch_mmu_allocate(): exact address requested, but virtual "
604 			"range (base: %p, size: %lu) is not free.\n",
605 			_virtualAddress, size);
606 		return NULL;
607 	}
608 
609 	// we have a free virtual range for the allocation, now
610 	// have a look for free physical memory as well (we assume
611 	// that a) there is enough memory, and b) failing is fatal
612 	// so that we don't have to optimize for these cases :)
613 
614 	void *physicalAddress = find_free_physical_range(size);
615 	if (physicalAddress == NULL) {
616 		dprintf("arch_mmu_allocate(base: %p, size: %lu) no free physical "
617 			"address\n", virtualAddress, size);
618 		return NULL;
619 	}
620 
621 	// everything went fine, so lets mark the space as used.
622 
623 	printf("mmu_alloc: va %p, pa %p, size %u\n", virtualAddress, physicalAddress, size);
624 	insert_virtual_allocated_range(virtualAddress, size);
625 	insert_physical_allocated_range(physicalAddress, size);
626 
627 	map_range(virtualAddress, physicalAddress, size, protection);
628 
629 	return virtualAddress;
630 }
631 
632 
633 extern "C" status_t
634 arch_mmu_free(void *address, size_t size)
635 {
636 	// ToDo: implement freeing a region!
637 	return B_OK;
638 }
639 
640 
641 static inline void
642 invalidate_tlb(void)
643 {
644 	//asm volatile("tlbia");
645 		// "tlbia" is obviously not available on every CPU...
646 
647 	// Note: this flushes the whole 4 GB address space - it
648 	//		would probably be a good idea to do less here
649 
650 	addr_t address = 0;
651 	for (uint32 i = 0; i < 0x100000; i++) {
652 		asm volatile("tlbie %0" : : "r" (address));
653 		address += B_PAGE_SIZE;
654 	}
655 	tlbsync();
656 }
657 
658 
659 //	#pragma mark -
660 //	OpenFirmware callbacks and public API
661 
662 
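/**	OpenFirmware "map" callback: the firmware asks us to map a physical
 *	range at a given virtual address. The range is recorded as allocated
 *	and entered into our page table.
 */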
static int
map_callback(struct of_arguments *args)
{
	void *physicalAddress = (void *)args->Argument(0);
	void *virtualAddress = (void *)args->Argument(1);
	int length = args->Argument(2);
	int mode = args->Argument(3);
	int &error = args->ReturnValue(0);

	// insert range in physical allocated if needed

	if (is_physical_memory(physicalAddress)
		&& insert_physical_allocated_range(physicalAddress, length) < B_OK) {
		error = -1;
		return OF_FAILED;
	}

	// insert range in virtual allocated

	if (insert_virtual_allocated_range(virtualAddress, length) < B_OK) {
		error = -2;
		return OF_FAILED;
	}

	// map range into the page table

	map_range(virtualAddress, physicalAddress, length, mode);

	return B_OK;
}


static int
unmap_callback(struct of_arguments *args)
{
/*	void *address = (void *)args->Argument(0);
	int length = args->Argument(1);
	int &error = args->ReturnValue(0);
*/
	// ToDo: to be implemented

	return OF_FAILED;
}


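/**	OpenFirmware "translate" callback: looks up the page table entry for
 *	the given virtual address (first in the primary, then in the secondary
 *	hash group) and returns the physical address and the WIMG/PP mode bits.
 */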
static int
translate_callback(struct of_arguments *args)
{
	addr_t virtualAddress = (addr_t)args->Argument(0);
	int &error = args->ReturnValue(0);
	int &physicalAddress = args->ReturnValue(1);
	int &mode = args->ReturnValue(2);

	// Find page table entry for this address

	uint32 virtualSegmentID
		= sSegments[addr_t(virtualAddress) >> 28].virtual_segment_id;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		(uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
	page_table_entry *entry = NULL;

	for (int32 i = 0; i < 8; i++) {
		entry = &group->entry[i];

		if (entry->valid
			&& entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == false
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			goto success;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		entry = &group->entry[i];

		if (entry->valid
			&& entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == true
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			goto success;
	}

	// could not find the translation
	error = B_ENTRY_NOT_FOUND;
	return OF_FAILED;

success:
	// we found the entry in question
	physicalAddress = (int)(entry->physical_page_number * B_PAGE_SIZE);
	mode = (entry->write_through << 6)		// WIMGxPP
			| (entry->caching_inhibited << 5)
			| (entry->memory_coherent << 4)
			| (entry->guarded << 3)
			| entry->page_protection;
	error = B_OK;

	return B_OK;
}


static int
alloc_real_mem_callback(struct of_arguments *args)
{
/*	addr_t minAddress = (addr_t)args->Argument(0);
	addr_t maxAddress = (addr_t)args->Argument(1);
	int length = args->Argument(2);
	int mode = args->Argument(3);
	int &error = args->ReturnValue(0);
	int &physicalAddress = args->ReturnValue(1);
*/
	// ToDo: to be implemented

	return OF_FAILED;
}


/** Dispatches the callback to the responsible function */

static int
callback(struct of_arguments *args)
{
	const char *name = args->name;
	printf("CALLBACK: %s\n", name);

	if (!strcmp(name, "map"))
		return map_callback(args);
	else if (!strcmp(name, "unmap"))
		return unmap_callback(args);
	else if (!strcmp(name, "translate"))
		return translate_callback(args);
	else if (!strcmp(name, "alloc-real-mem"))
		return alloc_real_mem_callback(args);

	return OF_FAILED;
}


extern "C" status_t
arch_set_callback(void)
{
	// set OpenFirmware callbacks - it will ask us for memory after that
	// instead of maintaining it itself

	void *oldCallback = NULL;
	if (of_call_client_function("set-callback", 1, 1, &callback, &oldCallback)
			== OF_FAILED) {
		puts("set-callback failed!");
		return B_ERROR;
	}
	//printf("old callback = %p\n", oldCallback);

	return B_OK;
}


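/**	Prepares the MMU for the kernel: collects the physical memory ranges
 *	from OpenFirmware, claims (or reuses) a hashed page table, re-creates
 *	the firmware's translations in it, installs the memory callback, and
 *	finally switches segment registers, page table, and MSR over to our own
 *	translation setup.
 */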
820 extern "C" status_t
821 arch_mmu_init(void)
822 {
823 	// get map of physical memory (fill in kernel_args structure)
824 
825 	size_t total;
826 	if (find_physical_memory_ranges(total) < B_OK) {
827 		puts("could not find physical memory ranges!");
828 		return B_ERROR;
829 	}
830 	printf("total physical memory = %u MB\n", total / (1024*1024));
831 
832 	// get OpenFirmware's current page table
833 
834 	page_table_entry_group *oldTable;
835 	page_table_entry_group *table;
836 	size_t tableSize;
837 	ppc_get_page_table(&table, &tableSize);
838 
839 	oldTable = table;
840 
841 	bool realMode = false;
842 	// TODO: read these values out of the OF settings
843 	addr_t realBase = 0;
844 	addr_t realSize = 0x400000;
845 
846 	// can we just keep the page table?
847 	size_t suggestedTableSize = suggested_page_table_size(total);
848 	printf("suggested page table size = %u\n", suggestedTableSize);
849 	if (tableSize < suggestedTableSize) {
850 		// nah, we need a new one!
851 		printf("need new page table, size = %u!\n", suggestedTableSize);
852 		table = (page_table_entry_group *)of_claim(NULL, suggestedTableSize, suggestedTableSize);
853 			// KERNEL_BASE would be better as virtual address, but
854 			// at least with Apple's OpenFirmware, it makes no
855 			// difference - we will have to remap it later
856 		if (table == (void *)OF_FAILED) {
857 			panic("Could not allocate new page table (size = %ld)!!\n", suggestedTableSize);
858 			return B_NO_MEMORY;
859 		}
860 		if (table == NULL) {
861 			// work-around for the broken Pegasos OpenFirmware
862 			puts("broken OpenFirmware detected (claim doesn't work).");
863 			realMode = true;
864 
865 			addr_t tableBase = 0;
866 			for (int32 i = 0; tableBase < realBase + realSize * 3; i++) {
867 				tableBase = suggestedTableSize * i;
868 			}
869 
870 			table = (page_table_entry_group *)tableBase;
871 		}
872 
873 		printf("new table at: %p\n", table);
874 		sPageTable = table;
875 		tableSize = suggestedTableSize;
876 	} else {
877 		// ToDo: we could check if the page table is much too large
878 		//	and create a smaller one in this case (in order to save
879 		//	memory).
880 		sPageTable = table;
881 	}
882 
883 	sPageTableHashMask = tableSize / sizeof(page_table_entry_group) - 1;
884 	if (sPageTable != oldTable)
885 		memset(sPageTable, 0, tableSize);
886 
887 	// turn off address translation via the page table/segment mechanism,
888 	// identity map the first 256 MB (where our code/data reside)
889 
890 	printf("MSR: %p\n", (void *)get_msr());
891 
892 //	block_address_translation bat;
893 
894 /*	bat.length = BAT_LENGTH_256MB;
895 	bat.kernel_valid = true;
896 	bat.memory_coherent = true;
897 	bat.protection = BAT_READ_WRITE;
898 
899 	set_ibat0(&bat);
900 	set_dbat0(&bat);
901 	isync();
902 puts("2");*/
903 
904 	// initialize segment descriptors, but don't set the registers
905 	// until we're about to take over the page table - we're mapping
906 	// pages into our table using these values
907 
908 	for (int32 i = 0; i < 16; i++)
909 		sSegments[i].virtual_segment_id = i;
910 
911 	// find already allocated ranges of physical memory
912 	// and the virtual address space
913 
914 	page_table_entry_group *physicalTable = NULL;
915 	void *exceptionHandlers = (void *)-1;
916 	if (find_allocated_ranges(oldTable, table, &physicalTable,
917 			&exceptionHandlers) < B_OK) {
918 		puts("find_allocated_ranges() failed!");
919 		return B_ERROR;
920 	}
921 
922 #if 0
923 	block_address_translation bats[8];
924 	getibats(bats);
925 	for (int32 i = 0; i < 8; i++)
926 		printf("page index %u, length %u, ppn %u\n", bats[i].page_index, bats[i].length, bats[i].physical_block_number);
927 #endif
928 
929 	if (physicalTable == NULL) {
930 		puts("arch_mmu_init(): Didn't find physical address of page table!");
931 		if (!realMode)
932 			return B_ERROR;
933 
934 		// Pegasos work-around
935 		//map_range((void *)realBase, (void *)realBase, realSize * 2, PAGE_READ_WRITE);
936 		//map_range((void *)(total - realSize), (void *)(total - realSize), realSize, PAGE_READ_WRITE);
937 		//map_range((void *)table, (void *)table, tableSize, PAGE_READ_WRITE);
938 		insert_physical_allocated_range((void *)realBase, realSize * 2);
939 		insert_virtual_allocated_range((void *)realBase, realSize * 2);
940 		insert_physical_allocated_range((void *)(total - realSize), realSize);
941 		insert_virtual_allocated_range((void *)(total - realSize), realSize);
942 		insert_physical_allocated_range((void *)table, tableSize);
943 		insert_virtual_allocated_range((void *)table, tableSize);
944 
945 		physicalTable = table;
946 	}
947 
948 	if (exceptionHandlers == (void *)-1) {
949 		// ToDo: create mapping for the exception handlers
950 		puts("no mapping for the exception handlers!");
951 	}
952 
953 	// Set the Open Firmware memory callback. From now on the Open Firmware
954 	// will ask us for memory.
955 	arch_set_callback();
956 
957 	// set up new page table and turn on translation again
958 
959 	for (int32 i = 0; i < 16; i++) {
960 		ppc_set_segment_register((void *)(i * 0x10000000), sSegments[i]);
961 			// one segment describes 256 MB of memory
962 	}
963 
964 	ppc_set_page_table(physicalTable, tableSize);
965 	invalidate_tlb();
966 
967 	if (!realMode) {
968 		// clear BATs
969 		reset_ibats();
970 		reset_dbats();
971 	}
972 
973 	set_msr(MSR_MACHINE_CHECK_ENABLED | MSR_FP_AVAILABLE
974 			| MSR_INST_ADDRESS_TRANSLATION
975 			| MSR_DATA_ADDRESS_TRANSLATION);
976 
977 	// set kernel args
978 
979 	printf("virt_allocated: %lu\n", gKernelArgs.num_virtual_allocated_ranges);
980 	printf("phys_allocated: %lu\n", gKernelArgs.num_physical_allocated_ranges);
981 	printf("phys_memory: %lu\n", gKernelArgs.num_physical_memory_ranges);
982 
983 	gKernelArgs.arch_args.page_table.start = (addr_t)sPageTable;
984 	gKernelArgs.arch_args.page_table.size = tableSize;
985 
986 	gKernelArgs.arch_args.exception_handlers.start = (addr_t)exceptionHandlers;
987 	gKernelArgs.arch_args.exception_handlers.size = B_PAGE_SIZE;
988 
989 	return B_OK;
990 }
991 
992