/*
 * Copyright 2003-2009, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2010-2011, Haiku, Inc. All Rights Reserved.
 * All rights reserved. Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Axel Dörfler, axeld@pinc-software.de.
 *		Alexander von Gluck, kallisti5@unixzen.com
 */


#include <OS.h>

#include <platform_arch.h>
#include <boot/addr_range.h>
#include <boot/kernel_args.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <boot/stdio.h>
#include <platform/openfirmware/openfirmware.h>
#include <arch_cpu.h>
#include <arch_mmu.h>
#include <kernel.h>

#include "support.h"

// set protection to WIMGNPP: -----PP
//	PP:	00 - no access
//		01 - read only
//		10 - read/write
//		11 - read only
#define PAGE_READ_ONLY	0x01
#define PAGE_READ_WRITE	0x02
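
// Illustrative only: the WIMG bits share the mode byte with PP, matching the
// (commented-out) field assignments in fill_page_table_entry() below:
// bit 6 = W (write-through), bit 5 = I (caching inhibited), bit 4 = M (memory
// coherent), bit 3 = G (guarded). A write-through read/write mapping would
// thus use mode 0x40 | PAGE_READ_WRITE == 0x42.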

// NULL is actually a possible physical address...
//#define PHYSINVAL ((void *)-1)
#define PHYSINVAL NULL

//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


segment_descriptor sSegments[16];
page_table_entry_group *sPageTable;
uint32 sPageTableHashMask;


// begin and end of the boot loader
extern "C" uint8 __text_begin;
extern "C" uint8 _end;


static status_t
insert_virtual_range_to_keep(void *start, uint32 size)
{
	return insert_address_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
		&gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, (addr_t)start, size);
}


static status_t
remove_virtual_range_to_keep(void *start, uint32 size)
{
	return remove_address_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
		&gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, (addr_t)start, size);
}


static status_t
find_physical_memory_ranges(size_t &total)
{
	int memory;
	dprintf("checking for memory...\n");
	if (of_getprop(gChosen, "memory", &memory, sizeof(int)) == OF_FAILED)
		return B_ERROR;
	int package = of_instance_to_package(memory);

	total = 0;

	// Memory base addresses are provided in 32 or 64 bit flavors.
	// #address-cells and #size-cells match the number of 32-bit 'cells'
	// representing the length of the base address and size fields.
	int root = of_finddevice("/");
	int32 regAddressCells = of_address_cells(root);
	int32 regSizeCells = of_size_cells(root);
	if (regAddressCells == OF_FAILED || regSizeCells == OF_FAILED) {
		dprintf("finding base/size length counts failed, assuming 32-bit.\n");
		regAddressCells = 1;
		regSizeCells = 1;
	}
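
	// Example of the cell counts in practice: with #address-cells = 2 and
	// #size-cells = 1, each "reg" entry is three 32-bit cells, e.g.
	// { 0x00000001, 0x00000000, 0x40000000 } describes a 1 GB range starting
	// at physical address 0x1'0000'0000.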

	// NOTE: a size-cells value of 2 is possible in theory, but hasn't been
	// seen in the wild yet.
	if (regAddressCells > 2 || regSizeCells > 1) {
		panic("%s: Unsupported OpenFirmware cell count detected.\n"
			"Address Cells: %" B_PRId32 "; Size Cells: %" B_PRId32
			" (CPU > 64bit?).\n", __func__, regAddressCells, regSizeCells);
		return B_ERROR;
	}

	// On 64-bit PowerPC systems (G5), our memory base range address is larger
	if (regAddressCells == 2) {
		struct of_region<uint64, uint32> regions[64];
		int count = of_getprop(package, "reg", regions, sizeof(regions));
		if (count == OF_FAILED)
			count = of_getprop(memory, "reg", regions, sizeof(regions));
		if (count == OF_FAILED)
			return B_ERROR;
		count /= sizeof(regions[0]);

		for (int32 i = 0; i < count; i++) {
			if (regions[i].size <= 0) {
				dprintf("%" B_PRId32 ": empty region\n", i);
				continue;
			}
			dprintf("%" B_PRId32 ": base = %" B_PRIu64 ", "
				"size = %" B_PRIu32 "\n", i, regions[i].base, regions[i].size);

			total += regions[i].size;

			if (insert_physical_memory_range((addr_t)regions[i].base,
					regions[i].size) != B_OK) {
				dprintf("cannot map physical memory range "
					"(num ranges = %" B_PRIu32 ")!\n",
					gKernelArgs.num_physical_memory_ranges);
				return B_ERROR;
			}
		}
		return B_OK;
	}

	// Otherwise, normal 32-bit PowerPC G3 or G4 have a smaller 32-bit one
	struct of_region<uint32, uint32> regions[64];
	int count = of_getprop(package, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		count = of_getprop(memory, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		return B_ERROR;
	count /= sizeof(regions[0]);

	for (int32 i = 0; i < count; i++) {
		if (regions[i].size <= 0) {
			dprintf("%" B_PRId32 ": empty region\n", i);
			continue;
		}
		dprintf("%" B_PRId32 ": base = %" B_PRIu32 ", "
			"size = %" B_PRIu32 "\n", i, regions[i].base, regions[i].size);

		total += regions[i].size;

		if (insert_physical_memory_range((addr_t)regions[i].base,
				regions[i].size) != B_OK) {
			dprintf("cannot map physical memory range "
				"(num ranges = %" B_PRIu32 ")!\n",
				gKernelArgs.num_physical_memory_ranges);
			return B_ERROR;
		}
	}

	return B_OK;
}


static bool
is_virtual_allocated(void *address, size_t size)
{
	uint64 foundBase;
	return !get_free_address_range(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges, (addr_t)address, size,
		&foundBase) || foundBase != (addr_t)address;
}


static bool
is_physical_allocated(void *address, size_t size)
{
	uint64 foundBase;
	return !get_free_address_range(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges, (addr_t)address, size,
		&foundBase) || foundBase != (addr_t)address;
}


static bool
is_physical_memory(void *address, size_t size)
{
	return is_address_range_covered(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges, (addr_t)address, size);
}


static bool
is_physical_memory(void *address)
{
	return is_physical_memory(address, 1);
}


static void
fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
	void *virtualAddress, void *physicalAddress, uint8 mode, bool secondaryHash)
{
	// lower 32 bit - set at once
	((uint32 *)entry)[1]
		= (((uint32)physicalAddress / B_PAGE_SIZE) << 12) | mode;
	/*entry->physical_page_number = (uint32)physicalAddress / B_PAGE_SIZE;
	entry->_reserved0 = 0;
	entry->referenced = false;
	entry->changed = false;
	entry->write_through = (mode >> 6) & 1;
	entry->caching_inhibited = (mode >> 5) & 1;
	entry->memory_coherent = (mode >> 4) & 1;
	entry->guarded = (mode >> 3) & 1;
	entry->_reserved1 = 0;
	entry->page_protection = mode & 0x3;*/
	eieio();
		// we need to make sure that the lower 32 bit were
		// already written when the entry becomes valid

	// upper 32 bit
	entry->virtual_segment_id = virtualSegmentID;
	entry->secondary_hash = secondaryHash;
	entry->abbr_page_index = ((uint32)virtualAddress >> 22) & 0x3f;
	entry->valid = true;
}
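
// Illustrative arithmetic for the packing above: mapping physical address
// 0x00100000 read/write with all WIMG bits clear yields a lower word of
// ((0x00100000 / B_PAGE_SIZE) << 12) | PAGE_READ_WRITE == 0x00100002 -- for
// page-aligned addresses the RPN field simply reproduces the address bits.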


static void
map_page(void *virtualAddress, void *physicalAddress, uint8 mode)
{
	uint32 virtualSegmentID
		= sSegments[addr_t(virtualAddress) >> 28].virtual_segment_id;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		(uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		// 8 entries in a group
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, mode, false);
		//TRACE("map: va = %p -> %p, mode = %d, hash = %lu\n",
		//	virtualAddress, physicalAddress, mode, hash);
		return;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, mode, true);
		//TRACE("map: va = %p -> %p, mode = %d, second hash = %lu\n",
		//	virtualAddress, physicalAddress, mode, hash);
		return;
	}

	panic("%s: out of page table entries!\n", __func__);
}
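
// A sketch of the hashed lookup map_page() relies on, assuming the
// PrimaryHash()/SecondaryHash() helpers implement the classic PowerPC OEA
// hash (see the "Programming Environments" manual):
//
//	uint32 pageIndex = ((uint32)virtualAddress >> 12) & 0xffff;
//	uint32 primaryHash = (virtualSegmentID & 0x7ffff) ^ pageIndex;
//	uint32 secondaryHash = ~primaryHash;
//
// Each hash, masked with sPageTableHashMask, selects one group of eight
// entries; a translation may live in either its primary or secondary group.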


static void
map_range(void *virtualAddress, void *physicalAddress, size_t size, uint8 mode)
{
	for (uint32 offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page((void *)(uint32(virtualAddress) + offset),
			(void *)(uint32(physicalAddress) + offset), mode);
	}
}


static status_t
find_allocated_ranges(void *oldPageTable, void *pageTable,
	page_table_entry_group **_physicalPageTable, void **_exceptionHandlers)
{
	// We have to preserve the OpenFirmware established mappings
	// if we want to continue to use its services after we've
	// taken over (we will probably need fewer translations once
	// we have proper driver support for the target hardware).
	int mmu;
	if (of_getprop(gChosen, "mmu", &mmu, sizeof(int)) == OF_FAILED) {
		dprintf("%s: Error: no OpenFirmware mmu\n", __func__);
		return B_ERROR;
	}
	mmu = of_instance_to_package(mmu);

	struct translation_map {
		void	*virtual_address;
		int		length;
		void	*physical_address;
		int		mode;
	} translations[64];

	int length = of_getprop(mmu, "translations", &translations,
		sizeof(translations));
	if (length == OF_FAILED) {
		dprintf("Error: no OF translations.\n");
		return B_ERROR;
	}
	length = length / sizeof(struct translation_map);
	uint32 total = 0;
	dprintf("found %d translations\n", length);

	for (int i = 0; i < length; i++) {
		struct translation_map *map = &translations[i];
		bool keepRange = true;
		TRACE("%i: map: %p, length %d -> physical: %p, mode %d\n", i,
			map->virtual_address, map->length,
			map->physical_address, map->mode);

		// insert range in physical allocated, if it points to physical memory

		if (is_physical_memory(map->physical_address)
			&& insert_physical_allocated_range((addr_t)map->physical_address,
				map->length) != B_OK) {
			dprintf("cannot map physical allocated range "
				"(num ranges = %" B_PRIu32 ")!\n",
				gKernelArgs.num_physical_allocated_ranges);
			return B_ERROR;
		}

		if (map->virtual_address == pageTable) {
			dprintf("%i: found page table at va %p\n", i,
				map->virtual_address);
			*_physicalPageTable
				= (page_table_entry_group *)map->physical_address;
			keepRange = false;
				// we keep it explicitly anyway
		}
		if ((addr_t)map->physical_address <= 0x100
			&& (addr_t)map->physical_address + map->length >= 0x1000) {
			dprintf("%i: found exception handlers at va %p\n", i,
				map->virtual_address);
			*_exceptionHandlers = map->virtual_address;
			keepRange = false;
				// we keep it explicitly anyway
		}
		if (map->virtual_address == oldPageTable)
			keepRange = false;

		// insert range in virtual allocated

		if (insert_virtual_allocated_range((addr_t)map->virtual_address,
				map->length) != B_OK) {
			dprintf("cannot map virtual allocated range "
				"(num ranges = %" B_PRIu32 ")!\n",
				gKernelArgs.num_virtual_allocated_ranges);
		}

		// map range into the page table

		map_range(map->virtual_address, map->physical_address, map->length,
			map->mode);

		// insert range in virtual ranges to keep

		if (keepRange) {
			TRACE("%i: keeping free range starting at va %p\n", i,
				map->virtual_address);

			if (insert_virtual_range_to_keep(map->virtual_address,
					map->length) != B_OK) {
				dprintf("cannot map virtual range to keep "
					"(num ranges = %" B_PRIu32 ")\n",
					gKernelArgs.arch_args.num_virtual_ranges_to_keep);
			}
		}

		total += map->length;
	}
	dprintf("total size kept: %" B_PRIu32 "\n", total);

	// Remove the boot loader code from the virtual ranges to keep in the
	// kernel.
	if (remove_virtual_range_to_keep(&__text_begin, &_end - &__text_begin)
			!= B_OK) {
		dprintf("%s: Failed to remove boot loader range "
			"from virtual ranges to keep.\n", __func__);
	}

	return B_OK;
}


/*!	Computes the recommended minimal page table size as
	described in table 7-22 of the PowerPC "Programming
	Environments for 32-Bit Microprocessors".
	The page table size ranges from 64 kB (for 8 MB RAM)
	to 32 MB (for 4 GB RAM).
*/
static size_t
suggested_page_table_size(size_t total)
{
	uint32 max = 23;
		// 2^23 == 8 MB

	while (max < 32) {
		if (total <= (1UL << max))
			break;

		max++;
	}

	return 1UL << (max - 7);
		// 2^(23 - 7) == 64 kB
}
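
// Worked example against the comment above: for total = 256 MB (2^28) the
// loop stops at max = 28 and the function returns 1UL << 21, i.e. a 2 MB
// page table; the 8 MB minimum yields 1UL << 16 = 64 kB.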


static void *
find_physical_memory_range(size_t size)
{
	for (uint32 i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
		if (gKernelArgs.physical_memory_range[i].size > size)
			return (void *)(addr_t)gKernelArgs.physical_memory_range[i].start;
	}
	return PHYSINVAL;
}


static void *
find_free_physical_range(size_t size)
{
	// just do a simple linear search at the end of the allocated
	// ranges (dumb memory allocation)
	if (gKernelArgs.num_physical_allocated_ranges == 0) {
		if (gKernelArgs.num_physical_memory_ranges == 0)
			return PHYSINVAL;

		return find_physical_memory_range(size);
	}

	for (uint32 i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
		void *address
			= (void *)(addr_t)(gKernelArgs.physical_allocated_range[i].start
				+ gKernelArgs.physical_allocated_range[i].size);
		if (!is_physical_allocated(address, size)
			&& is_physical_memory(address, size))
			return address;
	}
	return PHYSINVAL;
}


static void *
find_free_virtual_range(void *base, size_t size)
{
	if (base && !is_virtual_allocated(base, size))
		return base;

	void *firstFound = NULL;
	void *firstBaseFound = NULL;
	for (uint32 i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
		void *address
			= (void *)(addr_t)(gKernelArgs.virtual_allocated_range[i].start
				+ gKernelArgs.virtual_allocated_range[i].size);
		if (!is_virtual_allocated(address, size)) {
			if (!base)
				return address;

			if (firstFound == NULL)
				firstFound = address;
			if (address >= base
				&& (firstBaseFound == NULL || address < firstBaseFound)) {
				firstBaseFound = address;
			}
		}
	}
	return (firstBaseFound ? firstBaseFound : firstFound);
}


extern "C" void *
arch_mmu_allocate(void *_virtualAddress, size_t size, uint8 _protection,
	bool exactAddress)
{
	// we only know page sizes
	size = ROUNDUP(size, B_PAGE_SIZE);

	uint8 protection = 0;
	if (_protection & B_WRITE_AREA)
		protection = PAGE_READ_WRITE;
	else
		protection = PAGE_READ_ONLY;

	// If no address is given, use KERNEL_BASE as the base address, since
	// that avoids trouble in the kernel when we decide to keep the region.
	void *virtualAddress = _virtualAddress;
	if (!virtualAddress)
		virtualAddress = (void*)KERNEL_BASE;

	// find a free address large enough to hold "size"
	virtualAddress = find_free_virtual_range(virtualAddress, size);
	if (virtualAddress == NULL)
		return NULL;

	// fail if the exact address was requested, but is not free
	if (exactAddress && _virtualAddress && virtualAddress != _virtualAddress) {
		dprintf("arch_mmu_allocate(): exact address requested, but virtual "
			"range (base: %p, size: %" B_PRIuSIZE ") is not free.\n",
			_virtualAddress, size);
		return NULL;
	}

	// We have a free virtual range for the allocation, now
	// have a look for free physical memory as well (we assume
	// that a) there is enough memory, and b) failing is fatal,
	// so that we don't have to optimize for these cases :)

	void *physicalAddress = find_free_physical_range(size);
	if (physicalAddress == PHYSINVAL) {
		dprintf("arch_mmu_allocate(base: %p, size: %" B_PRIuSIZE ") "
			"no free physical address\n", virtualAddress, size);
		return NULL;
	}

	// everything went fine, so let's mark the space as used

	dprintf("mmu_alloc: va %p, pa %p, size %" B_PRIuSIZE "\n", virtualAddress,
		physicalAddress, size);
	insert_virtual_allocated_range((addr_t)virtualAddress, size);
	insert_physical_allocated_range((addr_t)physicalAddress, size);

	map_range(virtualAddress, physicalAddress, size, protection);

	return virtualAddress;
}
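
// Hypothetical usage sketch: a caller wanting 16 kB of writable memory
// anywhere above KERNEL_BASE would do
//
//	void *buffer = arch_mmu_allocate(NULL, 4 * B_PAGE_SIZE,
//		B_READ_AREA | B_WRITE_AREA, false);
//	if (buffer == NULL)
//		panic("out of memory");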


extern "C" status_t
arch_mmu_free(void *address, size_t size)
{
	// TODO: implement freeing a region!
	return B_OK;
}


static inline void
invalidate_tlb(void)
{
	//asm volatile("tlbia");
		// "tlbia" is obviously not available on every CPU...

	// Note: this flushes the whole 4 GB address space - it
	// would probably be a good idea to do less here
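	// (0x100000 iterations * B_PAGE_SIZE = 4 GB, one tlbie per page)
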
	addr_t address = 0;
	for (uint32 i = 0; i < 0x100000; i++) {
		asm volatile("tlbie %0" : : "r" (address));
		address += B_PAGE_SIZE;
	}
	tlbsync();
}


// #pragma mark - OpenFirmware callbacks and public API


static int
map_callback(struct of_arguments *args)
{
	void *physicalAddress = (void *)args->Argument(0);
	void *virtualAddress = (void *)args->Argument(1);
	int length = args->Argument(2);
	int mode = args->Argument(3);
	intptr_t &error = args->ReturnValue(0);

	// insert range in physical allocated if needed

	if (is_physical_memory(physicalAddress)
		&& insert_physical_allocated_range((addr_t)physicalAddress, length)
			!= B_OK) {
		error = -1;
		return OF_FAILED;
	}

	// insert range in virtual allocated

	if (insert_virtual_allocated_range((addr_t)virtualAddress, length)
			!= B_OK) {
		error = -2;
		return OF_FAILED;
	}

	// map range into the page table

	map_range(virtualAddress, physicalAddress, length, mode);

	return B_OK;
}


static int
unmap_callback(struct of_arguments *args)
{
/*	void *address = (void *)args->Argument(0);
	int length = args->Argument(1);
	int &error = args->ReturnValue(0);
*/
	// TODO: to be implemented

	return OF_FAILED;
}


static int
translate_callback(struct of_arguments *args)
{
	addr_t virtualAddress = (addr_t)args->Argument(0);
	intptr_t &error = args->ReturnValue(0);
	intptr_t &physicalAddress = args->ReturnValue(1);
	intptr_t &mode = args->ReturnValue(2);

	// find the page table entry for this address

	uint32 virtualSegmentID
		= sSegments[addr_t(virtualAddress) >> 28].virtual_segment_id;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		(uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
	page_table_entry *entry = NULL;

	for (int32 i = 0; i < 8; i++) {
		entry = &group->entry[i];

		if (entry->valid
			&& entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == false
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			goto success;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		entry = &group->entry[i];

		if (entry->valid
			&& entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == true
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			goto success;
	}

	// could not find the translation
	error = B_ENTRY_NOT_FOUND;
	return OF_FAILED;

success:
	// we found the entry in question
	physicalAddress = (int)(entry->physical_page_number * B_PAGE_SIZE);
	mode = (entry->write_through << 6)	// WIMGxPP
		| (entry->caching_inhibited << 5)
		| (entry->memory_coherent << 4)
		| (entry->guarded << 3)
		| entry->page_protection;
	error = B_OK;

	return B_OK;
}


static int
alloc_real_mem_callback(struct of_arguments *args)
{
/*	addr_t minAddress = (addr_t)args->Argument(0);
	addr_t maxAddress = (addr_t)args->Argument(1);
	int length = args->Argument(2);
	int mode = args->Argument(3);
	int &error = args->ReturnValue(0);
	int &physicalAddress = args->ReturnValue(1);
*/
	// TODO: to be implemented

	return OF_FAILED;
}


/*!	Dispatches the callback to the responsible function */
static int
callback(struct of_arguments *args)
{
	const char *name = args->name;
	TRACE("OF CALLBACK: %s\n", name);

	if (!strcmp(name, "map"))
		return map_callback(args);
	else if (!strcmp(name, "unmap"))
		return unmap_callback(args);
	else if (!strcmp(name, "translate"))
		return translate_callback(args);
	else if (!strcmp(name, "alloc-real-mem"))
		return alloc_real_mem_callback(args);

	return OF_FAILED;
}


extern "C" status_t
arch_set_callback(void)
{
	// set OpenFirmware callbacks - it will ask us for memory after that
	// instead of maintaining it itself

	void *oldCallback = NULL;
	if (of_call_client_function("set-callback", 1, 1, &callback, &oldCallback)
			== OF_FAILED) {
		dprintf("Error: OpenFirmware set-callback failed\n");
		return B_ERROR;
	}
	TRACE("old callback = %p; new callback = %p\n", oldCallback, callback);

	return B_OK;
}


extern "C" status_t
arch_mmu_init(void)
{
	// get map of physical memory (fill in kernel_args structure)

	size_t total;
	if (find_physical_memory_ranges(total) != B_OK) {
		dprintf("Error: could not find physical memory ranges!\n");
		return B_ERROR;
	}
	dprintf("total physical memory = %" B_PRIuSIZE " MB\n",
		total / (1024 * 1024));

	// get OpenFirmware's current page table

	page_table_entry_group *oldTable;
	page_table_entry_group *table;
	size_t tableSize;
	ppc_get_page_table(&table, &tableSize);

	oldTable = table;

	bool realMode = false;

	// TODO: read these values out of the OF settings
	// NOTE: I've only ever seen -1 (0xffffffff) for these values in
	// OpenFirmware, even after loading the boot loader -- Alex
	addr_t realBase = 0;
	addr_t realSize = 0x400000;

	// can we just keep the page table?
	size_t suggestedTableSize = suggested_page_table_size(total);
	dprintf("current page table size = %" B_PRIuSIZE "\n", tableSize);
	dprintf("suggested page table size = %" B_PRIuSIZE "\n",
		suggestedTableSize);
	if (tableSize < suggestedTableSize) {
		// nah, we need a new one!
		dprintf("need new page table, size = %" B_PRIuSIZE "!\n",
			suggestedTableSize);
		table = (page_table_entry_group *)of_claim(NULL, suggestedTableSize,
			suggestedTableSize);
			// KERNEL_BASE would be better as virtual address, but
			// at least with Apple's OpenFirmware, it makes no
			// difference - we will have to remap it later
		if (table == (void *)OF_FAILED) {
			panic("Could not allocate new page table "
				"(size = %" B_PRIuSIZE ")!!\n", suggestedTableSize);
			return B_NO_MEMORY;
		}
		if (table == NULL) {
			// work-around for the broken Pegasos OpenFirmware
			dprintf("broken OpenFirmware detected (claim doesn't work)\n");
			realMode = true;

			addr_t tableBase = 0;
			for (int32 i = 0; tableBase < realBase + realSize * 3; i++) {
				tableBase = suggestedTableSize * i;
			}

			table = (page_table_entry_group *)tableBase;
		}

		dprintf("OpenFirmware gave us a new page table at: %p\n", table);
		sPageTable = table;
		tableSize = suggestedTableSize;
	} else {
		// TODO: we could check if the page table is much too large
		// and create a smaller one in this case (in order to save
		// memory).
		dprintf("using original OpenFirmware page table at: %p\n", table);
		sPageTable = table;
	}

	sPageTableHashMask = tableSize / sizeof(page_table_entry_group) - 1;
	if (sPageTable != oldTable)
		memset(sPageTable, 0, tableSize);
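
	// For example, assuming the standard 8-byte PTE layout: a 64 kB table
	// holds 1024 page_table_entry_groups (eight 8-byte entries each), so the
	// hash mask above comes out as 1024 - 1 = 0x3ff.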

	// Turn off address translation via the page table/segment mechanism,
	// identity map the first 256 MB (where our code/data reside).

	dprintf("MSR: %p\n", (void *)get_msr());

#if 0
	block_address_translation bat;

	bat.length = BAT_LENGTH_256MB;
	bat.kernel_valid = true;
	bat.memory_coherent = true;
	bat.protection = BAT_READ_WRITE;

	set_ibat0(&bat);
	set_dbat0(&bat);
	isync();
#endif

	// Initialize segment descriptors, but don't set the registers
	// until we're about to take over the page table - we're mapping
	// pages into our table using these values.

	for (int32 i = 0; i < 16; i++)
		sSegments[i].virtual_segment_id = i;

	// find already allocated ranges of physical memory
	// and the virtual address space

	page_table_entry_group *physicalTable = NULL;
	void *exceptionHandlers = (void *)-1;
	if (find_allocated_ranges(oldTable, table, &physicalTable,
			&exceptionHandlers) != B_OK) {
		dprintf("Error: find_allocated_ranges() failed\n");
		return B_ERROR;
	}

#if 0
	block_address_translation bats[8];
	getibats(bats);
	for (int32 i = 0; i < 8; i++) {
		printf("page index %u, length %u, ppn %u\n", bats[i].page_index,
			bats[i].length, bats[i].physical_block_number);
	}
#endif

	if (physicalTable == NULL) {
		dprintf("%s: Didn't find physical address of page table\n", __func__);
		if (!realMode)
			return B_ERROR;

		// Pegasos work-around
#if 0
		map_range((void *)realBase, (void *)realBase,
			realSize * 2, PAGE_READ_WRITE);
		map_range((void *)(total - realSize), (void *)(total - realSize),
			realSize, PAGE_READ_WRITE);
		map_range((void *)table, (void *)table, tableSize, PAGE_READ_WRITE);
#endif
		insert_physical_allocated_range(realBase, realSize * 2);
		insert_virtual_allocated_range(realBase, realSize * 2);
		insert_physical_allocated_range(total - realSize, realSize);
		insert_virtual_allocated_range(total - realSize, realSize);
		insert_physical_allocated_range((addr_t)table, tableSize);
		insert_virtual_allocated_range((addr_t)table, tableSize);

		// QEMU OpenHackware work-around
		insert_physical_allocated_range(0x05800000, 0x06000000 - 0x05800000);
		insert_virtual_allocated_range(0x05800000, 0x06000000 - 0x05800000);

		physicalTable = table;
	}

	if (exceptionHandlers == (void *)-1) {
		// TODO: create mapping for the exception handlers
		dprintf("Error: no mapping for the exception handlers!\n");
	}

	// Set the OpenFirmware memory callback. From now on the OpenFirmware
	// will ask us for memory.
	arch_set_callback();

	// set up new page table and turn on translation again

	for (uint32 i = 0; i < 16; i++) {
		ppc_set_segment_register((void *)(i * 0x10000000), sSegments[i]);
			// one segment describes 256 MB of memory
	}

	ppc_set_page_table(physicalTable, tableSize);
	invalidate_tlb();

	if (!realMode) {
		// clear BATs
		reset_ibats();
		reset_dbats();
		ppc_sync();
		isync();
	}

	set_msr(MSR_MACHINE_CHECK_ENABLED | MSR_FP_AVAILABLE
		| MSR_INST_ADDRESS_TRANSLATION | MSR_DATA_ADDRESS_TRANSLATION);
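		// From this point on, instruction and data address translation are
		// enabled, so every access goes through the segment registers and
		// the hash table set up above.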

	// set kernel args

	dprintf("virt_allocated: %" B_PRIu32 "\n",
		gKernelArgs.num_virtual_allocated_ranges);
	dprintf("phys_allocated: %" B_PRIu32 "\n",
		gKernelArgs.num_physical_allocated_ranges);
	dprintf("phys_memory: %" B_PRIu32 "\n",
		gKernelArgs.num_physical_memory_ranges);

	gKernelArgs.arch_args.page_table.start = (addr_t)sPageTable;
	gKernelArgs.arch_args.page_table.size = tableSize;

	gKernelArgs.arch_args.exception_handlers.start = (addr_t)exceptionHandlers;
	gKernelArgs.arch_args.exception_handlers.size = B_PAGE_SIZE;

	return B_OK;
}