/*
 * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include <KernelExport.h>

#include <kernel.h>
#include <boot/kernel_args.h>

#include <arch/vm.h>
#include <arch_mmu.h>


//#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


status_t
arch_vm_init(kernel_args *args)
{
	return B_OK;
}


status_t
arch_vm_init2(kernel_args *args)
{
//	int bats[8];
//	int i;

#if 0
	// print out any bat mappings
	getibats(bats);
	dprintf("ibats:\n");
	for (i = 0; i < 4; i++)
		dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
	getdbats(bats);
	dprintf("dbats:\n");
	for (i = 0; i < 4; i++)
		dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
#endif

#if 1
	// turn off the first 2 BAT mappings (3 & 4 are used by the lower level code)
	block_address_translation bat;
	bat.Clear();

	set_ibat0(&bat);
	set_ibat1(&bat);
	set_dbat0(&bat);
	set_dbat1(&bat);
/*	getibats(bats);
	memset(bats, 0, 2 * 2);
	setibats(bats);
	getdbats(bats);
	memset(bats, 0, 2 * 2);
	setdbats(bats);
*/
#endif
#if 0
	// just clear the first BAT mapping (0 - 256MB)
	dprintf("msr 0x%x\n", getmsr());
	{
		unsigned int reg;
		asm("mr %0,1" : "=r"(reg));
		dprintf("sp 0x%x\n", reg);
	}
	dprintf("ka %p\n", ka);

	getibats(bats);
	dprintf("ibats:\n");
	for (i = 0; i < 4; i++)
		dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
	bats[0] = bats[1] = 0;
	setibats(bats);
	getdbats(bats);
	dprintf("dbats:\n");
	for (i = 0; i < 4; i++)
		dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
	bats[0] = bats[1] = 0;
	setdbats(bats);
#endif
	return B_OK;
}


status_t
arch_vm_init_post_area(kernel_args *args)
{
	return B_OK;
}


status_t
arch_vm_init_end(kernel_args *args)
{
	TRACE(("arch_vm_init_end(): %lu virtual ranges to keep:\n",
		args->arch_args.num_virtual_ranges_to_keep));

	for (int i = 0; i < (int)args->arch_args.num_virtual_ranges_to_keep; i++) {
		addr_range &range = args->arch_args.virtual_ranges_to_keep[i];

		TRACE(("  start: %p, size: 0x%lx\n", (void*)range.start, range.size));

		// skip ranges outside the kernel address space
		if (!IS_KERNEL_ADDRESS(range.start)) {
			TRACE(("    no kernel address, skipping...\n"));
			continue;
		}

		void *address = (void*)range.start;
		area_id area = create_area("boot loader reserved area", &address,
			B_EXACT_ADDRESS, range.size, B_ALREADY_WIRED,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		if (area < 0) {
			panic("arch_vm_init_end(): Failed to create area for boot loader "
				"reserved area: %p - %p\n", (void*)range.start,
				(void*)(range.start + range.size));
		}
	}

	// Throw away any address space mappings we've inherited from the boot
	// loader and have not yet turned into an area.
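	// (0xffffffff - B_PAGE_SIZE + 1 == 0xfffff000, i.e. the whole 32-bit
	// address space less the final page.)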
	vm_free_unused_boot_loader_range(0, 0xffffffff - B_PAGE_SIZE + 1);

	return B_OK;
}


status_t
arch_vm_init_post_modules(kernel_args *args)
{
	return B_OK;
}


void
arch_vm_aspace_swap(vm_address_space *aspace)
{
}


bool
arch_vm_supports_protection(uint32 protection)
{
	return true;
}


void
arch_vm_unset_memory_type(vm_area *area)
{
}


status_t
arch_vm_set_memory_type(vm_area *area, addr_t physicalBase, uint32 type)
{
	if (type == 0)
		return B_OK;

	return B_ERROR;
}