/*
 * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*!	PowerPC architecture-specific VM hooks.
	Most of the hooks in this file are no-ops on this platform; the
	substantive work is clearing the first two instruction/data BAT
	(Block Address Translation) register pairs early in boot, and turning
	the boot loader's "keep these" virtual ranges into wired kernel areas.
*/

#include <KernelExport.h>

#include <kernel.h>
#include <boot/kernel_args.h>

#include <vm/vm.h>
#include <arch/vm.h>
#include <arch_mmu.h>


//#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


/*!	First stage of architecture-specific VM initialization.
	Nothing to do on PPC at this point.
*/
status_t
arch_vm_init(kernel_args *args)
{
	return B_OK;
}


/*!	Second stage of architecture-specific VM initialization.
	Clears the first two instruction and data BAT register pairs so that
	the large identity mappings the firmware/boot code may have set up
	through them no longer shadow the kernel's page tables. The disabled
	`#if 0` sections below are historical debug code for dumping and
	clearing BATs and are kept for reference.
*/
status_t
arch_vm_init2(kernel_args *args)
{
//	int bats[8];
//	int i;

#if 0
	// print out any bat mappings
	getibats(bats);
	dprintf("ibats:\n");
	for (i = 0; i < 4; i++)
		dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
	getdbats(bats);
	dprintf("dbats:\n");
	for (i = 0; i < 4; i++)
		dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
#endif

#if 1
	// turn off the first 2 BAT mappings (3 & 4 are used by the lower level code)
	block_address_translation bat;
	bat.Clear();
		// a cleared BAT entry is invalid, so writing it disables the mapping

	set_ibat0(&bat);
	set_ibat1(&bat);
	set_dbat0(&bat);
	set_dbat1(&bat);
/*	getibats(bats);
	memset(bats, 0, 2 * 2);
	setibats(bats);
	getdbats(bats);
	memset(bats, 0, 2 * 2);
	setdbats(bats);
*/
#endif
#if 0
	// just clear the first BAT mapping (0 - 256MB)
	// NOTE(review): this disabled block references 'ka', which is not
	// defined in this function — it would not compile if re-enabled.
	dprintf("msr 0x%x\n", getmsr());
	{
		unsigned int reg;
		asm("mr %0,1" : "=r"(reg));
		dprintf("sp 0x%x\n", reg);
	}
	dprintf("ka %p\n", ka);

	getibats(bats);
	dprintf("ibats:\n");
	for (i = 0; i < 4; i++)
		dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
	bats[0] = bats[1] = 0;
	setibats(bats);
	getdbats(bats);
	dprintf("dbats:\n");
	for (i = 0; i < 4; i++)
		dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
	bats[0] = bats[1] = 0;
	setdbats(bats);
#endif
	return B_OK;
}


/*!	Called after the area subsystem is available.
	Nothing to do on PPC at this point.
*/
status_t
arch_vm_init_post_area(kernel_args *args)
{
	return B_OK;
}


/*!	Final stage of architecture-specific VM initialization.
	Wraps each virtual range the boot loader asked us to keep
	(args->arch_args.virtual_ranges_to_keep) in an already-wired,
	exact-address kernel area, then releases every other mapping
	inherited from the boot loader across the full 32-bit address space.
*/
status_t
arch_vm_init_end(kernel_args *args)
{
	TRACE(("arch_vm_init_end(): %lu virtual ranges to keep:\n",
		args->arch_args.num_virtual_ranges_to_keep));

	for (int i = 0; i < (int)args->arch_args.num_virtual_ranges_to_keep; i++) {
		addr_range &range = args->arch_args.virtual_ranges_to_keep[i];

		TRACE(("  start: %p, size: 0x%lx\n", (void*)range.start, range.size));

		// skip ranges outside the kernel address space
		if (!IS_KERNEL_ADDRESS(range.start)) {
			TRACE(("    no kernel address, skipping...\n"));
			continue;
		}

		// The pages are already mapped and wired by the boot loader, so the
		// area only has to take ownership of the existing mapping.
		void *address = (void*)range.start;
		area_id area = create_area("boot loader reserved area", &address,
			B_EXACT_ADDRESS, range.size, B_ALREADY_WIRED,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		if (area < 0) {
			panic("arch_vm_init_end(): Failed to create area for boot loader "
				"reserved area: %p - %p\n", (void*)range.start,
				(void*)(range.start + range.size));
		}
	}

	// Throw away any address space mappings we've inherited from the boot
	// loader and have not yet turned into an area.
	vm_free_unused_boot_loader_range(0, 0xffffffff - B_PAGE_SIZE + 1);

	return B_OK;
}


/*!	Called after the module system is up.
	Nothing to do on PPC at this point.
*/
status_t
arch_vm_init_post_modules(kernel_args *args)
{
	return B_OK;
}


/*!	Architecture hook invoked on address space switches.
	No per-swap work is needed on PPC; the actual context switch is
	handled elsewhere.
*/
void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
}


/*!	Reports whether the given protection flags can be represented by the
	hardware. PPC accepts all combinations here.
*/
bool
arch_vm_supports_protection(uint32 protection)
{
	return true;
}


/*!	Clears any special memory type previously set on \a area.
	Memory types are not supported on this platform, so this is a no-op.
*/
void
arch_vm_unset_memory_type(VMArea *area)
{
}


/*!	Sets a special memory type (e.g. write-combining) on \a area.
	Only the default type (0) is supported on this platform; any other
	type is rejected with B_ERROR.
*/
status_t
arch_vm_set_memory_type(VMArea *area, phys_addr_t physicalBase, uint32 type)
{
	if (type == 0)
		return B_OK;

	return B_ERROR;
}