1 /* 2 * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl 3 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de. 4 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved. 5 * Distributed under the terms of the MIT License. 6 * 7 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved. 8 * Distributed under the terms of the NewOS License. 9 */ 10 11 12 #include <arch/vm_translation_map.h> 13 14 #include <boot/kernel_args.h> 15 16 #include "paging/32bit/ARMPagingMethod32Bit.h" 17 //#include "paging/pae/ARMPagingMethodPAE.h" 18 19 20 //#define TRACE_VM_TMAP 21 #ifdef TRACE_VM_TMAP 22 # define TRACE(x...) dprintf(x) 23 #else 24 # define TRACE(x...) ; 25 #endif 26 27 28 static union { 29 uint64 align; 30 char thirty_two[sizeof(ARMPagingMethod32Bit)]; 31 #if B_HAIKU_PHYSICAL_BITS == 64 32 char pae[sizeof(ARMPagingMethodPAE)]; 33 #endif 34 } sPagingMethodBuffer; 35 36 37 // #pragma mark - VM API 38 39 40 status_t 41 arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map) 42 { 43 return gARMPagingMethod->CreateTranslationMap(kernel, _map); 44 } 45 46 47 status_t 48 arch_vm_translation_map_init(kernel_args *args, 49 VMPhysicalPageMapper** _physicalPageMapper) 50 { 51 TRACE("vm_translation_map_init: entry\n"); 52 53 #ifdef TRACE_VM_TMAP 54 TRACE("physical memory ranges:\n"); 55 for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) { 56 phys_addr_t start = args->physical_memory_range[i].start; 57 phys_addr_t end = start + args->physical_memory_range[i].size; 58 TRACE(" %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start, 59 end); 60 } 61 62 TRACE("allocated physical ranges:\n"); 63 for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) { 64 phys_addr_t start = args->physical_allocated_range[i].start; 65 phys_addr_t end = start + args->physical_allocated_range[i].size; 66 TRACE(" %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start, 67 end); 68 } 69 70 TRACE("allocated virtual ranges:\n"); 71 for (uint32 i = 0; i < args->num_virtual_allocated_ranges; i++) { 72 addr_t start = args->virtual_allocated_range[i].start; 73 addr_t end = start + args->virtual_allocated_range[i].size; 74 TRACE(" %#10" B_PRIxADDR " - %#10" B_PRIxADDR "\n", start, end); 75 } 76 #endif 77 78 #if B_HAIKU_PHYSICAL_BITS == 64 //IRA: Check 64 bit code and adjust for ARM 79 bool paeAvailable = x86_check_feature(IA32_FEATURE_PAE, FEATURE_COMMON); 80 bool paeNeeded = false; 81 for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) { 82 phys_addr_t end = args->physical_memory_range[i].start 83 + args->physical_memory_range[i].size; 84 if (end > 0x100000000LL) { 85 paeNeeded = true; 86 break; 87 } 88 } 89 90 if (paeAvailable && paeNeeded) { 91 dprintf("using PAE paging\n"); 92 gARMPagingMethod = new(&sPagingMethodBuffer) ARMPagingMethodPAE; 93 } else { 94 dprintf("using 32 bit paging (PAE not %s)\n", 95 paeNeeded ? "available" : "needed"); 96 gARMPagingMethod = new(&sPagingMethodBuffer) ARMPagingMethod32Bit; 97 } 98 #else 99 gARMPagingMethod = new(&sPagingMethodBuffer) ARMPagingMethod32Bit; 100 #endif 101 102 return gARMPagingMethod->Init(args, _physicalPageMapper); 103 } 104 105 106 status_t 107 arch_vm_translation_map_init_post_sem(kernel_args *args) 108 { 109 return B_OK; 110 } 111 112 113 status_t 114 arch_vm_translation_map_init_post_area(kernel_args *args) 115 { 116 TRACE("vm_translation_map_init_post_area: entry\n"); 117 118 return gARMPagingMethod->InitPostArea(args); 119 } 120 121 122 status_t 123 arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa, 124 uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *)) 125 { 126 TRACE("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va); 127 128 return gARMPagingMethod->MapEarly(args, va, pa, attributes, get_free_page); 129 } 130 131 132 /*! Verifies that the page at the given virtual address can be accessed in the 133 current context. 134 135 This function is invoked in the kernel debugger. Paranoid checking is in 136 order. 137 138 \param virtualAddress The virtual address to be checked. 139 \param protection The area protection for which to check. Valid is a bitwise 140 or of one or more of \c B_KERNEL_READ_AREA or \c B_KERNEL_WRITE_AREA. 141 \return \c true, if the address can be accessed in all ways specified by 142 \a protection, \c false otherwise. 143 */ 144 bool 145 arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress, 146 uint32 protection) 147 { 148 if (!gARMPagingMethod) 149 return true; 150 151 return gARMPagingMethod->IsKernelPageAccessible(virtualAddress, protection); 152 } 153