/*
 * Copyright 2007, François Revol, revol@free.fr.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <KernelExport.h>

#include <arch/cpu.h>
#include <boot/kernel_args.h>
#include <commpage.h>
#include <elf.h>


int arch_cpu_type;
int arch_fpu_type;
int arch_mmu_type;
int arch_platform;


status_t
arch_cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
{
	// enable FPU
	//ppc:set_msr(get_msr() | MSR_FP_AVAILABLE);

	// The current thread must be NULL for all CPUs until we have threads.
	// Some boot code relies on this.
	arch_thread_set_current_thread(NULL);

	return B_OK;
}


status_t
arch_cpu_init_percpu(kernel_args *args, int curr_cpu)
{
	if (curr_cpu != 0)
		panic("No SMP support on ARM yet!\n");

	return B_OK;
}


status_t
arch_cpu_init(kernel_args *args)
{
	arch_cpu_type = args->arch_args.cpu_type;
	arch_fpu_type = args->arch_args.fpu_type;
	arch_mmu_type = args->arch_args.mmu_type;
	arch_platform = args->arch_args.platform;

	return B_OK;
}


status_t
arch_cpu_init_post_vm(kernel_args *args)
{
	return B_OK;
}


status_t
arch_cpu_init_post_modules(kernel_args *args)
{
	// add the functions to the commpage image
	image_id image = get_commpage_image();
	(void)image;
		// TODO: no ARM-specific commpage functions are registered yet

	return B_OK;
}


status_t
arch_cpu_shutdown(bool reboot)
{
	while (true)
		arch_cpu_idle();

	// never reached
	return B_ERROR;
}


void
arch_cpu_sync_icache(void *address, size_t len)
{
	// invalidate the entire instruction cache (c7, c5, 0);
	// address and len are ignored for now
	uint32 Rd = 0;
	asm volatile ("mcr p15, 0, %[c7format], c7, c5, 0"
		: : [c7format] "r" (Rd));
}


void
arch_cpu_memory_read_barrier(void)
{
	// TODO: check if we need more here
	// (or just call the inline version?)
	// cf. headers/private/kernel/arch/arm/arch_atomic.h
	asm volatile ("" : : : "memory");
}


void
arch_cpu_memory_write_barrier(void)
{
	// TODO: check if we need more here
	// (or just call the inline version?)
	// cf. headers/private/kernel/arch/arm/arch_atomic.h
	asm volatile ("" : : : "memory");
}


void
arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
{
	// invalidate the data TLB entry (c8, c6, 1) for every page in the
	// range; the page containing "end" is included
	int32 num_pages = end / B_PAGE_SIZE - start / B_PAGE_SIZE;
	while (num_pages-- >= 0) {
		asm volatile ("mcr p15, 0, %[c8format], c8, c6, 1"
			: : [c8format] "r" (start));
		start += B_PAGE_SIZE;
	}
}


void
arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
{
	// invalidate the data TLB entry (c8, c6, 1) for each given page
	for (int i = 0; i < num_pages; i++) {
		asm volatile ("mcr p15, 0, %[c8format], c8, c6, 1"
			: : [c8format] "r" (pages[i]));
	}
}


void
arch_cpu_global_TLB_invalidate(void)
{
	// invalidate the entire TLB (c8, c7, 0)
	uint32 Rd = 0;
	asm volatile ("mcr p15, 0, %[c8format], c8, c7, 0"
		: : [c8format] "r" (Rd));
}


void
arch_cpu_user_TLB_invalidate(void)
{
/*
	cpu_ops.flush_insn_pipeline();
	cpu_ops.flush_atc_user();
	cpu_ops.flush_insn_pipeline();
*/
#warning WRITEME
}