/*
 * Copyright 2009, Wischert, johanneswi@gmail.com.
 * All rights reserved. Distributed under the terms of the MIT License.
 *
 * Copyright 2003, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include <arch/arm/arch_cpu.h>

#include <asm_defs.h>

#include "asm_offsets.h"

.text


/* int mmu_read_c1(void); */
FUNCTION(mmu_read_c1):
	mrc	p15, 0, r0, c1, c0, 0
	bx	lr
FUNCTION_END(mmu_read_c1)


/* void mmu_write_c1(int val); */
FUNCTION(mmu_write_c1):
	mcr	p15, 0, r0, c1, c0, 0
	bx	lr
FUNCTION_END(mmu_write_c1)


/* NOTE: the I bit in cpsr (bit 7) is *set* to disable IRQs, and cleared to
   enable them. */


/* void arch_int_enable_interrupts(void) */
FUNCTION(arch_int_enable_interrupts):
	mrs	r0, cpsr
	bic	r0, r0, #(1<<7)		/* clear the I bit */
	msr	cpsr_c, r0
	bx	lr
FUNCTION_END(arch_int_enable_interrupts)


/* int arch_int_disable_interrupts(void) */
FUNCTION(arch_int_disable_interrupts):
	mrs	r0, cpsr		/* previous state is returned in r0 */
	orr	r1, r0, #(1<<7)		/* set the I bit */
	msr	cpsr_c, r1
	bx	lr
FUNCTION_END(arch_int_disable_interrupts)


/* void arch_int_restore_interrupts(int oldState) */
FUNCTION(arch_int_restore_interrupts):
	mrs	r1, cpsr
	and	r0, r0, #(1<<7)		/* keep only the I bit of the old state */
	bic	r1, r1, #(1<<7)
	orr	r1, r1, r0
	msr	cpsr_c, r1
	bx	lr
FUNCTION_END(arch_int_restore_interrupts)


/* bool arch_int_are_interrupts_enabled(void) */
FUNCTION(arch_int_are_interrupts_enabled):
	mrs	r0, cpsr
	and	r0, r0, #(1<<7)		/* read the I bit */
	cmp	r0, #0
	moveq	r0, #1			/* I bit clear: interrupts enabled */
	movne	r0, #0
	bx	lr
FUNCTION_END(arch_int_are_interrupts_enabled)


/* void arm_context_switch(struct arch_thread* oldState,
	struct arch_thread* newState); */
FUNCTION(arm_context_switch):
	stmfd	sp!, { r0-r12, lr }	/* save the current register state */
	str	sp, [r0]		/* remember the old thread's stack pointer */
	ldr	sp, [r1]		/* switch to the new thread's stack */
	ldmfd	sp!, { r0-r12, lr }	/* restore its register state */
	bx	lr
FUNCTION_END(arm_context_switch)


/* addr_t arm_get_fsr(void); */
FUNCTION(arm_get_fsr):
	mrc	p15, 0, r0, c5, c0, 0	@ get FSR
	bx	lr
FUNCTION_END(arm_get_fsr)


/* addr_t arm_get_far(void); */
FUNCTION(arm_get_far):
	mrc	p15, 0, r0, c6, c0, 0	@ get FAR
	bx	lr
FUNCTION_END(arm_get_far)


/* addr_t arm_get_fp(void); */
FUNCTION(arm_get_fp):
	mov	r0, fp			@ get frame pointer
	bx	lr
FUNCTION_END(arm_get_fp)


/* status_t arch_cpu_user_memcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
FUNCTION(arch_cpu_user_memcpy):
	stmfd	sp!, { r4-r6, lr }

	ldr	r6, [r3]		/* save the old fault handler */
	ldr	r4, =.L_user_memcpy_error
	str	r4, [r3]		/* set fault handler */
	movs	r4, r2, lsr #2		/* size / 4 */
	beq	2f			/* fewer than four bytes: skip word loop */
1:
	ldr	r5, [r1]
	str	r5, [r0]
	add	r1, #4
	add	r0, #4
	subs	r4, #1
	bne	1b
2:
	ands	r4, r2, #3		/* size % 4 */
	beq	4f
3:
	ldrb	r5, [r1]
	strb	r5, [r0]
	add	r1, #1
	add	r0, #1
	subs	r4, #1
	bne	3b
4:
	str	r6, [r3]		/* restore fault handler */
	mov	r0, #0
	ldmfd	sp!, { r4-r6, pc }

.L_user_memcpy_error:
	str	r6, [r3]		/* restore fault handler */
	mov	r0, #-1

	ldmfd	sp!, { r4-r6, pc }
FUNCTION_END(arch_cpu_user_memcpy)
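
/* A minimal usage sketch (in C, illustrative only) of the fault-handler
 * protocol shared by the arch_cpu_user_* routines in this file: the caller
 * passes the address of its per-thread fault handler slot, and the routine
 * installs a local error label there for the duration of the access. The
 * thread_get_current_thread() accessor and the fault_handler field are
 * assumptions for illustration, not definitions from this file:
 *
 *	status_t
 *	user_memcpy(void* to, const void* from, size_t size)
 *	{
 *		Thread* thread = thread_get_current_thread();	// assumed accessor
 *		return arch_cpu_user_memcpy(to, from, size,
 *			&thread->fault_handler);	// assumed field
 *	}
 *
 * If a load or store in the copy loop faults, the page fault code jumps to
 * the installed handler, which restores the previous one and returns -1.
 */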

/* status_t arch_cpu_user_memset(void *to, char c, size_t count, addr_t *faultHandler) */
FUNCTION(arch_cpu_user_memset):
	stmfd	sp!, { r4-r5, lr }

	ldr	r5, [r3]		/* save the old fault handler */
	ldr	r4, =.L_user_memset_error
	str	r4, [r3]		/* set fault handler */

	and	r1, r1, #0xff		/* replicate the fill byte into all */
	orr	r1, r1, r1, lsl #8	/* four bytes of a word */
	orr	r1, r1, r1, lsl #16

	movs	r4, r2, lsr #2		/* count / 4 */
	beq	2f			/* fewer than four bytes: skip word loop */
1:
	str	r1, [r0]
	add	r0, r0, #4
	subs	r4, r4, #1
	bne	1b
2:
	ands	r4, r2, #3		/* count % 4 */
	beq	4f
3:
	strb	r1, [r0]
	add	r0, r0, #1
	subs	r4, r4, #1
	bne	3b
4:
	mov	r0, #0
	str	r5, [r3]		/* restore fault handler */

	ldmfd	sp!, { r4-r5, pc }

.L_user_memset_error:
	mov	r0, #-1
	str	r5, [r3]		/* restore fault handler */

	ldmfd	sp!, { r4-r5, pc }
FUNCTION_END(arch_cpu_user_memset)

/* ssize_t arch_cpu_user_strlcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
FUNCTION(arch_cpu_user_strlcpy):
	stmfd	sp!, { r4-r6, lr }
	ldr	r5, [r3]		/* save the old fault handler */
	ldr	r4, =.L_user_strlcpy_error
	str	r4, [r3]		/* set fault handler */
	mov	r6, #0
	cmp	r2, #0			/* empty destination buffer? */
	beq	3f
	sub	r2, r2, #1		/* reserve room for the terminating NUL */
1:
	cmp	r6, r2			/* reached max length? */
	bge	2f
	ldrb	r4, [r1, r6]
	strb	r4, [r0, r6]
	add	r6, r6, #1
	cmp	r4, #0
	bne	1b
	sub	r6, r6, #1		/* don't count the copied NUL */
	b	4f
2:
	mov	r4, #0			/* NUL-terminate the truncated copy */
	strb	r4, [r0, r6]
3:
	ldrb	r4, [r1, r6]		/* keep scanning the source, so we */
	cmp	r4, #0			/* can return strlen(from) as */
	add	r6, r6, #1		/* strlcpy() requires */
	bne	3b
	sub	r6, r6, #1
4:
	mov	r0, r6			/* return strlen(from) */
	str	r5, [r3]		/* restore fault handler */

	ldmfd	sp!, { r4-r6, pc }

.L_user_strlcpy_error:
	mov	r0, #-1
	str	r5, [r3]		/* restore fault handler */

	ldmfd	sp!, { r4-r6, pc }
FUNCTION_END(arch_cpu_user_strlcpy)


/*!	\fn void arch_debug_call_with_fault_handler(cpu_ent* cpu,
		jmp_buf jumpBuffer, void (*function)(void*), void* parameter)

	Called by debug_call_with_fault_handler() to do the dirty work of setting
	the fault handler and calling the function. If the function causes a page
	fault, arch_debug_call_with_fault_handler() calls longjmp() with the
	given \a jumpBuffer. Otherwise it returns normally.

	debug_call_with_fault_handler() has already saved the CPU's fault_handler
	and fault_handler_stack_pointer and will reset them later, so
	arch_debug_call_with_fault_handler() doesn't need to care about them.

	\param cpu The \c cpu_ent for the current CPU.
	\param jumpBuffer Buffer to be used for longjmp().
	\param function The function to be called.
	\param parameter The parameter to be passed to the function to be called.
*/
FUNCTION(arch_debug_call_with_fault_handler):
	stmfd	sp!, { r4, lr }

	// Set fault handler address, and fault handler stack pointer address. We
	// don't need to save the previous values, since that's done by the caller.
	ldr	r4, =1f
	str	r4, [r0, #CPU_ENT_fault_handler]
	str	sp, [r0, #CPU_ENT_fault_handler_stack_pointer]
	mov	r4, r1			// save jumpBuffer in a callee-saved register

	// call the function
	mov	r0, r3
	blx	r2

	// regular return
	ldmfd	sp!, { r4, pc }

	// fault -- return via longjmp(jumpBuffer, 1)
1:
	mov	r0, r4			// r1 may have been clobbered by the call
	mov	r1, #1
	bl	longjmp
FUNCTION_END(arch_debug_call_with_fault_handler)
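
/* A minimal sketch (in C, illustrative only) of how the generic
 * debug_call_with_fault_handler() described above is expected to drive
 * arch_debug_call_with_fault_handler() via setjmp()/longjmp(); the exact
 * control flow is an assumption based on the doc comment, not code from
 * this file:
 *
 *	jmp_buf jumpBuffer;
 *	if (setjmp(jumpBuffer) == 0) {
 *		// first pass: run the function with the fault handler installed
 *		arch_debug_call_with_fault_handler(cpu, jumpBuffer, function,
 *			parameter);
 *	} else {
 *		// a fault occurred; the handler longjmp()ed back here with 1
 *	}
 */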