xref: /haiku/src/system/kernel/arch/arm/arch_asm.S (revision 52f7c9389475e19fc21487b38064b4390eeb6fea)
/*
 * Copyright 2022, Haiku Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2009, Wischert, johanneswi@gmail.com.
 * All rights reserved. Distributed under the terms of the MIT License.
 *
 * Copyright 2003, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include <arch/arm/arch_cpu.h>
#include <arch/arm/arch_cpu_defs.h>

#include <asm_defs.h>

#include "asm_offsets.h"
#include "syscall_numbers.h"

.text


/* int mmu_read_c1(void); */
FUNCTION(mmu_read_c1):
	mrc	p15, 0, r0, c1, c0, 0
	bx	lr
FUNCTION_END(mmu_read_c1)


/* void mmu_write_c1(int val); */
FUNCTION(mmu_write_c1):
	mcr	p15, 0, r0, c1, c0, 0
	bx	lr
FUNCTION_END(mmu_write_c1)
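

/* Both functions above access the CP15 c1 control register (SCTLR). A small
   usage sketch from the C side -- assuming only the architectural bit layout
   (bit 0 is M, the MMU enable bit), nothing defined in this file:

	mmu_write_c1(mmu_read_c1() | 0x1);	// turn the MMU on

   Real MMU setup would of course also have to program the translation table
   base and domain access registers first. */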


/* NOTE: the I bit in the CPSR (bit 7) is *set* to disable IRQs
   and cleared to enable them. */


/* void arch_int_enable_interrupts(void) */
FUNCTION(arch_int_enable_interrupts):
	mrs	r0, cpsr
	bic	r0, r0, #(1<<7)
	msr	cpsr_c, r0
	bx	lr
FUNCTION_END(arch_int_enable_interrupts)


/* int arch_int_disable_interrupts(void) */
FUNCTION(arch_int_disable_interrupts):
	mrs	r0, cpsr
	orr	r1, r0, #(1<<7)
	msr	cpsr_c, r1
	bx	lr
FUNCTION_END(arch_int_disable_interrupts)


/* void arch_int_restore_interrupts(int oldState) */
FUNCTION(arch_int_restore_interrupts):
	mrs	r1, cpsr
	and	r0, r0, #(1<<7)
	bic	r1, r1, #(1<<7)
	orr	r1, r1, r0
	msr	cpsr_c, r1
	bx	lr
FUNCTION_END(arch_int_restore_interrupts)


/* bool arch_int_are_interrupts_enabled(void) */
FUNCTION(arch_int_are_interrupts_enabled):
	mrs	r0, cpsr
	and	r0, r0, #(1<<7)		/* isolate the I bit */
	cmp	r0, #0
	moveq	r0, #1			/* I bit clear: IRQs enabled */
	movne	r0, #0			/* I bit set: IRQs disabled */
	bx	lr
FUNCTION_END(arch_int_are_interrupts_enabled)
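

/* The four routines above back the kernel's generic interrupt-control hooks.
   Typical call pattern from C, shown only as an illustration (the real call
   sites live in the machine-independent kernel):

	cpu_status state = disable_interrupts();
	// ... critical section ...
	restore_interrupts(state);

   where disable_interrupts()/restore_interrupts() end up in
   arch_int_disable_interrupts()/arch_int_restore_interrupts(). */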


/* void arm_context_switch(struct arch_thread* oldState,
	struct arch_thread* newState); */
FUNCTION(arm_context_switch):
	stmfd	sp!, { r0-r12, lr }
	str	sp, [r0]
	ldr	sp, [r1]
	ldmfd	sp!, { r0-r12, lr }
	bx	lr
FUNCTION_END(arm_context_switch)
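

/* arm_context_switch() pushes r0-r12 and lr onto the old thread's kernel
   stack and only stores/loads the stack pointer through the arch_thread
   pointers, so the sole layout assumption is that the saved sp is the first
   word of struct arch_thread. Illustrative sketch (not the real definition;
   see arch_thread_types.h):

	struct arch_thread {
		void*	sp;	// must remain the first member
		// ...
	};
*/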


/* addr_t arm_get_fsr(void); */
FUNCTION(arm_get_fsr):
	mrc	p15, 0, r0, c5, c0, 0		@ get FSR
	bx	lr
FUNCTION_END(arm_get_fsr)


/* addr_t arm_get_far(void); */
FUNCTION(arm_get_far):
	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
	bx	lr
FUNCTION_END(arm_get_far)


/* addr_t arm_get_fp(void); */
FUNCTION(arm_get_fp):
	mov	r0, fp				@ get frame pointer
	bx	lr
FUNCTION_END(arm_get_fp)
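

/* The three helpers above expose the data fault status register (FSR), the
   fault address register (FAR) and the current frame pointer to the C side
   of fault handling. Illustrative use only (dprintf() is the kernel log
   printf; the real handler feeds these values into the VM fault path):

	addr_t faultAddress = arm_get_far();
	addr_t faultStatus = arm_get_fsr();
	dprintf("data abort at %#" B_PRIxADDR " (FSR %#" B_PRIxADDR ")\n",
		faultAddress, faultStatus);
*/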


/* status_t _arch_cpu_user_memcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
FUNCTION(_arch_cpu_user_memcpy):
	stmfd	sp!, { r4-r6, lr }

	ldr	r6, [r3]
	ldr	r4, =.L_user_memcpy_error
	str	r4, [r3]	/* set fault handler */
	movs	r4, r2, lsr #2	/* size / 4 */
	beq	2f		/* no whole words to copy */
1:
	ldr	r5, [r1]
	str	r5, [r0]
	add	r1, #4
	add	r0, #4
	subs	r4, #1
	bne	1b
2:
	ands	r4, r2, #3	/* size % 4 */
	beq	4f

3:
	ldrb	r5, [r1]
	strb	r5, [r0]
	add	r1, #1
	add	r0, #1
	subs	r4, #1
	bne	3b
4:
	str	r6, [r3]	/* restore fault handler */
	mov	r0, #0
	ldmfd	sp!, { r4-r6, pc }

.L_user_memcpy_error:
	str	r6, [r3]	/* restore fault handler */
	mov	r0, #-1

	ldmfd	sp!, { r4-r6, pc }
FUNCTION_END(_arch_cpu_user_memcpy)
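

/* The faultHandler argument points at the current thread's fault_handler
   slot; if a user access faults, the exception code resumes execution at the
   address stored there (.L_user_memcpy_error above). A rough sketch of the
   C-side wrapper, assuming it mirrors the other ports (the real wrapper
   lives in the arch_cpu sources, not in this file):

	status_t
	arch_cpu_user_memcpy(void* to, const void* from, size_t size)
	{
		return _arch_cpu_user_memcpy(to, from, size,
			&thread_get_current_thread()->fault_handler);
	}
*/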

/* status_t _arch_cpu_user_memset(void *to, char c, size_t count, addr_t *faultHandler) */
FUNCTION(_arch_cpu_user_memset):
	stmfd	sp!, { r4-r5, lr }

	ldr	r5, [r3]
	ldr	r4, =.L_user_memset_error
	str	r4, [r3]	/* set fault handler */

	/* replicate the fill byte into all four byte lanes of r1 */
	and	r1, r1, #0xff
	orr	r1, r1, r1, lsl #8
	orr	r1, r1, r1, lsl #16

	movs	r4, r2, lsr #2	/* count / 4 */
	beq	2f		/* no whole words to fill */
1:
	str	r1, [r0]
	add	r0, r0, #4
	subs	r4, r4, #1
	bne	1b
2:
	ands	r4, r2, #3	/* count % 4 */
	beq	4f
3:
	strb	r1, [r0]
	add	r0, r0, #1
	subs	r4, r4, #1
	bne	3b
4:
	mov	r0, #0
	str	r5, [r3]	/* restore fault handler */

	ldmfd	sp!, { r4-r5, pc }

.L_user_memset_error:
	mov	r0, #-1
	str	r5, [r3]	/* restore fault handler */

	ldmfd	sp!, { r4-r5, pc }
FUNCTION_END(_arch_cpu_user_memset)
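

/* Illustrative use from the C side, parallel to the memcpy case above (the
   calling code shown is an assumption, not taken from this file):

	if (_arch_cpu_user_memset(userBuffer, 0, size,
			&thread_get_current_thread()->fault_handler) < B_OK)
		return B_BAD_ADDRESS;
*/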

/* ssize_t _arch_cpu_user_strlcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
FUNCTION(_arch_cpu_user_strlcpy):
	stmfd	sp!, { r4-r6, lr }
	ldr	r5, [r3]
	ldr	r4, =.L_user_strlcpy_error
	str	r4, [r3]	/* set fault handler */
	mov	r6, #0		/* index into both buffers */
	cmp	r2, #0
	beq	4f		/* size == 0: copy nothing, just measure the source */
	sub	r2, r2, #1	/* reserve room for the terminating NUL */
1:
	cmp	r6, r2		/* reached max length? */
	bhs	2f
	ldrb	r4, [r1, r6]
	strb	r4, [r0, r6]
	add	r6, r6, #1
	cmp	r4, #0
	bne	1b
	sub	r6, r6, #1	/* exclude the copied NUL from the returned length */
	b	3f
2:
	mov	r4, #0
	strb	r4, [r0, r6]	/* NUL-terminate the truncated copy */
4:
	ldrb	r4, [r1, r6]	/* keep scanning to return strlen(from) */
	add	r6, r6, #1
	cmp	r4, #0
	bne	4b
	sub	r6, r6, #1
3:
	mov	r0, r6		/* return the length of the source string */
	str	r5, [r3]	/* restore fault handler */

	ldmfd	sp!, { r4-r6, pc }

.L_user_strlcpy_error:
	mov	r0, #-1
	str	r5, [r3]	/* restore fault handler */

	ldmfd	sp!, { r4-r6, pc }
FUNCTION_END(_arch_cpu_user_strlcpy)
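

/* For reference, the strlcpy() contract the routine above implements: at most
   size - 1 bytes are copied, the result is NUL-terminated whenever size > 0,
   and the length of the source string is returned so callers can detect
   truncation. Hypothetical caller, for illustration only:

	char name[B_OS_NAME_LENGTH];
	ssize_t length = _arch_cpu_user_strlcpy(name, userName, sizeof(name),
		&thread_get_current_thread()->fault_handler);
	if (length < 0)
		return B_BAD_ADDRESS;
	if ((size_t)length >= sizeof(name))
		return B_NAME_TOO_LONG;
*/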


/*!	\fn void arch_debug_call_with_fault_handler(cpu_ent* cpu,
		jmp_buf jumpBuffer, void (*function)(void*), void* parameter)

	Called by debug_call_with_fault_handler() to do the dirty work of setting
	the fault handler and calling the function. If the function causes a page
	fault, arch_debug_call_with_fault_handler() calls longjmp() with the
	given \a jumpBuffer. Otherwise it returns normally.

	debug_call_with_fault_handler() has already saved the CPU's fault_handler
	and fault_handler_stack_pointer and will reset them later, so
	arch_debug_call_with_fault_handler() doesn't need to care about them.

	\param cpu The \c cpu_ent for the current CPU.
	\param jumpBuffer Buffer to be used for longjmp().
	\param function The function to be called.
	\param parameter The parameter to be passed to the function to be called.
*/
FUNCTION(arch_debug_call_with_fault_handler):
	stmfd	sp!, { r1, r4, lr }

	// Set fault handler address, and fault handler stack pointer address. We
	// don't need to save the previous values, since that's done by the caller.
	ldr	r4, =1f
	str	r4, [r0, #CPU_ENT_fault_handler]
	str	sp, [r0, #CPU_ENT_fault_handler_stack_pointer]
	mov	r4, r1

	// call the function
	mov	r0, r3
	blx	r2

	// regular return
	ldmfd	sp!, { r1, r4, pc }

	// fault -- return via longjmp(jumpBuffer, 1)
1:
	ldmfd	sp!, { r0, r4, lr }	// restore jumpBuffer in r0 (was r1)
	mov	r1, #1
	b	longjmp
FUNCTION_END(arch_debug_call_with_fault_handler)
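

/* Caller-side pattern, roughly what debug_call_with_fault_handler() in the
   generic kernel debugger does (illustrative sketch only):

	jmp_buf jumpBuffer;
	if (setjmp(jumpBuffer) == 0) {
		arch_debug_call_with_fault_handler(cpu, jumpBuffer, function,
			parameter);
	} else {
		// the called function faulted; we got here via longjmp()
	}
*/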


FUNCTION(arch_return_to_userland):
	// set SPSR to user mode, IRQ enabled, FIQ disabled
	mrs		ip, cpsr
	bic		ip, ip, #(CPSR_MODE_MASK | CPSR_T | CPSR_F | CPSR_I)
	orr		ip, ip, #(CPSR_MODE_USR | CPSR_F)
	msr		spsr, ip

	// use system mode to load user mode SP and LR
	ldr		r4, [r0, #IFRAME_usr_sp]
	ldr		r5, [r0, #IFRAME_usr_lr]
	mrs		ip, cpsr
	bic		ip, ip, #(CPSR_MODE_MASK)
	orr		ip, ip, #(CPSR_MODE_SYS)
	msr		cpsr, ip
	mov		sp, r4
	mov		lr, r5
	bic		ip, ip, #(CPSR_MODE_MASK)
	orr		ip, ip, #(CPSR_MODE_SVC)
	msr		cpsr, ip

	// load user mode entry point in LR
	ldr		lr, [r0, #IFRAME_pc]

	// load general purpose registers
	mov		sp, r0
	add		sp, sp, #4
	ldmfd	sp!, { r0-r12 }

	// jump to user mode entry point
	movs	pc, lr
FUNCTION_END(arch_return_to_userland)
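

/* The register restore above hard-codes the iframe layout: one leading word
   (presumably the saved SPSR) is skipped, then r0-r12 follow contiguously,
   which is why sp is advanced by 4 before the ldmfd. Hedged sketch of the
   assumed layout -- the authoritative definition is the struct iframe that
   the IFRAME_* offsets in asm_offsets.h are generated from:

	struct iframe {
		uint32 spsr;
		uint32 r0;	// ... r1-r12 follow, popped by the ldmfd above
		// ...
		uint32 usr_sp;
		uint32 usr_lr;
		// ...
		uint32 pc;
	};
*/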


FUNCTION(arch_user_thread_exit):
	svc		SYSCALL_EXIT_THREAD
	bx		lr
FUNCTION_END(arch_user_thread_exit)
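

/* The svc above traps into the kernel's syscall dispatcher with
   SYSCALL_EXIT_THREAD; a successful thread exit never returns, so the
   trailing bx lr only acts as a safety net. */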
300