xref: /haiku/src/system/kernel/arch/arm/arch_asm.S (revision 4a55cc230cf7566cadcbb23b1928eefff8aea9a2)
/*
 * Copyright 2022, Haiku Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2009, Wischert, johanneswi@gmail.com.
 * All rights reserved. Distributed under the terms of the MIT License.
 *
 * Copyright 2003, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include <arch/arm/arch_cpu.h>
#include <arch/arm/arch_cpu_defs.h>

#include <asm_defs.h>

#include "asm_offsets.h"
#include "syscall_numbers.h"

.text


/* int mmu_read_c1(void); */
FUNCTION(mmu_read_c1):
	mrc	p15, 0, r0, c1, c0, 0
	bx	lr
FUNCTION_END(mmu_read_c1)


/* void mmu_write_c1(int val); */
FUNCTION(mmu_write_c1):
	mcr	p15, 0, r0, c1, c0, 0
	bx	lr
FUNCTION_END(mmu_write_c1)


/* NOTE: the I bit in the CPSR (bit 7) is *set* to disable IRQs and cleared
   to enable them. */


/* void arch_int_enable_interrupts(void) */
FUNCTION(arch_int_enable_interrupts):
	mrs	r0, cpsr
	bic	r0, r0, #(1<<7)
	msr	cpsr_c, r0
	bx	lr
FUNCTION_END(arch_int_enable_interrupts)


/* int arch_int_disable_interrupts(void) */
FUNCTION(arch_int_disable_interrupts):
	mrs	r0, cpsr
	orr	r1, r0, #(1<<7)
	msr	cpsr_c, r1
	bx	lr
FUNCTION_END(arch_int_disable_interrupts)


/* void arch_int_restore_interrupts(int oldState) */
FUNCTION(arch_int_restore_interrupts):
	mrs	r1, cpsr
	and	r0, r0, #(1<<7)
	bic	r1, r1, #(1<<7)
	orr	r1, r1, r0
	msr	cpsr_c, r1
	bx	lr
FUNCTION_END(arch_int_restore_interrupts)


/* bool arch_int_are_interrupts_enabled(void) */
FUNCTION(arch_int_are_interrupts_enabled):
	mrs	r0, cpsr
	and	r0, r0, #(1<<7)		/* read the I bit */
	cmp	r0, #0
	moveq	r0, #1
	movne	r0, #0
	bx	lr
FUNCTION_END(arch_int_are_interrupts_enabled)

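/* From C, these primitives are normally used in matched pairs; a minimal
 * usage sketch (illustration only, not part of this file):
 *
 *	int state = arch_int_disable_interrupts();
 *	// ... critical section ...
 *	arch_int_restore_interrupts(state);
 *
 * arch_int_disable_interrupts() returns the previous CPSR value; only its
 * I bit (bit 7) is examined by arch_int_restore_interrupts().
 */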

/* void arm_context_switch(struct arch_thread* oldState,
	struct arch_thread* newState); */
FUNCTION(arm_context_switch):
	stmfd	sp!, { r0-r12, lr }	/* save the current register context */
	str	sp, [r0]		/* remember the old thread's stack pointer */
	ldr	sp, [r1]		/* switch to the new thread's stack */
	ldmfd	sp!, { r0-r12, lr }	/* restore the new thread's context */
	bx	lr
FUNCTION_END(arm_context_switch)


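/* Note: the two FPU helpers below assume that arch_fpu_context consists of
 * the 32 double registers d0-d31 followed by a 32-bit FPSCR word, and that
 * the CPU actually provides d16-d31 (VFPv3-D32/NEON). */
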
/* void arm_save_fpu(struct arch_fpu_context* context); */
FUNCTION(arm_save_fpu):
	fstmiad		r0!, {d0-d15}
	fstmiad		r0!, {d16-d31}
	vmrs		r1, fpscr
	str			r1, [r0]
	bx			lr
FUNCTION_END(arm_save_fpu)


/* void arm_restore_fpu(struct arch_fpu_context* context); */
FUNCTION(arm_restore_fpu):
	fldmiad		r0!, {d0-d15}
	fldmiad		r0!, {d16-d31}
	ldr			r1, [r0]
	vmsr		fpscr, r1
	bx			lr
FUNCTION_END(arm_restore_fpu)


/* uint32 arm_get_dfsr(void); */
FUNCTION(arm_get_dfsr):
	mrc	p15, 0, r0, c5, c0, 0		@ get DFSR
	bx	lr
FUNCTION_END(arm_get_dfsr)


/* uint32 arm_get_ifsr(void); */
FUNCTION(arm_get_ifsr):
	mrc	p15, 0, r0, c5, c0, 1		@ get IFSR
	bx	lr
FUNCTION_END(arm_get_ifsr)


/* addr_t arm_get_dfar(void); */
FUNCTION(arm_get_dfar):
	mrc	p15, 0, r0, c6, c0, 0		@ get DFAR
	bx	lr
FUNCTION_END(arm_get_dfar)


/* addr_t arm_get_ifar(void); */
FUNCTION(arm_get_ifar):
	mrc	p15, 0, r0, c6, c0, 2		@ get IFAR
	bx	lr
FUNCTION_END(arm_get_ifar)


/* addr_t arm_get_fp(void); */
FUNCTION(arm_get_fp):
	mov	r0, fp				@ get the frame pointer
	bx	lr
FUNCTION_END(arm_get_fp)


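/* The three user memory accessors below share one pattern: the previous
 * fault handler is loaded from *faultHandler (r3), a pointer to the local
 * .L_*_error label is installed in its place, and the old handler is put
 * back before returning. If touching user memory faults, the kernel's fault
 * handling code continues execution at that error label, which returns -1. */
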
/* status_t arch_cpu_user_memcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
FUNCTION(_arch_cpu_user_memcpy):
	stmfd	sp!, { r4-r6, lr }

	ldr	r6, [r3]
	ldr	r4, =.L_user_memcpy_error
	str	r4, [r3]	/* set fault handler */
	movs	r4, r2, lsr #2	/* size / 4 */
	beq	2f		/* less than one full word to copy */
1:
	ldr	r5, [r1]
	str	r5, [r0]
	add	r1, #4
	add	r0, #4
	subs	r4, #1
	bne	1b
2:
	ands	r4, r2, #3	/* size % 4 */
	beq	4f

3:
	ldrb	r5, [r1]
	strb	r5, [r0]
	add	r1, #1
	add	r0, #1
	subs	r4, #1
	bne	3b
4:
	str	r6, [r3]	/* restore fault handler */
	mov	r0, #0
	ldmfd	sp!, { r4-r6, pc }

.L_user_memcpy_error:
	str	r6, [r3]	/* restore fault handler */
	mov	r0, #-1

	ldmfd	sp!, { r4-r6, pc }
FUNCTION_END(_arch_cpu_user_memcpy)

/* status_t arch_cpu_user_memset(void *to, char c, size_t count, addr_t *faultHandler) */
FUNCTION(_arch_cpu_user_memset):
	stmfd	sp!, { r4-r5, lr }

	ldr	r5, [r3]
	ldr	r4, =.L_user_memset_error
	str	r4, [r3]	/* set fault handler */

	and	r1, r1, #0xff
	orr	r1, r1, r1, lsl #8	/* replicate the fill byte into all */
	orr	r1, r1, r1, lsl #16	/* four bytes of the word */

	movs	r4, r2, lsr #2	/* count / 4 */
	beq	2f		/* less than one full word to set */
1:
	str	r1, [r0]
	add	r0, r0, #4
	subs	r4, r4, #1
	bne	1b

2:
	ands	r4, r2, #3	/* count % 4 */
	beq	4f
3:
	strb	r1, [r0]
	add	r0, r0, #1
	subs	r4, r4, #1
	bne	3b

4:
	mov	r0, #0
	str	r5, [r3]	/* restore fault handler */

	ldmfd	sp!, { r4-r5, pc }

.L_user_memset_error:
	mov	r0, #-1
	str	r5, [r3]	/* restore fault handler */

	ldmfd	sp!, { r4-r5, pc }
FUNCTION_END(_arch_cpu_user_memset)

/* ssize_t arch_cpu_user_strlcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
FUNCTION(_arch_cpu_user_strlcpy):
	stmfd	sp!, { r4-r6, lr }
	ldr	r5, [r3]
	ldr	r4, =.L_user_strlcpy_error
	str	r4, [r3]	/* set fault handler */
	mov	r6, #0
	cmp	r2, #0
	beq	3f		/* zero-sized buffer: don't store anything */
	sub	r2, r2, #1	/* reserve room for the terminating NUL */
1:
	cmp	r6, r2		/* reached max length? */
	bhs	2f
	ldrb	r4, [r1, r6]
	strb	r4, [r0, r6]
	cmp	r4, #0
	beq	3f		/* the NUL has been copied: done */
	add	r6, r6, #1
	b	1b
2:
	mov	r4, #0
	strb	r4, [r0, r6]	/* NUL-terminate the truncated copy */
3:
	mov	r0, r6		/* return the copied length, excluding the NUL */
	str	r5, [r3]	/* restore fault handler */

	ldmfd	sp!, { r4-r6, pc }

.L_user_strlcpy_error:
	mov	r0, #-1
	str	r5, [r3]	/* restore fault handler */

	ldmfd	sp!, { r4-r6, pc }
FUNCTION_END(_arch_cpu_user_strlcpy)


/*!	\fn void arch_debug_call_with_fault_handler(cpu_ent* cpu,
		jmp_buf jumpBuffer, void (*function)(void*), void* parameter)

	Called by debug_call_with_fault_handler() to do the dirty work of setting
	the fault handler and calling the function. If the function causes a page
	fault, arch_debug_call_with_fault_handler() calls longjmp() with the
	given \a jumpBuffer. Otherwise it returns normally.

	debug_call_with_fault_handler() has already saved the CPU's fault_handler
	and fault_handler_stack_pointer and will reset them later, so
	arch_debug_call_with_fault_handler() doesn't need to care about them.

	\param cpu The \c cpu_ent for the current CPU.
	\param jumpBuffer Buffer to be used for longjmp().
	\param function The function to be called.
	\param parameter The parameter to be passed to the function to be called.
*/
FUNCTION(arch_debug_call_with_fault_handler):
	stmfd	sp!, { r1, r4, lr }

	// Set the fault handler address and the fault handler stack pointer. We
	// don't need to save the previous values, since that's done by the caller.
	ldr	r4, =1f
	str	r4, [r0, #CPU_ENT_fault_handler]
	str	sp, [r0, #CPU_ENT_fault_handler_stack_pointer]
	mov	r4, r1

	// call the function
	mov	r0, r3
	blx	r2

	// regular return
	ldmfd	sp!, { r1, r4, pc }

	// fault -- return via longjmp(jumpBuffer, 1)
1:
	ldmfd	sp!, { r0, r4, lr }	// restore jumpBuffer in r0 (was r1)
	mov	r1, #1
	b	longjmp
FUNCTION_END(arch_debug_call_with_fault_handler)


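/* Illustrative caller-side sketch for arch_debug_call_with_fault_handler()
 * (an assumption about how debug_call_with_fault_handler() drives it, shown
 * only for clarity):
 *
 *	jmp_buf jumpBuffer;
 *	if (setjmp(jumpBuffer) == 0)
 *		arch_debug_call_with_fault_handler(cpu, jumpBuffer, function, parameter);
 *	else
 *		// the function faulted; we got here through longjmp(jumpBuffer, 1)
 */

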
FUNCTION(arch_return_to_userland):
	// set SPSR to user mode, IRQs enabled, FIQs disabled
	mrs		ip, cpsr
	bic		ip, ip, #(CPSR_MODE_MASK | CPSR_T | CPSR_F | CPSR_I)
	orr		ip, ip, #(CPSR_MODE_USR | CPSR_F)
	msr		spsr, ip

	// use system mode to load the user mode SP and LR
	// (system mode shares these banked registers with user mode)
	ldr		r4, [r0, #IFRAME_usr_sp]
	ldr		r5, [r0, #IFRAME_usr_lr]
	mrs		ip, cpsr
	bic		ip, ip, #(CPSR_MODE_MASK)
	orr		ip, ip, #(CPSR_MODE_SYS)
	msr		cpsr, ip
	mov		sp, r4
	mov		lr, r5
	bic		ip, ip, #(CPSR_MODE_MASK)
	orr		ip, ip, #(CPSR_MODE_SVC)
	msr		cpsr, ip

	// load the user mode entry point into LR
	ldr		lr, [r0, #IFRAME_pc]

	// load the general purpose registers from the iframe
	mov		sp, r0
	add		sp, sp, #4
	ldmfd	sp!, { r0-r12 }

	// jump to the user mode entry point; movs pc, lr also copies the SPSR
	// set up above into the CPSR, completing the switch to user mode
	movs	pc, lr
FUNCTION_END(arch_return_to_userland)


FUNCTION(arch_user_thread_exit):
	svc		SYSCALL_EXIT_THREAD
	bx		lr
FUNCTION_END(arch_user_thread_exit)