xref: /haiku/src/system/kernel/arch/arm64/arch_asm.S (revision fc7456e9b1ec38c941134ed6d01c438cf289381e)
/*
 * Copyright 2019-2022 Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */
#include <arch/arm/arch_cpu.h>
#include <asm_defs.h>
#include "asm_offsets.h"
#include "syscall_numbers.h"

.text

FUNCTION(_thread_exit_syscall):
	svc #((SYSCALL_EXIT_THREAD << 5) | 1)
FUNCTION_END(_thread_exit_syscall)
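
// The SVC immediate above appears to encode the system call inline: the
// syscall number in bits [15:5] and the argument count in bits [4:0], so
// exit_thread with its single argument becomes (SYSCALL_EXIT_THREAD << 5) | 1.
// For an SVC taken from AArch64 the immediate is reported in ESR_EL1's ISS
// field, which is presumably where the sync handler recovers it.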

.macro xchg_sp xt
add	sp, sp, \xt
sub	\xt, sp, \xt
sub	sp, sp, \xt
.endm
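
// A worked pass through xchg_sp with sp = S and \xt = X (plain modular
// arithmetic, so intermediate wrap-around is harmless):
//   add sp, sp, \xt   -> sp  = S + X
//   sub \xt, sp, \xt  -> \xt = (S + X) - X = S
//   sub sp, sp, \xt   -> sp  = (S + X) - S = X
// The two registers are exchanged without a scratch register, which matters
// on exception entry below where every GP register still holds interrupted
// state.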

.macro EXCEPTION_ENTRY el
	// interrupts are automatically disabled by hardware

	// avoid using sp in case it is misaligned
	// swap sp with x19 and use it instead
	xchg_sp x19

	// x19 is now the stack top, make room for IFRAME
	sub x19, x19, #(IFRAME_sizeof)

	stp	    x0,  x1, [x19, #(IFRAME_x + 0 * 8)]
	stp	    x2,  x3, [x19, #(IFRAME_x + 2 * 8)]
	stp	    x4,  x5, [x19, #(IFRAME_x + 4 * 8)]
	stp	    x6,  x7, [x19, #(IFRAME_x + 6 * 8)]
	stp	    x8,  x9, [x19, #(IFRAME_x + 8 * 8)]
	stp	   x10, x11, [x19, #(IFRAME_x + 10 * 8)]
	stp	   x12, x13, [x19, #(IFRAME_x + 12 * 8)]
	stp	   x14, x15, [x19, #(IFRAME_x + 14 * 8)]
	stp	   x16, x17, [x19, #(IFRAME_x + 16 * 8)]
	mov    x0,   sp  // original x19 that we swapped with sp
	stp	   x18,  x0, [x19, #(IFRAME_x + 18 * 8)]
	stp	   x20, x21, [x19, #(IFRAME_x + 20 * 8)]
	stp	   x22, x23, [x19, #(IFRAME_x + 22 * 8)]
	stp	   x24, x25, [x19, #(IFRAME_x + 24 * 8)]
	stp	   x26, x27, [x19, #(IFRAME_x + 26 * 8)]
	stp	   x28, fp,  [x19, #(IFRAME_x + 28 * 8)]
	str	   x30,      [x19, #(IFRAME_lr)]

.if \el == 0
	mrs x0, SP_EL0
.else
	// for EL1 the interrupted sp sits just above the IFRAME, so add its size back
	add x0, x19, #(IFRAME_sizeof)
.endif

	mrs x1, ELR_EL1
	mrs x2, SPSR_EL1
	mrs x3, ESR_EL1
	mrs x4, FAR_EL1

	str x0, [x19, #(IFRAME_sp)]
	str x1, [x19, #(IFRAME_elr)]
	str x2, [x19, #(IFRAME_spsr)]
	str x3, [x19, #(IFRAME_esr)]
	str x4, [x19, #(IFRAME_far)]
.endm
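
// State on leaving EXCEPTION_ENTRY, as set up by the code above: x19 points
// at the freshly filled IFRAME on the interrupted stack, while sp still holds
// the interrupted context's x19 value and must not be used as a stack until
// the handler derives an aligned sp from x19.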

.macro EXCEPTION_RETURN el
	// x19 is callee-saved so it still points to IFRAME
	// x0, x1, x18, x19 will be restored at the very end

	ldr x0,  [x19, #(IFRAME_elr)]
	ldr x1,  [x19, #(IFRAME_spsr)]
	ldr x18, [x19, #(IFRAME_sp)]

	// x0 and x1 will be restored later
	ldp	    x2,  x3, [x19, #(IFRAME_x + 2 * 8)]
	ldp	    x4,  x5, [x19, #(IFRAME_x + 4 * 8)]
	ldp	    x6,  x7, [x19, #(IFRAME_x + 6 * 8)]
	ldp	    x8,  x9, [x19, #(IFRAME_x + 8 * 8)]
	ldp	   x10, x11, [x19, #(IFRAME_x + 10 * 8)]
	ldp	   x12, x13, [x19, #(IFRAME_x + 12 * 8)]
	ldp	   x14, x15, [x19, #(IFRAME_x + 14 * 8)]
	ldp	   x16, x17, [x19, #(IFRAME_x + 16 * 8)]
	// x18 and x19 will be restored later
	ldp	   x20, x21, [x19, #(IFRAME_x + 20 * 8)]
	ldp	   x22, x23, [x19, #(IFRAME_x + 22 * 8)]
	ldp	   x24, x25, [x19, #(IFRAME_x + 24 * 8)]
	ldp	   x26, x27, [x19, #(IFRAME_x + 26 * 8)]
	ldp	   x28, fp,  [x19, #(IFRAME_x + 28 * 8)]
	ldr	   x30, [x19, #(IFRAME_lr)]

	// disable interrupts before restoring ELR/SPSR/sp
	msr DAIFSet, #0xf

	msr ELR_EL1, x0
	msr SPSR_EL1, x1

.if \el == 0
	// load stack pointer for EL0 from IFRAME
	msr SP_EL0, x18

	// unwind our own stack pointer
	add sp, x19, #(IFRAME_sizeof)
.else
	// IFRAME_sp already holds the interrupted sp (above the IFRAME), so no unwinding is needed
	mov sp, x18
.endif

	// finally restore remaining registers
	ldp x0,   x1, [x19, #(IFRAME_x + 0 * 8)]
	ldp x18, x19, [x19, #(IFRAME_x + 18 * 8)]

	eret
.endm
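
// Ordering notes for EXCEPTION_RETURN: interrupts are masked before the
// ELR_EL1/SPSR_EL1 writes because a nested exception taken afterwards would
// overwrite both registers, and x18/x19 are restored last since they carry
// the target sp and the IFRAME pointer until everything else is back in place.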

.macro EXCEPTION_HANDLER el name func
	STATIC_FUNCTION(handle_\name):
		EXCEPTION_ENTRY \el

		// prepare aligned sp for C function
		and sp, x19, #0xfffffffffffffff0

		// call C handler, passing IFRAME in x0
		// handler can enable interrupts if it wants to
		mov x0, x19
		mov x29, x0
		bl \func

		EXCEPTION_RETURN \el
	FUNCTION_END(handle_\name)
.endm
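
// The sp is masked down to a 16-byte boundary above because the AAPCS64
// requires a 16-byte-aligned stack at function entry. Each \func is one of
// the C++ handlers instantiated below (do_sync_handler and friends); judging
// from the mov x0, x19 they take the IFRAME pointer as their only argument,
// roughly void do_sync_handler(iframe*); the exact prototypes live in the
// arch interrupt code, not here.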

.macro	vector	name
	.align 7
	b	handle_\name
.endm

.macro	vempty
	.align 7
	brk	0xfff
	1: b	1b
.endm
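
// Alignment arithmetic for the table below: on AArch64 targets .align takes
// a power of two, so .align 7 pads every vector slot to 2^7 = 128 bytes (the
// architectural entry size) and .align 11 gives the 2^11 = 2048-byte
// alignment required of the base address loaded into VBAR_EL1.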

.align 11
.globl _exception_vectors
_exception_vectors:
	vempty             /* Synchronous EL1t */
	vempty             /* IRQ EL1t */
	vempty             /* FIQ EL1t */
	vempty             /* Error EL1t */

	vector el1h_sync   /* Synchronous EL1h */
	vector el1h_irq    /* IRQ EL1h */
	vector el1h_fiq    /* FIQ EL1h */
	vector el1h_error  /* Error EL1h */

	vector el0_sync    /* Synchronous 64-bit EL0 */
	vector el0_irq     /* IRQ 64-bit EL0 */
	vector el0_fiq     /* FIQ 64-bit EL0 */
	vector el0_error   /* Error 64-bit EL0 */

	vempty             /* Synchronous 32-bit EL0 */
	vempty             /* IRQ 32-bit EL0 */
	vempty             /* FIQ 32-bit EL0 */
	vempty             /* Error 32-bit EL0 */

EXCEPTION_HANDLER 1 el1h_sync do_sync_handler
EXCEPTION_HANDLER 1 el1h_irq do_irq_handler
EXCEPTION_HANDLER 1 el1h_fiq do_fiq_handler
EXCEPTION_HANDLER 1 el1h_error do_error_handler

EXCEPTION_HANDLER 0 el0_sync do_sync_handler
EXCEPTION_HANDLER 0 el0_irq do_irq_handler
EXCEPTION_HANDLER 0 el0_fiq do_fiq_handler
EXCEPTION_HANDLER 0 el0_error do_error_handler

FUNCTION(_eret_with_iframe):
	mov x20, xzr
	mov x21, xzr
	mov x22, xzr
	mov x23, xzr
	mov x24, xzr
	mov x25, xzr
	mov x26, xzr
	mov x27, xzr
	mov x28, xzr
	mov x29, xzr

	mov x19, x0
	EXCEPTION_RETURN 0
FUNCTION_END(_eret_with_iframe)
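
// _eret_with_iframe appears to be the path for entering EL0 from a
// synthesized IFRAME passed in x0 (for example when a thread first drops to
// userspace): x19 is loaded with the frame and EXCEPTION_RETURN 0 performs
// the eret. The callee-saved registers are zeroed first, presumably as a
// defensive measure against leaking kernel values to userspace, although
// EXCEPTION_RETURN reloads them from the frame anyway.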

FUNCTION(_fp_save):
	stp q0, q1, [x0], #32
	stp q2, q3, [x0], #32
	stp q4, q5, [x0], #32
	stp q6, q7, [x0], #32
	stp q8, q9, [x0], #32
	stp q10, q11, [x0], #32
	stp q12, q13, [x0], #32
	stp q14, q15, [x0], #32
	stp q16, q17, [x0], #32
	stp q18, q19, [x0], #32
	stp q20, q21, [x0], #32
	stp q22, q23, [x0], #32
	stp q24, q25, [x0], #32
	stp q26, q27, [x0], #32
	stp q28, q29, [x0], #32
	stp q30, q31, [x0], #32
	mrs x1, FPSR
	mrs x2, FPCR
	str x1, [x0], #8
	str x2, [x0], #8

	// reset FPSR and FPCR so userspace state cannot affect the kernel
	msr FPSR, xzr
	// only write FPCR if it is not already zero
	cmp x2, xzr
	beq 1f
	msr FPCR, xzr
1:
	ret
FUNCTION_END(_fp_save)
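
// Buffer size implied by the stores above: 32 Q registers * 16 bytes
// = 512 bytes, plus 8 bytes each for FPSR and FPCR, so the buffer passed in
// x0 to _fp_save/_fp_restore must be at least 528 bytes.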

FUNCTION(_fp_restore):
	ldp q0, q1, [x0], #32
	ldp q2, q3, [x0], #32
	ldp q4, q5, [x0], #32
	ldp q6, q7, [x0], #32
	ldp q8, q9, [x0], #32
	ldp q10, q11, [x0], #32
	ldp q12, q13, [x0], #32
	ldp q14, q15, [x0], #32
	ldp q16, q17, [x0], #32
	ldp q18, q19, [x0], #32
	ldp q20, q21, [x0], #32
	ldp q22, q23, [x0], #32
	ldp q24, q25, [x0], #32
	ldp q26, q27, [x0], #32
	ldp q28, q29, [x0], #32
	ldp q30, q31, [x0], #32

	ldr x1, [x0], #8
	msr FPSR, x1

	// avoid restoring FPCR if it hasn't changed
	ldr x2, [x0], #8
	mrs x3, FPCR
	cmp x3, x2
	beq 1f
	msr FPCR, x2
1:
	ret
FUNCTION_END(_fp_restore)
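
// Both routines compare before touching FPCR; presumably this is because an
// FPCR write can be comparatively expensive on some cores, while in the
// common case the control bits do not change between contexts.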

FUNCTION(_arch_context_swap):
	// save
	stp x19, x20, [x0], #16
	stp x21, x22, [x0], #16
	stp x23, x24, [x0], #16
	stp x25, x26, [x0], #16
	stp x27, x28, [x0], #16
	stp x29, x30, [x0], #16

	mov x2, sp
	mrs x3, TPIDR_EL0
	stp  x2,  x3, [x0], #16

	stp  d8,  d9, [x0], #16
	stp d10, d11, [x0], #16
	stp d12, d13, [x0], #16
	stp d14, d15, [x0], #16

	// restore
	ldp x19, x20, [x1], #16
	ldp x21, x22, [x1], #16
	ldp x23, x24, [x1], #16
	ldp x25, x26, [x1], #16
	ldp x27, x28, [x1], #16
	ldp x29, x30, [x1], #16

	ldp  x2,  x3, [x1], #16
	mov sp, x2
	msr TPIDR_EL0, x3

	ldp  d8,  d9, [x1], #16
	ldp d10, d11, [x1], #16
	ldp d12, d13, [x1], #16
	ldp d14, d15, [x1], #16

	// pass x29 as argument to thread entry function
	mov x0, x29
	ret
FUNCTION_END(_arch_context_swap)
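
// Context layout implied by the save/restore sequence above (presumably
// mirrored by the arch thread context structure): x19-x30 (12 * 8 = 96
// bytes), then sp and TPIDR_EL0 (16 bytes), then d8-d15 (64 bytes), for 176
// bytes in total. Only callee-saved state is stored because the switch
// happens at an ordinary function-call boundary; the caller has already
// preserved everything else as required by the AAPCS64.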

/*!	\fn void arch_debug_call_with_fault_handler(cpu_ent* cpu,
		jmp_buf jumpBuffer, void (*function)(void*), void* parameter)

	Called by debug_call_with_fault_handler() to do the dirty work of setting
	the fault handler and calling the function. If the function causes a page
	fault, arch_debug_call_with_fault_handler() calls longjmp() with the
	given \a jumpBuffer. Otherwise it returns normally.

	debug_call_with_fault_handler() has already saved the CPU's fault_handler
	and fault_handler_stack_pointer and will reset them later, so
	arch_debug_call_with_fault_handler() doesn't need to care about them.

	\param cpu The \c cpu_ent for the current CPU.
	\param jumpBuffer Buffer to be used for longjmp().
	\param function The function to be called.
	\param parameter The parameter to be passed to the called function.
*/
FUNCTION(arch_debug_call_with_fault_handler):
	adrp x4, fault
	add x4, x4, :lo12:fault
	str x4, [x0, #CPU_ENT_fault_handler]
	str x1, [x0, #CPU_ENT_fault_handler_stack_pointer]

	mov x0, x3
	br x2

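// If the called function faults, the generic fault path presumably switches
// sp to the saved fault_handler_stack_pointer (the jumpBuffer stored above)
// before branching to the registered handler, so on arrival here sp is the
// jmp_buf and longjmp(buffer, 1) unwinds back to the debugger caller.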
fault:
	mov x0, sp
	mov x1, #1
	b longjmp
FUNCTION_END(arch_debug_call_with_fault_handler)


/* addr_t arm64_get_fp(void) */
FUNCTION(arm64_get_fp):
	mov x0, x29
	ret
FUNCTION_END(arm64_get_fp)
