/*
 * Copyright 2019-2021, Haiku, Inc. All Rights Reserved
 * Distributed under the terms of the MIT License.
 */


#include <asm_defs.h>
#include "syscall_numbers.h"


.text


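/* MSyscall: presumably used to request a service from machine-mode (M-mode)
   code; the call number and arguments are expected to already be set up in
   the argument registers by the C caller. `ecall` traps to the higher
   privilege level and the result comes back in a0. */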
FUNCTION(MSyscall):
	ecall
	ret
FUNCTION_END(MSyscall)


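/* arch_setjmp: save the callee-saved integer registers (ra, s0-s11, sp) and
   the satp CSR into the buffer passed in a0, then return 0 to the caller. */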
FUNCTION(arch_setjmp):
	sd ra,   0*8(a0)
	sd s0,   1*8(a0)
	sd s1,   2*8(a0)
	sd s2,   3*8(a0)
	sd s3,   4*8(a0)
	sd s4,   5*8(a0)
	sd s5,   6*8(a0)
	sd s6,   7*8(a0)
	sd s7,   8*8(a0)
	sd s8,   9*8(a0)
	sd s9,  10*8(a0)
	sd s10, 11*8(a0)
	sd s11, 12*8(a0)
	sd sp,  13*8(a0)
	csrr t0, satp
	sd t0,  14*8(a0)

	li    a0, 0
	ret
FUNCTION_END(arch_setjmp)


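/* arch_longjmp: restore the register state saved by arch_setjmp from the
   buffer passed in a0, reload satp and flush the TLB, then return to the
   saved call site with the value passed in a1 (or 1 if that value is 0). */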
FUNCTION(arch_longjmp):
	ld ra,   0*8(a0)
	ld s0,   1*8(a0)
	ld s1,   2*8(a0)
	ld s2,   3*8(a0)
	ld s3,   4*8(a0)
	ld s4,   5*8(a0)
	ld s5,   6*8(a0)
	ld s6,   7*8(a0)
	ld s7,   8*8(a0)
	ld s8,   9*8(a0)
	ld s9,  10*8(a0)
	ld s10, 11*8(a0)
	ld s11, 12*8(a0)
	ld sp,  13*8(a0)
	ld t0,  14*8(a0)
	csrw satp, t0
	sfence.vma

	seqz a0, a1
	add  a0, a0, a1   # a0 = (a1 == 0) ? 1 : a1
	ret
FUNCTION_END(arch_longjmp)


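/* save_fpu: store all 32 double-precision floating point registers plus the
   fcsr control/status register into the buffer passed in a0. */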
FUNCTION(save_fpu):
	fsd f0,   0*8(a0)
	fsd f1,   1*8(a0)
	fsd f2,   2*8(a0)
	fsd f3,   3*8(a0)
	fsd f4,   4*8(a0)
	fsd f5,   5*8(a0)
	fsd f6,   6*8(a0)
	fsd f7,   7*8(a0)
	fsd f8,   8*8(a0)
	fsd f9,   9*8(a0)
	fsd f10, 10*8(a0)
	fsd f11, 11*8(a0)
	fsd f12, 12*8(a0)
	fsd f13, 13*8(a0)
	fsd f14, 14*8(a0)
	fsd f15, 15*8(a0)
	fsd f16, 16*8(a0)
	fsd f17, 17*8(a0)
	fsd f18, 18*8(a0)
	fsd f19, 19*8(a0)
	fsd f20, 20*8(a0)
	fsd f21, 21*8(a0)
	fsd f22, 22*8(a0)
	fsd f23, 23*8(a0)
	fsd f24, 24*8(a0)
	fsd f25, 25*8(a0)
	fsd f26, 26*8(a0)
	fsd f27, 27*8(a0)
	fsd f28, 28*8(a0)
	fsd f29, 29*8(a0)
	fsd f30, 30*8(a0)
	fsd f31, 31*8(a0)
	frcsr t0
	sd  t0,  32*8(a0)

	ret
FUNCTION_END(save_fpu)


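/* restore_fpu: reload all 32 double-precision floating point registers plus
   the fcsr control/status register from the buffer passed in a0. */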
FUNCTION(restore_fpu):
	fld f0,   0*8(a0)
	fld f1,   1*8(a0)
	fld f2,   2*8(a0)
	fld f3,   3*8(a0)
	fld f4,   4*8(a0)
	fld f5,   5*8(a0)
	fld f6,   6*8(a0)
	fld f7,   7*8(a0)
	fld f8,   8*8(a0)
	fld f9,   9*8(a0)
	fld f10, 10*8(a0)
	fld f11, 11*8(a0)
	fld f12, 12*8(a0)
	fld f13, 13*8(a0)
	fld f14, 14*8(a0)
	fld f15, 15*8(a0)
	fld f16, 16*8(a0)
	fld f17, 17*8(a0)
	fld f18, 18*8(a0)
	fld f19, 19*8(a0)
	fld f20, 20*8(a0)
	fld f21, 21*8(a0)
	fld f22, 22*8(a0)
	fld f23, 23*8(a0)
	fld f24, 24*8(a0)
	fld f25, 25*8(a0)
	fld f26, 26*8(a0)
	fld f27, 27*8(a0)
	fld f28, 28*8(a0)
	fld f29, 29*8(a0)
	fld f30, 30*8(a0)
	fld f31, 31*8(a0)
	ld  t0,  32*8(a0)
	fscsr t0

	ret
FUNCTION_END(restore_fpu)


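/* arch_thread_entry: initial PC of a newly created thread. The entry function
   is expected in s1 and its argument in s2, presumably placed there when the
   thread's initial context was set up; move the argument into a0 and call the
   function. */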
FUNCTION(arch_thread_entry):
	mv a0, s2
	jalr s1
FUNCTION_END(arch_thread_entry)


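/* arch_enter_userspace: switch to the stack pointer passed in a2 and execute
   `sret`; sepc and sstatus are presumably already configured by the caller so
   that execution resumes in user mode at the intended entry point. */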
FUNCTION(arch_enter_userspace):
	mv sp, a2
	sret
FUNCTION_END(arch_enter_userspace)


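/* arch_longjmp_iframe: make the iframe passed in a0 the current stack pointer
   and jump to SVecURet, presumably the trap-exit path that restores the full
   register state from that iframe. */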
FUNCTION(arch_longjmp_iframe):
	mv sp, a0
	call SVecURet
FUNCTION_END(arch_longjmp_iframe)


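/* arch_user_thread_exit: issue the SYSCALL_EXIT_THREAD syscall, passing the
   syscall number in t0; the following `ret` is presumably never reached. */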
FUNCTION(arch_user_thread_exit):
	li t0, SYSCALL_EXIT_THREAD
	ecall
	ret
FUNCTION_END(arch_user_thread_exit)