/* xref: /haiku/src/system/kernel/arch/x86/64/arch.S
   (revision 0754c319592cd8a523959d85fb06ab23c64a98a6) */
/*
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2012, Rene Gollent, rene@gollent.com.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Copyright 2002, Michael Noisternig. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <asm_defs.h>

#include "asm_offsets.h"
#include "syscall_numbers.h"


.text


/* void x86_fxsave(void* fpuState); */
FUNCTION(x86_fxsave):
	// Save the x87/MMX/SSE state (512-byte area) to the buffer in %rdi.
	// The CPU requires the FXSAVE destination to be 16-byte aligned.
	fxsave	(%rdi)
	ret
FUNCTION_END(x86_fxsave)


/* void x86_fxrstor(const void* fpuState); */
FUNCTION(x86_fxrstor):
	// Restore the x87/MMX/SSE state from the buffer in %rdi.
	// The CPU requires the FXRSTOR source to be 16-byte aligned.
	fxrstor	(%rdi)
	ret
FUNCTION_END(x86_fxrstor)


/* void x86_noop_swap(void *oldFpuState, const void *newFpuState); */
FUNCTION(x86_noop_swap):
	// Do-nothing FPU state swap; same signature as x86_fxsave_swap so it
	// can be used interchangeably (presumably when lazy/real FPU swapping
	// is disabled -- confirm against the caller).
	nop
	ret
FUNCTION_END(x86_noop_swap)


/* void x86_fxsave_swap(void* oldFpuState, const void* newFpuState); */
FUNCTION(x86_fxsave_swap):
	// Save the current FPU/SSE state to *oldFpuState and load the state
	// from *newFpuState. Both buffers must be 16-byte aligned (FXSAVE/
	// FXRSTOR requirement).
	fxsave	(%rdi)
	fxrstor	(%rsi)
	ret
FUNCTION_END(x86_fxsave_swap)


/* addr_t x86_get_stack_frame(); */
FUNCTION(x86_get_stack_frame):
	// Return the caller's frame pointer (%rbp).
	mov		%rbp, %rax
	ret
FUNCTION_END(x86_get_stack_frame)


/* void x86_64_thread_entry(); */
FUNCTION(x86_64_thread_entry):
	// Terminate the stack-frame chain so backtraces stop here.
	xorq	%rbp, %rbp

	// On entry the stack holds the entry function pointer at (%rsp) and
	// its argument at 8(%rsp). Remember that location in %rax, then
	// realign the stack: step past the two slots, round down to a 16-byte
	// boundary, and subtract 8 so %rsp looks exactly as it would right
	// after a "call" (SysV AMD64 expects %rsp % 16 == 8 at function
	// entry).
	movq	%rsp, %rax
	addq	$16, %rsp
	andq	$0xfffffffffffffff0, %rsp
	subq	$8, %rsp

	// Load the argument and tail-jump into the entry function.
	movq	8(%rax), %rdi
	jmp		*(%rax)
FUNCTION_END(x86_64_thread_entry)


/* void x86_swap_pgdir(uint64 newPageDir); */
FUNCTION(x86_swap_pgdir):
	// Load the new top-level page table (PML4) physical address into CR3,
	// switching the address space and flushing non-global TLB entries.
	movq	%rdi, %cr3
	ret
FUNCTION_END(x86_swap_pgdir)


/* thread exit stub */
// The end symbol below marks the stub's extent so its size can be computed;
// the stub is presumably copied onto the userspace stack (the .align keeps
// the copied size a multiple of 8) -- confirm against the thread setup code.
.align 8
FUNCTION(x86_userspace_thread_exit):
	// The thread function's return value arrives in %rax; pass it as the
	// exit status argument of the exit-thread syscall.
	movq	%rax, %rdi
	movq	$SYSCALL_EXIT_THREAD, %rax
	syscall
.align 8
FUNCTION_END(x86_userspace_thread_exit)
SYMBOL(x86_end_userspace_thread_exit):


// Zero-limit, zero-base IDT descriptor used to force a reboot.
null_idt_descr:
	.word	0
	.quad	0

FUNCTION(x86_reboot):
	// Load the empty IDT and raise an interrupt: with no valid gate the
	// CPU faults, faults again dispatching that fault, and triple-faults,
	// which resets the machine.
	lidt	null_idt_descr
	int		$0
done:
	// Should never be reached; spin in case the reset is not immediate.
	jmp		done
FUNCTION_END(x86_reboot)


/* status_t arch_cpu_user_memcpy(void* to, const void* from, size_t size,
		addr_t* faultHandler)

	Copies size bytes from (potentially unmapped) user memory. While the
	copy runs, *faultHandler is pointed at our local error label so a page
	fault lands there instead of panicking; the previous handler is
	restored on every exit path. Returns 0 on success, -1 on fault.

	Register roles: rdi = to, rsi = from, rcx = size, r8 = faultHandler,
	r9 = saved size, rax = saved old fault handler. */
FUNCTION(arch_cpu_user_memcpy):
	// faultHandler -> r8, size -> rcx (count register for rep).
	movq	%rcx, %r8
	movq	%rdx, %rcx

	// Set the fault handler, preserve old in rax.
	movq	(%r8), %rax
	movq	$.L_user_memcpy_error, (%r8)

	// Move by quadwords (forward direction).
	cld
	movq	%rcx, %r9
	shrq	$3, %rcx
	rep
	movsq

	// Move the remaining (size mod 8) bytes one at a time.
	movq	%r9, %rcx
	andq	$7, %rcx
	rep
	movsb

	// Restore the old fault handler and return success.
	movq	%rax, (%r8)
	xorl	%eax, %eax
	ret

.L_user_memcpy_error:
	// Faulted: restore the old fault handler. Return a generic error, the
	// wrapper routine will deal with it.
	movq	%rax, (%r8)
	movl	$-1, %eax
	ret
FUNCTION_END(arch_cpu_user_memcpy)


/* status_t arch_cpu_user_memset(void* to, char c, size_t count,
		addr_t* faultHandler)

	Fills count bytes of (potentially unmapped) user memory with c, using
	the same fault-handler protocol as arch_cpu_user_memcpy. Returns 0 on
	success, -1 on fault.

	Register roles: rdi = to, al = c, rcx = count, r8 = faultHandler,
	rdx = saved old fault handler. */
FUNCTION(arch_cpu_user_memset):
	// c -> al (stosb only reads %al; use a 32-bit move to avoid the
	// 16-bit operand prefix and partial-register write of "movw %si, %ax"),
	// faultHandler -> r8, count -> rcx.
	movl	%esi, %eax
	movq	%rcx, %r8
	movq	%rdx, %rcx

	// Set the fault handler, preserve old in rdx.
	movq	(%r8), %rdx
	movq	$.L_user_memset_error, (%r8)

	rep
	stosb

	// Restore the old fault handler and return success.
	movq	%rdx, (%r8)
	xorl	%eax, %eax
	ret

.L_user_memset_error:
	// Faulted: restore the old fault handler. Return a generic error, the
	// wrapper routine will deal with it.
	movq	%rdx, (%r8)
	movl	$-1, %eax
	ret
FUNCTION_END(arch_cpu_user_memset)


/* ssize_t arch_cpu_user_strlcpy(void* to, const void* from, size_t size,
		addr_t* faultHandler)

	strlcpy() on (potentially unmapped) user memory: copies at most
	size - 1 bytes, always null-terminates when size > 0, and returns the
	full length of the source string (excluding the terminator), or -1 if
	a fault occurred. Uses the same fault-handler protocol as
	arch_cpu_user_memcpy.

	Register roles: rdi = to, rsi = from, rcx = size, r8 = faultHandler,
	r9 = original from (for the length calculation), rax = saved old
	fault handler. */
FUNCTION(arch_cpu_user_strlcpy):
	// faultHandler -> r8, size -> rcx, source -> r9 (original value needed to
	// calculate return value).
	movq	%rcx, %r8
	movq	%rdx, %rcx
	movq	%rsi, %r9

	// Set the fault handler, preserve old in rax.
	movq	(%r8), %rax
	movq	$.L_user_strlcpy_error, (%r8)

	// Check for 0 length: nothing is written, only the source is measured.
	cmp		$0, %rcx
	je		.L_user_strlcpy_source_count

	// Copy at most count - 1 bytes, leaving room for the terminator.
	dec		%rcx

	// If count is now 0, skip straight to null terminating as our loop will
	// otherwise overflow.
	jnz		.L_user_strlcpy_copy_begin
	movb	$0, (%rdi)
	jmp		.L_user_strlcpy_source_count

.L_user_strlcpy_copy_begin:
	cld
.L_user_strlcpy_copy_loop:
	// Move data by bytes; stop early when the terminator was copied.
	lodsb
	stosb
	test	%al, %al
	jz		.L_user_strlcpy_source_done
	loop	.L_user_strlcpy_copy_loop

	// Buffer full: null terminate the destination. Step %rsi back so the
	// length math below is consistent with the scan that follows.
	movb	$0, (%rdi)
	dec		%rsi

.L_user_strlcpy_source_count:
	// Count the remaining bytes in the source. %rcx is 0 on every path
	// reaching this label, so NOT turns it into the maximum scan count.
	not		%rcx
	xor		%al, %al
	movq	%rsi, %rdi
	repnz
	scasb
	// scasb leaves %rdi one past the terminator.
	movq	%rdi, %rsi

.L_user_strlcpy_source_done:
	// Restore the old fault handler.
	movq	%rax, (%r8)

	// Total source length = (one past terminator) - start - 1.
	movq	%rsi, %rax
	subq	%r9, %rax
	dec		%rax
	ret

.L_user_strlcpy_error:
	// Faulted: restore the old fault handler. Return a generic error, the
	// wrapper routine will deal with it.
	movq	%rax, (%r8)
	movq	$-1, %rax
	ret
FUNCTION_END(arch_cpu_user_strlcpy)


/*!	\fn void arch_debug_call_with_fault_handler(cpu_ent* cpu,
		jmp_buf jumpBuffer, void (*function)(void*), void* parameter)

	Called by debug_call_with_fault_handler() to do the dirty work of setting
	the fault handler and calling the function. If the function causes a page
	fault, the arch_debug_call_with_fault_handler() calls longjmp() with the
	given \a jumpBuffer. Otherwise it returns normally.

	debug_call_with_fault_handler() has already saved the CPU's fault_handler
	and fault_handler_stack_pointer and will reset them later, so
	arch_debug_call_with_fault_handler() doesn't need to care about it.

	\param cpu The \c cpu_ent for the current CPU.
	\param jumpBuffer Buffer to be used for longjmp().
	\param function The function to be called.
	\param parameter The parameter to be passed to the function to be called.
*/
FUNCTION(arch_debug_call_with_fault_handler):
	push	%rbp
	movq	%rsp, %rbp

	// Preserve the jump buffer address for the fault return; it lives at
	// -8(%rbp) for the fault path below.
	push	%rsi

	// Set fault handler address, and fault handler stack pointer address. We
	// don't need to save the previous values, since that's done by the caller.
	movq	$.L_debug_call_fault_handler, CPU_ENT_fault_handler(%rdi)
	movq	%rbp, CPU_ENT_fault_handler_stack_pointer(%rdi)

	// Call the function with its parameter.
	movq	%rcx, %rdi
	call	*%rdx

	// Regular return.
	movq	%rbp, %rsp
	pop		%rbp
	ret

.L_debug_call_fault_handler:
	// Fault -- return via longjmp(jumpBuffer, 1). The fault handler entered
	// with %rbp restored from fault_handler_stack_pointer, so the saved
	// jump buffer is at -8(%rsp) after resetting %rsp.
	// NOTE(review): this reads below %rsp (no red zone in kernel mode) --
	// relies on nothing clobbering that slot between the reset and the load.
	movq	%rbp, %rsp
	movq	-8(%rsp), %rdi
	movq	$1, %rsi
	call	longjmp
FUNCTION_END(arch_debug_call_with_fault_handler)
283