xref: /haiku/src/system/kernel/arch/x86/64/arch.S (revision 29f8805f6c70f1c819eb58ac2220647d8e40d6e7)
/*
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2012, Rene Gollent, rene@gollent.com.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Copyright 2002, Michael Noisternig. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <asm_defs.h>

#include "asm_offsets.h"
#include "syscall_numbers.h"


.text


/* void x86_fxsave(void* fpuState); */
FUNCTION(x86_fxsave):
	fxsave	(%rdi)
	ret
FUNCTION_END(x86_fxsave)


/* void x86_fxrstor(const void* fpuState); */
FUNCTION(x86_fxrstor):
	fxrstor	(%rdi)
	ret
FUNCTION_END(x86_fxrstor)
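
// Note: fxsave/fxrstor operate on the architecturally defined 512-byte FXSAVE
// area, which must be 16-byte aligned. A minimal usage sketch (illustrative,
// not code from this file):
//
//     static uint8 fpuState[512] __attribute__((aligned(16)));
//     x86_fxsave(fpuState);
//     ...
//     x86_fxrstor(fpuState);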


/* void x86_noop_swap(void *oldFpuState, const void *newFpuState); */
FUNCTION(x86_noop_swap):
	nop
	ret
FUNCTION_END(x86_noop_swap)


/* void x86_fxsave_swap(void* oldFpuState, const void* newFpuState); */
FUNCTION(x86_fxsave_swap):
	fxsave	(%rdi)
	fxrstor	(%rsi)
	ret
FUNCTION_END(x86_fxsave_swap)
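
// x86_noop_swap() and x86_fxsave_swap() share one signature so that either
// can be installed as the FPU state-swap hook run on context switch; which
// one gets installed is decided elsewhere (inferred, not shown in this file).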


/* addr_t x86_get_stack_frame(); */
FUNCTION(x86_get_stack_frame):
	mov		%rbp, %rax
	ret
FUNCTION_END(x86_get_stack_frame)
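
// This simply returns the caller's frame pointer, which is only meaningful
// when the code is built with frame pointers enabled; stack walkers use it as
// the head of the frame-pointer chain.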


/* uint64 x86_read_msr(uint32 register); */
FUNCTION(x86_read_msr):
	mov		%edi, %ecx
	rdmsr
	// rdmsr delivers the 64-bit value split across edx:eax; merge into rax.
	shl		$32, %rdx
	mov		%eax, %eax		// defensive: the 32-bit writes already zero-extend
	or		%rdx, %rax
	ret
FUNCTION_END(x86_read_msr)
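
// Usage sketch (illustrative; 0x1b is the architectural IA32_APIC_BASE MSR):
//
//     uint64 apicBase = x86_read_msr(0x1b);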


/* void x86_write_msr(uint32 register, uint64 value); */
FUNCTION(x86_write_msr):
	// wrmsr expects the MSR index in ecx and the value split across edx:eax.
	mov		%rsi, %rdx
	mov		%esi, %eax
	mov		%edi, %ecx
	shr		$32, %rdx
	wrmsr
	ret
FUNCTION_END(x86_write_msr)


/* void x86_64_thread_entry(); */
FUNCTION(x86_64_thread_entry):
	movq	%r15, %rdi
	jmp		*%r14
FUNCTION_END(x86_64_thread_entry)
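
// The initial stack frame built by the thread-creation code evidently seeds
// r14 with the entry function and r15 with its argument; this shim moves the
// argument into rdi per the SysV AMD64 ABI and tail-jumps to the entry point.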


/* void x86_context_switch(struct arch_thread* oldState,
		struct arch_thread* newState); */
FUNCTION(x86_context_switch):
	// Just need to save callee-save registers: RBP, RBX, R12-15.
	push	%r15
	push	%r14
	push	%r13
	push	%r12
	push	%rbp
	push	%rbx

	// Swap the stack pointers.
	movq	%rsp, ARCH_THREAD_current_stack(%rdi)
	movq	ARCH_THREAD_current_stack(%rsi), %rsp

	// Restore callee-save registers.
	pop		%rbx
	pop		%rbp
	pop		%r12
	pop		%r13
	pop		%r14
	pop		%r15

	ret
FUNCTION_END(x86_context_switch)
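
// Only rbp, rbx and r12-r15 need saving because x86_context_switch() is
// reached by an ordinary call: the SysV AMD64 ABI already treats every other
// general-purpose register as clobbered at the call site. The final ret pops
// whatever return address sits on the new thread's stack, resuming that
// thread where it last entered this function (or at its seeded entry frame).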


/* void x86_swap_pgdir(uint64 newPageDir); */
FUNCTION(x86_swap_pgdir):
	movq	%rdi, %cr3
	ret
FUNCTION_END(x86_swap_pgdir)
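
// Writing cr3 installs the new top-level page table (the PML4 on x86-64) and
// implicitly flushes all non-global TLB entries.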


/* thread exit stub */
.align 8
FUNCTION(x86_userspace_thread_exit):
	movq	%rax, %rdi
	movq	$SYSCALL_EXIT_THREAD, %rax
	syscall
.align 8
FUNCTION_END(x86_userspace_thread_exit)
SYMBOL(x86_end_userspace_thread_exit):
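
// The stub above is bracketed by x86_userspace_thread_exit and
// x86_end_userspace_thread_exit so its size can be computed; presumably it is
// copied out as the fake return address of a userland thread's entry
// function, so that the function's return value (still in rax) becomes the
// argument of the exit-thread syscall.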


null_idt_descr:
	.word	0
	.quad	0

FUNCTION(x86_reboot):
	lidt	null_idt_descr
	int		$0
done:
	jmp		done
FUNCTION_END(x86_reboot)
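
// Loading an IDT with a zero limit makes the following int undeliverable; the
// resulting fault cascade ends in a triple fault, which resets the processor.
// The jmp-to-self merely parks the CPU in case the reset is not immediate.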


/* status_t arch_cpu_user_memcpy(void* to, const void* from, size_t size,
		addr_t* faultHandler) */
FUNCTION(arch_cpu_user_memcpy):
	// faultHandler -> r8, size -> rcx.
	movq	%rcx, %r8
	movq	%rdx, %rcx

	// Set the fault handler, preserve old in rax.
	movq	(%r8), %rax
	movq	$.L_user_memcpy_error, (%r8)

	// Move by quadwords.
	cld
	movq	%rcx, %r9
	shrq	$3, %rcx
	rep
	movsq

	// Move any remaining data by bytes.
	movq	%r9, %rcx
	andq	$7, %rcx
	rep
	movsb

	// Restore the old fault handler and return.
	movq	%rax, (%r8)
	xorl	%eax, %eax
	ret

.L_user_memcpy_error:
	// Restore the old fault handler. Return a generic error, the wrapper
	// routine will deal with it.
	movq	%rax, (%r8)
	movl	$-1, %eax
	ret
FUNCTION_END(arch_cpu_user_memcpy)
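
// Fault-handler protocol: *faultHandler is pointed at a local recovery label
// before user memory is touched; if an access faults, the page-fault handler
// is expected to resume execution at that address. A caller sketch
// (illustrative only; the real wrapper lives elsewhere in the kernel):
//
//     status_t user_memcpy(void* to, const void* from, size_t size)
//     {
//         Thread* thread = thread_get_current_thread();
//         if (arch_cpu_user_memcpy(to, from, size,
//                 &thread->fault_handler) < B_OK)
//             return B_BAD_ADDRESS;
//         return B_OK;
//     }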


/* status_t arch_cpu_user_memset(void* to, char c, size_t count,
		addr_t* faultHandler) */
FUNCTION(arch_cpu_user_memset):
	// c -> al, faultHandler -> r8, size -> rcx.
	movw	%si, %ax
	movq	%rcx, %r8
	movq	%rdx, %rcx

	// Set the fault handler, preserve old in rdx.
	movq	(%r8), %rdx
	movq	$.L_user_memset_error, (%r8)

	rep
	stosb

	// Restore the old fault handler and return.
	movq	%rdx, (%r8)
	xorl	%eax, %eax
	ret

.L_user_memset_error:
	// Restore the old fault handler. Return a generic error, the wrapper
	// routine will deal with it.
	movq	%rdx, (%r8)
	movl	$-1, %eax
	ret
FUNCTION_END(arch_cpu_user_memset)
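
// Same recovery scheme as arch_cpu_user_memcpy() above. Note that stosb only
// stores al, so the upper byte of the 16-bit move from si is irrelevant.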


/* ssize_t arch_cpu_user_strlcpy(void* to, const void* from, size_t size,
		addr_t* faultHandler) */
FUNCTION(arch_cpu_user_strlcpy):
	// faultHandler -> r8, size -> rcx, source -> r9 (original value needed to
	// calculate return value).
	movq	%rcx, %r8
	movq	%rdx, %rcx
	movq	%rsi, %r9

	// Set the fault handler, preserve old in rax.
	movq	(%r8), %rax
	movq	$.L_user_strlcpy_error, (%r8)

	// Check for 0 length.
	cmp		$0, %rcx
	je		.L_user_strlcpy_source_count

	// Copy at most count - 1 bytes.
	dec		%rcx

	// If count is now 0, skip straight to null terminating as our loop will
	// otherwise overflow.
	jnz		.L_user_strlcpy_copy_begin
	movb	$0, (%rdi)
	jmp		.L_user_strlcpy_source_count

.L_user_strlcpy_copy_begin:
	cld
.L_user_strlcpy_copy_loop:
	// Move data by bytes.
	lodsb
	stosb
	test	%al, %al
	jz		.L_user_strlcpy_source_done
	loop	.L_user_strlcpy_copy_loop

	// Null terminate string.
	movb	$0, (%rdi)
	dec		%rsi

.L_user_strlcpy_source_count:
	// Count remaining bytes in src.
	not		%rcx		# %rcx was 0 and is now max
	xor		%al, %al
	movq	%rsi, %rdi
	repnz
	scasb
	movq	%rdi, %rsi

.L_user_strlcpy_source_done:
	// Restore the old fault handler.
	movq	%rax, (%r8)

	// Calculate total string length and return.
	movq	%rsi, %rax
	subq	%r9, %rax
	dec		%rax
	ret

.L_user_strlcpy_error:
	// Restore the old fault handler. Return a generic error, the wrapper
	// routine will deal with it.
	movq	%rax, (%r8)
	movq	$-1, %rax
	ret
FUNCTION_END(arch_cpu_user_strlcpy)
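
// As with BSD strlcpy(), the return value is the length of the string the
// function tried to create, i.e. strlen(from); callers can thus detect
// truncation by checking for a result >= size. On a fault the generic -1 is
// returned instead.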


/*!	\fn void arch_debug_call_with_fault_handler(cpu_ent* cpu,
		jmp_buf jumpBuffer, void (*function)(void*), void* parameter)

	Called by debug_call_with_fault_handler() to do the dirty work of setting
	the fault handler and calling the function. If the function causes a page
	fault, arch_debug_call_with_fault_handler() calls longjmp() with the given
	\a jumpBuffer. Otherwise it returns normally.

	debug_call_with_fault_handler() has already saved the CPU's fault_handler
	and fault_handler_stack_pointer and will reset them later, so
	arch_debug_call_with_fault_handler() doesn't need to care about them.

	\param cpu The \c cpu_ent for the current CPU.
	\param jumpBuffer Buffer to be used for longjmp().
	\param function The function to be called.
	\param parameter The parameter to be passed to the function to be called.
*/
FUNCTION(arch_debug_call_with_fault_handler):
	push	%rbp
	movq	%rsp, %rbp

	// Preserve the jump buffer address for the fault return.
	push	%rsi

	// Set fault handler address, and fault handler stack pointer address. We
	// don't need to save the previous values, since that's done by the caller.
	movq	$.L_debug_call_fault_handler, CPU_ENT_fault_handler(%rdi)
	movq	%rbp, CPU_ENT_fault_handler_stack_pointer(%rdi)

	// Call the function.
	movq	%rcx, %rdi
	call	*%rdx

	// Regular return.
	movq	%rbp, %rsp
	pop		%rbp
	ret

.L_debug_call_fault_handler:
	// Fault -- return via longjmp(jumpBuffer, 1)
	movq	%rbp, %rsp
	// The jump buffer pointer pushed at function entry sits at rbp - 8, i.e.
	// just below the restored stack pointer.
	movq	-8(%rsp), %rdi
	movq	$1, %rsi
	call	longjmp
FUNCTION_END(arch_debug_call_with_fault_handler)