xref: /haiku/src/system/kernel/arch/x86/64/entry_compat.S (revision fc7456e9b1ec38c941134ed6d01c438cf289381e)
1/*
2 * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
3 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
4 * Distributed under the terms of the MIT License.
5 */
6
7
8#include <asm_defs.h>
9
10#include <thread_types.h>
11
12#include <arch/x86/descriptors.h>
13#include <arch/x86/arch_altcodepatch.h>
14#include <arch/x86/arch_cpu.h>
15#include <arch/x86/arch_kernel.h>
16#define COMMPAGE_COMPAT
17#include <commpage_defs.h>
18
19#include "asm_offsets.h"
20#include "syscall_numbers.h"
21#include "syscall_table.h"
22
23
24// Push the remainder of the interrupt frame onto the stack.
// The caller must already have pushed the top of the frame (ss, rsp, flags,
// cs, ip) plus error_code and vector. RAX is pushed twice: the first copy
// fills the "orig_rax" slot (the original syscall number, kept so the call
// can be restarted later), the second is the ordinary saved RAX. The final
// pushq $0 fills the remaining iframe field (presumably the fpu pointer --
// confirm against struct iframe / asm_offsets) and $iframeType tags the
// frame with its type.
25#define PUSH_IFRAME_BOTTOM(iframeType)	\
26	push	%rax;	/* orig_rax */		\
27	push	%rax;						\
28	push	%rbx;						\
29	push	%rcx;						\
30	push	%rdx;						\
31	push	%rdi;						\
32	push	%rsi;						\
33	push	%rbp;						\
34	push	%r8;						\
35	push	%r9;						\
36	push	%r10;						\
37	push	%r11;						\
38	push	%r12;						\
39	push	%r13;						\
40	push	%r14;						\
41	push	%r15;						\
42	pushq	$0;							\
43	push	$iframeType;
44
45
46// Restore the interrupt frame.
// Mirror of PUSH_IFRAME_BOTTOM: the leading add $16 skips the type tag and
// the zero filler word, the pops restore the 15 saved general-purpose
// registers, and the final addq $24 discards orig_rax, vector and
// error_code, leaving RSP pointing at the iframe's ip field (so
// ip/cs/flags/rsp/ss remain on the stack for the return instruction).
47#define RESTORE_IFRAME()				\
48	add		$16, %rsp;					\
49	pop		%r15;						\
50	pop		%r14;						\
51	pop		%r13;						\
52	pop		%r12;						\
53	pop		%r11;						\
54	pop		%r10;						\
55	pop		%r9;						\
56	pop		%r8;						\
57	pop		%rbp;						\
58	pop		%rsi;						\
59	pop		%rdi;						\
60	pop		%rdx;						\
61	pop		%rcx;						\
62	pop		%rbx;						\
63	pop		%rax;						\
64	addq	$24, %rsp;
65
66
67// The macros below require R12 to contain the current thread pointer. R12 is
68// callee-save so will be preserved through all function calls and only needs
69// to be obtained once. R13 is used to store the system call start time, will
70// also be preserved.
71
// Acquire the current thread's time_lock spinlock. Clobbers RDI plus
// whatever acquire_spinlock itself clobbers (caller-saved registers).
72#define LOCK_THREAD_TIME()										\
73	leaq	THREAD_time_lock(%r12), %rdi;						\
74	call	acquire_spinlock;
75
// Release the current thread's time_lock spinlock. Clobbers RDI plus
// whatever release_spinlock itself clobbers (caller-saved registers).
// NOTE: no trailing backslash after the last instruction -- a continuation
// on the final line would silently splice the next source line into the
// macro definition.
76#define UNLOCK_THREAD_TIME()									\
77	leaq	THREAD_time_lock(%r12), %rdi;						\
78	call	release_spinlock;
79
// Charge the time elapsed since thread->last_time to thread->user_time,
// stamp last_time with the current system_time(), and mark the thread as
// now running in kernel mode. Leaves the entry timestamp in R13
// (callee-saved) for later use. Used on the syscall entry path, before
// interrupts are re-enabled. Requires R12 = current thread (see above).
80#define UPDATE_THREAD_USER_TIME()								\
81	LOCK_THREAD_TIME()											\
82																\
83	call	system_time;										\
84	movq	%rax, %r13;											\
85																\
86	/* thread->user_time += now - thread->last_time; */			\
87	subq	THREAD_last_time(%r12), %rax;						\
88	addq	%rax, THREAD_user_time(%r12);						\
89																\
90	/* thread->last_time = now; */								\
91	movq	%r13, THREAD_last_time(%r12);						\
92																\
93	/* thread->in_kernel = true; */								\
94	movb	$1, THREAD_in_kernel(%r12);							\
95																\
96	UNLOCK_THREAD_TIME()
97
// Counterpart of UPDATE_THREAD_USER_TIME for the exit path: charge the time
// elapsed since thread->last_time to thread->kernel_time, stamp last_time,
// and clear thread->in_kernel. Leaves the timestamp in R13 (callee-saved).
// Called with interrupts disabled (after cli) and R12 = current thread.
98#define UPDATE_THREAD_KERNEL_TIME()								\
99	LOCK_THREAD_TIME()											\
100																\
101	call	system_time;										\
102	movq	%rax, %r13;											\
103																\
104	/* thread->kernel_time += now - thread->last_time; */		\
105	subq	THREAD_last_time(%r12), %rax;						\
106	addq	%rax, THREAD_kernel_time(%r12);						\
107																\
108	/* thread->last_time = now; */								\
109	movq	%r13, THREAD_last_time(%r12);						\
110																\
111	/* thread->in_kernel = false; */							\
112	movb	$0, THREAD_in_kernel(%r12);							\
113																\
114	UNLOCK_THREAD_TIME()
115
// If the thread has breakpoints installed or is single-stepping, call
// x86_exit_user_debug_at_kernel_entry to leave user-debug mode while in the
// kernel; otherwise fall straight through to the local label 1:.
// Requires R12 = current thread.
116#define STOP_USER_DEBUGGING()									\
117	testl	$(THREAD_FLAGS_BREAKPOINTS_INSTALLED				\
118			| THREAD_FLAGS_SINGLE_STEP), THREAD_flags(%r12);	\
119	jz		1f;													\
120	call	x86_exit_user_debug_at_kernel_entry;				\
121  1:
122
// Zero all sixteen XMM registers so SIMD values touched while in the kernel
// do not leak back to userland on the fast return path, which does not
// restore saved FPU state (only the .Lrestore_fpu path does an fxrstor).
123#define CLEAR_FPU_STATE() \
124	pxor %xmm0, %xmm0; \
125	pxor %xmm1, %xmm1; \
126	pxor %xmm2, %xmm2; \
127	pxor %xmm3, %xmm3; \
128	pxor %xmm4, %xmm4; \
129	pxor %xmm5, %xmm5; \
130	pxor %xmm6, %xmm6; \
131	pxor %xmm7, %xmm7; \
132	pxor %xmm8, %xmm8; \
133	pxor %xmm9, %xmm9; \
134	pxor %xmm10, %xmm10; \
135	pxor %xmm11, %xmm11; \
136	pxor %xmm12, %xmm12; \
137	pxor %xmm13, %xmm13; \
138	pxor %xmm14, %xmm14; \
139	pxor %xmm15, %xmm15
140
141
142// SYSCALL entry point.
// NOTE(review): unimplemented stub (see TODO) -- it executes a bare sysret
// without building an iframe or dispatching anything, so it must not be
// reached as a real entry point yet; confirm the MSRs route compat SYSCALL
// elsewhere before relying on this.
143FUNCTION(x86_64_syscall32_entry):
144	// TODO: implement for AMD SYSCALL
145	sysret
146FUNCTION_END(x86_64_syscall32_entry)
147
148
149// SYSENTER entry point.
150//	ecx - user esp
// Entered from the 32-bit commpage syscall stub with the syscall number in
// EAX and the user stack pointer in ECX. Interrupts are disabled on entry
// (they are re-enabled below once the iframe is complete). The code builds
// a full iframe by hand, copies and widens the 32-bit user arguments,
// dispatches through the compat syscall table, and returns via SYSEXIT on
// the fast path or IRET when the frame may have been modified.
151FUNCTION(x86_64_sysenter32_entry):
152	swapgs
	// GS base now refers to kernel per-CPU data: %gs:0 is the current thread.
153
154	// Set up an iframe on the stack (ECX = saved ESP).
155	push	$USER_DATA_SELECTOR			// ss
156	// zero extend %ecx
157	movl	%ecx, %ecx
158	push	%rcx						// rsp
159	pushfq								// flags
160	orl		$(1 << 9), (%rsp)		// set the IF (interrupts) bit
161	push	$USER32_CODE_SELECTOR		// cs
162
	// Compute the userland return address: the instruction right after the
	// sysenter inside this team's commpage syscall stub.
	// ASM_STAC/ASM_CLAC bracket the access to the user-mapped commpage
	// (presumably SMAP stac/clac patched in via altcodepatch -- confirm).
163	movq	%gs:0, %rdx
164	movq	THREAD_team(%rdx), %rdx
165	movq	TEAM_commpage_address(%rdx), %rdx
166	ASM_STAC
167	add		4 * COMMPAGE_ENTRY_X86_SYSCALL(%rdx), %rdx
168	ASM_CLAC
169	add		$4, %rdx				// sysenter is at offset 2, 2 bytes long
170	push	%rdx						// ip
171
	// Pseudo error code and vector number (99) tagging this iframe as a
	// syscall rather than a real interrupt.
172	push	$0							// error_code
173	push	$99							// vector
174	PUSH_IFRAME_BOTTOM(IFRAME_TYPE_SYSCALL)
175
176	cld
177
178	// Frame pointer is the iframe.
179	movq	%rsp, %rbp
180	andq	$~15, %rsp
181
182	// Preserve call number (R14 is callee-save), get thread pointer.
183	movq	%rax, %r14
184	movq	%gs:0, %r12
185
186	STOP_USER_DEBUGGING()
187	UPDATE_THREAD_USER_TIME()
188
189	// No longer need interrupts disabled.
190	sti
191
192	// Check whether the syscall number is valid.
193	cmpq	$SYSCALL_COUNT, %r14
194	jae		.Lsyscall_return
195
196	// Get the system call table entry. Note I'm hardcoding the shift because
197	// sizeof(syscall_info) is 16 and scale factors of 16 aren't supported,
198	// so can't just do leaq kSyscallInfos(, %rax, SYSCALL_INFO_sizeof).
199	movq	%r14, %rax
200	shlq	$4, %rax
201	leaq	kSyscallCompatInfos(, %rax, 1), %rax
202
203	// Restore the arguments from the stack.
204	movq	SYSCALL_INFO_parameter_size(%rax), %rcx
205
	// RSI = user stack just above the return address; reject any source
	// pointer outside the userland address range.
206	// Get the address to copy from.
207	movq	IFRAME_user_sp(%rbp), %rsi
208	addq	$4, %rsi
209	movabs	$(USER_BASE + USER_SIZE), %rdx
210	cmp		%rdx, %rsi
211	jae		.Lbad_syscall_args
212
	// Each 32-bit parameter is widened to 64 bits, so reserve twice the
	// 32-bit parameter size (minimum 48 bytes = six argument slots).
213	// Make space on the stack for the double size.
214	shlq	$1, %rcx
215	cmpq	$48, %rcx
216	ja		.Lprepare_stack
217	movq	$48, %rcx
218.Lprepare_stack:
219	subq	%rcx, %rsp
220	andq	$~15, %rsp
221	movq	%rsp, %rdi
222
223	// Get the extended system call table entry.
224	movq	%r14, %r15
225	imulq	$ EXTENDED_SYSCALL_INFO_sizeof, %r15
226	leaq	kExtendedSyscallCompatInfos(, %r15, 1), %r15
227	xor		%rcx, %rcx
228	movl	EXTENDED_SYSCALL_INFO_parameter_count(%r15), %ecx
229	leaq	EXTENDED_SYSCALL_INFO_parameters(%r15), %r15
230
	// Any fault while reading the user stack jumps to .Lbad_syscall_args.
231	// Set a fault handler.
232	movq	$.Lbad_syscall_args, THREAD_fault_handler(%r12)
233
234	ASM_STAC
235
	// Argument copy loop: RSI walks the user stack, RDI the kernel buffer,
	// RCX counts remaining parameters, R15 points at the parameter info.
	// NOTE: movsd here is the string "move doubleword" (4 bytes via
	// RSI/RDI), not the SSE instruction. Parameters with used_size == 8 get
	// a second doubleword copied; smaller ones are zero-extended by storing
	// a zero doubleword in the upper half.
236	jmp 	2f
237	// Copy them by doublewords.
2381:
239	// Advance to next parameter
240	addq	$ SYSCALL_PARAMETER_INFO_sizeof, %r15
241	subq	$1, %rcx
2422:
243	cmpq	$0, %rcx
244	je		4f
245	movsd
246	cmpl	$0x8, SYSCALL_PARAMETER_INFO_used_size(%r15)
247	je		3f
248	movl	$0,	(%rdi)
249	addq	$4, %rdi
250	jmp		1b
2513:
252	// Copy the next doubleword
253	movsd
254	jmp		1b
2554:
256	ASM_CLAC
257	movq	$0, THREAD_fault_handler(%r12)
258
259.Lperform_syscall:
260	testl	$THREAD_FLAGS_DEBUGGER_INSTALLED, THREAD_flags(%r12)
261	jnz		.Lpre_syscall_debug
262
263.Lpre_syscall_debug_done:
	// Load the six argument registers from the widened copies made above.
264	// arguments on the stack, copy in the registers
265	pop		%rdi
266	pop		%rsi
267	pop		%rdx
268	pop		%rcx
269	pop		%r8
270	pop		%r9
271
272	// TODO: pre-syscall tracing
273
	// The 64-bit result is split for the 32-bit caller: low half in the
	// iframe's ax, high half in dx.
274	// Call the function and save its return value.
275	call	*SYSCALL_INFO_function(%rax)
276	movq	%rax, %rdx
277	movq	%rax, IFRAME_ax(%rbp)
278	shrq	$32, %rdx
279	movq	%rdx, IFRAME_dx(%rbp)
280
281	// TODO: post-syscall tracing
282
283.Lsyscall_return:
284	// Restore the original stack pointer and return.
285	movq	%rbp, %rsp
286
287	testl	$(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
288			| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \
289			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP \
290			| THREAD_FLAGS_64_BIT_SYSCALL_RETURN \
291			| THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_SYSCALL_RESTARTED) \
292			, THREAD_flags(%r12)
293	jnz		.Lpost_syscall_work
294
295	cli
296
297	UPDATE_THREAD_KERNEL_TIME()
298
299	// If we've just restored a signal frame, use the IRET path.
300	cmpq	$SYSCALL_RESTORE_SIGNAL_FRAME, %r14
301	je		.Lrestore_fpu
302
303	CLEAR_FPU_STATE()
304
	// SYSEXIT returns with EDX -> EIP and ECX -> ESP. Pop the saved ip into
	// RDX, skip cs, clear IF in the saved flags so popfq does not re-enable
	// interrupts early, restore flags, then pop the saved rsp into RCX.
305	// Restore the iframe and RCX/RDX for SYSEXIT.
306	RESTORE_IFRAME()
307	pop		%rdx
308	addq	$8, %rsp
309	andl  $~0x200,(%rsp)
310	popfq
311	pop		%rcx
312
	// sti's one-instruction interrupt shadow keeps interrupts masked until
	// after sysexit completes.
313	// Restore previous GS base and return.
314	swapgs
315	sti
316	sysexit
317
318
319.Lpre_syscall_debug:
320	// preserve registers
321	push	%rdi
322	push	%rsi
323
324	// user_debug_pre_syscall expects a pointer to a block of arguments, need
325	// to push the register arguments onto the stack.
	// 0x10(%rsp) skips the two registers saved above, so RSI points at the
	// copied argument block. RAX (the syscall table entry) is caller-saved,
	// so preserve it around the call.
326	movq	%r14, %rdi				// syscall number
327	movq	0x10(%rsp), %rsi
328	push	%rax
329	call	user_debug_pre_syscall
330	pop		%rax
331
332	// restore registers
333	pop		%rsi
334	pop		%rdi
335	jmp		.Lpre_syscall_debug_done
336
337.Lpost_syscall_work:
	// Atomically clear the restart-related flags with a cmpxchg retry loop
	// (other CPUs/interrupts may modify THREAD_flags concurrently).
338	// Clear the restarted flag.
339	testl	$(THREAD_FLAGS_64_BIT_SYSCALL_RETURN \
340				| THREAD_FLAGS_SYSCALL_RESTARTED), THREAD_flags(%r12)
341	jz		2f
3421:
343	movl	THREAD_flags(%r12), %eax
344	movl	%eax, %edx
345	andl	$~(THREAD_FLAGS_64_BIT_SYSCALL_RETURN \
346				| THREAD_FLAGS_SYSCALL_RESTARTED), %edx
347	lock
348	cmpxchgl	%edx, THREAD_flags(%r12)
349	jnz		1b
3502:
351	testl	$THREAD_FLAGS_DEBUGGER_INSTALLED, THREAD_flags(%r12)
352	jz		1f
353
354	// Post-syscall debugging. Same as above, need a block of arguments.
355	// TODO: restore arguments from the stack
356	push	IFRAME_r9(%rbp)
357	push	IFRAME_r8(%rbp)
358	push	IFRAME_r10(%rbp)
359	push	IFRAME_dx(%rbp)
360	push	IFRAME_si(%rbp)
361	push	IFRAME_di(%rbp)
362	movq	%r14, %rdi				// syscall number
363	movq	%rsp, %rsi
364	movq	IFRAME_ax(%rbp), %rdx	// return value
365	call	user_debug_post_syscall
366	addq	$48, %rsp
3671:
368	// Do we need to handle signals?
369	testl	$(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD \
370			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
371			, THREAD_flags(%r12)
372	jnz		.Lpost_syscall_handle_signals
373	cli
374	call	thread_at_kernel_exit_no_signals
375
376.Lpost_syscall_work_done:
377	// Handle syscall restarting.
378	testl	$THREAD_FLAGS_RESTART_SYSCALL, THREAD_flags(%r12)
379	jz		1f
380	movq	%rsp, %rdi
381	call	x86_restart_syscall
3821:
383	// Install breakpoints, if defined.
384	testl	$THREAD_FLAGS_BREAKPOINTS_DEFINED, THREAD_flags(%r12)
385	jz		1f
386	movq	%rbp, %rdi
387	call	x86_init_user_debug_at_kernel_exit
3881:
389	// On this return path it is possible that the frame has been modified,
390	// for example to execute a signal handler. In this case it is safer to
391	// return via IRET.
392	CLEAR_FPU_STATE()
393	jmp .Liret
394
	// Signal-frame restore path: reload the FPU state saved in the iframe
	// instead of clearing it.
395.Lrestore_fpu:
396	movq	IFRAME_fpu(%rbp), %rax
397	fxrstorq	(%rax)
398.Liret:
399	// Restore the saved registers.
400	RESTORE_IFRAME()
401
402	// Restore the previous GS base and return.
403	swapgs
404	iretq
405
406.Lpost_syscall_handle_signals:
407	call	thread_at_kernel_exit
408	jmp		.Lpost_syscall_work_done
409
	// Reached on a fault while copying arguments, or when the argument
	// pointer was outside userland: reset the fault handler, restore RSP
	// and take the normal return path without invoking the syscall.
410.Lbad_syscall_args:
411	movq	$0, THREAD_fault_handler(%r12)
412	movq	%rbp, %rsp
413	jmp		.Lsyscall_return
414FUNCTION_END(x86_64_sysenter32_entry)
415
416
417/* thread exit stub */
418// TODO: build with the x86 compiler
// This stub is executed in 32-bit userland but assembled here by the 64-bit
// assembler, so the 32-bit-only encodings are emitted as raw bytes
// (0x50 = push %eax, 0x89 0xe1 = mov %esp, %ecx). It pushes EAX
// (presumably the thread's exit status -- confirm with the code that
// installs this stub) as the syscall argument, loads the exit-thread
// syscall number, sets ECX = ESP as the sysenter entry above expects, and
// enters the kernel. The _end symbol marks the stub's extent so it can be
// copied to userland.
419FUNCTION(x86_sysenter32_userspace_thread_exit):
420	.byte	0x50		// push %eax
421	mov		$SYSCALL_EXIT_THREAD, %eax
422	.byte	0x89,0xe1	// mov %esp, %ecx
423	sysenter
424FUNCTION_END(x86_sysenter32_userspace_thread_exit)
425SYMBOL(x86_sysenter32_userspace_thread_exit_end):
426
427