xref: /haiku/src/system/kernel/arch/x86/64/entry_compat.S (revision d12bb8b14803d030b4a8fba91131e4bb96c4f406)
/*
 * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Distributed under the terms of the MIT License.
 */


#include <asm_defs.h>

#include <thread_types.h>

#include <arch/x86/descriptors.h>
#include <arch/x86/arch_altcodepatch.h>
#include <arch/x86/arch_cpu.h>
#include <arch/x86/arch_kernel.h>
#define COMMPAGE_COMPAT
#include <commpage_defs.h>

#include "asm_offsets.h"
#include "syscall_numbers.h"
#include "syscall_table.h"


// Push the remainder of the interrupt frame onto the stack.
#define PUSH_IFRAME_BOTTOM(iframeType)	\
	push	%rax;	/* orig_rax */		\
	push	%rax;						\
	push	%rbx;						\
	push	%rcx;						\
	push	%rdx;						\
	push	%rdi;						\
	push	%rsi;						\
	push	%rbp;						\
	push	%r8;						\
	push	%r9;						\
	push	%r10;						\
	push	%r11;						\
	push	%r12;						\
	push	%r13;						\
	push	%r14;						\
	push	%r15;						\
	pushq	$0;		/* fpu */			\
	push	$iframeType;


// Restore the interrupt frame.
#define RESTORE_IFRAME()				\
	add		$16, %rsp;	/* skip type, fpu */ \
	pop		%r15;						\
	pop		%r14;						\
	pop		%r13;						\
	pop		%r12;						\
	pop		%r11;						\
	pop		%r10;						\
	pop		%r9;						\
	pop		%r8;						\
	pop		%rbp;						\
	pop		%rsi;						\
	pop		%rdi;						\
	pop		%rdx;						\
	pop		%rcx;						\
	pop		%rbx;						\
	pop		%rax;						\
	addq	$24, %rsp;	/* skip orig_rax, vector, error_code */


// The macros below require R12 to contain the current thread pointer. R12 is
// callee-save, so it is preserved across all function calls and only needs to
// be loaded once. R13 is used to store the system call start time and is
// likewise preserved.

#define LOCK_THREAD_TIME()										\
	leaq	THREAD_time_lock(%r12), %rdi;						\
	call	acquire_spinlock;

#define UNLOCK_THREAD_TIME()									\
	leaq	THREAD_time_lock(%r12), %rdi;						\
	call	release_spinlock;									\

#define UPDATE_THREAD_USER_TIME()								\
	LOCK_THREAD_TIME()											\
																\
	call	system_time;										\
																\
	/* Preserve system_time for post syscall debug */			\
	movq	%rax, %r13;											\
																\
	/* thread->user_time += now - thread->last_time; */		\
	subq	THREAD_last_time(%r12), %rax;						\
	addq	%rax, THREAD_user_time(%r12);						\
																\
	/* thread->last_time = now; */								\
	movq	%r13, THREAD_last_time(%r12);						\
																\
	/* thread->in_kernel = true; */								\
	movb	$1, THREAD_in_kernel(%r12);							\
																\
	UNLOCK_THREAD_TIME()

#define UPDATE_THREAD_KERNEL_TIME()								\
	LOCK_THREAD_TIME()											\
																\
	call	system_time;										\
	movq	%rax, %r13;											\
																\
	/* thread->kernel_time += now - thread->last_time; */		\
	subq	THREAD_last_time(%r12), %rax;						\
	addq	%rax, THREAD_kernel_time(%r12);						\
																\
	/* thread->last_time = now; */								\
	movq	%r13, THREAD_last_time(%r12);						\
																\
	/* thread->in_kernel = false; */							\
	movb	$0, THREAD_in_kernel(%r12);							\
																\
	UNLOCK_THREAD_TIME()

#define STOP_USER_DEBUGGING()									\
	testl	$(THREAD_FLAGS_BREAKPOINTS_INSTALLED				\
			| THREAD_FLAGS_SINGLE_STEP), THREAD_flags(%r12);	\
	jz		1f;													\
	call	x86_exit_user_debug_at_kernel_entry;				\
  1:

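// Clear the SSE registers before returning to userland so that no stale
// kernel register contents are handed back to user mode on the syscall
// return paths below.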
#define CLEAR_FPU_STATE() \
	pxor %xmm0, %xmm0; \
	pxor %xmm1, %xmm1; \
	pxor %xmm2, %xmm2; \
	pxor %xmm3, %xmm3; \
	pxor %xmm4, %xmm4; \
	pxor %xmm5, %xmm5; \
	pxor %xmm6, %xmm6; \
	pxor %xmm7, %xmm7; \
	pxor %xmm8, %xmm8; \
	pxor %xmm9, %xmm9; \
	pxor %xmm10, %xmm10; \
	pxor %xmm11, %xmm11; \
	pxor %xmm12, %xmm12; \
	pxor %xmm13, %xmm13; \
	pxor %xmm14, %xmm14; \
	pxor %xmm15, %xmm15


// SYSCALL entry point.
FUNCTION(x86_64_syscall32_entry):
	// TODO: implement for AMD SYSCALL
	sysret
FUNCTION_END(x86_64_syscall32_entry)


// SYSENTER entry point.
//	ecx - user esp
FUNCTION(x86_64_sysenter32_entry):
	swapgs								// switch to the kernel GS base
	lfence								// speculation barrier

	// Set up an iframe on the stack (ECX = saved ESP).
	push	$USER_DATA_SELECTOR			// ss
	// zero extend %ecx
	movl	%ecx, %ecx
	push	%rcx						// rsp
	pushfq								// flags
	orl		$(1 << 9), (%rsp)		// set the IF (interrupts) bit
	push	$USER32_CODE_SELECTOR		// cs

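	// Compute the user-space return address: add the 32-bit syscall stub's
	// offset, read from the commpage entry table, to the commpage base, then
	// skip the sysenter instruction itself (see below). ASM_STAC/ASM_CLAC
	// bracket the access to the user-mapped commpage for SMAP.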
	movq	%gs:0, %rdx
	movq	THREAD_team(%rdx), %rdx
	movq	TEAM_commpage_address(%rdx), %rdx
	ASM_STAC
	add		4 * COMMPAGE_ENTRY_X86_SYSCALL(%rdx), %rdx
	ASM_CLAC
	add		$4, %rdx				// sysenter is at offset 2, 2 bytes long
	push	%rdx						// ip

	push	$0							// error_code
	push	$99							// vector
	PUSH_IFRAME_BOTTOM(IFRAME_TYPE_SYSCALL)

	cld

	// Frame pointer is the iframe.
	movq	%rsp, %rbp
	andq	$~15, %rsp					// 16-byte align for the calls below

	// Preserve call number (R14 is callee-save), get thread pointer.
	movq	%rax, %r14
	movq	%gs:0, %r12

	STOP_USER_DEBUGGING()
	UPDATE_THREAD_USER_TIME()

	// No longer need interrupts disabled.
	sti

	// Check whether the syscall number is valid.
	cmpq	$SYSCALL_COUNT, %r14
	jae		.Lsyscall_return

	// Get the system call table entry. Note the shift is hardcoded because
	// sizeof(syscall_info) is 16 and scale factors of 16 aren't supported, so
	// we can't just do leaq kSyscallCompatInfos(, %rax, SYSCALL_INFO_sizeof).
	movq	%r14, %rax
	shlq	$4, %rax
	leaq	kSyscallCompatInfos(, %rax, 1), %rax

	// Restore the arguments from the stack.
	movq	SYSCALL_INFO_parameter_size(%rax), %rcx

	// Get the address to copy from.
	movq	IFRAME_user_sp(%rbp), %rsi
	addq	$4, %rsi
	movabs	$(USER_BASE + USER_SIZE), %rdx
	cmp		%rdx, %rsi
	jae		.Lbad_syscall_args

	// Reserve stack space for the widened 64-bit argument block: double the
	// 32-bit parameter size, with a minimum of 48 bytes so that the six
	// argument-register pops below always stay within the block.
	shlq	$1, %rcx
	cmpq	$48, %rcx
	ja		.Lprepare_stack
	movq	$48, %rcx
.Lprepare_stack:
	subq	%rcx, %rsp
	andq	$~15, %rsp
	movq	%rsp, %rdi

	// Get the extended system call table entry.
	movq	%r14, %r15
	imulq	$ EXTENDED_SYSCALL_INFO_sizeof, %r15
	leaq	kExtendedSyscallCompatInfos(, %r15, 1), %r15
	xor		%rcx, %rcx
	movl	EXTENDED_SYSCALL_INFO_parameter_count(%r15), %ecx
	leaq	EXTENDED_SYSCALL_INFO_parameters(%r15), %r15

	// Set a fault handler.
	movq	$.Lbad_syscall_args, THREAD_fault_handler(%r12)

	ASM_STAC

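	// Copy loop: RSI walks the 32-bit arguments on the user stack, RDI the
	// 64-bit argument block just reserved on the kernel stack. Each doubleword
	// is copied with movsd; parameters whose used_size is not 8 bytes get
	// their upper half zero-filled, while 8-byte parameters are copied as two
	// doublewords.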
	jmp 	2f
	// Copy them by doublewords.
1:
	// Advance to next parameter
	addq	$ SYSCALL_PARAMETER_INFO_sizeof, %r15
	subq	$1, %rcx
2:
	cmpq	$0, %rcx
	je		4f
	movsd
	cmpl	$0x8, SYSCALL_PARAMETER_INFO_used_size(%r15)
	je		3f
	movl	$0,	(%rdi)
	addq	$4, %rdi
	jmp		1b
3:
	// Copy the next doubleword
	movsd
	jmp		1b
4:
	ASM_CLAC
	movq	$0, THREAD_fault_handler(%r12)

.Lperform_syscall:
	testl	$THREAD_FLAGS_DEBUGGER_INSTALLED, THREAD_flags(%r12)
	jnz		.Lpre_syscall_debug

.Lpre_syscall_debug_done:
	// The arguments are on the stack; pop them into the argument registers.
	pop		%rdi
	pop		%rsi
	pop		%rdx
	pop		%rcx
	pop		%r8
	pop		%r9

	// TODO: pre-syscall tracing

	// Call the function and save its return value. A 64-bit result is
	// returned to the 32-bit caller in EDX:EAX.
	call	*SYSCALL_INFO_function(%rax)
	movq	%rax, %rdx
	movq	%rax, IFRAME_ax(%rbp)
	shrq	$32, %rdx
	movq	%rdx, IFRAME_dx(%rbp)

	// TODO: post-syscall tracing

.Lsyscall_return:
	// Restore the original stack pointer and return.
	movq	%rbp, %rsp

	testl	$(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
			| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \
			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP \
			| THREAD_FLAGS_64_BIT_SYSCALL_RETURN \
			| THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_SYSCALL_RESTARTED) \
			, THREAD_flags(%r12)
	jnz		.Lpost_syscall_work

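	// Fast path: no post-syscall work is pending, so return via SYSEXIT
	// (or via the IRET path below if a signal frame was just restored).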
	cli

	UPDATE_THREAD_KERNEL_TIME()

	// If we've just restored a signal frame, use the IRET path.
	cmpq	$SYSCALL_RESTORE_SIGNAL_FRAME, %r14
	je		.Lrestore_fpu

	CLEAR_FPU_STATE()

	// Restore the iframe and set up RDX/RCX for SYSEXIT.
	RESTORE_IFRAME()
	pop		%rdx						// user EIP
	addq	$8, %rsp					// skip cs
	andl	$~0x200, (%rsp)				// clear IF; reenabled by sti below
	popfq
	pop		%rcx						// user ESP

	// Restore previous GS base and return.
	swapgs
	lfence
	sti									// STI shadow: interrupts stay blocked until after sysexit
	sysexit


.Lpre_syscall_debug:
	// preserve registers
	push	%rdi
	push	%rsi

	// user_debug_pre_syscall expects a pointer to a block of arguments; the
	// 32-bit arguments were already copied into a block on the stack above,
	// now at 16(%rsp) after the two pushes.
	movq	%r14, %rdi				// syscall number
	leaq	0x10(%rsp), %rsi		// pointer to the argument block
	push	%rax
	call	user_debug_pre_syscall
	pop		%rax

	// restore registers
	pop		%rsi
	pop		%rdi
	jmp		.Lpre_syscall_debug_done

.Lpost_syscall_work:
	// Clear the 64-bit return and restarted flags.
	testl	$(THREAD_FLAGS_64_BIT_SYSCALL_RETURN \
				| THREAD_FLAGS_SYSCALL_RESTARTED), THREAD_flags(%r12)
	jz		2f
1:
	movl	THREAD_flags(%r12), %eax
	movl	%eax, %edx
	andl	$~(THREAD_FLAGS_64_BIT_SYSCALL_RETURN \
				| THREAD_FLAGS_SYSCALL_RESTARTED), %edx
	lock
	cmpxchgl	%edx, THREAD_flags(%r12)
	jnz		1b
2:
	testl	$THREAD_FLAGS_DEBUGGER_INSTALLED, THREAD_flags(%r12)
	jz		1f

	// Post-syscall debugging. Same as above, need a block of arguments.
	// TODO: restore arguments from the stack
	push	IFRAME_r9(%rbp)
	push	IFRAME_r8(%rbp)
	push	IFRAME_r10(%rbp)
	push	IFRAME_dx(%rbp)
	push	IFRAME_si(%rbp)
	push	IFRAME_di(%rbp)
	movq	%r14, %rdi				// syscall number
	movq	%rsp, %rsi
	movq	IFRAME_ax(%rbp), %rdx	// return value
	movq	%r13, %rcx				// start time, preserved earlier
	call	user_debug_post_syscall
	addq	$48, %rsp
1:
	// Do we need to handle signals?
	testl	$(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD \
			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
			, THREAD_flags(%r12)
	jnz		.Lpost_syscall_handle_signals
	cli
	call	thread_at_kernel_exit_no_signals

.Lpost_syscall_work_done:
	// Handle syscall restarting.
	testl	$THREAD_FLAGS_RESTART_SYSCALL, THREAD_flags(%r12)
	jz		1f
	movq	%rsp, %rdi
	call	x86_restart_syscall
1:
	// Install breakpoints, if defined.
	testl	$THREAD_FLAGS_BREAKPOINTS_DEFINED, THREAD_flags(%r12)
	jz		1f
	movq	%rbp, %rdi
	call	x86_init_user_debug_at_kernel_exit
1:
	// On this return path it is possible that the frame has been modified,
	// for example to execute a signal handler. In this case it is safer to
	// return via IRET.
	CLEAR_FPU_STATE()
	jmp .Liret

.Lrestore_fpu:
	movq	IFRAME_fpu(%rbp), %rax
	fxrstorq	(%rax)
.Liret:
	// Restore the saved registers.
	RESTORE_IFRAME()

	// Restore the previous GS base and return.
	swapgs
	iretq

.Lpost_syscall_handle_signals:
	call	thread_at_kernel_exit
	jmp		.Lpost_syscall_work_done

.Lbad_syscall_args:
	movq	$0, THREAD_fault_handler(%r12)
	movq	%rbp, %rsp
	jmp		.Lsyscall_return
FUNCTION_END(x86_64_sysenter32_entry)


/* thread exit stub */
// TODO: build with the x86 compiler
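// Runs in 32-bit userland: ECX is loaded with the user ESP as the sysenter
// entry above expects, and the push of EAX presumably forwards the thread
// function's return value as the exit status. Instructions the 64-bit
// assembler cannot encode for 32-bit mode are emitted as raw bytes (hence
// the TODO above), and the _end symbol marks the stub's extent so its code
// can be copied into the 32-bit address space.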
FUNCTION(x86_sysenter32_userspace_thread_exit):
	.byte	0x50		// push %eax
	mov		$SYSCALL_EXIT_THREAD, %eax
	.byte	0x89,0xe1	// mov %esp, %ecx
	sysenter
FUNCTION_END(x86_sysenter32_userspace_thread_exit)
SYMBOL(x86_sysenter32_userspace_thread_exit_end):
