xref: /haiku/src/system/kernel/arch/x86/64/interrupts.S (revision 9a6a20d4689307142a7ed26a1437ba47e244e73f)
1/*
2 * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
3 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
4 * Distributed under the terms of the MIT License.
5 */
6
7
8#include <asm_defs.h>
9
10#include <thread_types.h>
11
12#include <arch/x86/descriptors.h>
13#include <arch/x86/arch_altcodepatch.h>
14#include <arch/x86/arch_cpu.h>
15#include <arch/x86/arch_kernel.h>
16
17#include "asm_offsets.h"
18#include "syscall_numbers.h"
19#include "syscall_table.h"
20
21
22// Push the remainder of the interrupt frame onto the stack.
// On entry the hardware frame (ss/rsp/flags/cs/rip) plus the error code and
// vector are already on the stack. Note %rax is pushed twice: the first copy
// fills the iframe's "orig_rax" slot, kept unmodified so the original
// syscall number/value survives for syscall restarting; the second is the
// live RAX. The trailing "pushq $0" reserves the iframe's fpu-pointer slot
// (filled via IFRAME_fpu on the user-mode entry paths), and the final push
// records the iframe type (IFRAME_TYPE_*).
23#define PUSH_IFRAME_BOTTOM(iframeType)	\
24	push	%rax;	/* orig_rax */		\
25	push	%rax;						\
26	push	%rbx;						\
27	push	%rcx;						\
28	push	%rdx;						\
29	push	%rdi;						\
30	push	%rsi;						\
31	push	%rbp;						\
32	push	%r8;						\
33	push	%r9;						\
34	push	%r10;						\
35	push	%r11;						\
36	push	%r12;						\
37	push	%r13;						\
38	push	%r14;						\
39	push	%r15;						\
40	pushq	$0;							\
41	push	$iframeType;
42
43
44// Restore the interrupt frame.
// Exact inverse of PUSH_IFRAME_BOTTOM: the leading "add $16, %rsp" discards
// the type and fpu-pointer slots; the trailing "addq $24, %rsp" discards the
// orig_rax slot plus the vector and error code, leaving RSP pointing at the
// hardware frame (rip/cs/flags/rsp/ss), ready for iretq or the SYSRET pops.
45#define RESTORE_IFRAME()				\
46	add		$16, %rsp;					\
47	pop		%r15;						\
48	pop		%r14;						\
49	pop		%r13;						\
50	pop		%r12;						\
51	pop		%r11;						\
52	pop		%r10;						\
53	pop		%r9;						\
54	pop		%r8;						\
55	pop		%rbp;						\
56	pop		%rsi;						\
57	pop		%rdi;						\
58	pop		%rdx;						\
59	pop		%rcx;						\
60	pop		%rbx;						\
61	pop		%rax;						\
62	addq	$24, %rsp;
63
64
65// The macros below require R12 to contain the current thread pointer. R12 is
66// callee-save so will be preserved through all function calls and only needs
67// to be obtained once.
68
// Acquire/release the current thread's time_lock spinlock (thread in R12).
// Both clobber caller-saved registers via the spinlock calls.
69#define LOCK_THREAD_TIME()										\
70	leaq	THREAD_time_lock(%r12), %rdi;						\
71	call	acquire_spinlock;
72
73#define UNLOCK_THREAD_TIME()									\
74	leaq	THREAD_time_lock(%r12), %rdi;						\
75	call	release_spinlock;									\
76
// Charge (now - thread->last_time) to the thread's user time, advance
// last_time, and mark the thread as now running in the kernel — all under
// the thread's time_lock. R13 (callee-save) holds the system_time() result
// across the bookkeeping; caller-saved registers are clobbered by the calls.
77#define UPDATE_THREAD_USER_TIME()								\
78	LOCK_THREAD_TIME()											\
79																\
80	call	system_time;										\
81	movq	%rax, %r13;											\
82																\
83	/* thread->user_time += now - thread->last_time; */			\
84	subq	THREAD_last_time(%r12), %rax;						\
85	addq	%rax, THREAD_user_time(%r12);						\
86																\
87	/* thread->last_time = now; */								\
88	movq	%r13, THREAD_last_time(%r12);						\
89																\
90	/* thread->in_kernel = true; */								\
91	movb	$1, THREAD_in_kernel(%r12);							\
92																\
93	UNLOCK_THREAD_TIME()
94
// Mirror of UPDATE_THREAD_USER_TIME for the kernel-exit direction: charge
// (now - thread->last_time) to kernel time, advance last_time, and mark the
// thread as back in user mode. Requires the thread pointer in R12; uses R13
// (callee-save) to keep the system_time() result across the arithmetic.
95#define UPDATE_THREAD_KERNEL_TIME()								\
96	LOCK_THREAD_TIME()											\
97																\
98	call	system_time;										\
99	movq	%rax, %r13;											\
100																\
101	/* thread->kernel_time += now - thread->last_time; */		\
102	subq	THREAD_last_time(%r12), %rax;						\
103	addq	%rax, THREAD_kernel_time(%r12);						\
104																\
105	/* thread->last_time = now; */								\
106	movq	%r13, THREAD_last_time(%r12);						\
107																\
108	/* thread->in_kernel = false; */							\
109	movb	$0, THREAD_in_kernel(%r12);							\
110																\
111	UNLOCK_THREAD_TIME()
112
// On kernel entry from user mode: if the thread has hardware breakpoints
// installed or is single-stepping, call out to disable user debugging state
// while we run in the kernel. Requires the thread pointer in R12.
113#define STOP_USER_DEBUGGING()									\
114	testl	$(THREAD_FLAGS_BREAKPOINTS_INSTALLED				\
115			| THREAD_FLAGS_SINGLE_STEP), THREAD_flags(%r12);	\
116	jz		1f;													\
117	call	x86_exit_user_debug_at_kernel_entry;				\
118  1:
119
// Zero all sixteen XMM registers. Used on syscall return paths that do not
// restore the saved FPU image (e.g. before SYSRET), so that stale kernel
// FPU contents are not handed back to userland.
120#define CLEAR_FPU_STATE() \
121	pxor %xmm0, %xmm0; \
122	pxor %xmm1, %xmm1; \
123	pxor %xmm2, %xmm2; \
124	pxor %xmm3, %xmm3; \
125	pxor %xmm4, %xmm4; \
126	pxor %xmm5, %xmm5; \
127	pxor %xmm6, %xmm6; \
128	pxor %xmm7, %xmm7; \
129	pxor %xmm8, %xmm8; \
130	pxor %xmm9, %xmm9; \
131	pxor %xmm10, %xmm10; \
132	pxor %xmm11, %xmm11; \
133	pxor %xmm12, %xmm12; \
134	pxor %xmm13, %xmm13; \
135	pxor %xmm14, %xmm14; \
136	pxor %xmm15, %xmm15
137
138// The following code defines the interrupt service routines for all 256
139// interrupts. It creates a block of handlers, each 16 bytes, that the IDT
140// initialization code just loops through.
141
142// Interrupt with no error code, pushes a 0 error code.
// Each stub is 16-byte aligned so that the IDT setup can compute stub
// addresses by index. The stub normalizes the stack to
// (vector, error code, hardware frame) and jumps to the common handler.
// NOTE(review): ASM_CLAC presumably expands to a (patched-in) `clac` to
// clear RFLAGS.AC on SMAP-capable CPUs — see arch_altcodepatch.h to confirm.
143#define DEFINE_ISR(nr)					\
144	.align 16;							\
145	ASM_CLAC							\
146	push	$0;							\
147	push	$nr;						\
148	jmp		int_bottom;
149
150// Interrupt with an error code.
// Same as DEFINE_ISR, but the CPU has already pushed the error code, so
// only the vector number is added here.
151#define DEFINE_ISR_E(nr)				\
152	.align 16;							\
153	ASM_CLAC							\
154	push	$nr;						\
155	jmp		int_bottom;
156
157// Array of interrupt service routines.
// Vectors 8, 10-14 and 17 use DEFINE_ISR_E because the CPU pushes an error
// code for those exceptions; all others get a synthesized 0 error code.
158.align 16
159SYMBOL(isr_array):
160	// Exceptions (0-19) and reserved interrupts (20-31).
161	DEFINE_ISR(0)
162	DEFINE_ISR(1)
163	DEFINE_ISR(2)
164	DEFINE_ISR(3)
165	DEFINE_ISR(4)
166	DEFINE_ISR(5)
167	DEFINE_ISR(6)
168	DEFINE_ISR(7)
169	DEFINE_ISR_E(8)
170	DEFINE_ISR(9)
171	DEFINE_ISR_E(10)
172	DEFINE_ISR_E(11)
173	DEFINE_ISR_E(12)
174	DEFINE_ISR_E(13)
175	DEFINE_ISR_E(14)
176	DEFINE_ISR(15)
177	DEFINE_ISR(16)
178	DEFINE_ISR_E(17)
179	DEFINE_ISR(18)
180	DEFINE_ISR(19)
181	DEFINE_ISR(20)
182	DEFINE_ISR(21)
183	DEFINE_ISR(22)
184	DEFINE_ISR(23)
185	DEFINE_ISR(24)
186	DEFINE_ISR(25)
187	DEFINE_ISR(26)
188	DEFINE_ISR(27)
189	DEFINE_ISR(28)
190	DEFINE_ISR(29)
191	DEFINE_ISR(30)
192	DEFINE_ISR(31)
193
194	// User-defined ISRs (32-255) - none take an error code.
	// The .rept expands 224 DEFINE_ISR stubs, one for each vector 32..255.
195	.Lintr = 32
196	.rept 224
197		DEFINE_ISR(.Lintr)
198		.Lintr = .Lintr+1
199	.endr
200
201
202// Common interrupt handling code.
// Entered from the ISR stubs with (vector, error code) already pushed on top
// of the hardware interrupt frame. This path handles interrupts that arrived
// while already in kernel mode; user-mode arrivals branch off immediately.
203STATIC_FUNCTION(int_bottom):
204	// Coming from user-mode requires special handling.
	// 24(%rsp) is the saved CS: the stub's vector and error code occupy
	// bytes 0-15 and the saved RIP is at 16. A non-zero privilege level in
	// CS's low two bits means the interrupt came from user mode.
205	testl	$3, 24(%rsp)
206	jnz		int_bottom_user
207
208	// Push the rest of the interrupt frame to the stack.
209	PUSH_IFRAME_BOTTOM(IFRAME_TYPE_OTHER)
210
211	cld
212
213	// Frame pointer is the iframe.
214	movq	%rsp, %rbp
215
216	// Set the RF (resume flag) in RFLAGS. This prevents an instruction
217	// breakpoint on the instruction we're returning to to trigger a debug
218	// exception.
219	orq		$X86_EFLAGS_RESUME, IFRAME_flags(%rbp)
220
221	// xsave needs a 64-byte alignment
	// Carve a gFPUSaveLength-byte FPU save area out of the stack and zero it
	// (rep stosq, gFPUSaveLength/8 quadwords) before saving into it.
222	andq	$~63, %rsp
223	movq	(gFPUSaveLength), %rcx
224	subq	%rcx, %rsp
225	leaq	(%rsp), %rdi
226	shrq	$3, %rcx
227	movq	$0, %rax
228	rep stosq
	// gXsaveMask goes in EDX:EAX — the feature mask operand used if the
	// fxsaveq below has been altcodepatched into an xsave variant at boot.
229	movl	(gXsaveMask), %eax
230	movl	(gXsaveMask+4), %edx
231	movq	%rsp, %rdi
232	CODEPATCH_START
233	fxsaveq	(%rdi)
234	CODEPATCH_END(ALTCODEPATCH_TAG_XSAVE)
235
236	// Call the interrupt handler.
	// Indirect call through gInterruptHandlerTable indexed by the vector
	// number (8-byte entries); the iframe pointer is the only argument.
237	movq	%rbp, %rdi
238	movq	IFRAME_vector(%rbp), %rax
239	call	*gInterruptHandlerTable(, %rax, 8)
240
	// Restore the FPU state saved above (fxrstorq may likewise be patched
	// to an xrstor variant; mask again in EDX:EAX), then drop the save area.
241	movl	(gXsaveMask), %eax
242	movl	(gXsaveMask+4), %edx
243	movq	%rsp, %rdi
244	CODEPATCH_START
245	fxrstorq	(%rdi)
246	CODEPATCH_END(ALTCODEPATCH_TAG_XRSTOR)
247	movq	%rbp, %rsp
248
249	// Restore the saved registers.
250	RESTORE_IFRAME()
251
252	iretq
253FUNCTION_END(int_bottom)
254
255
256// Handler for an interrupt that occurred in user-mode.
// Builds the iframe, saves FPU state (recording its location in the iframe
// so signal/debug code can find it), performs user-time accounting, runs the
// handler, then takes either a fast path or the kernel-exit-work slow path
// back to userland.
257STATIC_FUNCTION(int_bottom_user):
258	// Load the kernel GS segment base.
259	swapgs
	// NOTE(review): lfence here acts as a speculation barrier right after
	// swapgs — presumably a Spectre/SWAPGS mitigation; confirm against the
	// corresponding kernel hardening change.
260	lfence
261
262	// Push the rest of the interrupt frame to the stack.
263	PUSH_IFRAME_BOTTOM(IFRAME_TYPE_OTHER)
264	cld
265
266	// Frame pointer is the iframe.
267	movq	%rsp, %rbp
268
269	// xsave needs a 64-byte alignment
	// Carve out and zero a gFPUSaveLength-byte save area, as in int_bottom.
270	andq	$~63, %rsp
271	movq	(gFPUSaveLength), %rcx
272	subq	%rcx, %rsp
273	leaq	(%rsp), %rdi
274	shrq	$3, %rcx
275	movq	$0, %rax
276	rep stosq
277	movl	(gXsaveMask), %eax
278	movl	(gXsaveMask+4), %edx
279
280	movq	%rsp, %rdi
281	CODEPATCH_START
282	fxsaveq	(%rdi)
283	CODEPATCH_END(ALTCODEPATCH_TAG_XSAVE)
284
	// Record where the user FPU state was saved in the iframe.
285	movq	%rsp, IFRAME_fpu(%rbp)
286
287	// Set the RF (resume flag) in RFLAGS. This prevents an instruction
288	// breakpoint on the instruction we're returning to to trigger a debug
289	// exception.
290	orq		$X86_EFLAGS_RESUME, IFRAME_flags(%rbp)
291
292	// Get thread pointer.
	// %gs:0 holds the current thread pointer; kept in callee-saved R12 as
	// required by the STOP_USER_DEBUGGING/UPDATE_THREAD_* macros.
293	movq	%gs:0, %r12
294
295	STOP_USER_DEBUGGING()
296	UPDATE_THREAD_USER_TIME()
297
298	// Call the interrupt handler.
299	movq	%rbp, %rdi
300	movq	IFRAME_vector(%rbp), %rax
301	call	*gInterruptHandlerTable(, %rax, 8)
302
303	// If there are no signals pending or we're not debugging, we can avoid
304	// most of the work here, just need to update the kernel time.
305	testl	$(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
306			| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \
307			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
308			, THREAD_flags(%r12)
309	jnz		.Lkernel_exit_work

	// Fast path: interrupts off for the final accounting + return sequence.
310
311	cli
312
313	UPDATE_THREAD_KERNEL_TIME()
314
	// Restore the user FPU state saved on entry (RSP still points at it).
315	movl	(gXsaveMask), %eax
316	movl	(gXsaveMask+4), %edx
317	movq	%rsp, %rdi
318	CODEPATCH_START
319	fxrstorq	(%rdi)
320	CODEPATCH_END(ALTCODEPATCH_TAG_XRSTOR)
321	movq	%rbp, %rsp
322
323	// Restore the saved registers.
324	RESTORE_IFRAME()
325
326	// Restore the previous GS base and return.
327	swapgs
328	iretq
329
330.Lkernel_exit_work:
331	// Slow path for return to userland.
332
333	// Do we need to handle signals?
334	testl	$(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD \
335			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
336			, THREAD_flags(%r12)
337	jnz		.Lkernel_exit_handle_signals
338	cli
339	call	thread_at_kernel_exit_no_signals
340
341.Lkernel_exit_work_done:
342	// Install breakpoints, if defined.
343	testl	$THREAD_FLAGS_BREAKPOINTS_DEFINED, THREAD_flags(%r12)
344	jz		1f
345	movq	%rbp, %rdi
346	call	x86_init_user_debug_at_kernel_exit
3471:
	// Restore the user FPU state and unwind to the iframe, as on the fast
	// path above.
348	movl	(gXsaveMask), %eax
349	movl	(gXsaveMask+4), %edx
350	movq	%rsp, %rdi
351	CODEPATCH_START
352	fxrstorq	(%rdi)
353	CODEPATCH_END(ALTCODEPATCH_TAG_XRSTOR)
354	movq	%rbp, %rsp
355
356	// Restore the saved registers.
357	RESTORE_IFRAME()
358
359	// Restore the previous GS base and return.
360	swapgs
361	iretq
362
363.Lkernel_exit_handle_signals:
364	// thread_at_kernel_exit requires interrupts to be enabled, it will disable
365	// them after.
366	sti
367	call	thread_at_kernel_exit
368	jmp		.Lkernel_exit_work_done
369FUNCTION_END(int_bottom_user)
370
371
372// SYSCALL entry point.
// Builds a synthetic iframe (pseudo-vector 99) mirroring the interrupt
// entry layout, dispatches through kSyscallInfos, and returns via SYSRET on
// the fast path or IRET when the frame may have been modified (signals,
// debugger, restarted syscalls). Interrupts are disabled on entry (re-enabled
// below once the iframe is complete).
373FUNCTION(x86_64_syscall_entry):
374	// Upon entry, RSP still points at the user stack.  Load the kernel GS
375	// segment base address, which points at the current thread's arch_thread
376	// structure. This contains our kernel stack pointer and a temporary
377	// scratch space to store the user stack pointer in before we can push it
378	// to the stack.
379	swapgs
380	movq	%rsp, %gs:ARCH_THREAD_user_rsp
381	movq	%gs:ARCH_THREAD_syscall_rsp, %rsp
382
383	// The following pushes de-align the stack by 8 bytes, so account for that first.
384	sub 	$8, %rsp
385
386	// Set up an iframe on the stack (R11 = saved RFLAGS, RCX = saved RIP).
387	push	$USER_DATA_SELECTOR			// ss
388	push	%gs:ARCH_THREAD_user_rsp	// rsp
389	push	%r11						// flags
390	push	$USER_CODE_SELECTOR			// cs
391	push	%rcx						// ip
392	push	$0							// error_code
	// 99 is a pseudo vector number marking this iframe as a syscall frame.
393	push	$99							// vector
394	PUSH_IFRAME_BOTTOM(IFRAME_TYPE_SYSCALL)
395
396	cld
397
398	// Frame pointer is the iframe.
399	movq	%rsp, %rbp
400
401	// Preserve call number (R14 is callee-save), get thread pointer.
402	movq	%rax, %r14
403	movq	%gs:0, %r12
404
405	STOP_USER_DEBUGGING()
406	UPDATE_THREAD_USER_TIME()
407
408	// No longer need interrupts disabled.
409	sti
410
411	// Check whether the syscall number is valid.
412	cmpq	$SYSCALL_COUNT, %r14
413	jae		.Lsyscall_return
414
415	// Get the system call table entry. Note I'm hardcoding the shift because
416	// sizeof(syscall_info) is 16 and scale factors of 16 aren't supported,
417	// so can't just do leaq kSyscallInfos(, %rax, SYSCALL_INFO_sizeof).
418	movq	%r14, %rax
419	shlq	$4, %rax
420	leaq	kSyscallInfos(, %rax, 1), %rax
421
422	// Check the number of call arguments, greater than 6 (6 * 8 = 48) requires
423	// a stack copy.
424	movq	SYSCALL_INFO_parameter_size(%rax), %rcx
425	cmpq	$48, %rcx
426	ja		.Lsyscall_stack_args
427
428.Lperform_syscall:
429	testl	$THREAD_FLAGS_DEBUGGER_INSTALLED, THREAD_flags(%r12)
430	jnz		.Lpre_syscall_debug
431
432.Lpre_syscall_debug_done:
433	// Restore the arguments from the iframe. UPDATE_THREAD_USER_TIME() makes
434	// 2 function calls which means they may have been overwritten. Note that
435	// argument 4 is in R10 on the frame rather than RCX as RCX is used by
436	// SYSCALL.
437	movq	IFRAME_di(%rbp), %rdi
438	movq	IFRAME_si(%rbp), %rsi
439	movq	IFRAME_dx(%rbp), %rdx
440	movq	IFRAME_r10(%rbp), %rcx
441	movq	IFRAME_r8(%rbp), %r8
442	movq	IFRAME_r9(%rbp), %r9
443
444	// TODO: pre-syscall tracing
445
446	// Call the function and save its return value.
	// RAX still holds the kSyscallInfos entry address from above.
447	call	*SYSCALL_INFO_function(%rax)
448	movq	%rax, IFRAME_ax(%rbp)
449
450	// TODO: post-syscall tracing
451
452.Lsyscall_return:
453	// Restore the original stack pointer and return.
454	movq	%rbp, %rsp
455
456	// Clear the restarted flag.
	// lock-cmpxchg loop: atomically clear THREAD_FLAGS_SYSCALL_RESTARTED,
	// retrying if THREAD_flags was modified concurrently.
457	testl	$THREAD_FLAGS_SYSCALL_RESTARTED, THREAD_flags(%r12)
458	jz		2f
4591:
460	movl	THREAD_flags(%r12), %eax
461	movl	%eax, %edx
462	andl	$~THREAD_FLAGS_SYSCALL_RESTARTED, %edx
463	lock
464	cmpxchgl	%edx, THREAD_flags(%r12)
465	jnz		1b
4662:
467	testl	$(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
468			| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \
469			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP | THREAD_FLAGS_RESTART_SYSCALL) \
470			, THREAD_flags(%r12)
471	jnz		.Lpost_syscall_work
472
473	cli
474
475	UPDATE_THREAD_KERNEL_TIME()
476
477	// If we've just restored a signal frame, use the IRET path.
478	cmpq	$SYSCALL_RESTORE_SIGNAL_FRAME, %r14
479	je		.Lrestore_fpu
480
481	CLEAR_FPU_STATE()
482
483	// Restore the iframe and RCX/R11 for SYSRET.
	// After RESTORE_IFRAME the stack is rip/cs/flags/rsp/ss: pop the user
	// RIP into RCX, skip cs, pop RFLAGS into R11 (both consumed by SYSRET),
	// then pop the user stack pointer itself.
484	RESTORE_IFRAME()
485	pop		%rcx
486	addq	$8, %rsp
487	pop		%r11
488	pop		%rsp
489
490	// Restore previous GS base and return.
491	swapgs
492	sysretq
493
494.Lpre_syscall_debug:
495	// user_debug_pre_syscall expects a pointer to a block of arguments, need
496	// to push the register arguments onto the stack.
497	push	IFRAME_r9(%rbp)
498	push	IFRAME_r8(%rbp)
499	push	IFRAME_r10(%rbp)
500	push	IFRAME_dx(%rbp)
501	push	IFRAME_si(%rbp)
502	push	IFRAME_di(%rbp)
503	movq	%r14, %rdi				// syscall number
504	movq	%rsp, %rsi
	// Preserve RAX (the syscall table entry) across the call; the extra
	// subq keeps the stack adjustment balanced with the addq $56 below.
505	subq	$8, %rsp
506	push	%rax
507	call	user_debug_pre_syscall
508	pop		%rax
509	addq	$56, %rsp
510	jmp		.Lpre_syscall_debug_done
511
512.Lpost_syscall_work:
513	testl	$THREAD_FLAGS_DEBUGGER_INSTALLED, THREAD_flags(%r12)
514	jz		1f
515
516	// Post-syscall debugging. Same as above, need a block of arguments.
517	push	IFRAME_r9(%rbp)
518	push	IFRAME_r8(%rbp)
519	push	IFRAME_r10(%rbp)
520	push	IFRAME_dx(%rbp)
521	push	IFRAME_si(%rbp)
522	push	IFRAME_di(%rbp)
523	movq	%r14, %rdi				// syscall number
524	movq	%rsp, %rsi
525	movq	IFRAME_ax(%rbp), %rdx	// return value
526	call	user_debug_post_syscall
527	addq	$48, %rsp
5281:
529	// Do we need to handle signals?
530	testl	$(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD \
531			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
532			, THREAD_flags(%r12)
533	jnz		.Lpost_syscall_handle_signals
534	cli
535	call	thread_at_kernel_exit_no_signals
536
537.Lpost_syscall_work_done:
538	// Handle syscall restarting.
539	testl	$THREAD_FLAGS_RESTART_SYSCALL, THREAD_flags(%r12)
540	jz		1f
541	movq	%rsp, %rdi
542	call	x86_restart_syscall
5431:
544	// Install breakpoints, if defined.
545	testl	$THREAD_FLAGS_BREAKPOINTS_DEFINED, THREAD_flags(%r12)
546	jz		1f
547	movq	%rbp, %rdi
548	call	x86_init_user_debug_at_kernel_exit
5491:
550	// On this return path it is possible that the frame has been modified,
551	// for example to execute a signal handler. In this case it is safer to
552	// return via IRET.
553	CLEAR_FPU_STATE()
554	jmp .Liret
555
556.Lrestore_fpu:
	// Restore the FPU state recorded in the iframe (set up when the signal
	// frame was built), then return via IRET.
557	movq	IFRAME_fpu(%rbp), %rdi
558
559	movl	(gXsaveMask), %eax
560	movl	(gXsaveMask+4), %edx
561	CODEPATCH_START
562	fxrstorq	(%rdi)
563	CODEPATCH_END(ALTCODEPATCH_TAG_XRSTOR)
564.Liret:
565	// Restore the saved registers.
566	RESTORE_IFRAME()
567
568	// Restore the previous GS base and return.
569	swapgs
570	iretq
571
572.Lpost_syscall_handle_signals:
573	call	thread_at_kernel_exit
574	jmp		.Lpost_syscall_work_done
575
576.Lsyscall_stack_args:
577	// Some arguments are on the stack, work out what we need to copy. 6
578	// arguments (48 bytes) are already in registers.
579	// RAX = syscall table entry address, RCX = argument size.
580	subq	$48, %rcx
581
582	// Get the address to copy from.
	// Skip the return address on the user stack, then bounds-check the
	// source pointer against the top of the user address space.
583	movq	IFRAME_user_sp(%rbp), %rsi
584	addq	$8, %rsi
585	movabs	$(USER_BASE + USER_SIZE), %rdx
586	cmp		%rdx, %rsi
587	jae		.Lbad_syscall_args
588
589	// Make space on the stack.
590	subq	%rcx, %rsp
591	andq	$~15, %rsp
592	movq	%rsp, %rdi
593
594	// Set a fault handler.
	// A page fault during the user copy unwinds to .Lbad_syscall_args
	// instead of panicking.
595	movq	$.Lbad_syscall_args, THREAD_fault_handler(%r12)

	// NOTE(review): ASM_STAC/ASM_CLAC presumably toggle RFLAGS.AC (SMAP) to
	// permit this explicit user-memory access — confirm in arch_altcodepatch.h.
596
597	ASM_STAC
598
599	// Copy them by quadwords.
600	shrq	$3, %rcx
601	rep
602	movsq
603	ASM_CLAC
604	movq	$0, THREAD_fault_handler(%r12)
605
606	// Perform the call.
607	jmp		.Lperform_syscall
608
609.Lbad_syscall_args:
	// Fault-handler target: clear the handler, drop any partial copy and
	// return to the caller (the syscall is not executed).
610	movq	$0, THREAD_fault_handler(%r12)
611	movq	%rbp, %rsp
612	jmp		.Lsyscall_return
613FUNCTION_END(x86_64_syscall_entry)
614
615
616/*!	\fn void x86_return_to_userland(iframe* frame)
617	\brief Returns to the userland environment given by \a frame.
618
619	Before returning to userland all potentially necessary kernel exit work is
620	done.
621
622	\a frame must point to a location somewhere on the caller's stack (e.g. a
623	local variable).
624	The function must be called with interrupts disabled.
625
626	\param frame The iframe defining the userland environment.
627*/
628FUNCTION(x86_return_to_userland):
	// Adopt the given iframe as both frame pointer and stack pointer; from
	// here on this looks like the tail of the interrupt return path.
629	movq	%rdi, %rbp
630	movq	%rbp, %rsp
631
632	// Perform kernel exit work.
	// %gs:0 is the current thread pointer, kept in callee-saved R12 for the
	// macros below.
633	movq	%gs:0, %r12
634	testl	$(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
635			| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \
636			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
637			, THREAD_flags(%r12)
638	jnz		.Luserland_return_work
639
640	// update the thread's kernel time and return
641	UPDATE_THREAD_KERNEL_TIME()
642
643	// Restore the frame and return.
644	RESTORE_IFRAME()
645	swapgs
646	iretq
647.Luserland_return_work:
648	// Slow path for return to userland.
649
650	// Do we need to handle signals?
651	testl	$(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD \
652			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
653			, THREAD_flags(%r12)
654	jnz		.Luserland_return_handle_signals
655	cli
656	call	thread_at_kernel_exit_no_signals
657
658.Luserland_return_work_done:
659	// Install breakpoints, if defined.
660	testl	$THREAD_FLAGS_BREAKPOINTS_DEFINED, THREAD_flags(%r12)
661	jz		1f
662	movq	%rbp, %rdi
663	call	x86_init_user_debug_at_kernel_exit
6641:
665	// Restore the saved registers.
666	RESTORE_IFRAME()
667
668	// Restore the previous GS base and return.
669	swapgs
670	iretq
671.Luserland_return_handle_signals:
672	// thread_at_kernel_exit requires interrupts to be enabled, it will disable
673	// them after.
674	sti
675	call	thread_at_kernel_exit
676	jmp		.Luserland_return_work_done
677FUNCTION_END(x86_return_to_userland)
678