/*
 * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Distributed under the terms of the MIT License.
 */


#include <asm_defs.h>

#include <thread_types.h>

#include <arch/x86/descriptors.h>
#include <arch/x86/arch_altcodepatch.h>
#include <arch/x86/arch_cpu.h>
#include <arch/x86/arch_kernel.h>

#include "asm_offsets.h"
#include "syscall_numbers.h"
#include "syscall_table.h"


// Push the remainder of the interrupt frame onto the stack.
#define PUSH_IFRAME_BOTTOM(iframeType)	\
	push	%rax;	/* orig_rax */		\
	push	%rax;						\
	push	%rbx;						\
	push	%rcx;						\
	push	%rdx;						\
	push	%rdi;						\
	push	%rsi;						\
	push	%rbp;						\
	push	%r8;						\
	push	%r9;						\
	push	%r10;						\
	push	%r11;						\
	push	%r12;						\
	push	%r13;						\
	push	%r14;						\
	push	%r15;						\
	pushq	$0;							\
	push	$iframeType;
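// Together with what the CPU and the ISR stubs have already pushed (ss, rsp,
// flags, cs, ip, error code, vector), this completes the iframe. A second copy
// of RAX is kept as orig_rax (e.g. so the original value is still available
// after a syscall stores its return value in ax), the pushq $0 reserves the
// frame's FPU save-area pointer (filled in on the user-mode paths via
// IFRAME_fpu), and the final push records the frame type.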


// Restore the interrupt frame.
#define RESTORE_IFRAME()				\
	add		$16, %rsp;					\
	pop		%r15;						\
	pop		%r14;						\
	pop		%r13;						\
	pop		%r12;						\
	pop		%r11;						\
	pop		%r10;						\
	pop		%r9;						\
	pop		%r8;						\
	pop		%rbp;						\
	pop		%rsi;						\
	pop		%rdi;						\
	pop		%rdx;						\
	pop		%rcx;						\
	pop		%rbx;						\
	pop		%rax;						\
	addq	$24, %rsp;
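// The add $16 skips the type and fpu fields at the bottom of the frame; the
// final addq $24 drops orig_rax, the vector and the error code, leaving ip,
// cs, flags, rsp and ss on top, ready for iretq (or for the SYSRET path in the
// syscall exit below).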


// The macros below require R12 to contain the current thread pointer. R12 is
// callee-save, so it is preserved across all function calls and only needs to
// be obtained once. R13 is used to store the system call start time and is
// likewise preserved.

#define LOCK_THREAD_TIME()										\
	leaq	THREAD_time_lock(%r12), %rdi;						\
	call	acquire_spinlock;

#define UNLOCK_THREAD_TIME()									\
	leaq	THREAD_time_lock(%r12), %rdi;						\
	call	release_spinlock;									\

#define UPDATE_THREAD_USER_TIME()								\
	LOCK_THREAD_TIME()											\
																\
	call	system_time;										\
																\
	/* Preserve system_time for post syscall debug */			\
	movq	%rax, %r13;											\
																\
	/* thread->user_time += now - thread->last_time; */			\
	subq	THREAD_last_time(%r12), %rax;						\
	addq	%rax, THREAD_user_time(%r12);						\
																\
	/* thread->last_time = now; */								\
	movq	%r13, THREAD_last_time(%r12);						\
																\
	/* thread->in_kernel = true; */								\
	movb	$1, THREAD_in_kernel(%r12);							\
																\
	UNLOCK_THREAD_TIME()

#define UPDATE_THREAD_KERNEL_TIME()								\
	LOCK_THREAD_TIME()											\
																\
	call	system_time;										\
	movq	%rax, %r13;											\
																\
	/* thread->kernel_time += now - thread->last_time; */		\
	subq	THREAD_last_time(%r12), %rax;						\
	addq	%rax, THREAD_kernel_time(%r12);						\
																\
	/* thread->last_time = now; */								\
	movq	%r13, THREAD_last_time(%r12);						\
																\
	/* thread->in_kernel = false; */							\
	movb	$0, THREAD_in_kernel(%r12);							\
																\
	UNLOCK_THREAD_TIME()

#define STOP_USER_DEBUGGING()									\
	testl	$(THREAD_FLAGS_BREAKPOINTS_INSTALLED				\
			| THREAD_FLAGS_SINGLE_STEP), THREAD_flags(%r12);	\
	jz		1f;													\
	call	x86_exit_user_debug_at_kernel_entry;				\
  1:

#define CLEAR_FPU_STATE() \
	pxor %xmm0, %xmm0; \
	pxor %xmm1, %xmm1; \
	pxor %xmm2, %xmm2; \
	pxor %xmm3, %xmm3; \
	pxor %xmm4, %xmm4; \
	pxor %xmm5, %xmm5; \
	pxor %xmm6, %xmm6; \
	pxor %xmm7, %xmm7; \
	pxor %xmm8, %xmm8; \
	pxor %xmm9, %xmm9; \
	pxor %xmm10, %xmm10; \
	pxor %xmm11, %xmm11; \
	pxor %xmm12, %xmm12; \
	pxor %xmm13, %xmm13; \
	pxor %xmm14, %xmm14; \
	pxor %xmm15, %xmm15
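// Used on the syscall return paths, where no FPU state was saved on entry: the
// XMM registers are caller-saved in the System V ABI, so instead of restoring
// the user's values they are simply zeroed, which also avoids leaking kernel
// register contents back to userland.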

// The following code defines the interrupt service routines for all 256
// interrupts. It creates a block of handlers, each 16 bytes, that the IDT
// initialization code just loops through.
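// Since each stub fits within its 16-byte slot, the handler address for
// vector n is simply isr_array + n * 16.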

// Interrupt with no error code, pushes a 0 error code.
#define DEFINE_ISR(nr)					\
	.align 16;							\
	ASM_CLAC							\
	push	$0;							\
	push	$nr;						\
	jmp		int_bottom;

// Interrupt with an error code.
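// The CPU has already pushed the error code for these vectors, so only the
// vector number is needed to complete the frame.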
#define DEFINE_ISR_E(nr)				\
	.align 16;							\
	ASM_CLAC							\
	push	$nr;						\
	jmp		int_bottom;

// Array of interrupt service routines.
.align 16
SYMBOL(isr_array):
	// Exceptions (0-19) and reserved interrupts (20-31).
	DEFINE_ISR(0)
	DEFINE_ISR(1)
	DEFINE_ISR(2)
	DEFINE_ISR(3)
	DEFINE_ISR(4)
	DEFINE_ISR(5)
	DEFINE_ISR(6)
	DEFINE_ISR(7)
	DEFINE_ISR_E(8)
	DEFINE_ISR(9)
	DEFINE_ISR_E(10)
	DEFINE_ISR_E(11)
	DEFINE_ISR_E(12)
	DEFINE_ISR_E(13)
	DEFINE_ISR_E(14)
	DEFINE_ISR(15)
	DEFINE_ISR(16)
	DEFINE_ISR_E(17)
	DEFINE_ISR(18)
	DEFINE_ISR(19)
	DEFINE_ISR(20)
	DEFINE_ISR(21)
	DEFINE_ISR(22)
	DEFINE_ISR(23)
	DEFINE_ISR(24)
	DEFINE_ISR(25)
	DEFINE_ISR(26)
	DEFINE_ISR(27)
	DEFINE_ISR(28)
	DEFINE_ISR(29)
	DEFINE_ISR(30)
	DEFINE_ISR(31)

	// User-defined ISRs (32-255) - none take an error code.
	.Lintr = 32
	.rept 224
		DEFINE_ISR(.Lintr)
		.Lintr = .Lintr+1
	.endr


// Common interrupt handling code.
STATIC_FUNCTION(int_bottom):
	// Coming from user-mode requires special handling.
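	// The CPU pushed ss, rsp, flags, cs and ip, and the ISR stub added an
	// error code and the vector number, so the saved cs is at 24(%rsp); its
	// low two bits hold the privilege level of the interrupted code.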
	testl	$3, 24(%rsp)
	jnz		int_bottom_user

	// Push the rest of the interrupt frame to the stack.
	PUSH_IFRAME_BOTTOM(IFRAME_TYPE_OTHER)

	cld

	// Frame pointer is the iframe.
	movq	%rsp, %rbp

	// Set the RF (resume flag) in RFLAGS. This prevents an instruction
	// breakpoint on the instruction we're returning to from triggering a
	// debug exception.
	orq		$X86_EFLAGS_RESUME, IFRAME_flags(%rbp)

	// xsave needs a 64-byte alignment
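	// A save area of gFPUSaveLength bytes is carved out below the iframe and
	// zeroed. gXsaveMask is loaded into EDX:EAX because that register pair is
	// the requested-feature mask for XSAVE; plain FXSAVE ignores it. The
	// fxsaveq below is presumably patched to an XSAVE variant at boot
	// (ALTCODEPATCH_TAG_XSAVE) on CPUs that support it.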
	andq	$~63, %rsp
	movq	(gFPUSaveLength), %rcx
	subq	%rcx, %rsp
	leaq	(%rsp), %rdi
	shrq	$3, %rcx
	movq	$0, %rax
	rep stosq
	movl	(gXsaveMask), %eax
	movl	(gXsaveMask+4), %edx
	movq	%rsp, %rdi
	CODEPATCH_START
	fxsaveq	(%rdi)
	CODEPATCH_END(ALTCODEPATCH_TAG_XSAVE)

	// Call the interrupt handler.
	movq	%rbp, %rdi
	movq	IFRAME_vector(%rbp), %rax
	call	*gInterruptHandlerTable(, %rax, 8)

	movl	(gXsaveMask), %eax
	movl	(gXsaveMask+4), %edx
	movq	%rsp, %rdi
	CODEPATCH_START
	fxrstorq	(%rdi)
	CODEPATCH_END(ALTCODEPATCH_TAG_XRSTOR)
	movq	%rbp, %rsp

	// Restore the saved registers.
	RESTORE_IFRAME()

	iretq
FUNCTION_END(int_bottom)


// Handler for an interrupt that occurred in user-mode.
STATIC_FUNCTION(int_bottom_user):
	// Load the kernel GS segment base.
	swapgs
	lfence

	// Push the rest of the interrupt frame to the stack.
	PUSH_IFRAME_BOTTOM(IFRAME_TYPE_OTHER)
	cld

	// Frame pointer is the iframe.
	movq	%rsp, %rbp

	// xsave needs a 64-byte alignment
	andq	$~63, %rsp
	movq	(gFPUSaveLength), %rcx
	subq	%rcx, %rsp
	leaq	(%rsp), %rdi
	shrq	$3, %rcx
	movq	$0, %rax
	rep stosq
	movl	(gXsaveMask), %eax
	movl	(gXsaveMask+4), %edx

	movq	%rsp, %rdi
	CODEPATCH_START
	fxsaveq	(%rdi)
	CODEPATCH_END(ALTCODEPATCH_TAG_XSAVE)

	movq	%rsp, IFRAME_fpu(%rbp)

	// Set the RF (resume flag) in RFLAGS. This prevents an instruction
	// breakpoint on the instruction we're returning to from triggering a
	// debug exception.
	orq		$X86_EFLAGS_RESUME, IFRAME_flags(%rbp)

	// Get thread pointer.
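	// The kernel GS base points at the current thread's arch_thread structure
	// (see the SYSCALL entry below); its first field apparently holds the
	// Thread pointer, so %gs:0 yields the current thread.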
	movq	%gs:0, %r12

	STOP_USER_DEBUGGING()
	UPDATE_THREAD_USER_TIME()

	// Call the interrupt handler.
	movq	%rbp, %rdi
	movq	IFRAME_vector(%rbp), %rax
	call	*gInterruptHandlerTable(, %rax, 8)

	// If no signals are pending and we're not being debugged, we can avoid
	// most of the work here; we just need to update the kernel time.
	testl	$(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
			| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \
			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
			, THREAD_flags(%r12)
	jnz		.Lkernel_exit_work

	cli

	UPDATE_THREAD_KERNEL_TIME()

	movl	(gXsaveMask), %eax
	movl	(gXsaveMask+4), %edx
	movq	%rsp, %rdi
	CODEPATCH_START
	fxrstorq	(%rdi)
	CODEPATCH_END(ALTCODEPATCH_TAG_XRSTOR)
	movq	%rbp, %rsp

	// Restore the saved registers.
	RESTORE_IFRAME()

	// Restore the previous GS base and return.
	swapgs
	lfence
	iretq

.Lkernel_exit_work:
	// Slow path for return to userland.

	// Do we need to handle signals?
	testl	$(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD \
			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
			, THREAD_flags(%r12)
	jnz		.Lkernel_exit_handle_signals
	cli
	call	thread_at_kernel_exit_no_signals

.Lkernel_exit_work_done:
	// Install breakpoints, if defined.
	testl	$THREAD_FLAGS_BREAKPOINTS_DEFINED, THREAD_flags(%r12)
	jz		1f
	movq	%rbp, %rdi
	call	x86_init_user_debug_at_kernel_exit
1:
	movl	(gXsaveMask), %eax
	movl	(gXsaveMask+4), %edx
	movq	%rsp, %rdi
	CODEPATCH_START
	fxrstorq	(%rdi)
	CODEPATCH_END(ALTCODEPATCH_TAG_XRSTOR)
	movq	%rbp, %rsp

	// Restore the saved registers.
	RESTORE_IFRAME()

	// Restore the previous GS base and return.
	swapgs
	lfence
	iretq

.Lkernel_exit_handle_signals:
	// thread_at_kernel_exit requires interrupts to be enabled; it will
	// disable them again afterwards.
	sti
	call	thread_at_kernel_exit
	jmp		.Lkernel_exit_work_done
FUNCTION_END(int_bottom_user)


// SYSCALL entry point.
FUNCTION(x86_64_syscall_entry):
	// Upon entry, RSP still points at the user stack. Load the kernel GS
	// segment base address, which points at the current thread's arch_thread
	// structure. This contains our kernel stack pointer and a temporary
	// scratch space to store the user stack pointer in before we can push it
	// to the stack.
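	// SYSCALL itself has already loaded the kernel code and stack selectors,
	// saved the return RIP in RCX and RFLAGS in R11, and cleared the RFLAGS
	// bits selected by the IA32_FMASK MSR; as the sti further down implies,
	// that mask includes IF, so interrupts are disabled at this point.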
	swapgs
	lfence
	movq	%rsp, %gs:ARCH_THREAD_user_rsp
	movq	%gs:ARCH_THREAD_syscall_rsp, %rsp

	// The following pushes de-align the stack by 8 bytes, so account for that first.
	sub 	$8, %rsp

	// Set up an iframe on the stack (R11 = saved RFLAGS, RCX = saved RIP).
	push	$USER_DATA_SELECTOR			// ss
	push	%gs:ARCH_THREAD_user_rsp	// rsp
	push	%r11						// flags
	push	$USER_CODE_SELECTOR			// cs
	push	%rcx						// ip
	push	$0							// error_code
	push	$99							// vector
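	// SYSCALL does not go through the IDT, so the error code and vector above
	// are only placeholders; 99 appears to match the vector the 32-bit kernel
	// uses for its syscall interrupt.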
	PUSH_IFRAME_BOTTOM(IFRAME_TYPE_SYSCALL)

	cld

	// Frame pointer is the iframe.
	movq	%rsp, %rbp

	// Preserve call number (R14 is callee-save), get thread pointer.
	movq	%rax, %r14
	movq	%gs:0, %r12

	STOP_USER_DEBUGGING()
	UPDATE_THREAD_USER_TIME()

	// No longer need interrupts disabled.
	sti

	// Check whether the syscall number is valid.
	cmpq	$SYSCALL_COUNT, %r14
	jae		.Lsyscall_return

	// Get the system call table entry. Note I'm hardcoding the shift because
	// sizeof(syscall_info) is 16 and scale factors of 16 aren't supported,
	// so can't just do leaq kSyscallInfos(, %rax, SYSCALL_INFO_sizeof).
	movq	%r14, %rax
	shlq	$4, %rax
	leaq	kSyscallInfos(, %rax, 1), %rax

	// Check the number of call arguments; more than 6 (6 * 8 = 48 bytes)
	// requires a stack copy.
	movq	SYSCALL_INFO_parameter_size(%rax), %rcx
	cmpq	$48, %rcx
	ja		.Lsyscall_stack_args

.Lperform_syscall:
	testl	$THREAD_FLAGS_DEBUGGER_INSTALLED, THREAD_flags(%r12)
	jnz		.Lpre_syscall_debug

.Lpre_syscall_debug_done:
	// Restore the arguments from the iframe. UPDATE_THREAD_USER_TIME() makes
	// 2 function calls, which means they may have been overwritten. Note that
	// argument 4 is in R10 on the frame rather than RCX as RCX is used by
	// SYSCALL.
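	// (SYSCALL overwrites RCX with the return address, which is why userland
	// passes the fourth argument in R10; it is moved back into RCX below to
	// match the regular C calling convention expected by the handler.)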
	movq	IFRAME_di(%rbp), %rdi
	movq	IFRAME_si(%rbp), %rsi
	movq	IFRAME_dx(%rbp), %rdx
	movq	IFRAME_r10(%rbp), %rcx
	movq	IFRAME_r8(%rbp), %r8
	movq	IFRAME_r9(%rbp), %r9

	// TODO: pre-syscall tracing

	// Call the function and save its return value.
	call	*SYSCALL_INFO_function(%rax)
	movq	%rax, IFRAME_ax(%rbp)

	// TODO: post-syscall tracing

.Lsyscall_return:
	// Restore the original stack pointer and return.
	movq	%rbp, %rsp

	// Clear the restarted flag.
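	// Other flag bits may be updated concurrently (for instance from another
	// CPU delivering a signal), so the flag is cleared with a lock cmpxchg
	// retry loop rather than a plain store.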
	testl	$THREAD_FLAGS_SYSCALL_RESTARTED, THREAD_flags(%r12)
	jz		2f
1:
	movl	THREAD_flags(%r12), %eax
	movl	%eax, %edx
	andl	$~THREAD_FLAGS_SYSCALL_RESTARTED, %edx
	lock
	cmpxchgl	%edx, THREAD_flags(%r12)
	jnz		1b
2:
	testl	$(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
			| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \
			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP | THREAD_FLAGS_RESTART_SYSCALL) \
			, THREAD_flags(%r12)
	jnz		.Lpost_syscall_work

	cli

	UPDATE_THREAD_KERNEL_TIME()

	// If we've just restored a signal frame, use the IRET path.
	cmpq	$SYSCALL_RESTORE_SIGNAL_FRAME, %r14
	je		.Lrestore_fpu

	CLEAR_FPU_STATE()

	// Restore the iframe and RCX/R11 for SYSRET.
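	// After RESTORE_IFRAME() only ip, cs, flags, rsp and ss remain: the return
	// RIP is popped into RCX and the saved RFLAGS into R11 (the registers
	// SYSRET consumes), the cs field is skipped, and the final pop switches
	// back to the user stack.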
	RESTORE_IFRAME()
	pop		%rcx
	addq	$8, %rsp
	pop		%r11
	pop		%rsp

	// Restore previous GS base and return.
	swapgs
	lfence
	sysretq

.Lpre_syscall_debug:
	// user_debug_pre_syscall expects a pointer to a block of arguments, so we
	// need to push the register arguments onto the stack.
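	// Pushing them in reverse order lays the six register arguments out as an
	// in-order block at RSP. RAX (the syscall table entry) is preserved around
	// the call since it is still needed at .Lpre_syscall_debug_done.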
	push	IFRAME_r9(%rbp)
	push	IFRAME_r8(%rbp)
	push	IFRAME_r10(%rbp)
	push	IFRAME_dx(%rbp)
	push	IFRAME_si(%rbp)
	push	IFRAME_di(%rbp)
	movq	%r14, %rdi				// syscall number
	movq	%rsp, %rsi
	push	%rax
	call	user_debug_pre_syscall
	pop		%rax
	addq	$48, %rsp
	jmp		.Lpre_syscall_debug_done

.Lpost_syscall_work:
	testl	$THREAD_FLAGS_DEBUGGER_INSTALLED, THREAD_flags(%r12)
	jz		1f

	// Post-syscall debugging. Same as above, need a block of arguments.
	push	IFRAME_r9(%rbp)
	push	IFRAME_r8(%rbp)
	push	IFRAME_r10(%rbp)
	push	IFRAME_dx(%rbp)
	push	IFRAME_si(%rbp)
	push	IFRAME_di(%rbp)
	movq	%r14, %rdi				// syscall number
	movq	%rsp, %rsi
	movq	IFRAME_ax(%rbp), %rdx	// return value
	movq	%r13, %rcx				// start time, preserved earlier
	call	user_debug_post_syscall
	addq	$48, %rsp
1:
	// Do we need to handle signals?
	testl	$(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD \
			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
			, THREAD_flags(%r12)
	jnz		.Lpost_syscall_handle_signals
	cli
	call	thread_at_kernel_exit_no_signals

.Lpost_syscall_work_done:
	// Handle syscall restarting.
	testl	$THREAD_FLAGS_RESTART_SYSCALL, THREAD_flags(%r12)
	jz		1f
	movq	%rsp, %rdi
	call	x86_restart_syscall
1:
	// Install breakpoints, if defined.
	testl	$THREAD_FLAGS_BREAKPOINTS_DEFINED, THREAD_flags(%r12)
	jz		1f
	movq	%rbp, %rdi
	call	x86_init_user_debug_at_kernel_exit
1:
	// On this return path it is possible that the frame has been modified,
	// for example to execute a signal handler. In this case it is safer to
	// return via IRET.
	CLEAR_FPU_STATE()
	jmp .Liret

.Lrestore_fpu:
	movq	IFRAME_fpu(%rbp), %rdi

	movl	(gXsaveMask), %eax
	movl	(gXsaveMask+4), %edx
	CODEPATCH_START
	fxrstorq	(%rdi)
	CODEPATCH_END(ALTCODEPATCH_TAG_XRSTOR)
.Liret:
	// Restore the saved registers.
	RESTORE_IFRAME()

	// Restore the previous GS base and return.
	swapgs
	lfence
	iretq

.Lpost_syscall_handle_signals:
	call	thread_at_kernel_exit
	jmp		.Lpost_syscall_work_done

.Lsyscall_stack_args:
	// Some arguments are on the stack; work out what we need to copy. The
	// first 6 arguments (48 bytes) are already in registers.
	// RAX = syscall table entry address, RCX = argument size.
	subq	$48, %rcx

	// Get the address to copy from.
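	// The userland stub executes SYSCALL right after being called, so the
	// seventh and later arguments should sit just above the return address on
	// the user stack; the bounds check rejects a source address beyond the end
	// of the userland address space.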
	movq	IFRAME_user_sp(%rbp), %rsi
	addq	$8, %rsi
	movabs	$(USER_BASE + USER_SIZE), %rdx
	cmp		%rdx, %rsi
	jae		.Lbad_syscall_args

	// Make space on the stack.
	subq	%rcx, %rsp
	andq	$~15, %rsp
	movq	%rsp, %rdi

	// Set a fault handler.
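	// If the copy from the user stack faults, the page fault code will resume
	// execution at .Lbad_syscall_args instead of panicking. ASM_STAC/ASM_CLAC
	// temporarily permit supervisor access to user pages on CPUs with SMAP
	// (and presumably expand to nothing otherwise).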
	movq	$.Lbad_syscall_args, THREAD_fault_handler(%r12)

	ASM_STAC

	// Copy them by quadwords.
	shrq	$3, %rcx
	rep
	movsq
	ASM_CLAC
	movq	$0, THREAD_fault_handler(%r12)

	// Perform the call.
	jmp		.Lperform_syscall

.Lbad_syscall_args:
	movq	$0, THREAD_fault_handler(%r12)
	movq	%rbp, %rsp
	jmp		.Lsyscall_return
FUNCTION_END(x86_64_syscall_entry)


/*!	\fn void x86_return_to_userland(iframe* frame)
	\brief Returns to the userland environment given by \a frame.

	Before returning to userland all potentially necessary kernel exit work is
	done.

	\a frame must point to a location somewhere on the caller's stack (e.g. a
	local variable).
	The function must be called with interrupts disabled.

	\param frame The iframe defining the userland environment.
*/
FUNCTION(x86_return_to_userland):
	movq	%rdi, %rbp
	movq	%rbp, %rsp

	// Perform kernel exit work.
	movq	%gs:0, %r12
	testl	$(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
			| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \
			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
			, THREAD_flags(%r12)
	jnz		.Luserland_return_work

	// update the thread's kernel time and return
	UPDATE_THREAD_KERNEL_TIME()

	// Restore the frame and return.
	RESTORE_IFRAME()
	swapgs
	lfence
	iretq
.Luserland_return_work:
	// Slow path for return to userland.

	// Do we need to handle signals?
	testl	$(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD \
			| THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \
			, THREAD_flags(%r12)
	jnz		.Luserland_return_handle_signals
	cli
	call	thread_at_kernel_exit_no_signals

.Luserland_return_work_done:
	// Install breakpoints, if defined.
	testl	$THREAD_FLAGS_BREAKPOINTS_DEFINED, THREAD_flags(%r12)
	jz		1f
	movq	%rbp, %rdi
	call	x86_init_user_debug_at_kernel_exit
1:
	// Restore the saved registers.
	RESTORE_IFRAME()

	// Restore the previous GS base and return.
	swapgs
	lfence
	iretq
.Luserland_return_handle_signals:
	// thread_at_kernel_exit requires interrupts to be enabled; it will
	// disable them again afterwards.
	sti
	call	thread_at_kernel_exit
	jmp		.Luserland_return_work_done
FUNCTION_END(x86_return_to_userland)