xref: /haiku/src/system/kernel/signal.cpp (revision 1c09002cbee8e797a0f8bbfc5678dfadd39ee1a7)
/*
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
 *
 * Distributed under the terms of the MIT License.
 */


/*! POSIX signal handling routines */


#include <ksignal.h>

#include <errno.h>
#include <stddef.h>
#include <string.h>

#include <OS.h>
#include <KernelExport.h>

#include <cpu.h>
#include <debug.h>
#include <kernel.h>
#include <kscheduler.h>
#include <sem.h>
#include <syscall_restart.h>
#include <syscall_utils.h>
#include <team.h>
#include <thread.h>
#include <tracing.h>
#include <user_debugger.h>
#include <user_thread.h>
#include <util/AutoLock.h>


//#define TRACE_SIGNAL
#ifdef TRACE_SIGNAL
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#define BLOCKABLE_SIGNALS		(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)))
#define STOP_SIGNALS \
	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
#define DEFAULT_IGNORE_SIGNALS \
	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
	| SIGNAL_TO_MASK(SIGCONT))
#define NON_DEFERRABLE_SIGNALS	\
	(KILL_SIGNALS				\
	| SIGNAL_TO_MASK(SIGILL)	\
	| SIGNAL_TO_MASK(SIGFPE)	\
	| SIGNAL_TO_MASK(SIGSEGV))


const char * const sigstr[NSIG] = {
	"NONE", "HUP", "INT", "QUIT", "ILL", "CHLD", "ABRT", "PIPE",
	"FPE", "KILL", "STOP", "SEGV", "CONT", "TSTP", "ALRM", "TERM",
	"TTIN", "TTOU", "USR1", "USR2", "WINCH", "KILLTHR", "TRAP",
	"POLL", "PROF", "SYS", "URG", "VTALRM", "XCPU", "XFSZ"
};


static status_t deliver_signal(Thread *thread, uint signal, uint32 flags);


// #pragma mark - signal tracing


#if SIGNAL_TRACING

namespace SignalTracing {


class HandleSignals : public AbstractTraceEntry {
	public:
		HandleSignals(uint32 signals)
			:
			fSignals(signals)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal handle:  0x%lx", fSignals);
		}

	private:
		uint32		fSignals;
};


class ExecuteSignalHandler : public AbstractTraceEntry {
	public:
		ExecuteSignalHandler(int signal, struct sigaction* handler)
			:
			fSignal(signal),
			fHandler((void*)handler->sa_handler)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal exec handler: signal: %d, handler: %p",
				fSignal, fHandler);
		}

	private:
		int		fSignal;
		void*	fHandler;
};


class SendSignal : public AbstractTraceEntry {
	public:
		SendSignal(pid_t target, uint32 signal, uint32 flags)
			:
			fTarget(target),
			fSignal(signal),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal send: target: %ld, signal: %lu (%s), "
				"flags: 0x%lx", fTarget, fSignal,
				(fSignal < NSIG ? sigstr[fSignal] : "invalid"), fFlags);
		}

	private:
		pid_t	fTarget;
		uint32	fSignal;
		uint32	fFlags;
};


class SigAction : public AbstractTraceEntry {
	public:
		SigAction(Thread* thread, uint32 signal, const struct sigaction* act)
			:
			fThread(thread->id),
			fSignal(signal),
			fAction(*act)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal action: thread: %ld, signal: %lu (%s), "
				"action: {handler: %p, flags: 0x%x, mask: 0x%lx}",
				fThread, fSignal,
				(fSignal < NSIG ? sigstr[fSignal] : "invalid"),
				fAction.sa_handler, fAction.sa_flags, fAction.sa_mask);
		}

	private:
		thread_id			fThread;
		uint32				fSignal;
		struct sigaction	fAction;
};


class SigProcMask : public AbstractTraceEntry {
	public:
		SigProcMask(int how, sigset_t mask)
			:
			fHow(how),
			fMask(mask),
			fOldMask(thread_get_current_thread()->sig_block_mask)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			const char* how = "invalid";
			switch (fHow) {
				case SIG_BLOCK:
					how = "block";
					break;
				case SIG_UNBLOCK:
					how = "unblock";
					break;
				case SIG_SETMASK:
					how = "set";
					break;
			}

			out.Print("signal proc mask: %s 0x%lx, old mask: 0x%lx", how, fMask,
				fOldMask);
		}

	private:
		int			fHow;
		sigset_t	fMask;
		sigset_t	fOldMask;
};


class SigSuspend : public AbstractTraceEntry {
	public:
		SigSuspend(sigset_t mask)
			:
			fMask(mask),
			fOldMask(thread_get_current_thread()->sig_block_mask)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal suspend: %#" B_PRIx32 ", old mask: %#" B_PRIx32,
				fMask, fOldMask);
		}

	private:
		sigset_t	fMask;
		sigset_t	fOldMask;
};


class SigSuspendDone : public AbstractTraceEntry {
	public:
		SigSuspendDone()
			:
			fSignals(thread_get_current_thread()->sig_pending)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal suspend done: %#" B_PRIx32, fSignals);
		}

	private:
		uint32		fSignals;
};

}	// namespace SignalTracing

#	define T(x)	new(std::nothrow) SignalTracing::x

#else
#	define T(x)
#endif	// SIGNAL_TRACING


// #pragma mark -


/*!	Updates the thread::flags field according to what signals are pending.
	Interrupts must be disabled and the thread lock must be held.
*/
static void
update_thread_signals_flag(Thread* thread)
{
	sigset_t mask = ~atomic_get(&thread->sig_block_mask)
		| thread->sig_temp_enabled;
	if (atomic_get(&thread->sig_pending) & mask)
		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
}


void
update_current_thread_signals_flag()
{
	InterruptsSpinLocker locker(gThreadSpinlock);

	update_thread_signals_flag(thread_get_current_thread());
}


static bool
notify_debugger(Thread *thread, int signal, struct sigaction *handler,
	bool deadly)
{
	uint64 signalMask = SIGNAL_TO_MASK(signal);

	// first check the ignore signal masks the debugger specified for the thread

	if (atomic_get(&thread->debug_info.ignore_signals_once) & signalMask) {
		atomic_and(&thread->debug_info.ignore_signals_once, ~signalMask);
		return true;
	}

	if (atomic_get(&thread->debug_info.ignore_signals) & signalMask)
		return true;

	// deliver the event
	return user_debug_handle_signal(signal, handler, deadly);
}


/*!	Actually handles the signal, i.e. the thread will exit, a custom signal
	handler is prepared, or whatever else the signal demands.
*/
bool
handle_signals(Thread *thread)
{
	uint32 signalMask = atomic_get(&thread->sig_pending)
		& (~atomic_get(&thread->sig_block_mask) | thread->sig_temp_enabled);
	thread->sig_temp_enabled = 0;

	// If SIGKILL[THR] are pending, we ignore other signals.
	// Otherwise check whether the thread shall stop for debugging.
	if (signalMask & KILL_SIGNALS) {
		signalMask &= KILL_SIGNALS;
	} else if (thread->debug_info.flags & B_THREAD_DEBUG_STOP) {
		user_debug_stop_thread();
	}

	if (signalMask == 0)
		return false;

	if (thread->user_thread->defer_signals > 0
		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0) {
		thread->user_thread->pending_signals = signalMask;
		return false;
	}

	thread->user_thread->pending_signals = 0;

	uint32 restartFlags = atomic_and(&thread->flags,
		~THREAD_FLAGS_DONT_RESTART_SYSCALL);
	bool alwaysRestart
		= (restartFlags & THREAD_FLAGS_ALWAYS_RESTART_SYSCALL) != 0;
	bool restart = alwaysRestart
		|| (restartFlags & THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;

	T(HandleSignals(signalMask));

	for (int32 i = 0; i < NSIG; i++) {
		bool debugSignal;
		int32 signal = i + 1;

		if ((signalMask & SIGNAL_TO_MASK(signal)) == 0)
			continue;

		// clear the signal that we will handle
		atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));

		debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
				& (B_TEAM_DEBUG_SIGNALS | B_TEAM_DEBUG_DEBUGGER_INSTALLED));

		// TODO: since sigaction_etc() could clobber the fields at any time,
		//		we should actually copy the relevant fields atomically before
		//		accessing them (only the debugger is calling sigaction_etc()
		//		right now).
		//		Update: sigaction_etc() is only used by the userland debugger
		//		support. We can just as well restrict getting/setting signal
		//		handlers to work only when the respective thread is stopped.
		//		Then sigaction() could be used instead and we could get rid of
		//		sigaction_etc().
		struct sigaction* handler = &thread->sig_action[i];

		TRACE(("Thread 0x%lx received signal %s\n", thread->id, sigstr[signal]));

		if (handler->sa_handler == SIG_IGN) {
			// signal is to be ignored
			// ToDo: apply zombie cleaning on SIGCHLD

			// notify the debugger
			if (debugSignal)
				notify_debugger(thread, signal, handler, false);
			continue;
		} else if (handler->sa_handler == SIG_DFL) {
			// default signal behaviour
			switch (signal) {
				case SIGCHLD:
				case SIGWINCH:
				case SIGURG:
					// notify the debugger
					if (debugSignal)
						notify_debugger(thread, signal, handler, false);
					continue;

				case SIGCONT:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_CONTINUED, signal, false);

						// The standard states that the system *may* send a
						// SIGCHLD when a child is continued. I haven't found
						// a good reason why we would want to, though.
					}
					continue;

				case SIGSTOP:
				case SIGTSTP:
				case SIGTTIN:
				case SIGTTOU:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					thread->next_state = B_THREAD_SUSPENDED;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_STOPPED, signal, false);

						// send a SIGCHLD to the parent (unless it has
						// SA_NOCLDSTOP set)
						SpinLocker _(gThreadSpinlock);
						Thread* parentThread
							= thread->team->parent->main_thread;
						struct sigaction& parentHandler
							= parentThread->sig_action[SIGCHLD - 1];
						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0)
							deliver_signal(parentThread, SIGCHLD, 0);
					}

					return true;

				case SIGSEGV:
				case SIGFPE:
				case SIGILL:
				case SIGTRAP:
				case SIGABRT:
					// If this is the main thread, we just fall through and let
					// this signal kill the team. Otherwise we send a SIGKILL to
					// the main thread first, since the signal will kill this
					// thread only.
					if (thread != thread->team->main_thread)
						send_signal(thread->team->main_thread->id, SIGKILL);
				case SIGQUIT:
				case SIGPOLL:
				case SIGPROF:
				case SIGSYS:
				case SIGVTALRM:
				case SIGXCPU:
				case SIGXFSZ:
					TRACE(("Shutting down thread 0x%lx due to signal #%ld\n",
						thread->id, signal));
				case SIGKILL:
				case SIGKILLTHR:
				default:
					// if the thread exited normally, the exit reason is already set
					if (thread->exit.reason != THREAD_RETURN_EXIT) {
						thread->exit.reason = THREAD_RETURN_INTERRUPTED;
						thread->exit.signal = (uint16)signal;
					}

					// notify the debugger
					if (debugSignal && signal != SIGKILL && signal != SIGKILLTHR
						&& !notify_debugger(thread, signal, handler, true))
						continue;

					thread_exit();
						// won't return
			}
		}

		// User defined signal handler

		// notify the debugger
		if (debugSignal && !notify_debugger(thread, signal, handler, false))
			continue;

		if (!restart
				|| (!alwaysRestart && (handler->sa_flags & SA_RESTART) == 0)) {
			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
		}

		T(ExecuteSignalHandler(signal, handler));

		TRACE(("### Setting up custom signal handler frame...\n"));
		arch_setup_signal_frame(thread, handler, signal,
			atomic_get(&thread->sig_block_mask));

		if (handler->sa_flags & SA_ONESHOT)
			handler->sa_handler = SIG_DFL;
		if ((handler->sa_flags & SA_NOMASK) == 0) {
			// Update the block mask while the signal handler is running - it
			// will be automatically restored when the signal frame is left.
			atomic_or(&thread->sig_block_mask,
				(handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGNALS);
		}

		update_current_thread_signals_flag();

		return false;
	}

	// clear syscall restart thread flag, if we're not supposed to restart the
	// syscall
	if (!restart)
		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

	update_current_thread_signals_flag();

	return false;
}
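

// A minimal sketch (illustration only, not part of this file) of the
// userland side of the deferral protocol checked above: code brackets a
// critical section with user_thread::defer_signals and afterwards checks
// pending_signals to see whether anything arrived in the meantime. The
// get_user_thread() accessor is assumed to return the calling thread's
// user_thread area.
#if 0
static void
example_defer_signals_around_critical_section(void)
{
	struct user_thread* userThread = get_user_thread();

	// non-deferrable signals (KILL_SIGNALS, SIGILL, SIGFPE, SIGSEGV) are
	// still delivered while the counter is > 0
	atomic_add(&userThread->defer_signals, 1);

	// ... critical section ...

	atomic_add(&userThread->defer_signals, -1);

	if (userThread->pending_signals != 0) {
		// signals were deferred; they will be delivered on the next
		// kernel exit, e.g. after any syscall
	}
}
#endif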


bool
is_kill_signal_pending(void)
{
	return (atomic_get(&thread_get_current_thread()->sig_pending)
		& KILL_SIGNALS) != 0;
}


bool
is_signal_blocked(int signal)
{
	return (atomic_get(&thread_get_current_thread()->sig_block_mask)
		& SIGNAL_TO_MASK(signal)) != 0;
}


/*!	Delivers the \a signal to the \a thread, but doesn't handle the signal -
	it just makes sure the thread gets the signal, i.e. unblocks it if needed.
	This function must be called with interrupts disabled and the
	thread lock held.
*/
static status_t
deliver_signal(Thread *thread, uint signal, uint32 flags)
{
	if (flags & B_CHECK_PERMISSION) {
		// ToDo: introduce euid & uid fields to the team and check permission
	}

	if (signal == 0)
		return B_OK;

	if (thread->team == team_get_kernel_team()) {
		// Signals to kernel threads will only wake them up
		if (thread->state == B_THREAD_SUSPENDED)
			scheduler_enqueue_in_run_queue(thread);
		return B_OK;
	}

	atomic_or(&thread->sig_pending, SIGNAL_TO_MASK(signal));

	switch (signal) {
		case SIGKILL:
		{
			// Forward KILLTHR to the main thread of the team
			Thread *mainThread = thread->team->main_thread;
			atomic_or(&mainThread->sig_pending, SIGNAL_TO_MASK(SIGKILLTHR));

			// Wake up main thread
			if (mainThread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(mainThread);
			else
				thread_interrupt(mainThread, true);

			update_thread_signals_flag(mainThread);

			// Supposed to fall through
		}
		case SIGKILLTHR:
			// Wake up suspended threads and interrupt waiting ones
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);
			else
				thread_interrupt(thread, true);
			break;

		case SIGCONT:
			// Wake up thread if it was suspended
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);

			if ((flags & SIGNAL_FLAG_DONT_RESTART_SYSCALL) != 0)
				atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);

			atomic_and(&thread->sig_pending, ~STOP_SIGNALS);
				// remove any pending stop signals
			break;

		default:
			if (thread->sig_pending
				& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD))) {
				// Interrupt thread if it was waiting
				thread_interrupt(thread, false);
			}
			break;
	}

	update_thread_signals_flag(thread);

	return B_OK;
}


int
send_signal_etc(pid_t id, uint signal, uint32 flags)
{
	status_t status = B_BAD_THREAD_ID;
	Thread *thread;
	cpu_status state = 0;

	if (signal < 0 || signal > MAX_SIGNO)
		return B_BAD_VALUE;

	T(SendSignal(id, signal, flags));

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		state = disable_interrupts();

	if (id > 0) {
		// send a signal to the specified thread

		GRAB_THREAD_LOCK();

		thread = thread_get_thread_struct_locked(id);
		if (thread != NULL)
			status = deliver_signal(thread, signal, flags);
	} else {
		// send a signal to the specified process group
		// (the absolute value of the id)

		struct process_group *group;

		// TODO: handle -1 correctly
		if (id == 0 || id == -1) {
			// send a signal to the current team
			id = thread_get_current_thread()->team->id;
		} else
			id = -id;

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			GRAB_TEAM_LOCK();

		group = team_get_process_group_locked(NULL, id);
		if (group != NULL) {
			Team *team, *next;

			// Send a signal to all teams in this process group

			for (team = group->teams; team != NULL; team = next) {
				next = team->group_next;
				id = team->id;

				GRAB_THREAD_LOCK();

				thread = thread_get_thread_struct_locked(id);
				if (thread != NULL) {
					// we don't stop because of an error sending one signal;
					// we rather want to deliver as many signals as possible
					status = deliver_signal(thread, signal, flags);
				}

				RELEASE_THREAD_LOCK();
			}
		}

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			RELEASE_TEAM_LOCK();

		GRAB_THREAD_LOCK();
	}

	if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0)
		scheduler_reschedule_if_necessary_locked();

	RELEASE_THREAD_LOCK();

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		restore_interrupts(state);

	return status;
}


int
send_signal(pid_t threadID, uint signal)
{
	// The BeBook states that this function isn't exported
	// for drivers, but of course it is.
	return send_signal_etc(threadID, signal, 0);
}
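

// A brief illustrative sketch (not part of this file): a driver that needs
// to signal a thread from interrupt context would pass B_DO_NOT_RESCHEDULE,
// just like alarm_event() below, since rescheduling must be deferred there.
// The waiting thread's ID is assumed to have been stored by the driver.
#if 0
static int32
example_interrupt_handler(void* data)
{
	thread_id waiter = *(thread_id*)data;

	// wake the waiting thread; request the reschedule on interrupt exit
	send_signal_etc(waiter, SIGCONT, B_DO_NOT_RESCHEDULE);

	return B_INVOKE_SCHEDULER;
}
#endif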


int
has_signals_pending(void *_thread)
{
	Thread *thread = (Thread *)_thread;
	if (thread == NULL)
		thread = thread_get_current_thread();

	return atomic_get(&thread->sig_pending)
		& ~atomic_get(&thread->sig_block_mask);
}


static int
sigprocmask_internal(int how, const sigset_t *set, sigset_t *oldSet)
{
	Thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);

	if (set != NULL) {
		T(SigProcMask(how, *set));

		switch (how) {
			case SIG_BLOCK:
				atomic_or(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			case SIG_UNBLOCK:
				atomic_and(&thread->sig_block_mask, ~*set);
				break;
			case SIG_SETMASK:
				atomic_set(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			default:
				return B_BAD_VALUE;
		}

		update_current_thread_signals_flag();
	}

	if (oldSet != NULL)
		*oldSet = oldMask;

	return B_OK;
}


int
sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
{
	RETURN_AND_SET_ERRNO(sigprocmask_internal(how, set, oldSet));
}
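

// A short usage sketch (standard POSIX semantics, illustration only):
// blocking SIGINT around a critical section and restoring the previous
// mask afterwards. A SIGINT arriving in between stays pending until the
// old mask is reinstated.
#if 0
static void
example_block_sigint(void)
{
	sigset_t set, oldSet;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);

	sigprocmask(SIG_BLOCK, &set, &oldSet);

	// ... critical section: SIGINT is blocked here ...

	sigprocmask(SIG_SETMASK, &oldSet, NULL);
}
#endif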


/*!	\brief sigaction() for the specified thread.
	A \a threadID < 0 specifies the current thread.
*/
static status_t
sigaction_etc_internal(thread_id threadID, int signal, const struct sigaction *act,
	struct sigaction *oldAction)
{
	Thread *thread;
	cpu_status state;
	status_t error = B_OK;

	if (signal < 1 || signal > MAX_SIGNO
		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	thread = (threadID < 0
		? thread_get_current_thread()
		: thread_get_thread_struct_locked(threadID));

	if (thread) {
		if (oldAction) {
			// save previous sigaction structure
			memcpy(oldAction, &thread->sig_action[signal - 1],
				sizeof(struct sigaction));
		}

		if (act) {
			T(SigAction(thread, signal, act));

			// set new sigaction structure
			memcpy(&thread->sig_action[signal - 1], act,
				sizeof(struct sigaction));
			thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
		}

		if (act && act->sa_handler == SIG_IGN) {
			// remove pending signal if it should now be ignored
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		} else if (act && act->sa_handler == SIG_DFL
			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0) {
			// remove pending signal for those signals whose default
			// action is to ignore them
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		}
	} else
		error = B_BAD_THREAD_ID;

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return error;
}


int
sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
	struct sigaction *oldAction)
{
	RETURN_AND_SET_ERRNO(sigaction_etc_internal(threadID, signal, act,
		oldAction));
}


int
sigaction(int signal, const struct sigaction *act, struct sigaction *oldAction)
{
	return sigaction_etc(-1, signal, act, oldAction);
}
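

// An illustrative sketch of the userland view (not part of this file):
// installing a handler with SA_RESTART, so that syscalls interrupted by
// this signal are restarted - see the restart logic in handle_signals()
// above. The handler names are hypothetical.
#if 0
static void
example_handler(int signal)
{
	// only async-signal-safe work belongs here
}

static void
example_install_handler(void)
{
	struct sigaction action;

	memset(&action, 0, sizeof(action));
	action.sa_handler = example_handler;
	action.sa_flags = SA_RESTART;
	sigemptyset(&action.sa_mask);

	sigaction(SIGUSR1, &action, NULL);
}
#endif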


/*!	Timer hook that sends a SIGALRM to the thread that set the alarm. */
static int32
alarm_event(timer *t)
{
	// The hook can be called from any context, but we have to
	// deliver the signal to the thread that originally called
	// set_alarm().
	// Since thread->alarm is this timer structure, we can just
	// cast it back - ugly but it works for now
	Thread *thread = (Thread *)((uint8 *)t - offsetof(Thread, alarm));
		// ToDo: investigate adding one user parameter to the timer structure to fix this hack

	TRACE(("alarm_event: thread = %p\n", thread));
	send_signal_etc(thread->id, SIGALRM, B_DO_NOT_RESCHEDULE);

	return B_HANDLED_INTERRUPT;
}


/*!	Sets the alarm timer for the current thread. The timer fires at the
	specified time in the future, periodically or just once, as determined
	by \a mode.
	\return the time left until a previously set alarm would have fired.
*/
bigtime_t
set_alarm(bigtime_t time, uint32 mode)
{
	Thread *thread = thread_get_current_thread();
	bigtime_t remainingTime = 0;

	ASSERT(B_ONE_SHOT_RELATIVE_ALARM == B_ONE_SHOT_RELATIVE_TIMER);
		// just to be sure no one changes the headers some day

	TRACE(("set_alarm: thread = %p\n", thread));

	if (thread->alarm.period)
		remainingTime = (bigtime_t)thread->alarm.schedule_time - system_time();

	cancel_timer(&thread->alarm);

	if (time != B_INFINITE_TIMEOUT)
		add_timer(&thread->alarm, &alarm_event, time, mode);
	else {
		// this marks the alarm as canceled (for returning the remaining time)
		thread->alarm.period = 0;
	}

	return remainingTime;
}
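

// A hedged usage sketch: set_alarm() arms the calling thread's alarm timer,
// and alarm_event() above sends the thread a SIGALRM when it fires. This
// assumes a SIGALRM handler has already been installed via sigaction().
#if 0
static void
example_periodic_alarm(void)
{
	// fire every second from now on
	set_alarm(1000000, B_PERIODIC_ALARM);

	// ... do work, getting a SIGALRM each second ...

	// cancel the alarm again
	set_alarm(B_INFINITE_TIMEOUT, B_ONE_SHOT_RELATIVE_ALARM);
}
#endif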


/*!	Wait for the specified signals, and return the signal retrieved in
	\a _signal.
*/
static status_t
sigwait_internal(const sigset_t *set, int *_signal)
{
	sigset_t requestedSignals = *set & BLOCKABLE_SIGNALS;

	Thread* thread = thread_get_current_thread();

	while (true) {
		sigset_t pendingSignals = atomic_get(&thread->sig_pending);
		sigset_t blockedSignals = atomic_get(&thread->sig_block_mask);
		sigset_t pendingRequestedSignals = pendingSignals & requestedSignals;
		if (pendingRequestedSignals != 0) {
			// select the lowest pending requested signal to return in _signal
			for (int signal = 1; signal < NSIG; signal++) {
				if ((SIGNAL_TO_MASK(signal) & pendingRequestedSignals) != 0) {
					atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
					*_signal = signal;
					return B_OK;
				}
			}
		}

		if ((pendingSignals & ~blockedSignals) != 0) {
			// Non-blocked signals are pending -- return to let them be handled.
			return B_INTERRUPTED;
		}

		// No signals yet. Remove the requested signals from the block mask
		// and wait until we're interrupted.
		atomic_set(&thread->sig_block_mask,
			blockedSignals & ~(requestedSignals & BLOCKABLE_SIGNALS));

		while (!has_signals_pending(thread)) {
			thread_prepare_to_block(thread, B_CAN_INTERRUPT,
				THREAD_BLOCK_TYPE_SIGNAL, NULL);
			thread_block();
		}

		// restore the original block mask
		atomic_set(&thread->sig_block_mask, blockedSignals);

		update_current_thread_signals_flag();
	}
}


int
sigwait(const sigset_t *set, int *_signal)
{
	RETURN_AND_SET_ERRNO(sigwait_internal(set, _signal));
}
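

// A standard POSIX usage sketch (illustration only): a dedicated thread
// consumes SIGUSR1 synchronously instead of installing a handler. The
// signal is assumed to be blocked in all threads, so it stays pending
// until sigwait() retrieves it.
#if 0
static int32
example_signal_wait_loop(void* data)
{
	sigset_t set;
	int signal;

	(void)data;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);

	while (sigwait(&set, &signal) == 0) {
		// handle the signal synchronously here
	}

	return 0;
}
#endif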


/*!	Replaces the current signal block mask and waits for any event to happen.
	Before returning, the original signal block mask is restored.
*/
static status_t
sigsuspend_internal(const sigset_t *mask)
{
	T(SigSuspend(*mask));

	Thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);

	// Set the new block mask and block until interrupted.

	atomic_set(&thread->sig_block_mask, *mask & BLOCKABLE_SIGNALS);

	while (!has_signals_pending(thread)) {
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SIGNAL, NULL);
		thread_block();
	}

	// restore the original block mask
	atomic_set(&thread->sig_block_mask, oldMask);

	thread->sig_temp_enabled = ~*mask;

	update_current_thread_signals_flag();

	T(SigSuspendDone());

	// we're not supposed to actually succeed
	return B_INTERRUPTED;
}


int
sigsuspend(const sigset_t *mask)
{
	RETURN_AND_SET_ERRNO(sigsuspend_internal(mask));
}
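

// The classic race-free wait pattern (standard POSIX, illustration only):
// block the signal first, test the condition, then atomically unblock it
// and wait via sigsuspend(). Testing the flag without blocking first would
// race with delivery. The flag and its SIGUSR1 handler are hypothetical.
#if 0
static volatile sig_atomic_t sEventOccurred = 0;
	// set to 1 by a SIGUSR1 handler installed elsewhere

static void
example_wait_for_event(void)
{
	sigset_t blockSet, waitMask;

	sigemptyset(&blockSet);
	sigaddset(&blockSet, SIGUSR1);
	sigprocmask(SIG_BLOCK, &blockSet, &waitMask);

	// wait with SIGUSR1 unblocked, everything else as before
	sigdelset(&waitMask, SIGUSR1);

	while (!sEventOccurred)
		sigsuspend(&waitMask);

	sigprocmask(SIG_UNBLOCK, &blockSet, NULL);
}
#endif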


static status_t
sigpending_internal(sigset_t *set)
{
	Thread *thread = thread_get_current_thread();

	if (set == NULL)
		return B_BAD_VALUE;

	*set = atomic_get(&thread->sig_pending);
	return B_OK;
}


int
sigpending(sigset_t *set)
{
	RETURN_AND_SET_ERRNO(sigpending_internal(set));
}
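

// A small usage sketch (standard POSIX): checking whether a blocked SIGINT
// has arrived without unblocking it.
#if 0
static bool
example_is_sigint_pending(void)
{
	sigset_t pending;

	sigpending(&pending);
	return sigismember(&pending, SIGINT) != 0;
}
#endif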


// #pragma mark - syscalls


bigtime_t
_user_set_alarm(bigtime_t time, uint32 mode)
{
	syscall_64_bit_return_value();

	return set_alarm(time, mode);
}


status_t
_user_send_signal(pid_t team, uint signal)
{
	return send_signal_etc(team, signal, B_CHECK_PERMISSION);
}


status_t
_user_sigprocmask(int how, const sigset_t *userSet, sigset_t *userOldSet)
{
	sigset_t set, oldSet;
	status_t status;

	if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
				sizeof(sigset_t)) < B_OK))
		return B_BAD_ADDRESS;

	status = sigprocmask_internal(how, userSet ? &set : NULL,
		userOldSet ? &oldSet : NULL);

	// copy old set if asked for
	if (status >= B_OK && userOldSet != NULL
		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_sigaction(int signal, const struct sigaction *userAction,
	struct sigaction *userOldAction)
{
	struct sigaction act, oact;
	status_t status;

	if ((userAction != NULL && user_memcpy(&act, userAction,
				sizeof(struct sigaction)) < B_OK)
		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction,
				sizeof(struct sigaction)) < B_OK))
		return B_BAD_ADDRESS;

	status = sigaction(signal, userAction ? &act : NULL,
		userOldAction ? &oact : NULL);

	// only copy the old action if a pointer has been given
	if (status >= B_OK && userOldAction != NULL
		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_sigwait(const sigset_t *userSet, int *_userSignal)
{
	if (userSet == NULL || _userSignal == NULL)
		return B_BAD_VALUE;

	sigset_t set;
	if (user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	int signal;
	status_t status = sigwait_internal(&set, &signal);
	if (status == B_INTERRUPTED) {
		// make sure we'll be restarted
		Thread* thread = thread_get_current_thread();
		atomic_or(&thread->flags,
			THREAD_FLAGS_ALWAYS_RESTART_SYSCALL | THREAD_FLAGS_RESTART_SYSCALL);
		return status;
	}

	return user_memcpy(_userSignal, &signal, sizeof(int));
}


status_t
_user_sigsuspend(const sigset_t *userMask)
{
	sigset_t mask;

	if (userMask == NULL)
		return B_BAD_VALUE;
	if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return sigsuspend_internal(&mask);
}


status_t
_user_sigpending(sigset_t *userSet)
{
	sigset_t set;
	status_t status;

	if (userSet == NULL)
		return B_BAD_VALUE;
	if (!IS_USER_ADDRESS(userSet))
		return B_BAD_ADDRESS;

	status = sigpending_internal(&set);
	if (status == B_OK
		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_set_signal_stack(const stack_t *newUserStack, stack_t *oldUserStack)
{
	Thread *thread = thread_get_current_thread();
	struct stack_t newStack, oldStack;
	bool onStack = false;

	if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
				sizeof(stack_t)) < B_OK)
		|| (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
				sizeof(stack_t)) < B_OK))
		return B_BAD_ADDRESS;

	if (thread->signal_stack_enabled) {
		// determine whether the user thread is currently
		// on the active signal stack
		onStack = arch_on_signal_stack(thread);
	}

	if (oldUserStack != NULL) {
		oldStack.ss_sp = (void *)thread->signal_stack_base;
		oldStack.ss_size = thread->signal_stack_size;
		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
			| (onStack ? SS_ONSTACK : 0);
	}

	if (newUserStack != NULL) {
		// no flags other than SS_DISABLE are allowed
		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
			return B_BAD_VALUE;

		if ((newStack.ss_flags & SS_DISABLE) == 0) {
			// check if the size is valid
			if (newStack.ss_size < MINSIGSTKSZ)
				return B_NO_MEMORY;
			if (onStack)
				return B_NOT_ALLOWED;
			if (!IS_USER_ADDRESS(newStack.ss_sp))
				return B_BAD_VALUE;

			thread->signal_stack_base = (addr_t)newStack.ss_sp;
			thread->signal_stack_size = newStack.ss_size;
			thread->signal_stack_enabled = true;
		} else
			thread->signal_stack_enabled = false;
	}

	// only copy the old stack info if a pointer has been given
	if (oldUserStack != NULL
		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
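

// An illustrative sketch of the userland view, assuming the POSIX
// sigaltstack() wrapper ends up in this syscall: installing an alternate
// stack, which handlers installed with SA_ONSTACK will then run on. Note
// the checks above: ss_size must be at least MINSIGSTKSZ, and only
// SS_DISABLE is accepted in ss_flags.
#if 0
static void
example_install_signal_stack(void)
{
	static char sStack[SIGSTKSZ];
	stack_t stack;

	stack.ss_sp = sStack;
	stack.ss_size = sizeof(sStack);
	stack.ss_flags = 0;

	sigaltstack(&stack, NULL);
}
#endif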