xref: /haiku/src/system/kernel/signal.cpp (revision c9ad965c81b08802fed0827fd1dd16f45297928a)
/*
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
 *
 * Distributed under the terms of the MIT License.
 */


/*! POSIX signal handling routines */


#include <ksignal.h>

#include <stddef.h>
#include <string.h>

#include <OS.h>
#include <KernelExport.h>

#include <debug.h>
#include <kernel.h>
#include <kscheduler.h>
#include <sem.h>
#include <syscall_restart.h>
#include <team.h>
#include <thread.h>
#include <tracing.h>
#include <user_debugger.h>
#include <user_thread.h>
#include <util/AutoLock.h>


//#define TRACE_SIGNAL
#ifdef TRACE_SIGNAL
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#define BLOCKABLE_SIGNALS		(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)))
#define STOP_SIGNALS \
	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
#define DEFAULT_IGNORE_SIGNALS \
	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
	| SIGNAL_TO_MASK(SIGCONT))
#define NON_DEFERRABLE_SIGNALS	\
	(KILL_SIGNALS				\
	| SIGNAL_TO_MASK(SIGILL)	\
	| SIGNAL_TO_MASK(SIGFPE)	\
	| SIGNAL_TO_MASK(SIGSEGV))


const char * const sigstr[NSIG] = {
	"NONE", "HUP", "INT", "QUIT", "ILL", "CHLD", "ABRT", "PIPE",
	"FPE", "KILL", "STOP", "SEGV", "CONT", "TSTP", "ALRM", "TERM",
	"TTIN", "TTOU", "USR1", "USR2", "WINCH", "KILLTHR", "TRAP",
	"POLL", "PROF", "SYS", "URG", "VTALRM", "XCPU", "XFSZ"
};


static status_t deliver_signal(struct thread *thread, uint signal,
	uint32 flags, bool &reschedule);


// #pragma mark - signal tracing


#if SIGNAL_TRACING

namespace SignalTracing {


class HandleSignals : public AbstractTraceEntry {
	public:
		HandleSignals(uint32 signals)
			:
			fSignals(signals)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal handle:  0x%lx", fSignals);
		}

	private:
		uint32		fSignals;
};


class ExecuteSignalHandler : public AbstractTraceEntry {
	public:
		ExecuteSignalHandler(int signal, struct sigaction* handler)
			:
			fSignal(signal),
			fHandler((void*)handler->sa_handler)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal exec handler: signal: %d, handler: %p",
				fSignal, fHandler);
		}

	private:
		int		fSignal;
		void*	fHandler;
};


class SendSignal : public AbstractTraceEntry {
	public:
		SendSignal(pid_t target, uint32 signal, uint32 flags)
			:
			fTarget(target),
			fSignal(signal),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal send: target: %ld, signal: %lu (%s), "
				"flags: 0x%lx", fTarget, fSignal,
				(fSignal < NSIG ? sigstr[fSignal] : "invalid"), fFlags);
		}

	private:
		pid_t	fTarget;
		uint32	fSignal;
		uint32	fFlags;
};


class SigAction : public AbstractTraceEntry {
	public:
		SigAction(struct thread* thread, uint32 signal,
			const struct sigaction* act)
			:
			fThread(thread->id),
			fSignal(signal),
			fAction(*act)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal action: thread: %ld, signal: %lu (%s), "
				"action: {handler: %p, flags: 0x%x, mask: 0x%lx}",
				fThread, fSignal,
				(fSignal < NSIG ? sigstr[fSignal] : "invalid"),
				fAction.sa_handler, fAction.sa_flags, fAction.sa_mask);
		}

	private:
		thread_id			fThread;
		uint32				fSignal;
		struct sigaction	fAction;
};


class SigProcMask : public AbstractTraceEntry {
	public:
		SigProcMask(int how, sigset_t mask)
			:
			fHow(how),
			fMask(mask),
			fOldMask(thread_get_current_thread()->sig_block_mask)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			const char* how = "invalid";
			switch (fHow) {
				case SIG_BLOCK:
					how = "block";
					break;
				case SIG_UNBLOCK:
					how = "unblock";
					break;
				case SIG_SETMASK:
					how = "set";
					break;
			}

			out.Print("signal proc mask: %s 0x%lx, old mask: 0x%lx", how, fMask,
				fOldMask);
		}

	private:
		int			fHow;
		sigset_t	fMask;
		sigset_t	fOldMask;
};

}	// namespace SignalTracing

#	define T(x)	new(std::nothrow) SignalTracing::x

#else
#	define T(x)
#endif	// SIGNAL_TRACING


// #pragma mark -


/*!	Updates the thread::flags field according to what signals are pending.
	Interrupts must be disabled and the thread lock must be held.
*/
static void
update_thread_signals_flag(struct thread* thread)
{
	if (atomic_get(&thread->sig_pending) & ~atomic_get(&thread->sig_block_mask))
		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
}


void
update_current_thread_signals_flag()
{
	InterruptsSpinLocker locker(gThreadSpinlock);

	update_thread_signals_flag(thread_get_current_thread());
}


static bool
notify_debugger(struct thread *thread, int signal, struct sigaction *handler,
	bool deadly)
{
	uint64 signalMask = SIGNAL_TO_MASK(signal);

	// first check the ignore signal masks the debugger specified for the thread

	if (atomic_get(&thread->debug_info.ignore_signals_once) & signalMask) {
		atomic_and(&thread->debug_info.ignore_signals_once, ~signalMask);
		return true;
	}

	if (atomic_get(&thread->debug_info.ignore_signals) & signalMask)
		return true;

	// deliver the event
	return user_debug_handle_signal(signal, handler, deadly);
}


/*! Actually handles the signal - i.e. the thread will exit, a custom signal
	handler is prepared, or whatever the signal demands.
*/
bool
handle_signals(struct thread *thread)
{
	uint32 signalMask = atomic_get(&thread->sig_pending)
		& ~atomic_get(&thread->sig_block_mask);

	// If SIGKILL[THR] are pending, we ignore other signals.
	// Otherwise check whether the thread should stop for debugging.
	if (signalMask & KILL_SIGNALS) {
		signalMask &= KILL_SIGNALS;
	} else if (thread->debug_info.flags & B_THREAD_DEBUG_STOP) {
		user_debug_stop_thread();
	}

	if (signalMask == 0)
		return false;

	if (thread->user_thread->defer_signals > 0
		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0) {
		thread->user_thread->pending_signals = signalMask;
		return false;
	}

	thread->user_thread->pending_signals = 0;

	bool restart = (atomic_and(&thread->flags,
			~THREAD_FLAGS_DONT_RESTART_SYSCALL)
		& THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;

	T(HandleSignals(signalMask));

	for (int32 i = 0; i < NSIG; i++) {
		bool debugSignal;
		int32 signal = i + 1;

		if ((signalMask & SIGNAL_TO_MASK(signal)) == 0)
			continue;

		// clear the signal that we will handle
		atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));

		debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
				& (B_TEAM_DEBUG_SIGNALS | B_TEAM_DEBUG_DEBUGGER_INSTALLED));

		// TODO: since sigaction_etc() could clobber the fields at any time,
		//		we should actually copy the relevant fields atomically before
		//		accessing them (only the debugger is calling sigaction_etc()
		//		right now).
		//		Update: sigaction_etc() is only used by the userland debugger
		//		support. We can just as well restrict getting/setting signal
		//		handlers to work only when the respective thread is stopped.
		//		Then sigaction() could be used instead and we could get rid of
		//		sigaction_etc().
		struct sigaction* handler = &thread->sig_action[i];

		TRACE(("Thread 0x%lx received signal %s\n", thread->id, sigstr[signal]));

		if (handler->sa_handler == SIG_IGN) {
			// signal is to be ignored
			// ToDo: apply zombie cleaning on SIGCHLD

			// notify the debugger
			if (debugSignal)
				notify_debugger(thread, signal, handler, false);
			continue;
		} else if (handler->sa_handler == SIG_DFL) {
			// default signal behaviour
			switch (signal) {
				case SIGCHLD:
				case SIGWINCH:
				case SIGURG:
					// notify the debugger
					if (debugSignal)
						notify_debugger(thread, signal, handler, false);
					continue;

				case SIGCONT:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_CONTINUED, signal, false);

						// The standard states that the system *may* send a
						// SIGCHLD when a child is continued. I haven't found
						// a good reason why we would want to, though.
					}
					continue;

				case SIGSTOP:
				case SIGTSTP:
				case SIGTTIN:
				case SIGTTOU:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					thread->next_state = B_THREAD_SUSPENDED;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_STOPPED, signal, false);

						// send a SIGCHLD to the parent (unless it has
						// SA_NOCLDSTOP set)
						SpinLocker _(gThreadSpinlock);
						struct thread* parentThread
							= thread->team->parent->main_thread;
						struct sigaction& parentHandler
							= parentThread->sig_action[SIGCHLD - 1];
						// TODO: do we need to worry about rescheduling here?
						bool unused = false;
						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
							deliver_signal(parentThread, SIGCHLD, 0,
								unused);
						}
					}

					return true;

				case SIGSEGV:
				case SIGFPE:
				case SIGILL:
				case SIGTRAP:
				case SIGABRT:
					// If this is the main thread, we just fall through and let
					// this signal kill the team. Otherwise we send a SIGKILL to
					// the main thread first, since the signal will kill this
					// thread only.
					if (thread != thread->team->main_thread)
						send_signal(thread->team->main_thread->id, SIGKILL);
					// fall through
				case SIGQUIT:
				case SIGPOLL:
				case SIGPROF:
				case SIGSYS:
				case SIGVTALRM:
				case SIGXCPU:
				case SIGXFSZ:
					TRACE(("Shutting down thread 0x%lx due to signal #%ld\n",
						thread->id, signal));
					// fall through
				case SIGKILL:
				case SIGKILLTHR:
				default:
					// if the thread exited normally, the exit reason is already set
					if (thread->exit.reason != THREAD_RETURN_EXIT) {
						thread->exit.reason = THREAD_RETURN_INTERRUPTED;
						thread->exit.signal = (uint16)signal;
					}

					// notify the debugger
					if (debugSignal && signal != SIGKILL && signal != SIGKILLTHR
						&& !notify_debugger(thread, signal, handler, true))
						continue;

					thread_exit();
						// won't return
			}
		}

		// User defined signal handler

		// notify the debugger
		if (debugSignal && !notify_debugger(thread, signal, handler, false))
			continue;

		if (!restart || (handler->sa_flags & SA_RESTART) == 0)
			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

		T(ExecuteSignalHandler(signal, handler));

		TRACE(("### Setting up custom signal handler frame...\n"));
		arch_setup_signal_frame(thread, handler, signal,
			atomic_get(&thread->sig_block_mask));

		if (handler->sa_flags & SA_ONESHOT)
			handler->sa_handler = SIG_DFL;
		if ((handler->sa_flags & SA_NOMASK) == 0) {
			// Update the block mask while the signal handler is running - it
			// will be automatically restored when the signal frame is left.
			atomic_or(&thread->sig_block_mask,
				(handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGNALS);
		}

		update_current_thread_signals_flag();

		return false;
	}

	// clear syscall restart thread flag, if we're not supposed to restart the
	// syscall
	if (!restart)
		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

	update_current_thread_signals_flag();

	return false;
}
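
// Example: handle_signals() defers all deferrable signals while
// user_thread->defer_signals is positive, parking them in
// user_thread->pending_signals instead. A userland critical section can use
// this by bracketing the critical code with increment/decrement of that
// counter. This is only a minimal sketch; get_user_thread() is assumed to be
// the private libroot accessor returning the calling thread's user_thread
// area.
//
//	static void
//	critical_section_example(void)
//	{
//		struct user_thread* userThread = get_user_thread();
//
//		userThread->defer_signals++;
//			// from here on, deferrable signals are parked
//
//		// ... work that must not be interrupted by a signal handler ...
//
//		if (--userThread->defer_signals == 0
//			&& userThread->pending_signals != 0) {
//			// something arrived in the meantime; the next kernel entry
//			// lets handle_signals() run the handlers
//		}
//	}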


bool
is_kill_signal_pending(void)
{
	return (atomic_get(&thread_get_current_thread()->sig_pending)
		& KILL_SIGNALS) != 0;
}


bool
is_signal_blocked(int signal)
{
	return (atomic_get(&thread_get_current_thread()->sig_block_mask)
		& SIGNAL_TO_MASK(signal)) != 0;
}


/*!	Delivers the \a signal to the \a thread, but doesn't handle the signal -
	it just makes sure the thread gets the signal, i.e. unblocks it if needed.
	This function must be called with interrupts disabled and the
	thread lock held.
*/
static status_t
deliver_signal(struct thread *thread, uint signal, uint32 flags,
	bool &reschedule)
{
	if (flags & B_CHECK_PERMISSION) {
		// ToDo: introduce euid & uid fields to the team and check permission
	}

	if (signal == 0)
		return B_OK;

	if (thread->team == team_get_kernel_team()) {
		// Signals to kernel threads will only wake them up
		if (thread->state == B_THREAD_SUSPENDED)
			reschedule |= scheduler_enqueue_in_run_queue(thread);
		return B_OK;
	}

	atomic_or(&thread->sig_pending, SIGNAL_TO_MASK(signal));

	switch (signal) {
		case SIGKILL:
		{
			// A SIGKILL kills the whole team, so also forward a SIGKILLTHR
			// to the team's main thread
			struct thread *mainThread = thread->team->main_thread;
			atomic_or(&mainThread->sig_pending, SIGNAL_TO_MASK(SIGKILLTHR));

			// Wake up main thread
			if (mainThread->state == B_THREAD_SUSPENDED)
				reschedule |= scheduler_enqueue_in_run_queue(mainThread);
			else
				thread_interrupt(mainThread, true);

			update_thread_signals_flag(mainThread);

			// Supposed to fall through
		}
		case SIGKILLTHR:
			// Wake up suspended threads and interrupt waiting ones
			if (thread->state == B_THREAD_SUSPENDED)
				reschedule |= scheduler_enqueue_in_run_queue(thread);
			else
				thread_interrupt(thread, true);
			break;

		case SIGCONT:
			// Wake up thread if it was suspended
			if (thread->state == B_THREAD_SUSPENDED)
				reschedule |= scheduler_enqueue_in_run_queue(thread);

			if ((flags & SIGNAL_FLAG_DONT_RESTART_SYSCALL) != 0)
				atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);

			atomic_and(&thread->sig_pending, ~STOP_SIGNALS);
				// remove any pending stop signals
			break;

		default:
			if (thread->sig_pending
				& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD))) {
				// Interrupt thread if it was waiting
				thread_interrupt(thread, false);
			}
			break;
	}

	update_thread_signals_flag(thread);

	return B_OK;
}
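
// A minimal sketch of the locking contract deliver_signal() expects from its
// callers, mirroring what send_signal_etc() does below: interrupts disabled,
// thread lock held, and a reschedule flag that is honored before the lock is
// dropped. The thread ID and signal used here are hypothetical.
//
//	static void
//	deliver_signal_example(thread_id id)
//	{
//		bool reschedule = false;
//
//		cpu_status state = disable_interrupts();
//		GRAB_THREAD_LOCK();
//
//		struct thread* target = thread_get_thread_struct_locked(id);
//		if (target != NULL)
//			deliver_signal(target, SIGUSR1, 0, reschedule);
//
//		if (reschedule)
//			scheduler_reschedule();
//
//		RELEASE_THREAD_LOCK();
//		restore_interrupts(state);
//	}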


int
send_signal_etc(pid_t id, uint signal, uint32 flags)
{
	status_t status = B_BAD_THREAD_ID;
	struct thread *thread;
	cpu_status state = 0;
	bool reschedule = false;

	if (signal < 0 || signal > MAX_SIGNO)
		return B_BAD_VALUE;

	T(SendSignal(id, signal, flags));

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		state = disable_interrupts();

	if (id > 0) {
		// send a signal to the specified thread

		GRAB_THREAD_LOCK();

		thread = thread_get_thread_struct_locked(id);
		if (thread != NULL)
			status = deliver_signal(thread, signal, flags, reschedule);
	} else {
		// send a signal to the specified process group
		// (the absolute value of the id)

		struct process_group *group;

		// TODO: handle -1 correctly
		if (id == 0 || id == -1) {
			// send a signal to the current team
			id = thread_get_current_thread()->team->id;
		} else
			id = -id;

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			GRAB_TEAM_LOCK();

		group = team_get_process_group_locked(NULL, id);
		if (group != NULL) {
			struct team *team, *next;

			// Send a signal to all teams in this process group

			for (team = group->teams; team != NULL; team = next) {
				next = team->group_next;
				id = team->id;

				GRAB_THREAD_LOCK();

				thread = thread_get_thread_struct_locked(id);
				if (thread != NULL) {
					// we don't stop because of an error sending the signal;
					// we'd rather send as many signals as possible
					status = deliver_signal(thread, signal, flags, reschedule);
				}

				RELEASE_THREAD_LOCK();
			}
		}

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			RELEASE_TEAM_LOCK();

		GRAB_THREAD_LOCK();
	}

	if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0
		&& reschedule)
		scheduler_reschedule();

	RELEASE_THREAD_LOCK();

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		restore_interrupts(state);

	return status;
}
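
// Two illustrative calls (a sketch; the IDs are made up): a positive ID
// signals a single thread, a negative ID signals every team in the process
// group with that ID. B_DO_NOT_RESCHEDULE is what interrupt handlers and
// timer hooks pass, since they must not trigger a reschedule themselves.
//
//	send_signal_etc(someThread, SIGTERM, 0);
//		// signal one thread, may reschedule immediately
//	send_signal_etc(-someProcessGroup, SIGHUP, B_DO_NOT_RESCHEDULE);
//		// signal a whole process group, defer rescheduling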


int
send_signal(pid_t threadID, uint signal)
{
	// The BeBook states that this function isn't exported
	// for drivers, but, of course, that's wrong.
	return send_signal_etc(threadID, signal, 0);
}


int
has_signals_pending(void *_thread)
{
	struct thread *thread = (struct thread *)_thread;
	if (thread == NULL)
		thread = thread_get_current_thread();

	return atomic_get(&thread->sig_pending)
		& ~atomic_get(&thread->sig_block_mask);
}


int
sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
{
	struct thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);

	if (set != NULL) {
		T(SigProcMask(how, *set));

		switch (how) {
			case SIG_BLOCK:
				atomic_or(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			case SIG_UNBLOCK:
				atomic_and(&thread->sig_block_mask, ~*set);
				break;
			case SIG_SETMASK:
				atomic_set(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			default:
				return B_BAD_VALUE;
		}

		update_current_thread_signals_flag();
	}

	if (oldSet != NULL)
		*oldSet = oldMask;

	return B_OK;
}
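
// The kernel-internal sigprocmask() mirrors the POSIX call, so the classic
// block/restore pattern applies. A minimal sketch - note that KILL_SIGNALS
// and SIGSTOP are filtered out by BLOCKABLE_SIGNALS and can never be blocked:
//
//	sigset_t blocked = SIGNAL_TO_MASK(SIGUSR1);
//	sigset_t oldMask;
//
//	sigprocmask(SIG_BLOCK, &blocked, &oldMask);
//	// ... SIGUSR1 stays pending instead of being handled here ...
//	sigprocmask(SIG_SETMASK, &oldMask, NULL);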


/*!	\brief sigaction() for the specified thread.
	A \a threadID < 0 specifies the current thread.
*/
int
sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
	struct sigaction *oldAction)
{
	struct thread *thread;
	cpu_status state;
	status_t error = B_OK;

	if (signal < 1 || signal > MAX_SIGNO
		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	thread = (threadID < 0
		? thread_get_current_thread()
		: thread_get_thread_struct_locked(threadID));

	if (thread) {
		if (oldAction) {
			// save the previous sigaction structure
			memcpy(oldAction, &thread->sig_action[signal - 1],
				sizeof(struct sigaction));
		}

		if (act) {
			T(SigAction(thread, signal, act));

			// set the new sigaction structure
			memcpy(&thread->sig_action[signal - 1], act,
				sizeof(struct sigaction));
			thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
		}

		if (act && act->sa_handler == SIG_IGN) {
			// remove pending signal if it should now be ignored
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		} else if (act && act->sa_handler == SIG_DFL
			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0) {
			// remove pending signal for those signals whose default
			// action is to ignore them
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		}
	} else
		error = B_BAD_THREAD_ID;

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return error;
}


int
sigaction(int signal, const struct sigaction *act, struct sigaction *oldAction)
{
	return sigaction_etc(-1, signal, act, oldAction);
}
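
// A minimal usage sketch: installing a handler with SA_RESTART, so that
// interrupted syscalls are transparently restarted (see the restart logic in
// handle_signals() above). The handler name is made up for the example.
//
//	static void
//	usr1_handler(int signal)
//	{
//		// keep it short: only async-signal-safe work belongs here
//	}
//
//	struct sigaction action;
//	memset(&action, 0, sizeof(action));
//	action.sa_handler = usr1_handler;
//	action.sa_flags = SA_RESTART;
//
//	sigaction(SIGUSR1, &action, NULL);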


/*! Triggers a SIGALRM to the thread that issued the timer and reschedules */
static int32
alarm_event(timer *t)
{
	// The hook can be called from any context, but we have to
	// deliver the signal to the thread that originally called
	// set_alarm().
	// Since thread->alarm is this timer structure, we can just
	// cast it back - ugly but it works for now
	struct thread *thread = (struct thread *)((uint8 *)t
		- offsetof(struct thread, alarm));
		// ToDo: investigate adding one user parameter to the timer structure to fix this hack

	TRACE(("alarm_event: thread = %p\n", thread));
	send_signal_etc(thread->id, SIGALRM, B_DO_NOT_RESCHEDULE);

	return B_INVOKE_SCHEDULER;
}


/*!	Sets the alarm timer for the current thread. The timer fires at the
	specified time in the future, periodically or just once, as determined
	by \a mode.
	\return the time left until a previously set alarm would have fired.
*/
bigtime_t
set_alarm(bigtime_t time, uint32 mode)
{
	struct thread *thread = thread_get_current_thread();
	bigtime_t remainingTime = 0;

	ASSERT(B_ONE_SHOT_RELATIVE_ALARM == B_ONE_SHOT_RELATIVE_TIMER);
		// just to be sure no one changes the headers some day

	TRACE(("set_alarm: thread = %p\n", thread));

	if (thread->alarm.period)
		remainingTime = (bigtime_t)thread->alarm.schedule_time - system_time();

	cancel_timer(&thread->alarm);

	if (time != B_INFINITE_TIMEOUT)
		add_timer(&thread->alarm, &alarm_event, time, mode);
	else {
		// this marks the alarm as canceled (for returning the remaining time)
		thread->alarm.period = 0;
	}

	return remainingTime;
}
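
// A minimal sketch of the resulting behaviour: a one-shot relative alarm
// delivers SIGALRM to the calling thread once the interval expires, and
// passing B_INFINITE_TIMEOUT cancels any pending alarm.
//
//	set_alarm(1000000, B_ONE_SHOT_RELATIVE_ALARM);
//		// SIGALRM arrives in about one second
//
//	bigtime_t left = set_alarm(B_INFINITE_TIMEOUT, B_ONE_SHOT_RELATIVE_ALARM);
//		// cancels the alarm; "left" holds the time that was remaining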


/*!	Wait for the specified signals, and return the signal retrieved in
	\a _signal.
*/
int
sigwait(const sigset_t *set, int *_signal)
{
	struct thread *thread = thread_get_current_thread();

	while (!has_signals_pending(thread)) {
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SIGNAL, NULL);
		thread_block();
	}

	int signalsPending = atomic_get(&thread->sig_pending) & *set;

	update_current_thread_signals_flag();

	if (signalsPending) {
		// select the lowest pending signal to return in _signal
		for (int signal = 1; signal < NSIG; signal++) {
			if ((SIGNAL_TO_MASK(signal) & signalsPending) != 0) {
				*_signal = signal;
				return B_OK;
			}
		}
	}

	return B_INTERRUPTED;
}
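
// A minimal usage sketch: wait synchronously for SIGUSR1 or SIGUSR2. This
// implementation returns B_INTERRUPTED if the thread is woken by a signal
// outside \a set, so callers typically loop.
//
//	sigset_t set = SIGNAL_TO_MASK(SIGUSR1) | SIGNAL_TO_MASK(SIGUSR2);
//	int signal;
//
//	while (sigwait(&set, &signal) != B_OK)
//		;
//	// "signal" now holds the lowest pending signal from "set"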


/*!	Replace the current signal block mask and wait for any event to happen.
	Before returning, the original signal block mask is restored.
*/
int
sigsuspend(const sigset_t *mask)
{
	struct thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);

	// Set the new block mask and block until interrupted.

	atomic_set(&thread->sig_block_mask, *mask & BLOCKABLE_SIGNALS);

	while (!has_signals_pending(thread)) {
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SIGNAL, NULL);
		thread_block();
	}

	// restore the original block mask
	atomic_set(&thread->sig_block_mask, oldMask);

	update_current_thread_signals_flag();

	// we're not supposed to actually succeed
	return B_INTERRUPTED;
}
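
// The classic race-free wait pattern this enables, as a sketch: block the
// signal first, test the condition, then atomically unblock and wait in
// sigsuspend(). Without the atomicity, a signal arriving between the test and
// the wait would be lost. "gotSignal" stands for a flag a handler would set.
//
//	sigset_t blocked = SIGNAL_TO_MASK(SIGUSR1);
//	sigset_t oldMask;
//	sigprocmask(SIG_BLOCK, &blocked, &oldMask);
//
//	while (!gotSignal)
//		sigsuspend(&oldMask);
//			// SIGUSR1 can only be delivered inside this call
//
//	sigprocmask(SIG_SETMASK, &oldMask, NULL);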


int
sigpending(sigset_t *set)
{
	struct thread *thread = thread_get_current_thread();

	if (set == NULL)
		return B_BAD_VALUE;

	*set = atomic_get(&thread->sig_pending);
	return B_OK;
}


//	#pragma mark -


bigtime_t
_user_set_alarm(bigtime_t time, uint32 mode)
{
	syscall_64_bit_return_value();

	return set_alarm(time, mode);
}


status_t
_user_send_signal(pid_t team, uint signal)
{
	return send_signal_etc(team, signal, B_CHECK_PERMISSION);
}


status_t
_user_sigprocmask(int how, const sigset_t *userSet, sigset_t *userOldSet)
{
	sigset_t set, oldSet;
	status_t status;

	if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
				sizeof(sigset_t)) < B_OK))
		return B_BAD_ADDRESS;

	status = sigprocmask(how, userSet ? &set : NULL,
		userOldSet ? &oldSet : NULL);

	// copy the old set back if asked for
	if (status >= B_OK && userOldSet != NULL
		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_sigaction(int signal, const struct sigaction *userAction,
	struct sigaction *userOldAction)
{
	struct sigaction act, oact;
	status_t status;

	if ((userAction != NULL && user_memcpy(&act, userAction,
				sizeof(struct sigaction)) < B_OK)
		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction,
				sizeof(struct sigaction)) < B_OK))
		return B_BAD_ADDRESS;

	status = sigaction(signal, userAction ? &act : NULL,
		userOldAction ? &oact : NULL);

	// only copy the old action if a pointer has been given
	if (status >= B_OK && userOldAction != NULL
		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_sigwait(const sigset_t *userSet, int *_userSignal)
{
	if (userSet == NULL || _userSignal == NULL)
		return B_BAD_VALUE;

	sigset_t set;
	if (user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	int signal;
	status_t status = sigwait(&set, &signal);
	if (status < B_OK)
		return syscall_restart_handle_post(status);

	return user_memcpy(_userSignal, &signal, sizeof(int));
}


status_t
_user_sigsuspend(const sigset_t *userMask)
{
	sigset_t mask;

	if (userMask == NULL)
		return B_BAD_VALUE;
	if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return sigsuspend(&mask);
}


status_t
_user_sigpending(sigset_t *userSet)
{
	sigset_t set;
	status_t status;

	if (userSet == NULL)
		return B_BAD_VALUE;
	if (!IS_USER_ADDRESS(userSet))
		return B_BAD_ADDRESS;

	status = sigpending(&set);
	if (status == B_OK
		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_set_signal_stack(const stack_t *newUserStack, stack_t *oldUserStack)
{
	struct thread *thread = thread_get_current_thread();
	stack_t newStack, oldStack;
	bool onStack = false;

	if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
				sizeof(stack_t)) < B_OK)
		|| (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
				sizeof(stack_t)) < B_OK))
		return B_BAD_ADDRESS;

	if (thread->signal_stack_enabled) {
		// determine whether or not the user thread is currently
		// on the active signal stack
		onStack = arch_on_signal_stack(thread);
	}

	if (oldUserStack != NULL) {
		oldStack.ss_sp = (void *)thread->signal_stack_base;
		oldStack.ss_size = thread->signal_stack_size;
		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
			| (onStack ? SS_ONSTACK : 0);
	}

	if (newUserStack != NULL) {
		// no flags other than SS_DISABLE are allowed
		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
			return B_BAD_VALUE;

		if ((newStack.ss_flags & SS_DISABLE) == 0) {
			// check if the size is valid
			if (newStack.ss_size < MINSIGSTKSZ)
				return B_NO_MEMORY;
			if (onStack)
				return B_NOT_ALLOWED;
			if (!IS_USER_ADDRESS(newStack.ss_sp))
				return B_BAD_VALUE;

			thread->signal_stack_base = (addr_t)newStack.ss_sp;
			thread->signal_stack_size = newStack.ss_size;
			thread->signal_stack_enabled = true;
		} else
			thread->signal_stack_enabled = false;
	}

	// only copy the old stack info if a pointer has been given
	if (oldUserStack != NULL
		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
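
// From userland this syscall backs sigaltstack(). A minimal sketch of
// enabling an alternate signal stack, assuming a buffer of at least
// MINSIGSTKSZ bytes:
//
//	static char alternateStack[SIGSTKSZ];
//
//	stack_t stack;
//	stack.ss_sp = alternateStack;
//	stack.ss_size = sizeof(alternateStack);
//	stack.ss_flags = 0;
//
//	sigaltstack(&stack, NULL);
//		// handlers installed with SA_ONSTACK now run on alternateStack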