xref: /haiku/src/system/kernel/signal.cpp (revision 8df6a8dbf579280f55b61d725e470dee5d504e83)
1 /*
2  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
4  *
5  * Distributed under the terms of the MIT License.
6  */
7 
8 /*! POSIX signals handling routines */
9 
10 #include <ksignal.h>
11 
12 #include <stddef.h>
13 #include <string.h>
14 
15 #include <OS.h>
16 #include <KernelExport.h>
17 
18 #include <debug.h>
19 #include <kernel.h>
20 #include <kscheduler.h>
21 #include <sem.h>
22 #include <syscall_restart.h>
23 #include <team.h>
24 #include <thread.h>
25 #include <tracing.h>
26 #include <user_debugger.h>
27 #include <user_thread.h>
28 #include <util/AutoLock.h>
29 
30 
31 //#define TRACE_SIGNAL
32 #ifdef TRACE_SIGNAL
33 #	define TRACE(x) dprintf x
34 #else
35 #	define TRACE(x) ;
36 #endif
37 
38 
39 #define BLOCKABLE_SIGNALS		(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)))
40 #define STOP_SIGNALS \
41 	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
42 	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
43 #define DEFAULT_IGNORE_SIGNALS \
44 	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
45 	| SIGNAL_TO_MASK(SIGCONT))
46 #define NON_DEFERRABLE_SIGNALS	\
47 	(KILL_SIGNALS				\
48 	| SIGNAL_TO_MASK(SIGILL)	\
49 	| SIGNAL_TO_MASK(SIGFPE)	\
50 	| SIGNAL_TO_MASK(SIGSEGV))
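
// Note: SIGNAL_TO_MASK(signal) presumably expands to a bit mask of the form
// (1 << (signal - 1)) (see the kernel signal headers), so the macros above
// are plain bit masks - e.g. SIGNAL_TO_MASK(SIGKILL) with SIGKILL == 9
// yields bit 8 (0x100).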
51 
52 
53 const char * const sigstr[NSIG] = {
54 	"NONE", "HUP", "INT", "QUIT", "ILL", "CHLD", "ABRT", "PIPE",
55 	"FPE", "KILL", "STOP", "SEGV", "CONT", "TSTP", "ALRM", "TERM",
56 	"TTIN", "TTOU", "USR1", "USR2", "WINCH", "KILLTHR", "TRAP",
57 	"POLL", "PROF", "SYS", "URG", "VTALRM", "XCPU", "XFSZ"
58 };
59 
60 
61 static status_t deliver_signal(struct thread *thread, uint signal,
62 	uint32 flags);
63 
64 
65 
66 // #pragma mark - signal tracing
67 
68 
69 #if SIGNAL_TRACING
70 
71 namespace SignalTracing {
72 
73 
74 class HandleSignals : public AbstractTraceEntry {
75 	public:
76 		HandleSignals(uint32 signals)
77 			:
78 			fSignals(signals)
79 		{
80 			Initialized();
81 		}
82 
83 		virtual void AddDump(TraceOutput& out)
84 		{
85 			out.Print("signal handle:  0x%lx", fSignals);
86 		}
87 
88 	private:
89 		uint32		fSignals;
90 };
91 
92 
93 class ExecuteSignalHandler : public AbstractTraceEntry {
94 	public:
95 		ExecuteSignalHandler(int signal, struct sigaction* handler)
96 			:
97 			fSignal(signal),
98 			fHandler((void*)handler->sa_handler)
99 		{
100 			Initialized();
101 		}
102 
103 		virtual void AddDump(TraceOutput& out)
104 		{
105 			out.Print("signal exec handler: signal: %d, handler: %p",
106 				fSignal, fHandler);
107 		}
108 
109 	private:
110 		int		fSignal;
111 		void*	fHandler;
112 };
113 
114 
115 class SendSignal : public AbstractTraceEntry {
116 	public:
117 		SendSignal(pid_t target, uint32 signal, uint32 flags)
118 			:
119 			fTarget(target),
120 			fSignal(signal),
121 			fFlags(flags)
122 		{
123 			Initialized();
124 		}
125 
126 		virtual void AddDump(TraceOutput& out)
127 		{
128 			out.Print("signal send: target: %ld, signal: %lu (%s), "
129 				"flags: 0x%lx", fTarget, fSignal,
130 				(fSignal < NSIG ? sigstr[fSignal] : "invalid"), fFlags);
131 		}
132 
133 	private:
134 		pid_t	fTarget;
135 		uint32	fSignal;
136 		uint32	fFlags;
137 };
138 
139 
140 class SigAction : public AbstractTraceEntry {
141 	public:
142 		SigAction(struct thread* thread, uint32 signal,
143 			const struct sigaction* act)
144 			:
145 			fThread(thread->id),
146 			fSignal(signal),
147 			fAction(*act)
148 		{
149 			Initialized();
150 		}
151 
152 		virtual void AddDump(TraceOutput& out)
153 		{
154 			out.Print("signal action: thread: %ld, signal: %lu (%s), "
155 				"action: {handler: %p, flags: 0x%x, mask: 0x%lx}",
156 				fThread, fSignal,
157 				(fSignal < NSIG ? sigstr[fSignal] : "invalid"),
158 				fAction.sa_handler, fAction.sa_flags, fAction.sa_mask);
159 		}
160 
161 	private:
162 		thread_id			fThread;
163 		uint32				fSignal;
164 		struct sigaction	fAction;
165 };
166 
167 
168 class SigProcMask : public AbstractTraceEntry {
169 	public:
170 		SigProcMask(int how, sigset_t mask)
171 			:
172 			fHow(how),
173 			fMask(mask),
174 			fOldMask(thread_get_current_thread()->sig_block_mask)
175 		{
176 			Initialized();
177 		}
178 
179 		virtual void AddDump(TraceOutput& out)
180 		{
181 			const char* how = "invalid";
182 			switch (fHow) {
183 				case SIG_BLOCK:
184 					how = "block";
185 					break;
186 				case SIG_UNBLOCK:
187 					how = "unblock";
188 					break;
189 				case SIG_SETMASK:
190 					how = "set";
191 					break;
192 			}
193 
194 			out.Print("signal proc mask: %s 0x%lx, old mask: 0x%lx", how, fMask,
195 				fOldMask);
196 		}
197 
198 	private:
199 		int			fHow;
200 		sigset_t	fMask;
201 		sigset_t	fOldMask;
202 };
203 
204 }	// namespace SignalTracing
205 
206 #	define T(x)	new(std::nothrow) SignalTracing::x
207 
208 #else
209 #	define T(x)
210 #endif	// SIGNAL_TRACING
211 
212 
213 // #pragma mark -
214 
215 
216 /*!	Updates the thread::flags field according to what signals are pending.
217 	Interrupts must be disabled and the thread lock must be held.
218 */
219 static void
220 update_thread_signals_flag(struct thread* thread)
221 {
222 	if (atomic_get(&thread->sig_pending) & ~atomic_get(&thread->sig_block_mask))
223 		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
224 	else
225 		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
226 }
227 
228 
229 void
230 update_current_thread_signals_flag()
231 {
232 	InterruptsSpinLocker locker(gThreadSpinlock);
233 
234 	update_thread_signals_flag(thread_get_current_thread());
235 }
236 
237 
238 static bool
239 notify_debugger(struct thread *thread, int signal, struct sigaction *handler,
240 	bool deadly)
241 {
242 	uint64 signalMask = SIGNAL_TO_MASK(signal);
243 
244 	// first check the ignore signal masks the debugger specified for the thread
245 
246 	if (atomic_get(&thread->debug_info.ignore_signals_once) & signalMask) {
247 		atomic_and(&thread->debug_info.ignore_signals_once, ~signalMask);
248 		return true;
249 	}
250 
251 	if (atomic_get(&thread->debug_info.ignore_signals) & signalMask)
252 		return true;
253 
254 	// deliver the event
255 	return user_debug_handle_signal(signal, handler, deadly);
256 }
257 
258 
259 /*! Actually handles the signal - ie. the thread will exit, a custom signal
260 	handler is prepared, or whatever the signal demands.
261 */
262 bool
263 handle_signals(struct thread *thread)
264 {
265 	uint32 signalMask = atomic_get(&thread->sig_pending)
266 		& ~atomic_get(&thread->sig_block_mask);
267 
268 	// If SIGKILL[THR] are pending, we ignore other signals.
269 	// Otherwise check whether the thread shall stop for debugging.
270 	if (signalMask & KILL_SIGNALS) {
271 		signalMask &= KILL_SIGNALS;
272 	} else if (thread->debug_info.flags & B_THREAD_DEBUG_STOP) {
273 		user_debug_stop_thread();
274 	}
275 
276 	if (signalMask == 0)
277 		return false;
278 
279 	if (thread->user_thread->defer_signals > 0
280 		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0) {
281 		thread->user_thread->pending_signals = signalMask;
282 		return 0;
283 		return false;
284 
285 	thread->user_thread->pending_signals = 0;
286 
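	// Determine whether interrupted syscalls may be restarted. The
	// THREAD_FLAGS_DONT_RESTART_SYSCALL flag is a one-shot flag: it is
	// cleared here, and its previous value decides whether restarting is
	// allowed at all.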
287 	bool restart = (atomic_and(&thread->flags,
288 			~THREAD_FLAGS_DONT_RESTART_SYSCALL)
289 		& THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;
290 
291 	T(HandleSignals(signalMask));
292 
293 	for (int32 i = 0; i < NSIG; i++) {
294 		bool debugSignal;
295 		int32 signal = i + 1;
296 
297 		if ((signalMask & SIGNAL_TO_MASK(signal)) == 0)
298 			continue;
299 
300 		// clear the signal that we will handle
301 		atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
302 
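		// The signal is routed to the debugger only if both
		// B_TEAM_DEBUG_SIGNALS and B_TEAM_DEBUG_DEBUGGER_INSTALLED are set
		// in the team's debug flags.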
303 		debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
304 				& (B_TEAM_DEBUG_SIGNALS | B_TEAM_DEBUG_DEBUGGER_INSTALLED));
305 
306 		// TODO: since sigaction_etc() could clobber the fields at any time,
307 		//		we should actually copy the relevant fields atomically before
308 		//		accessing them (only the debugger is calling sigaction_etc()
309 		//		right now).
310 		//		Update: sigaction_etc() is only used by the userland debugger
311 		//		support. We can just as well restrict getting/setting signal
312 		//		handlers to work only when the respective thread is stopped.
313 		//		Then sigaction() could be used instead and we could get rid of
314 		//		sigaction_etc().
315 		struct sigaction* handler = &thread->sig_action[i];
316 
317 		TRACE(("Thread 0x%lx received signal %s\n", thread->id, sigstr[signal]));
318 
319 		if (handler->sa_handler == SIG_IGN) {
320 			// signal is to be ignored
321 			// ToDo: apply zombie cleaning on SIGCHLD
322 
323 			// notify the debugger
324 			if (debugSignal)
325 				notify_debugger(thread, signal, handler, false);
326 			continue;
327 		} else if (handler->sa_handler == SIG_DFL) {
328 			// default signal behaviour
329 			switch (signal) {
330 				case SIGCHLD:
331 				case SIGWINCH:
332 				case SIGURG:
333 					// notify the debugger
334 					if (debugSignal)
335 						notify_debugger(thread, signal, handler, false);
336 					continue;
337 
338 				case SIGCONT:
339 					// notify the debugger
340 					if (debugSignal
341 						&& !notify_debugger(thread, signal, handler, false))
342 						continue;
343 
344 					// notify threads waiting for team state changes
345 					if (thread == thread->team->main_thread) {
346 						InterruptsSpinLocker locker(gTeamSpinlock);
347 						team_set_job_control_state(thread->team,
348 							JOB_CONTROL_STATE_CONTINUED, signal, false);
349 
350 						// The standard states that the system *may* send a
351 						// SIGCHLD when a child is continued. I haven't found
352 						// a good reason why we would want to, though.
353 					}
354 					continue;
355 
356 				case SIGSTOP:
357 				case SIGTSTP:
358 				case SIGTTIN:
359 				case SIGTTOU:
360 					// notify the debugger
361 					if (debugSignal
362 						&& !notify_debugger(thread, signal, handler, false))
363 						continue;
364 
365 					thread->next_state = B_THREAD_SUSPENDED;
366 
367 					// notify threads waiting for team state changes
368 					if (thread == thread->team->main_thread) {
369 						InterruptsSpinLocker locker(gTeamSpinlock);
370 						team_set_job_control_state(thread->team,
371 							JOB_CONTROL_STATE_STOPPED, signal, false);
372 
373 						// send a SIGCHLD to the parent (unless it has
374 						// SA_NOCLDSTOP set)
375 						SpinLocker _(gThreadSpinlock);
376 						struct thread* parentThread
377 							= thread->team->parent->main_thread;
378 						struct sigaction& parentHandler
379 							= parentThread->sig_action[SIGCHLD - 1];
380 						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0)
381 							deliver_signal(parentThread, SIGCHLD, 0);
382 					}
383 
384 					return true;
385 
386 				case SIGQUIT:
387 				case SIGILL:
388 				case SIGTRAP:
389 				case SIGABRT:
390 				case SIGFPE:
391 				case SIGSEGV:
392 				case SIGPOLL:
393 				case SIGPROF:
394 				case SIGSYS:
395 				case SIGVTALRM:
396 				case SIGXCPU:
397 				case SIGXFSZ:
398 					TRACE(("Shutting down thread 0x%lx due to signal #%ld\n",
399 						thread->id, signal));
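					// fall through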
400 				case SIGKILL:
401 				case SIGKILLTHR:
402 				default:
403 					// if the thread exited normally, the exit reason is already set
404 					if (thread->exit.reason != THREAD_RETURN_EXIT) {
405 						thread->exit.reason = THREAD_RETURN_INTERRUPTED;
406 						thread->exit.signal = (uint16)signal;
407 					}
408 
409 					// notify the debugger
410 					if (debugSignal && signal != SIGKILL && signal != SIGKILLTHR
411 						&& !notify_debugger(thread, signal, handler, true))
412 						continue;
413 
414 					thread_exit();
415 						// won't return
416 			}
417 		}
418 
419 		// User defined signal handler
420 
421 		// notify the debugger
422 		if (debugSignal && !notify_debugger(thread, signal, handler, false))
423 			continue;
424 
425 		if (!restart || (handler->sa_flags & SA_RESTART) == 0)
426 			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
427 
428 		T(ExecuteSignalHandler(signal, handler));
429 
430 		TRACE(("### Setting up custom signal handler frame...\n"));
431 		arch_setup_signal_frame(thread, handler, signal,
432 			atomic_get(&thread->sig_block_mask));
433 
434 		if (handler->sa_flags & SA_ONESHOT)
435 			handler->sa_handler = SIG_DFL;
436 		if ((handler->sa_flags & SA_NOMASK) == 0) {
437 			// Update the block mask while the signal handler is running - it
438 			// will be automatically restored when the signal frame is left.
439 			atomic_or(&thread->sig_block_mask,
440 				(handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGNALS);
441 		}
442 
443 		update_current_thread_signals_flag();
444 
445 		return false;
446 	}
447 
448 	// clear syscall restart thread flag, if we're not supposed to restart the
449 	// syscall
450 	if (!restart)
451 		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
452 
453 	update_current_thread_signals_flag();
454 
455 	return false;
456 }
457 
458 
459 bool
460 is_kill_signal_pending(void)
461 {
462 	return (atomic_get(&thread_get_current_thread()->sig_pending)
463 		& KILL_SIGNALS) != 0;
464 }
465 
466 
467 bool
468 is_signal_blocked(int signal)
469 {
470 	return (atomic_get(&thread_get_current_thread()->sig_block_mask)
471 		& SIGNAL_TO_MASK(signal)) != 0;
472 }
473 
474 
475 /*!	Delivers the \a signal to the \a thread, but doesn't handle the signal -
476 	it just makes sure the thread gets the signal, ie. unblocks it if needed.
477 	This function must be called with interrupts disabled and the
478 	thread lock held.
479 */
480 static status_t
481 deliver_signal(struct thread *thread, uint signal, uint32 flags)
482 {
483 	if (flags & B_CHECK_PERMISSION) {
484 		// ToDo: introduce euid & uid fields to the team and check permission
485 	}
486 
487 	if (signal == 0)
488 		return B_OK;
489 
490 	if (thread->team == team_get_kernel_team()) {
491 		// Signals to kernel threads will only wake them up
492 		if (thread->state == B_THREAD_SUSPENDED)
493 			scheduler_enqueue_in_run_queue(thread);
494 		return B_OK;
495 	}
496 
497 	atomic_or(&thread->sig_pending, SIGNAL_TO_MASK(signal));
498 
499 	switch (signal) {
500 		case SIGKILL:
501 		{
502 			// Forward a SIGKILLTHR to the team's main thread as well
503 			struct thread *mainThread = thread->team->main_thread;
504 			atomic_or(&mainThread->sig_pending, SIGNAL_TO_MASK(SIGKILLTHR));
505 
506 			// Wake up the main thread
507 			if (mainThread->state == B_THREAD_SUSPENDED)
508 				scheduler_enqueue_in_run_queue(mainThread);
509 			else
510 				thread_interrupt(mainThread, true);
511 
512 			// Supposed to fall through
513 		}
514 		case SIGKILLTHR:
515 			// Wake up suspended threads and interrupt waiting ones
516 			if (thread->state == B_THREAD_SUSPENDED)
517 				scheduler_enqueue_in_run_queue(thread);
518 			else
519 				thread_interrupt(thread, true);
520 			break;
521 
522 		case SIGCONT:
523 			// Wake up thread if it was suspended
524 			if (thread->state == B_THREAD_SUSPENDED)
525 				scheduler_enqueue_in_run_queue(thread);
526 
527 			if ((flags & SIGNAL_FLAG_DONT_RESTART_SYSCALL) != 0)
528 				atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
529 
530 			atomic_and(&thread->sig_pending, ~STOP_SIGNALS);
531 				// remove any pending stop signals
532 			break;
533 
534 		default:
535 			if (thread->sig_pending
536 				& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD))) {
537 				// Interrupt thread if it was waiting
538 				thread_interrupt(thread, false);
539 			}
540 			break;
541 	}
542 
543 	update_thread_signals_flag(thread);
544 
545 	return B_OK;
546 }
547 
548 
549 int
550 send_signal_etc(pid_t id, uint signal, uint32 flags)
551 {
552 	status_t status = B_BAD_THREAD_ID;
553 	struct thread *thread;
554 	cpu_status state = 0;
555 
556 	if (signal < 0 || signal > MAX_SIGNO)
557 		return B_BAD_VALUE;
558 
559 	T(SendSignal(id, signal, flags));
560 
561 	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
562 		state = disable_interrupts();
563 
564 	if (id > 0) {
565 		// send a signal to the specified thread
566 
567 		GRAB_THREAD_LOCK();
568 
569 		thread = thread_get_thread_struct_locked(id);
570 		if (thread != NULL)
571 			status = deliver_signal(thread, signal, flags);
572 	} else {
573 		// send a signal to the specified process group
574 		// (the absolute value of the id)
575 
576 		struct process_group *group;
577 
578 		// TODO: handle -1 correctly
579 		if (id == 0 || id == -1) {
580 			// send a signal to the current team
581 			id = thread_get_current_thread()->team->id;
582 		} else
583 			id = -id;
584 
585 		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
586 			GRAB_TEAM_LOCK();
587 
588 		group = team_get_process_group_locked(NULL, id);
589 		if (group != NULL) {
590 			struct team *team, *next;
591 
592 			// Send a signal to all teams in this process group
593 
594 			for (team = group->teams; team != NULL; team = next) {
595 				next = team->group_next;
596 				id = team->id;
597 
598 				GRAB_THREAD_LOCK();
599 
600 				thread = thread_get_thread_struct_locked(id);
601 				if (thread != NULL) {
602 					// we don't stop because of an error sending the signal; we
603 					// rather want to send as many signals as possible
604 					status = deliver_signal(thread, signal, flags);
605 				}
606 
607 				RELEASE_THREAD_LOCK();
608 			}
609 		}
610 
611 		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
612 			RELEASE_TEAM_LOCK();
613 
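		// Re-acquire the thread lock, so that both branches leave it held
		// for the common cleanup path below.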
614 		GRAB_THREAD_LOCK();
615 	}
616 
617 	// ToDo: maybe the scheduler should only be invoked if there is reason to do it?
618 	//	(ie. deliver_signal() moved some threads into the run queue?)
619 	if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0)
620 		scheduler_reschedule();
621 
622 	RELEASE_THREAD_LOCK();
623 
624 	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
625 		restore_interrupts(state);
626 
627 	return status;
628 }
629 
630 
631 int
632 send_signal(pid_t threadID, uint signal)
633 {
634 	// The BeBook states that this function isn't exported
635 	// for drivers, but, of course, it is.
636 	return send_signal_etc(threadID, signal, 0);
637 }
638 
639 
640 int
641 has_signals_pending(void *_thread)
642 {
643 	struct thread *thread = (struct thread *)_thread;
644 	if (thread == NULL)
645 		thread = thread_get_current_thread();
646 
647 	return atomic_get(&thread->sig_pending)
648 		& ~atomic_get(&thread->sig_block_mask);
649 }
650 
651 
652 int
653 sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
654 {
655 	struct thread *thread = thread_get_current_thread();
656 	sigset_t oldMask = atomic_get(&thread->sig_block_mask);
657 
658 	if (set != NULL) {
659 		T(SigProcMask(how, *set));
660 
661 		switch (how) {
662 			case SIG_BLOCK:
663 				atomic_or(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
664 				break;
665 			case SIG_UNBLOCK:
666 				atomic_and(&thread->sig_block_mask, ~*set);
667 				break;
668 			case SIG_SETMASK:
669 				atomic_set(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
670 				break;
671 			default:
672 				return B_BAD_VALUE;
673 		}
674 
675 		update_current_thread_signals_flag();
676 	}
677 
678 	if (oldSet != NULL)
679 		*oldSet = oldMask;
680 
681 	return B_OK;
682 }
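
/*	A minimal usage sketch (in-kernel, assumptions noted): temporarily
	blocking SIGWINCH for the current thread and restoring the old mask
	afterwards might look like this:

		sigset_t blockSet = SIGNAL_TO_MASK(SIGWINCH);
		sigset_t oldSet;
		sigprocmask(SIG_BLOCK, &blockSet, &oldSet);
		// ... section that shall not be disturbed by SIGWINCH ...
		sigprocmask(SIG_SETMASK, &oldSet, NULL);
*/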
683 
684 
685 /*!	\brief sigaction() for the specified thread.
686 	A \a threadID < 0 specifies the current thread.
687 */
688 int
689 sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
690 	struct sigaction *oldAction)
691 {
692 	struct thread *thread;
693 	cpu_status state;
694 	status_t error = B_OK;
695 
696 	if (signal < 1 || signal > MAX_SIGNO
697 		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
698 		return B_BAD_VALUE;
699 
700 	state = disable_interrupts();
701 	GRAB_THREAD_LOCK();
702 
703 	thread = (threadID < 0
704 		? thread_get_current_thread()
705 		: thread_get_thread_struct_locked(threadID));
706 
707 	if (thread) {
708 		if (oldAction) {
709 			// save previous sigaction structure
710 			memcpy(oldAction, &thread->sig_action[signal - 1],
711 				sizeof(struct sigaction));
712 		}
713 
714 		if (act) {
715 			T(SigAction(thread, signal, act));
716 
717 			// set new sigaction structure
718 			memcpy(&thread->sig_action[signal - 1], act,
719 				sizeof(struct sigaction));
720 			thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
721 		}
722 
723 		if (act && act->sa_handler == SIG_IGN) {
724 			// remove pending signal if it should now be ignored
725 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
726 		} else if (act && act->sa_handler == SIG_DFL
727 			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0) {
728 			// remove pending signal for those signals whose default
729 			// action is to ignore them
730 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
731 		}
732 	} else
733 		error = B_BAD_THREAD_ID;
734 
735 	RELEASE_THREAD_LOCK();
736 	restore_interrupts(state);
737 
738 	return error;
739 }
740 
741 
742 int
743 sigaction(int signal, const struct sigaction *act, struct sigaction *oldAction)
744 {
745 	return sigaction_etc(-1, signal, act, oldAction);
746 }
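
/*	A minimal usage sketch (the handler below is hypothetical and not part of
	this file): installing a one-shot handler for SIGWINCH could look like
	this:

		static void
		winch_handler(int signal)
		{
			// react to the signal
		}

		struct sigaction action;
		memset(&action, 0, sizeof(action));
		action.sa_handler = winch_handler;
		action.sa_flags = SA_ONESHOT;
		sigaction(SIGWINCH, &action, NULL);
*/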
747 
748 
749 /*!	Sends a SIGALRM to the thread that set the alarm, and invokes the scheduler. */
750 static int32
751 alarm_event(timer *t)
752 {
753 	// The hook can be called from any context, but we have to
754 	// deliver the signal to the thread that originally called
755 	// set_alarm().
756 	// Since thread->alarm is this timer structure, we can just
757 	// cast it back - ugly but it works for now
758 	struct thread *thread = (struct thread *)((uint8 *)t
759 		- offsetof(struct thread, alarm));
760 		// ToDo: investigate adding one user parameter to the timer structure to fix this hack
761 
762 	TRACE(("alarm_event: thread = %p\n", thread));
763 	send_signal_etc(thread->id, SIGALRM, B_DO_NOT_RESCHEDULE);
764 
765 	return B_INVOKE_SCHEDULER;
766 }
767 
768 
769 /*!	Sets the alarm timer for the current thread. The timer fires at the
770 	specified time in the future, periodically or just once, as determined
771 	by \a mode.
772 	\return the time left until a previously set alarm would have fired.
773 */
774 bigtime_t
775 set_alarm(bigtime_t time, uint32 mode)
776 {
777 	struct thread *thread = thread_get_current_thread();
778 	bigtime_t remainingTime = 0;
779 
780 	ASSERT(B_ONE_SHOT_RELATIVE_ALARM == B_ONE_SHOT_RELATIVE_TIMER);
781 		// just to be sure no one changes the headers some day
782 
783 	TRACE(("set_alarm: thread = %p\n", thread));
784 
785 	if (thread->alarm.period)
786 		remainingTime = (bigtime_t)thread->alarm.schedule_time - system_time();
787 
788 	cancel_timer(&thread->alarm);
789 
790 	if (time != B_INFINITE_TIMEOUT)
791 		add_timer(&thread->alarm, &alarm_event, time, mode);
792 	else {
793 		// this marks the alarm as canceled (for returning the remaining time)
794 		thread->alarm.period = 0;
795 	}
796 
797 	return remainingTime;
798 }
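
/*	A minimal usage sketch (the values are made up): arranging for a SIGALRM
	to be sent to the current thread once, one second from now, and canceling
	it again later:

		set_alarm(1000000LL, B_ONE_SHOT_RELATIVE_ALARM);
		// ...
		set_alarm(B_INFINITE_TIMEOUT, B_ONE_SHOT_RELATIVE_ALARM);
			// cancels any pending alarm of this thread
*/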
799 
800 
801 /*!	Wait for the specified signals, and return the signal retrieved in
802 	\a _signal.
803 */
804 int
805 sigwait(const sigset_t *set, int *_signal)
806 {
807 	struct thread *thread = thread_get_current_thread();
808 
809 	while (!has_signals_pending(thread)) {
810 		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
811 			THREAD_BLOCK_TYPE_SIGNAL, NULL);
812 		thread_block();
813 	}
814 
815 	int signalsPending = atomic_get(&thread->sig_pending) & *set;
816 
817 	update_current_thread_signals_flag();
818 
819 	if (signalsPending) {
820 		// select the lowest pending signal to return in _signal
821 		for (int signal = 1; signal < NSIG; signal++) {
822 			if ((SIGNAL_TO_MASK(signal) & signalsPending) != 0) {
823 				*_signal = signal;
824 				return B_OK;
825 			}
826 		}
827 	}
828 
829 	return B_INTERRUPTED;
830 }
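
/*	A minimal usage sketch (assumptions noted): waiting for SIGUSR1 or SIGUSR2
	to arrive for the current thread. Note that, as implemented above, the
	signals in question must not be blocked, or the thread won't be woken up:

		sigset_t waitSet = SIGNAL_TO_MASK(SIGUSR1) | SIGNAL_TO_MASK(SIGUSR2);
		int signal;
		if (sigwait(&waitSet, &signal) == B_OK)
			dprintf("got signal %d\n", signal);
*/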
831 
832 
833 /*!	Replace the current signal block mask and wait for any event to happen.
834 	Before returning, the original signal block mask is restored.
835 */
836 int
837 sigsuspend(const sigset_t *mask)
838 {
839 	struct thread *thread = thread_get_current_thread();
840 	sigset_t oldMask = atomic_get(&thread->sig_block_mask);
841 
842 	// Set the new block mask and block until interrupted.
843 
844 	atomic_set(&thread->sig_block_mask, *mask & BLOCKABLE_SIGNALS);
845 
846 	while (!has_signals_pending(thread)) {
847 		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
848 			THREAD_BLOCK_TYPE_SIGNAL, NULL);
849 		thread_block();
850 	}
851 
852 	// restore the original block mask
853 	atomic_set(&thread->sig_block_mask, oldMask);
854 
855 	update_current_thread_signals_flag();
856 
857 	// we're not supposed to actually succeed
858 	return B_INTERRUPTED;
859 }
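
/*	A minimal usage sketch: blocking everything but SIGUSR1 and waiting for a
	signal to arrive; the previous block mask is restored by sigsuspend()
	itself before it returns:

		sigset_t waitMask = ~SIGNAL_TO_MASK(SIGUSR1);
		sigsuspend(&waitMask);
			// always returns B_INTERRUPTED once a signal is pending
*/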
860 
861 
862 int
863 sigpending(sigset_t *set)
864 {
865 	struct thread *thread = thread_get_current_thread();
866 
867 	if (set == NULL)
868 		return B_BAD_VALUE;
869 
870 	*set = atomic_get(&thread->sig_pending);
871 	return B_OK;
872 }
873 
874 
875 //	#pragma mark -
876 
877 
878 bigtime_t
879 _user_set_alarm(bigtime_t time, uint32 mode)
880 {
881 	syscall_64_bit_return_value();
882 
883 	return set_alarm(time, mode);
884 }
885 
886 
887 status_t
888 _user_send_signal(pid_t team, uint signal)
889 {
890 	return send_signal_etc(team, signal, B_CHECK_PERMISSION);
891 }
892 
893 
894 status_t
895 _user_sigprocmask(int how, const sigset_t *userSet, sigset_t *userOldSet)
896 {
897 	sigset_t set, oldSet;
898 	status_t status;
899 
900 	if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
901 		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
902 				sizeof(sigset_t)) < B_OK))
903 		return B_BAD_ADDRESS;
904 
905 	status = sigprocmask(how, userSet ? &set : NULL,
906 		userOldSet ? &oldSet : NULL);
907 
908 	// copy old set if asked for
909 	if (status >= B_OK && userOldSet != NULL
910 		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
911 		return B_BAD_ADDRESS;
912 
913 	return status;
914 }
915 
916 
917 status_t
918 _user_sigaction(int signal, const struct sigaction *userAction,
919 	struct sigaction *userOldAction)
920 {
921 	struct sigaction act, oact;
922 	status_t status;
923 
924 	if ((userAction != NULL && user_memcpy(&act, userAction,
925 				sizeof(struct sigaction)) < B_OK)
926 		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction,
927 				sizeof(struct sigaction)) < B_OK))
928 		return B_BAD_ADDRESS;
929 
930 	status = sigaction(signal, userAction ? &act : NULL,
931 		userOldAction ? &oact : NULL);
932 
933 	// only copy the old action if a pointer has been given
934 	if (status >= B_OK && userOldAction != NULL
935 		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
936 		return B_BAD_ADDRESS;
937 
938 	return status;
939 }
940 
941 
942 status_t
943 _user_sigwait(const sigset_t *userSet, int *_userSignal)
944 {
945 	if (userSet == NULL || _userSignal == NULL)
946 		return B_BAD_VALUE;
947 
948 	sigset_t set;
949 	if (user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
950 		return B_BAD_ADDRESS;
951 
952 	int signal;
953 	status_t status = sigwait(&set, &signal);
954 	if (status < B_OK)
955 		return syscall_restart_handle_post(status);
956 
957 	return user_memcpy(_userSignal, &signal, sizeof(int));
958 }
959 
960 
961 status_t
962 _user_sigsuspend(const sigset_t *userMask)
963 {
964 	sigset_t mask;
965 
966 	if (userMask == NULL)
967 		return B_BAD_VALUE;
968 	if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
969 		return B_BAD_ADDRESS;
970 
971 	return sigsuspend(&mask);
972 }
973 
974 
975 status_t
976 _user_sigpending(sigset_t *userSet)
977 {
978 	sigset_t set;
979 	int status;
980 
981 	if (userSet == NULL)
982 		return B_BAD_VALUE;
983 	if (!IS_USER_ADDRESS(userSet))
984 		return B_BAD_ADDRESS;
985 
986 	status = sigpending(&set);
987 	if (status == B_OK
988 		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
989 		return B_BAD_ADDRESS;
990 
991 	return status;
992 }
993 
994 
995 status_t
996 _user_set_signal_stack(const stack_t *newUserStack, stack_t *oldUserStack)
997 {
998 	struct thread *thread = thread_get_current_thread();
999 	struct stack_t newStack, oldStack;
1000 	bool onStack = false;
1001 
1002 	if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
1003 				sizeof(stack_t)) < B_OK)
1004 		|| (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
1005 				sizeof(stack_t)) < B_OK))
1006 		return B_BAD_ADDRESS;
1007 
1008 	if (thread->signal_stack_enabled) {
1009 		// determine whether or not the user thread is currently
1010 		// on the active signal stack
1011 		onStack = arch_on_signal_stack(thread);
1012 	}
1013 
1014 	if (oldUserStack != NULL) {
1015 		oldStack.ss_sp = (void *)thread->signal_stack_base;
1016 		oldStack.ss_size = thread->signal_stack_size;
1017 		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
1018 			| (onStack ? SS_ONSTACK : 0);
1019 	}
1020 
1021 	if (newUserStack != NULL) {
1022 		// no flags other than SS_DISABLE are allowed
1023 		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
1024 			return B_BAD_VALUE;
1025 
1026 		if ((newStack.ss_flags & SS_DISABLE) == 0) {
1027 			// check if the size is valid
1028 			if (newStack.ss_size < MINSIGSTKSZ)
1029 				return B_NO_MEMORY;
1030 			if (onStack)
1031 				return B_NOT_ALLOWED;
1032 			if (!IS_USER_ADDRESS(newStack.ss_sp))
1033 				return B_BAD_VALUE;
1034 
1035 			thread->signal_stack_base = (addr_t)newStack.ss_sp;
1036 			thread->signal_stack_size = newStack.ss_size;
1037 			thread->signal_stack_enabled = true;
1038 		} else
1039 			thread->signal_stack_enabled = false;
1040 	}
1041 
1042 	// only copy the old stack info if a pointer has been given
1043 	if (oldUserStack != NULL
1044 		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
1045 		return B_BAD_ADDRESS;
1046 
1047 	return B_OK;
1048 }
1049 
1050