xref: /haiku/src/system/kernel/signal.cpp (revision 46b4cc5651dde8b2eff205cf6d57d0449641f0c9)
/*
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
 *
 * Distributed under the terms of the MIT License.
 */

/*! POSIX signal handling routines */

#include <ksignal.h>

#include <stddef.h>
#include <string.h>

#include <OS.h>
#include <KernelExport.h>

#include <debug.h>
#include <kernel.h>
#include <kscheduler.h>
#include <sem.h>
#include <syscall_restart.h>
#include <team.h>
#include <thread.h>
#include <tracing.h>
#include <user_debugger.h>
#include <user_thread.h>
#include <util/AutoLock.h>


//#define TRACE_SIGNAL
#ifdef TRACE_SIGNAL
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#define BLOCKABLE_SIGNALS		(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)))
#define STOP_SIGNALS \
	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
#define DEFAULT_IGNORE_SIGNALS \
	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
	| SIGNAL_TO_MASK(SIGCONT))
#define NON_DEFERRABLE_SIGNALS	\
	(KILL_SIGNALS				\
	| SIGNAL_TO_MASK(SIGILL)	\
	| SIGNAL_TO_MASK(SIGFPE)	\
	| SIGNAL_TO_MASK(SIGSEGV))


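/*	A minimal sketch of how these masks compose, assuming the usual
	SIGNAL_TO_MASK(signal) definition from ksignal.h (a one-bit mask,
	1 << (signal - 1)):

		sigset_t mask = SIGNAL_TO_MASK(SIGINT) | SIGNAL_TO_MASK(SIGTERM);
		if ((mask & ~BLOCKABLE_SIGNALS) == 0) {
			// true: neither SIGINT nor SIGTERM is a kill or stop signal,
			// so both may legally be blocked via sigprocmask()
		}
*/

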
const char * const sigstr[NSIG] = {
	"NONE", "HUP", "INT", "QUIT", "ILL", "CHLD", "ABRT", "PIPE",
	"FPE", "KILL", "STOP", "SEGV", "CONT", "TSTP", "ALRM", "TERM",
	"TTIN", "TTOU", "USR1", "USR2", "WINCH", "KILLTHR", "TRAP",
	"POLL", "PROF", "SYS", "URG", "VTALRM", "XCPU", "XFSZ"
};


static status_t deliver_signal(struct thread *thread, uint signal,
	uint32 flags, bool &reschedule);


// #pragma mark - signal tracing


#if SIGNAL_TRACING

namespace SignalTracing {


class HandleSignals : public AbstractTraceEntry {
	public:
		HandleSignals(uint32 signals)
			:
			fSignals(signals)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal handle:  0x%lx", fSignals);
		}

	private:
		uint32		fSignals;
};


class ExecuteSignalHandler : public AbstractTraceEntry {
	public:
		ExecuteSignalHandler(int signal, struct sigaction* handler)
			:
			fSignal(signal),
			fHandler((void*)handler->sa_handler)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal exec handler: signal: %d, handler: %p",
				fSignal, fHandler);
		}

	private:
		int		fSignal;
		void*	fHandler;
};


class SendSignal : public AbstractTraceEntry {
	public:
		SendSignal(pid_t target, uint32 signal, uint32 flags)
			:
			fTarget(target),
			fSignal(signal),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal send: target: %ld, signal: %lu (%s), "
				"flags: 0x%lx", fTarget, fSignal,
				(fSignal < NSIG ? sigstr[fSignal] : "invalid"), fFlags);
		}

	private:
		pid_t	fTarget;
		uint32	fSignal;
		uint32	fFlags;
};


class SigAction : public AbstractTraceEntry {
	public:
		SigAction(struct thread* thread, uint32 signal,
			const struct sigaction* act)
			:
			fThread(thread->id),
			fSignal(signal),
			fAction(*act)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal action: thread: %ld, signal: %lu (%s), "
				"action: {handler: %p, flags: 0x%x, mask: 0x%lx}",
				fThread, fSignal,
				(fSignal < NSIG ? sigstr[fSignal] : "invalid"),
				fAction.sa_handler, fAction.sa_flags, fAction.sa_mask);
		}

	private:
		thread_id			fThread;
		uint32				fSignal;
		struct sigaction	fAction;
};


class SigProcMask : public AbstractTraceEntry {
	public:
		SigProcMask(int how, sigset_t mask)
			:
			fHow(how),
			fMask(mask),
			fOldMask(thread_get_current_thread()->sig_block_mask)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			const char* how = "invalid";
			switch (fHow) {
				case SIG_BLOCK:
					how = "block";
					break;
				case SIG_UNBLOCK:
					how = "unblock";
					break;
				case SIG_SETMASK:
					how = "set";
					break;
			}

			out.Print("signal proc mask: %s 0x%lx, old mask: 0x%lx", how, fMask,
				fOldMask);
		}

	private:
		int			fHow;
		sigset_t	fMask;
		sigset_t	fOldMask;
};

}	// namespace SignalTracing

#	define T(x)	new(std::nothrow) SignalTracing::x

#else
#	define T(x)
#endif	// SIGNAL_TRACING


// #pragma mark -


/*!	Updates the thread::flags field according to what signals are pending.
	Interrupts must be disabled and the thread lock must be held.
*/
static void
update_thread_signals_flag(struct thread* thread)
{
	if (atomic_get(&thread->sig_pending) & ~atomic_get(&thread->sig_block_mask))
		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
}


void
update_current_thread_signals_flag()
{
	InterruptsSpinLocker locker(gThreadSpinlock);

	update_thread_signals_flag(thread_get_current_thread());
}


static bool
notify_debugger(struct thread *thread, int signal, struct sigaction *handler,
	bool deadly)
{
	uint64 signalMask = SIGNAL_TO_MASK(signal);

	// first check the ignore signal masks the debugger specified for the
	// thread

	if (atomic_get(&thread->debug_info.ignore_signals_once) & signalMask) {
		atomic_and(&thread->debug_info.ignore_signals_once, ~signalMask);
		return true;
	}

	if (atomic_get(&thread->debug_info.ignore_signals) & signalMask)
		return true;

	// deliver the event
	return user_debug_handle_signal(signal, handler, deadly);
}


/*! Actually handles the signal - i.e. the thread will exit, a custom signal
	handler is prepared, or whatever the signal demands.
*/
bool
handle_signals(struct thread *thread)
{
	uint32 signalMask = atomic_get(&thread->sig_pending)
		& ~atomic_get(&thread->sig_block_mask);

	// If SIGKILL[THR] are pending, we ignore other signals.
	// Otherwise check if the thread shall stop for debugging.
	if (signalMask & KILL_SIGNALS) {
		signalMask &= KILL_SIGNALS;
	} else if (thread->debug_info.flags & B_THREAD_DEBUG_STOP) {
		user_debug_stop_thread();
	}

	if (signalMask == 0)
		return false;

	if (thread->user_thread->defer_signals > 0
		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0) {
		thread->user_thread->pending_signals = signalMask;
		return false;
	}

	thread->user_thread->pending_signals = 0;

	bool restart = (atomic_and(&thread->flags,
			~THREAD_FLAGS_DONT_RESTART_SYSCALL)
		& THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;

	T(HandleSignals(signalMask));

	for (int32 i = 0; i < NSIG; i++) {
		bool debugSignal;
		int32 signal = i + 1;

		if ((signalMask & SIGNAL_TO_MASK(signal)) == 0)
			continue;

		// clear the signal that we will handle
		atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));

		debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
				& (B_TEAM_DEBUG_SIGNALS | B_TEAM_DEBUG_DEBUGGER_INSTALLED));

		// TODO: since sigaction_etc() could clobber the fields at any time,
		//		we should actually copy the relevant fields atomically before
		//		accessing them (only the debugger is calling sigaction_etc()
		//		right now).
		//		Update: sigaction_etc() is only used by the userland debugger
		//		support. We can just as well restrict getting/setting signal
		//		handlers to work only when the respective thread is stopped.
		//		Then sigaction() could be used instead and we could get rid of
		//		sigaction_etc().
		struct sigaction* handler = &thread->sig_action[i];

		TRACE(("Thread 0x%lx received signal %s\n", thread->id, sigstr[signal]));

		if (handler->sa_handler == SIG_IGN) {
			// signal is to be ignored
			// ToDo: apply zombie cleaning on SIGCHLD

			// notify the debugger
			if (debugSignal)
				notify_debugger(thread, signal, handler, false);
			continue;
		} else if (handler->sa_handler == SIG_DFL) {
			// default signal behaviour
			switch (signal) {
				case SIGCHLD:
				case SIGWINCH:
				case SIGURG:
					// notify the debugger
					if (debugSignal)
						notify_debugger(thread, signal, handler, false);
					continue;

				case SIGCONT:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_CONTINUED, signal, false);

						// The standard states that the system *may* send a
						// SIGCHLD when a child is continued. I haven't found
						// a good reason why we would want to, though.
					}
					continue;

				case SIGSTOP:
				case SIGTSTP:
				case SIGTTIN:
				case SIGTTOU:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					thread->next_state = B_THREAD_SUSPENDED;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_STOPPED, signal, false);

						// send a SIGCHLD to the parent (unless it has
						// SA_NOCLDSTOP set)
						SpinLocker _(gThreadSpinlock);
						struct thread* parentThread
							= thread->team->parent->main_thread;
						struct sigaction& parentHandler
							= parentThread->sig_action[SIGCHLD - 1];
						// TODO: do we need to worry about rescheduling here?
						bool unused = false;
						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
							deliver_signal(parentThread, SIGCHLD, 0,
								unused);
						}
					}

					return true;

				case SIGSEGV:
				case SIGFPE:
				case SIGILL:
				case SIGTRAP:
					// If this is the main thread, we just fall through and let
					// this signal kill the team. Otherwise we send a SIGKILL to
					// the main thread first, since the signal will kill this
					// thread only.
					if (thread != thread->team->main_thread)
						send_signal(thread->team->main_thread->id, SIGKILL);
				case SIGQUIT:
				case SIGABRT:
				case SIGPOLL:
				case SIGPROF:
				case SIGSYS:
				case SIGVTALRM:
				case SIGXCPU:
				case SIGXFSZ:
					TRACE(("Shutting down thread 0x%lx due to signal #%ld\n",
						thread->id, signal));
				case SIGKILL:
				case SIGKILLTHR:
				default:
					// if the thread exited normally, the exit reason is
					// already set
					if (thread->exit.reason != THREAD_RETURN_EXIT) {
						thread->exit.reason = THREAD_RETURN_INTERRUPTED;
						thread->exit.signal = (uint16)signal;
					}

					// notify the debugger
					if (debugSignal && signal != SIGKILL && signal != SIGKILLTHR
						&& !notify_debugger(thread, signal, handler, true))
						continue;

					thread_exit();
						// won't return
			}
		}

		// User defined signal handler

		// notify the debugger
		if (debugSignal && !notify_debugger(thread, signal, handler, false))
			continue;

		if (!restart || (handler->sa_flags & SA_RESTART) == 0)
			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

		T(ExecuteSignalHandler(signal, handler));

		TRACE(("### Setting up custom signal handler frame...\n"));
		arch_setup_signal_frame(thread, handler, signal,
			atomic_get(&thread->sig_block_mask));

		if (handler->sa_flags & SA_ONESHOT)
			handler->sa_handler = SIG_DFL;
		if ((handler->sa_flags & SA_NOMASK) == 0) {
			// Update the block mask while the signal handler is running - it
			// will be automatically restored when the signal frame is left.
			atomic_or(&thread->sig_block_mask,
				(handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGNALS);
		}

		update_current_thread_signals_flag();

		return false;
	}

	// clear syscall restart thread flag, if we're not supposed to restart the
	// syscall
	if (!restart)
		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

	update_current_thread_signals_flag();

	return false;
}


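/*	To illustrate the syscall restart logic above from userland - a sketch
	using only the portable POSIX API: with SA_RESTART set on the handler,
	a read() interrupted by SIGUSR1 is transparently restarted; without the
	flag, the read() fails with EINTR once the handler has returned.

		static void
		usr1_handler(int signal)
		{
			// async-signal-safe work only
		}

		struct sigaction action;
		memset(&action, 0, sizeof(action));
		action.sa_handler = usr1_handler;
		action.sa_flags = SA_RESTART;
			// omit SA_RESTART to get EINTR instead of a restart
		sigaction(SIGUSR1, &action, NULL);

		char buffer[256];
		ssize_t bytesRead = read(0, buffer, sizeof(buffer));
		if (bytesRead < 0 && errno == EINTR) {
			// only reached when SA_RESTART is not set
		}
*/

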
bool
is_kill_signal_pending(void)
{
	return (atomic_get(&thread_get_current_thread()->sig_pending)
		& KILL_SIGNALS) != 0;
}


bool
is_signal_blocked(int signal)
{
	return (atomic_get(&thread_get_current_thread()->sig_block_mask)
		& SIGNAL_TO_MASK(signal)) != 0;
}


/*!	Delivers the \a signal to the \a thread, but doesn't handle the signal -
	it just makes sure the thread gets the signal, i.e. unblocks it if needed.
	This function must be called with interrupts disabled and the
	thread lock held.
*/
static status_t
deliver_signal(struct thread *thread, uint signal, uint32 flags,
	bool &reschedule)
{
	if (flags & B_CHECK_PERMISSION) {
		// ToDo: introduce euid & uid fields to the team and check permission
	}

	if (signal == 0)
		return B_OK;

	if (thread->team == team_get_kernel_team()) {
		// Signals to kernel threads will only wake them up
		if (thread->state == B_THREAD_SUSPENDED)
			reschedule |= scheduler_enqueue_in_run_queue(thread);
		return B_OK;
	}

	atomic_or(&thread->sig_pending, SIGNAL_TO_MASK(signal));

	switch (signal) {
		case SIGKILL:
		{
			// Forward KILLTHR to the main thread of the team
			struct thread *mainThread = thread->team->main_thread;
			atomic_or(&mainThread->sig_pending, SIGNAL_TO_MASK(SIGKILLTHR));

			// Wake up main thread
			if (mainThread->state == B_THREAD_SUSPENDED)
				reschedule |= scheduler_enqueue_in_run_queue(mainThread);
			else
				thread_interrupt(mainThread, true);

			update_thread_signals_flag(mainThread);

			// Supposed to fall through
		}
		case SIGKILLTHR:
			// Wake up suspended threads and interrupt waiting ones
			if (thread->state == B_THREAD_SUSPENDED)
				reschedule |= scheduler_enqueue_in_run_queue(thread);
			else
				thread_interrupt(thread, true);
			break;

		case SIGCONT:
			// Wake up thread if it was suspended
			if (thread->state == B_THREAD_SUSPENDED)
				reschedule |= scheduler_enqueue_in_run_queue(thread);

			if ((flags & SIGNAL_FLAG_DONT_RESTART_SYSCALL) != 0)
				atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);

			atomic_and(&thread->sig_pending, ~STOP_SIGNALS);
				// remove any pending stop signals
			break;

		default:
			if (thread->sig_pending
				& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD))) {
				// Interrupt thread if it was waiting
				thread_interrupt(thread, false);
			}
			break;
	}

	update_thread_signals_flag(thread);

	return B_OK;
}


int
send_signal_etc(pid_t id, uint signal, uint32 flags)
{
	status_t status = B_BAD_THREAD_ID;
	struct thread *thread;
	cpu_status state = 0;
	bool reschedule = false;

	if (signal < 0 || signal > MAX_SIGNO)
		return B_BAD_VALUE;

	T(SendSignal(id, signal, flags));

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		state = disable_interrupts();

	if (id > 0) {
		// send a signal to the specified thread

		GRAB_THREAD_LOCK();

		thread = thread_get_thread_struct_locked(id);
		if (thread != NULL)
			status = deliver_signal(thread, signal, flags, reschedule);
	} else {
		// send a signal to the specified process group
		// (the absolute value of the id)

		struct process_group *group;

		// TODO: handle -1 correctly
		if (id == 0 || id == -1) {
			// send a signal to the current team
			id = thread_get_current_thread()->team->id;
		} else
			id = -id;

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			GRAB_TEAM_LOCK();

		group = team_get_process_group_locked(NULL, id);
		if (group != NULL) {
			struct team *team, *next;

			// Send a signal to all teams in this process group

			for (team = group->teams; team != NULL; team = next) {
				next = team->group_next;
				id = team->id;

				GRAB_THREAD_LOCK();

				thread = thread_get_thread_struct_locked(id);
				if (thread != NULL) {
					// we don't stop because of an error sending the signal;
					// we would rather send as many signals as possible
					status = deliver_signal(thread, signal, flags, reschedule);
				}

				RELEASE_THREAD_LOCK();
			}
		}

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			RELEASE_TEAM_LOCK();

		GRAB_THREAD_LOCK();
	}

	if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0
		&& reschedule)
		scheduler_reschedule();

	RELEASE_THREAD_LOCK();

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		restore_interrupts(state);

	return status;
}


int
send_signal(pid_t threadID, uint signal)
{
	// The BeBook states that this function wouldn't be exported
	// for drivers, but, of course, it's wrong.
	return send_signal_etc(threadID, signal, 0);
}


int
has_signals_pending(void *_thread)
{
	struct thread *thread = (struct thread *)_thread;
	if (thread == NULL)
		thread = thread_get_current_thread();

	return atomic_get(&thread->sig_pending)
		& ~atomic_get(&thread->sig_block_mask);
}


int
sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
{
	struct thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);

	if (set != NULL) {
		T(SigProcMask(how, *set));

		switch (how) {
			case SIG_BLOCK:
				atomic_or(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			case SIG_UNBLOCK:
				atomic_and(&thread->sig_block_mask, ~*set);
				break;
			case SIG_SETMASK:
				atomic_set(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			default:
				return B_BAD_VALUE;
		}

		update_current_thread_signals_flag();
	}

	if (oldSet != NULL)
		*oldSet = oldMask;

	return B_OK;
}


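/*	Userland usage sketch (the standard POSIX pattern this call supports):
	block a signal around a critical section, then restore the previous
	mask.

		sigset_t blocked, oldMask;
		sigemptyset(&blocked);
		sigaddset(&blocked, SIGINT);

		sigprocmask(SIG_BLOCK, &blocked, &oldMask);
		// ... critical section - an arriving SIGINT stays pending ...
		sigprocmask(SIG_SETMASK, &oldMask, NULL);
*/

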
/*!	\brief sigaction() for the specified thread.
	A \a threadID < 0 specifies the current thread.
*/
int
sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
	struct sigaction *oldAction)
{
	struct thread *thread;
	cpu_status state;
	status_t error = B_OK;

	if (signal < 1 || signal > MAX_SIGNO
		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	thread = (threadID < 0
		? thread_get_current_thread()
		: thread_get_thread_struct_locked(threadID));

	if (thread) {
		if (oldAction) {
			// save previous sigaction structure
			memcpy(oldAction, &thread->sig_action[signal - 1],
				sizeof(struct sigaction));
		}

		if (act) {
			T(SigAction(thread, signal, act));

			// set new sigaction structure
			memcpy(&thread->sig_action[signal - 1], act,
				sizeof(struct sigaction));
			thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
		}

		if (act && act->sa_handler == SIG_IGN) {
			// remove pending signal if it should now be ignored
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		} else if (act && act->sa_handler == SIG_DFL
			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0) {
			// remove pending signal for those signals whose default
			// action is to ignore them
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		}
	} else
		error = B_BAD_THREAD_ID;

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return error;
}


int
sigaction(int signal, const struct sigaction *act, struct sigaction *oldAction)
{
	return sigaction_etc(-1, signal, act, oldAction);
}


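/*	Userland usage sketch (standard POSIX API): install a handler for
	SIGTERM. Without SA_NOMASK, handle_signals() above additionally blocks
	SIGTERM itself while the handler runs.

		static void
		term_handler(int signal)
		{
			// async-signal-safe work only
		}

		struct sigaction action;
		memset(&action, 0, sizeof(action));
		action.sa_handler = term_handler;
		sigemptyset(&action.sa_mask);
			// signals blocked in addition while the handler runs
		sigaction(SIGTERM, &action, NULL);
*/

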
/*!	Sends a SIGALRM signal to the thread that set the alarm, and requests
	a reschedule.
*/
static int32
alarm_event(timer *t)
{
	// The hook can be called from any context, but we have to
	// deliver the signal to the thread that originally called
	// set_alarm().
	// Since thread->alarm is this timer structure, we can just
	// cast it back - ugly but it works for now
	struct thread *thread = (struct thread *)((uint8 *)t
		- offsetof(struct thread, alarm));
		// ToDo: investigate adding one user parameter to the timer
		// structure to fix this hack

	TRACE(("alarm_event: thread = %p\n", thread));
	send_signal_etc(thread->id, SIGALRM, B_DO_NOT_RESCHEDULE);

	return B_INVOKE_SCHEDULER;
}


/*!	Sets the alarm timer for the current thread. The timer fires at the
	specified time in the future, periodically or just once, as determined
	by \a mode.
	\return the time left until a previously set alarm would have fired.
*/
bigtime_t
set_alarm(bigtime_t time, uint32 mode)
{
	struct thread *thread = thread_get_current_thread();
	bigtime_t remainingTime = 0;

	ASSERT(B_ONE_SHOT_RELATIVE_ALARM == B_ONE_SHOT_RELATIVE_TIMER);
		// just to be sure no one changes the headers some day

	TRACE(("set_alarm: thread = %p\n", thread));

	if (thread->alarm.period)
		remainingTime = (bigtime_t)thread->alarm.schedule_time - system_time();

	cancel_timer(&thread->alarm);

	if (time != B_INFINITE_TIMEOUT)
		add_timer(&thread->alarm, &alarm_event, time, mode);
	else {
		// this marks the alarm as canceled (for returning the remaining time)
		thread->alarm.period = 0;
	}

	return remainingTime;
}


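/*	Usage sketch, assuming the BeOS-style alarm mode constants from OS.h
	(B_ONE_SHOT_RELATIVE_ALARM etc.): deliver a SIGALRM to the calling
	thread in one second, then cancel it again.

		bigtime_t remaining = set_alarm(1000000LL, B_ONE_SHOT_RELATIVE_ALARM);
			// 'remaining' is the time a previously set alarm still had left

		set_alarm(B_INFINITE_TIMEOUT, B_ONE_SHOT_RELATIVE_ALARM);
			// cancels the alarm without scheduling a new one
*/

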
/*!	Waits for one of the specified signals, and returns it in
	\a _signal.
*/
int
sigwait(const sigset_t *set, int *_signal)
{
	struct thread *thread = thread_get_current_thread();

	while (!has_signals_pending(thread)) {
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SIGNAL, NULL);
		thread_block();
	}

	int signalsPending = atomic_get(&thread->sig_pending) & *set;

	update_current_thread_signals_flag();

	if (signalsPending) {
		// select the lowest pending signal to return in _signal
		for (int signal = 1; signal < NSIG; signal++) {
			if ((SIGNAL_TO_MASK(signal) & signalsPending) != 0) {
				*_signal = signal;
				return B_OK;
			}
		}
	}

	return B_INTERRUPTED;
}


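/*	Userland usage sketch (standard POSIX API): wait for SIGUSR1
	synchronously instead of installing an asynchronous handler.

		sigset_t set;
		sigemptyset(&set);
		sigaddset(&set, SIGUSR1);

		int signal;
		if (sigwait(&set, &signal) == B_OK) {
			// 'signal' is now SIGUSR1
		}
*/

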
/*!	Replaces the current signal block mask and waits for any event to happen.
	Before returning, the original signal block mask is restored.
*/
int
sigsuspend(const sigset_t *mask)
{
	struct thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);

	// Set the new block mask and block until interrupted.

	atomic_set(&thread->sig_block_mask, *mask & BLOCKABLE_SIGNALS);

	while (!has_signals_pending(thread)) {
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SIGNAL, NULL);
		thread_block();
	}

	// restore the original block mask
	atomic_set(&thread->sig_block_mask, oldMask);

	update_current_thread_signals_flag();

	// we're not supposed to actually succeed
	return B_INTERRUPTED;
}


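/*	Userland usage sketch (the classic POSIX pattern): atomically replace
	the block mask and wait, avoiding the race window between a separate
	sigprocmask() and pause(). Here everything but SIGUSR1 is blocked
	while waiting.

		sigset_t waitMask;
		sigfillset(&waitMask);
		sigdelset(&waitMask, SIGUSR1);

		sigsuspend(&waitMask);
			// returns B_INTERRUPTED once SIGUSR1 (or an unblockable
			// signal) has arrived; the previous mask is restored
*/

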
int
sigpending(sigset_t *set)
{
	struct thread *thread = thread_get_current_thread();

	if (set == NULL)
		return B_BAD_VALUE;

	*set = atomic_get(&thread->sig_pending);
	return B_OK;
}


//	#pragma mark -


bigtime_t
_user_set_alarm(bigtime_t time, uint32 mode)
{
	syscall_64_bit_return_value();

	return set_alarm(time, mode);
}


status_t
_user_send_signal(pid_t team, uint signal)
{
	return send_signal_etc(team, signal, B_CHECK_PERMISSION);
}


status_t
_user_sigprocmask(int how, const sigset_t *userSet, sigset_t *userOldSet)
{
	sigset_t set, oldSet;
	status_t status;

	if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
				sizeof(sigset_t)) < B_OK))
		return B_BAD_ADDRESS;

	status = sigprocmask(how, userSet ? &set : NULL,
		userOldSet ? &oldSet : NULL);

	// copy old set if asked for
	if (status >= B_OK && userOldSet != NULL
		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_sigaction(int signal, const struct sigaction *userAction,
	struct sigaction *userOldAction)
{
	struct sigaction act, oact;
	status_t status;

	if ((userAction != NULL && user_memcpy(&act, userAction,
				sizeof(struct sigaction)) < B_OK)
		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction,
				sizeof(struct sigaction)) < B_OK))
		return B_BAD_ADDRESS;

	status = sigaction(signal, userAction ? &act : NULL,
		userOldAction ? &oact : NULL);

	// only copy the old action if a pointer has been given
	if (status >= B_OK && userOldAction != NULL
		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_sigwait(const sigset_t *userSet, int *_userSignal)
{
	if (userSet == NULL || _userSignal == NULL)
		return B_BAD_VALUE;

	sigset_t set;
	if (user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	int signal;
	status_t status = sigwait(&set, &signal);
	if (status < B_OK)
		return syscall_restart_handle_post(status);

	return user_memcpy(_userSignal, &signal, sizeof(int));
}


status_t
_user_sigsuspend(const sigset_t *userMask)
{
	sigset_t mask;

	if (userMask == NULL)
		return B_BAD_VALUE;
	if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return sigsuspend(&mask);
}


status_t
_user_sigpending(sigset_t *userSet)
{
	sigset_t set;
	status_t status;

	if (userSet == NULL)
		return B_BAD_VALUE;
	if (!IS_USER_ADDRESS(userSet))
		return B_BAD_ADDRESS;

	status = sigpending(&set);
	if (status == B_OK
		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_set_signal_stack(const stack_t *newUserStack, stack_t *oldUserStack)
{
	struct thread *thread = thread_get_current_thread();
	struct stack_t newStack, oldStack;
	bool onStack = false;

	if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
				sizeof(stack_t)) < B_OK)
		|| (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
				sizeof(stack_t)) < B_OK))
		return B_BAD_ADDRESS;

	if (thread->signal_stack_enabled) {
		// determine whether or not the user thread is currently
		// on the active signal stack
		onStack = arch_on_signal_stack(thread);
	}

	if (oldUserStack != NULL) {
		oldStack.ss_sp = (void *)thread->signal_stack_base;
		oldStack.ss_size = thread->signal_stack_size;
		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
			| (onStack ? SS_ONSTACK : 0);
	}

	if (newUserStack != NULL) {
		// no flags other than SS_DISABLE are allowed
		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
			return B_BAD_VALUE;

		if ((newStack.ss_flags & SS_DISABLE) == 0) {
			// check if the size is valid
			if (newStack.ss_size < MINSIGSTKSZ)
				return B_NO_MEMORY;
			if (onStack)
				return B_NOT_ALLOWED;
			if (!IS_USER_ADDRESS(newStack.ss_sp))
				return B_BAD_VALUE;

			thread->signal_stack_base = (addr_t)newStack.ss_sp;
			thread->signal_stack_size = newStack.ss_size;
			thread->signal_stack_enabled = true;
		} else
			thread->signal_stack_enabled = false;
	}

	// only copy the old stack info if a pointer has been given
	if (oldUserStack != NULL
		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}


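/*	Userland usage sketch: this syscall backs the POSIX sigaltstack() API
	(the wrapper name is an assumption here), which is combined with
	SA_ONSTACK to handle, for example, a stack-overflow SIGSEGV on a
	separate stack. 'segv_handler' is a hypothetical handler function.

		stack_t stack;
		stack.ss_sp = malloc(SIGSTKSZ);
		stack.ss_size = SIGSTKSZ;
		stack.ss_flags = 0;
		sigaltstack(&stack, NULL);

		struct sigaction action;
		memset(&action, 0, sizeof(action));
		action.sa_handler = segv_handler;
		action.sa_flags = SA_ONSTACK;
			// run the handler on the alternate stack
		sigaction(SIGSEGV, &action, NULL);
*/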