/*
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
 *
 * Distributed under the terms of the MIT License.
 */


/*! POSIX signal handling routines */


#include <ksignal.h>

#include <stddef.h>
#include <string.h>

#include <OS.h>
#include <KernelExport.h>

#include <cpu.h>
#include <debug.h>
#include <kernel.h>
#include <kscheduler.h>
#include <sem.h>
#include <syscall_restart.h>
#include <team.h>
#include <thread.h>
#include <tracing.h>
#include <user_debugger.h>
#include <user_thread.h>
#include <util/AutoLock.h>


//#define TRACE_SIGNAL
#ifdef TRACE_SIGNAL
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#define BLOCKABLE_SIGNALS		(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)))
#define STOP_SIGNALS \
	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
#define DEFAULT_IGNORE_SIGNALS \
	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
	| SIGNAL_TO_MASK(SIGCONT))
#define NON_DEFERRABLE_SIGNALS	\
	(KILL_SIGNALS				\
	| SIGNAL_TO_MASK(SIGILL)	\
	| SIGNAL_TO_MASK(SIGFPE)	\
	| SIGNAL_TO_MASK(SIGSEGV))


const char * const sigstr[NSIG] = {
	"NONE", "HUP", "INT", "QUIT", "ILL", "CHLD", "ABRT", "PIPE",
	"FPE", "KILL", "STOP", "SEGV", "CONT", "TSTP", "ALRM", "TERM",
	"TTIN", "TTOU", "USR1", "USR2", "WINCH", "KILLTHR", "TRAP",
	"POLL", "PROF", "SYS", "URG", "VTALRM", "XCPU", "XFSZ"
};


static status_t deliver_signal(struct thread *thread, uint signal,
	uint32 flags);



// #pragma mark - signal tracing


#if SIGNAL_TRACING

namespace SignalTracing {


class HandleSignals : public AbstractTraceEntry {
	public:
		HandleSignals(uint32 signals)
			:
			fSignals(signals)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal handle:  0x%lx", fSignals);
		}

	private:
		uint32		fSignals;
};


class ExecuteSignalHandler : public AbstractTraceEntry {
	public:
		ExecuteSignalHandler(int signal, struct sigaction* handler)
			:
			fSignal(signal),
			fHandler((void*)handler->sa_handler)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal exec handler: signal: %d, handler: %p",
				fSignal, fHandler);
		}

	private:
		int		fSignal;
		void*	fHandler;
};


class SendSignal : public AbstractTraceEntry {
	public:
		SendSignal(pid_t target, uint32 signal, uint32 flags)
			:
			fTarget(target),
			fSignal(signal),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal send: target: %ld, signal: %lu (%s), "
				"flags: 0x%lx", fTarget, fSignal,
				(fSignal < NSIG ? sigstr[fSignal] : "invalid"), fFlags);
		}

	private:
		pid_t	fTarget;
		uint32	fSignal;
		uint32	fFlags;
};


class SigAction : public AbstractTraceEntry {
	public:
		SigAction(struct thread* thread, uint32 signal,
			const struct sigaction* act)
			:
			fThread(thread->id),
			fSignal(signal),
			fAction(*act)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal action: thread: %ld, signal: %lu (%s), "
				"action: {handler: %p, flags: 0x%x, mask: 0x%lx}",
				fThread, fSignal,
				(fSignal < NSIG ? sigstr[fSignal] : "invalid"),
				fAction.sa_handler, fAction.sa_flags, fAction.sa_mask);
		}

	private:
		thread_id			fThread;
		uint32				fSignal;
		struct sigaction	fAction;
};


class SigProcMask : public AbstractTraceEntry {
	public:
		SigProcMask(int how, sigset_t mask)
			:
			fHow(how),
			fMask(mask),
			fOldMask(thread_get_current_thread()->sig_block_mask)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			const char* how = "invalid";
			switch (fHow) {
				case SIG_BLOCK:
					how = "block";
					break;
				case SIG_UNBLOCK:
					how = "unblock";
					break;
				case SIG_SETMASK:
					how = "set";
					break;
			}

			out.Print("signal proc mask: %s 0x%lx, old mask: 0x%lx", how, fMask,
				fOldMask);
		}

	private:
		int			fHow;
		sigset_t	fMask;
		sigset_t	fOldMask;
};


class SigSuspend : public AbstractTraceEntry {
	public:
		SigSuspend(sigset_t mask)
			:
			fMask(mask),
			fOldMask(thread_get_current_thread()->sig_block_mask)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal suspend: %#" B_PRIx32 ", old mask: %#" B_PRIx32,
				fMask, fOldMask);
		}

	private:
		sigset_t	fMask;
		sigset_t	fOldMask;
};


class SigSuspendDone : public AbstractTraceEntry {
	public:
		SigSuspendDone()
			:
			fSignals(thread_get_current_thread()->sig_pending)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal suspend done: %#" B_PRIx32, fSignals);
		}

	private:
		uint32		fSignals;
};

}	// namespace SignalTracing

#	define T(x)	new(std::nothrow) SignalTracing::x

#else
#	define T(x)
#endif	// SIGNAL_TRACING


// #pragma mark -


/*!	Updates the thread::flags field according to what signals are pending.
	Interrupts must be disabled and the thread lock must be held.
*/
static void
update_thread_signals_flag(struct thread* thread)
{
	sigset_t mask = ~atomic_get(&thread->sig_block_mask)
		| thread->sig_temp_enabled;
	if (atomic_get(&thread->sig_pending) & mask)
		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
}


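/*!	Convenience wrapper around update_thread_signals_flag() for the current
	thread; it disables interrupts and acquires the thread lock itself.
*/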
void
update_current_thread_signals_flag()
{
	InterruptsSpinLocker locker(gThreadSpinlock);

	update_thread_signals_flag(thread_get_current_thread());
}


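/*!	Checks whether the debugger is interested in the given \a signal for
	\a thread and notifies it, if necessary. Returns \c true if the signal
	shall be handled as usual, \c false if the caller shall skip it.
*/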
static bool
notify_debugger(struct thread *thread, int signal, struct sigaction *handler,
	bool deadly)
{
	uint64 signalMask = SIGNAL_TO_MASK(signal);

	// first check the ignore signal masks the debugger specified for the thread

	if (atomic_get(&thread->debug_info.ignore_signals_once) & signalMask) {
		atomic_and(&thread->debug_info.ignore_signals_once, ~signalMask);
		return true;
	}

	if (atomic_get(&thread->debug_info.ignore_signals) & signalMask)
		return true;

	// deliver the event
	return user_debug_handle_signal(signal, handler, deadly);
}


/*!	Actually handles the signal, i.e. the thread will exit, a custom signal
	handler is prepared, or whatever else the signal demands.
*/
bool
handle_signals(struct thread *thread)
{
	uint32 signalMask = atomic_get(&thread->sig_pending)
		& (~atomic_get(&thread->sig_block_mask) | thread->sig_temp_enabled);
	thread->sig_temp_enabled = 0;

	// If SIGKILL[THR] is pending, we ignore other signals.
	// Otherwise check whether the thread shall stop for debugging.
	if (signalMask & KILL_SIGNALS) {
		signalMask &= KILL_SIGNALS;
	} else if (thread->debug_info.flags & B_THREAD_DEBUG_STOP) {
		user_debug_stop_thread();
	}

	if (signalMask == 0)
		return false;

	if (thread->user_thread->defer_signals > 0
		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0) {
		thread->user_thread->pending_signals = signalMask;
		return false;
	}

	thread->user_thread->pending_signals = 0;

	bool restart = (atomic_and(&thread->flags,
			~THREAD_FLAGS_DONT_RESTART_SYSCALL)
		& THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;

	T(HandleSignals(signalMask));

	for (int32 i = 0; i < NSIG; i++) {
		bool debugSignal;
		int32 signal = i + 1;

		if ((signalMask & SIGNAL_TO_MASK(signal)) == 0)
			continue;

		// clear the signal that we will handle
		atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));

		debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
				& (B_TEAM_DEBUG_SIGNALS | B_TEAM_DEBUG_DEBUGGER_INSTALLED));

		// TODO: since sigaction_etc() could clobber the fields at any time,
		//		we should actually copy the relevant fields atomically before
		//		accessing them (only the debugger is calling sigaction_etc()
		//		right now).
		//		Update: sigaction_etc() is only used by the userland debugger
		//		support. We can just as well restrict getting/setting signal
		//		handlers to work only when the respective thread is stopped.
		//		Then sigaction() could be used instead and we could get rid of
		//		sigaction_etc().
		struct sigaction* handler = &thread->sig_action[i];

		TRACE(("Thread 0x%lx received signal %s\n", thread->id, sigstr[signal]));

		if (handler->sa_handler == SIG_IGN) {
			// signal is to be ignored
			// ToDo: apply zombie cleaning on SIGCHLD

			// notify the debugger
			if (debugSignal)
				notify_debugger(thread, signal, handler, false);
			continue;
		} else if (handler->sa_handler == SIG_DFL) {
			// default signal behaviour
			switch (signal) {
				case SIGCHLD:
				case SIGWINCH:
				case SIGURG:
					// notify the debugger
					if (debugSignal)
						notify_debugger(thread, signal, handler, false);
					continue;

				case SIGCONT:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_CONTINUED, signal, false);

						// The standard states that the system *may* send a
						// SIGCHLD when a child is continued. I haven't found
						// a good reason why we would want to, though.
					}
					continue;

				case SIGSTOP:
				case SIGTSTP:
				case SIGTTIN:
				case SIGTTOU:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					thread->next_state = B_THREAD_SUSPENDED;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_STOPPED, signal, false);

						// send a SIGCHLD to the parent (unless it has
						// SA_NOCLDSTOP set)
						SpinLocker _(gThreadSpinlock);
						struct thread* parentThread
							= thread->team->parent->main_thread;
						struct sigaction& parentHandler
							= parentThread->sig_action[SIGCHLD - 1];
						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0)
							deliver_signal(parentThread, SIGCHLD, 0);
					}

					return true;

				case SIGSEGV:
				case SIGFPE:
				case SIGILL:
				case SIGTRAP:
				case SIGABRT:
					// If this is the main thread, we just fall through and let
					// this signal kill the team. Otherwise we send a SIGKILL to
					// the main thread first, since the signal will kill this
					// thread only.
					if (thread != thread->team->main_thread)
						send_signal(thread->team->main_thread->id, SIGKILL);
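					// fall through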
				case SIGQUIT:
				case SIGPOLL:
				case SIGPROF:
				case SIGSYS:
				case SIGVTALRM:
				case SIGXCPU:
				case SIGXFSZ:
					TRACE(("Shutting down thread 0x%lx due to signal #%ld\n",
						thread->id, signal));
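					// fall through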
				case SIGKILL:
				case SIGKILLTHR:
				default:
					// if the thread exited normally, the exit reason is already set
					if (thread->exit.reason != THREAD_RETURN_EXIT) {
						thread->exit.reason = THREAD_RETURN_INTERRUPTED;
						thread->exit.signal = (uint16)signal;
					}

					// notify the debugger
					if (debugSignal && signal != SIGKILL && signal != SIGKILLTHR
						&& !notify_debugger(thread, signal, handler, true))
						continue;

					thread_exit();
						// won't return
			}
		}

		// User defined signal handler

		// notify the debugger
		if (debugSignal && !notify_debugger(thread, signal, handler, false))
			continue;

		if (!restart || (handler->sa_flags & SA_RESTART) == 0)
			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

		T(ExecuteSignalHandler(signal, handler));

		TRACE(("### Setting up custom signal handler frame...\n"));
		arch_setup_signal_frame(thread, handler, signal,
			atomic_get(&thread->sig_block_mask));

		if (handler->sa_flags & SA_ONESHOT)
			handler->sa_handler = SIG_DFL;
		if ((handler->sa_flags & SA_NOMASK) == 0) {
			// Update the block mask while the signal handler is running - it
			// will be automatically restored when the signal frame is left.
			atomic_or(&thread->sig_block_mask,
				(handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGNALS);
		}

		update_current_thread_signals_flag();

		return false;
	}

	// clear syscall restart thread flag, if we're not supposed to restart the
	// syscall
	if (!restart)
		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

	update_current_thread_signals_flag();

	return false;
}


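/*!	Returns whether a kill signal (SIGKILL or SIGKILLTHR) is pending for the
	current thread.
*/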
bool
is_kill_signal_pending(void)
{
	return (atomic_get(&thread_get_current_thread()->sig_pending)
		& KILL_SIGNALS) != 0;
}


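/*!	Returns whether the current thread currently blocks the given \a signal.
*/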
bool
is_signal_blocked(int signal)
{
	return (atomic_get(&thread_get_current_thread()->sig_block_mask)
		& SIGNAL_TO_MASK(signal)) != 0;
}


/*!	Delivers the \a signal to the \a thread, but doesn't handle the signal;
	it just makes sure the thread gets the signal, i.e. unblocks it if needed.
	This function must be called with interrupts disabled and the
	thread lock held.
*/
static status_t
deliver_signal(struct thread *thread, uint signal, uint32 flags)
{
	if (flags & B_CHECK_PERMISSION) {
		// ToDo: introduce euid & uid fields to the team and check permission
	}

	if (signal == 0)
		return B_OK;

	if (thread->team == team_get_kernel_team()) {
		// Signals to kernel threads will only wake them up
		if (thread->state == B_THREAD_SUSPENDED)
			scheduler_enqueue_in_run_queue(thread);
		return B_OK;
	}

	atomic_or(&thread->sig_pending, SIGNAL_TO_MASK(signal));

	switch (signal) {
		case SIGKILL:
		{
			// Forward KILLTHR to the main thread of the team
			struct thread *mainThread = thread->team->main_thread;
			atomic_or(&mainThread->sig_pending, SIGNAL_TO_MASK(SIGKILLTHR));

			// Wake up main thread
			if (mainThread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(mainThread);
			else
				thread_interrupt(mainThread, true);

			update_thread_signals_flag(mainThread);

			// Supposed to fall through
		}
		case SIGKILLTHR:
			// Wake up suspended threads and interrupt waiting ones
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);
			else
				thread_interrupt(thread, true);
			break;

		case SIGCONT:
			// Wake up thread if it was suspended
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);

			if ((flags & SIGNAL_FLAG_DONT_RESTART_SYSCALL) != 0)
				atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);

			atomic_and(&thread->sig_pending, ~STOP_SIGNALS);
				// remove any pending stop signals
			break;

		default:
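			// Interrupt the thread, if it has any non-blocked signal pending.
			// Note that SIGCHLD is checked even when blocked, presumably so
			// that threads waiting for child status changes are woken up
			// nonetheless.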
			if (thread->sig_pending
				& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD))) {
				// Interrupt thread if it was waiting
				thread_interrupt(thread, false);
			}
			break;
	}

	update_thread_signals_flag(thread);

	return B_OK;
}


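/*!	Sends the given \a signal to the thread with ID \a id (\a id > 0) or to
	the process group with ID -\a id (\a id < -1); \a id == 0 and \a id == -1
	currently address the current team. \a flags is a bitwise combination of
	B_CHECK_PERMISSION, B_DO_NOT_RESCHEDULE, SIGNAL_FLAG_TEAMS_LOCKED, and
	SIGNAL_FLAG_DONT_RESTART_SYSCALL.
*/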
int
send_signal_etc(pid_t id, uint signal, uint32 flags)
{
	status_t status = B_BAD_THREAD_ID;
	struct thread *thread;
	cpu_status state = 0;

	if (signal < 0 || signal > MAX_SIGNO)
		return B_BAD_VALUE;

	T(SendSignal(id, signal, flags));

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		state = disable_interrupts();

	if (id > 0) {
		// send a signal to the specified thread

		GRAB_THREAD_LOCK();

		thread = thread_get_thread_struct_locked(id);
		if (thread != NULL)
			status = deliver_signal(thread, signal, flags);
	} else {
		// send a signal to the specified process group
		// (the absolute value of the id)

		struct process_group *group;

		// TODO: handle -1 correctly
		if (id == 0 || id == -1) {
			// send a signal to the current team
			id = thread_get_current_thread()->team->id;
		} else
			id = -id;

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			GRAB_TEAM_LOCK();

		group = team_get_process_group_locked(NULL, id);
		if (group != NULL) {
			struct team *team, *next;

			// Send a signal to all teams in this process group

			for (team = group->teams; team != NULL; team = next) {
				next = team->group_next;
				id = team->id;

				GRAB_THREAD_LOCK();

				thread = thread_get_thread_struct_locked(id);
				if (thread != NULL) {
					// we don't stop because of an error sending the signal; we
					// rather want to send as many signals as possible
					status = deliver_signal(thread, signal, flags);
				}

				RELEASE_THREAD_LOCK();
			}
		}

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			RELEASE_TEAM_LOCK();

		GRAB_THREAD_LOCK();
	}

	if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0)
		scheduler_reschedule_if_necessary_locked();

	RELEASE_THREAD_LOCK();

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		restore_interrupts(state);

	return status;
}


int
send_signal(pid_t threadID, uint signal)
{
	// The BeBook states that this function wouldn't be exported
	// for drivers but, of course, it is.
	return send_signal_etc(threadID, signal, 0);
}


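/*!	Returns the set of signals that are pending and not blocked for the given
	\a _thread (a struct thread pointer), or for the current thread if
	\a _thread is NULL.
*/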
int
has_signals_pending(void *_thread)
{
	struct thread *thread = (struct thread *)_thread;
	if (thread == NULL)
		thread = thread_get_current_thread();

	return atomic_get(&thread->sig_pending)
		& ~atomic_get(&thread->sig_block_mask);
}


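/*!	Changes the signal block mask of the current thread according to \a how
	(SIG_BLOCK, SIG_UNBLOCK, or SIG_SETMASK) and \a set, and returns the
	previous mask in \a oldSet, if given. Non-blockable signals are silently
	filtered out of the new mask.
*/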
int
sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
{
	struct thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);

	if (set != NULL) {
		T(SigProcMask(how, *set));

		switch (how) {
			case SIG_BLOCK:
				atomic_or(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			case SIG_UNBLOCK:
				atomic_and(&thread->sig_block_mask, ~*set);
				break;
			case SIG_SETMASK:
				atomic_set(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			default:
				return B_BAD_VALUE;
		}

		update_current_thread_signals_flag();
	}

	if (oldSet != NULL)
		*oldSet = oldMask;

	return B_OK;
}


/*!	\brief sigaction() for the specified thread.
	A \a threadID < 0 specifies the current thread.
*/
int
sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
	struct sigaction *oldAction)
{
	struct thread *thread;
	cpu_status state;
	status_t error = B_OK;

	if (signal < 1 || signal > MAX_SIGNO
		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	thread = (threadID < 0
		? thread_get_current_thread()
		: thread_get_thread_struct_locked(threadID));

	if (thread) {
		if (oldAction) {
			// save previous sigaction structure
			memcpy(oldAction, &thread->sig_action[signal - 1],
				sizeof(struct sigaction));
		}

		if (act) {
			T(SigAction(thread, signal, act));

			// set new sigaction structure
			memcpy(&thread->sig_action[signal - 1], act,
				sizeof(struct sigaction));
			thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
		}

		if (act && act->sa_handler == SIG_IGN) {
			// remove pending signal if it should now be ignored
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		} else if (act && act->sa_handler == SIG_DFL
			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0) {
			// remove pending signal for those signals whose default
			// action is to ignore them
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		}
	} else
		error = B_BAD_THREAD_ID;

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return error;
}


int
sigaction(int signal, const struct sigaction *act, struct sigaction *oldAction)
{
	return sigaction_etc(-1, signal, act, oldAction);
}


/*! Triggers a SIGALRM to the thread that issued the timer and reschedules */
static int32
alarm_event(timer *t)
{
	// The hook can be called from any context, but we have to
	// deliver the signal to the thread that originally called
	// set_alarm().
	// Since thread->alarm is this timer structure, we can just
	// cast it back - ugly but it works for now
	struct thread *thread = (struct thread *)((uint8 *)t
		- offsetof(struct thread, alarm));
		// ToDo: investigate adding one user parameter to the timer structure to fix this hack

	TRACE(("alarm_event: thread = %p\n", thread));
	send_signal_etc(thread->id, SIGALRM, B_DO_NOT_RESCHEDULE);

	return B_HANDLED_INTERRUPT;
}


/*!	Sets the alarm timer for the current thread. The timer fires at the
	specified time in the future, periodically or just once, as determined
	by \a mode.
	\return the time left until a previously set alarm would have fired.
*/
bigtime_t
set_alarm(bigtime_t time, uint32 mode)
{
	struct thread *thread = thread_get_current_thread();
	bigtime_t remainingTime = 0;

	ASSERT(B_ONE_SHOT_RELATIVE_ALARM == B_ONE_SHOT_RELATIVE_TIMER);
		// just to be sure no one changes the headers some day

	TRACE(("set_alarm: thread = %p\n", thread));

	if (thread->alarm.period)
		remainingTime = (bigtime_t)thread->alarm.schedule_time - system_time();

	cancel_timer(&thread->alarm);

	if (time != B_INFINITE_TIMEOUT)
		add_timer(&thread->alarm, &alarm_event, time, mode);
	else {
		// this marks the alarm as canceled (for returning the remaining time)
		thread->alarm.period = 0;
	}

	return remainingTime;
}


/*!	Wait for the specified signals, and return the signal retrieved in
	\a _signal.
*/
int
sigwait(const sigset_t *set, int *_signal)
{
	struct thread *thread = thread_get_current_thread();

	while (!has_signals_pending(thread)) {
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SIGNAL, NULL);
		thread_block();
	}

	int signalsPending = atomic_get(&thread->sig_pending) & *set;

	update_current_thread_signals_flag();

	if (signalsPending) {
		// select the lowest pending signal to return in _signal
		for (int signal = 1; signal < NSIG; signal++) {
			if ((SIGNAL_TO_MASK(signal) & signalsPending) != 0) {
				*_signal = signal;
				return B_OK;
			}
		}
	}

	return B_INTERRUPTED;
}


/*!	Replace the current signal block mask and wait for any event to happen.
	Before returning, the original signal block mask is restored.
*/
int
sigsuspend(const sigset_t *mask)
{
	T(SigSuspend(*mask));

	struct thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);

	// Set the new block mask and block until interrupted.

	atomic_set(&thread->sig_block_mask, *mask & BLOCKABLE_SIGNALS);

	while (!has_signals_pending(thread)) {
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SIGNAL, NULL);
		thread_block();
	}

	// restore the original block mask
	atomic_set(&thread->sig_block_mask, oldMask);

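	// Temporarily re-enable the signals the given mask unblocked, so that
	// handle_signals() still delivers them, although the original block mask
	// is in place again.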
	thread->sig_temp_enabled = ~*mask;

	update_current_thread_signals_flag();

	T(SigSuspendDone());

	// we're not supposed to actually succeed
	return B_INTERRUPTED;
}


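/*!	Returns the set of signals currently pending for the calling thread in
	\a set.
*/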
int
sigpending(sigset_t *set)
{
	struct thread *thread = thread_get_current_thread();

	if (set == NULL)
		return B_BAD_VALUE;

	*set = atomic_get(&thread->sig_pending);
	return B_OK;
}


//	#pragma mark -


bigtime_t
_user_set_alarm(bigtime_t time, uint32 mode)
{
	syscall_64_bit_return_value();

	return set_alarm(time, mode);
}


status_t
_user_send_signal(pid_t team, uint signal)
{
	return send_signal_etc(team, signal, B_CHECK_PERMISSION);
}


status_t
_user_sigprocmask(int how, const sigset_t *userSet, sigset_t *userOldSet)
{
	sigset_t set, oldSet;
	status_t status;

	if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
				sizeof(sigset_t)) < B_OK))
		return B_BAD_ADDRESS;

	status = sigprocmask(how, userSet ? &set : NULL,
		userOldSet ? &oldSet : NULL);

	// copy old set if asked for
	if (status >= B_OK && userOldSet != NULL
		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_sigaction(int signal, const struct sigaction *userAction,
	struct sigaction *userOldAction)
{
	struct sigaction act, oact;
	status_t status;

	if ((userAction != NULL && user_memcpy(&act, userAction,
				sizeof(struct sigaction)) < B_OK)
		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction,
				sizeof(struct sigaction)) < B_OK))
		return B_BAD_ADDRESS;

	status = sigaction(signal, userAction ? &act : NULL,
		userOldAction ? &oact : NULL);

	// only copy the old action if a pointer has been given
	if (status >= B_OK && userOldAction != NULL
		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_sigwait(const sigset_t *userSet, int *_userSignal)
{
	if (userSet == NULL || _userSignal == NULL)
		return B_BAD_VALUE;

	sigset_t set;
	if (user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	int signal;
	status_t status = sigwait(&set, &signal);
	if (status < B_OK)
		return syscall_restart_handle_post(status);

	return user_memcpy(_userSignal, &signal, sizeof(int));
}


status_t
_user_sigsuspend(const sigset_t *userMask)
{
	sigset_t mask;

	if (userMask == NULL)
		return B_BAD_VALUE;
	if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return sigsuspend(&mask);
}


status_t
_user_sigpending(sigset_t *userSet)
{
	sigset_t set;
	int status;

	if (userSet == NULL)
		return B_BAD_VALUE;
	if (!IS_USER_ADDRESS(userSet))
		return B_BAD_ADDRESS;

	status = sigpending(&set);
	if (status == B_OK
		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_set_signal_stack(const stack_t *newUserStack, stack_t *oldUserStack)
{
	struct thread *thread = thread_get_current_thread();
	struct stack_t newStack, oldStack;
	bool onStack = false;

	if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
				sizeof(stack_t)) < B_OK)
		|| (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
				sizeof(stack_t)) < B_OK))
		return B_BAD_ADDRESS;

	if (thread->signal_stack_enabled) {
		// determine whether or not the user thread is currently
		// on the active signal stack
		onStack = arch_on_signal_stack(thread);
	}

	if (oldUserStack != NULL) {
		oldStack.ss_sp = (void *)thread->signal_stack_base;
		oldStack.ss_size = thread->signal_stack_size;
		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
			| (onStack ? SS_ONSTACK : 0);
	}

	if (newUserStack != NULL) {
		// no flags other than SS_DISABLE are allowed
		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
			return B_BAD_VALUE;

		if ((newStack.ss_flags & SS_DISABLE) == 0) {
			// check if the size is valid
			if (newStack.ss_size < MINSIGSTKSZ)
				return B_NO_MEMORY;
			if (onStack)
				return B_NOT_ALLOWED;
			if (!IS_USER_ADDRESS(newStack.ss_sp))
				return B_BAD_VALUE;

			thread->signal_stack_base = (addr_t)newStack.ss_sp;
			thread->signal_stack_size = newStack.ss_size;
			thread->signal_stack_enabled = true;
		} else
			thread->signal_stack_enabled = false;
	}

	// only copy the old stack info if a pointer has been given
	if (oldUserStack != NULL
		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}