xref: /haiku/src/system/kernel/signal.cpp (revision 3995592cdf304335132305e27c40cbb0b1ac46e3)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2011-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
5  * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
6  *
7  * Distributed under the terms of the MIT License.
8  */
9 
10 
11 /*! POSIX signals handling routines */
12 
13 
14 #include <ksignal.h>
15 
16 #include <errno.h>
17 #include <stddef.h>
18 #include <string.h>
19 
20 #include <OS.h>
21 #include <KernelExport.h>
22 
23 #include <cpu.h>
24 #include <core_dump.h>
25 #include <debug.h>
26 #include <kernel.h>
27 #include <kscheduler.h>
28 #include <sem.h>
29 #include <syscall_restart.h>
30 #include <syscall_utils.h>
31 #include <team.h>
32 #include <thread.h>
33 #include <tracing.h>
34 #include <user_debugger.h>
35 #include <user_thread.h>
36 #include <util/AutoLock.h>
37 
38 
39 //#define TRACE_SIGNAL
40 #ifdef TRACE_SIGNAL
41 #	define TRACE(x) dprintf x
42 #else
43 #	define TRACE(x) ;
44 #endif
45 
46 
47 #define BLOCKABLE_SIGNALS	\
48 	(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)	\
49 	| SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD)	\
50 	| SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD)	\
51 	| SIGNAL_TO_MASK(SIGNAL_CANCEL_THREAD)))
52 #define STOP_SIGNALS \
53 	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
54 	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
55 #define CONTINUE_SIGNALS \
56 	(SIGNAL_TO_MASK(SIGCONT) | SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD) \
57 	| SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD))
58 #define DEFAULT_IGNORE_SIGNALS \
59 	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
60 	| SIGNAL_TO_MASK(SIGCONT) \
61 	| SIGNAL_RANGE_TO_MASK(SIGNAL_REALTIME_MIN, SIGNAL_REALTIME_MAX))
62 #define NON_DEFERRABLE_SIGNALS	\
63 	(KILL_SIGNALS				\
64 	| SIGNAL_TO_MASK(SIGILL)	\
65 	| SIGNAL_TO_MASK(SIGFPE)	\
66 	| SIGNAL_TO_MASK(SIGSEGV))
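
// These masks are combined via SIGNAL_TO_MASK() throughout this file. As a
// minimal illustration (a hypothetical helper, not part of this file), a
// blockability check is just a mask test:
//
//	static inline bool
//	is_blockable_signal(uint32 number)
//	{
//		return (SIGNAL_TO_MASK(number) & BLOCKABLE_SIGNALS) != 0;
//	}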
67 
68 
69 static const struct {
70 	const char*	name;
71 	int32		priority;
72 } kSignalInfos[__MAX_SIGNO + 1] = {
73 	{"NONE",			-1},
74 	{"HUP",				0},
75 	{"INT",				0},
76 	{"QUIT",			0},
77 	{"ILL",				0},
78 	{"CHLD",			0},
79 	{"ABRT",			0},
80 	{"PIPE",			0},
81 	{"FPE",				0},
82 	{"KILL",			100},
83 	{"STOP",			0},
84 	{"SEGV",			0},
85 	{"CONT",			0},
86 	{"TSTP",			0},
87 	{"ALRM",			0},
88 	{"TERM",			0},
89 	{"TTIN",			0},
90 	{"TTOU",			0},
91 	{"USR1",			0},
92 	{"USR2",			0},
93 	{"WINCH",			0},
94 	{"KILLTHR",			100},
95 	{"TRAP",			0},
96 	{"POLL",			0},
97 	{"PROF",			0},
98 	{"SYS",				0},
99 	{"URG",				0},
100 	{"VTALRM",			0},
101 	{"XCPU",			0},
102 	{"XFSZ",			0},
103 	{"SIGBUS",			0},
104 	{"SIGRESERVED1",	0},
105 	{"SIGRESERVED2",	0},
106 	{"SIGRT1",			8},
107 	{"SIGRT2",			7},
108 	{"SIGRT3",			6},
109 	{"SIGRT4",			5},
110 	{"SIGRT5",			4},
111 	{"SIGRT6",			3},
112 	{"SIGRT7",			2},
113 	{"SIGRT8",			1},
114 	{"invalid 41",		0},
115 	{"invalid 42",		0},
116 	{"invalid 43",		0},
117 	{"invalid 44",		0},
118 	{"invalid 45",		0},
119 	{"invalid 46",		0},
120 	{"invalid 47",		0},
121 	{"invalid 48",		0},
122 	{"invalid 49",		0},
123 	{"invalid 50",		0},
124 	{"invalid 51",		0},
125 	{"invalid 52",		0},
126 	{"invalid 53",		0},
127 	{"invalid 54",		0},
128 	{"invalid 55",		0},
129 	{"invalid 56",		0},
130 	{"invalid 57",		0},
131 	{"invalid 58",		0},
132 	{"invalid 59",		0},
133 	{"invalid 60",		0},
134 	{"invalid 61",		0},
135 	{"invalid 62",		0},
136 	{"CANCEL_THREAD",	0},
137 	{"CONTINUE_THREAD",	0}	// priority must be <= that of SIGSTOP
138 };
139 
140 
141 static inline const char*
142 signal_name(uint32 number)
143 {
144 	return number <= __MAX_SIGNO ? kSignalInfos[number].name : "invalid";
145 }
146 
147 
148 // #pragma mark - SignalHandledCaller
149 
150 
151 struct SignalHandledCaller {
152 	SignalHandledCaller(Signal* signal)
153 		:
154 		fSignal(signal)
155 	{
156 	}
157 
158 	~SignalHandledCaller()
159 	{
160 		Done();
161 	}
162 
163 	void Done()
164 	{
165 		if (fSignal != NULL) {
166 			fSignal->Handled();
167 			fSignal = NULL;
168 		}
169 	}
170 
171 private:
172 	Signal*	fSignal;
173 };
174 
175 
176 // #pragma mark - QueuedSignalsCounter
177 
178 
179 /*!	Creates a counter with the given limit.
180 	The limit defines the maximum the counter may reach. Since the
181 	BReferenceable's reference count is used, it is assumed that the owning
182 	team holds a reference and the reference count is one greater than the
183 	counter value.
184 	\param limit The maximum allowed value the counter may have. When
185 		\code < 0 \endcode, the value is not limited.
186 */
187 QueuedSignalsCounter::QueuedSignalsCounter(int32 limit)
188 	:
189 	fLimit(limit)
190 {
191 }
192 
193 
194 /*!	Increments the counter, if the limit allows that.
195 	\return \c true, if incrementing the counter succeeded, \c false otherwise.
196 */
197 bool
198 QueuedSignalsCounter::Increment()
199 {
200 	// no limit => no problem
201 	if (fLimit < 0) {
202 		AcquireReference();
203 		return true;
204 	}
205 
206 	// Increment the reference count manually, so we can check atomically. We
207 	// compare the old value against fLimit directly, since our (primary)
208 	// owner holds one reference that we don't want to count.
209 	if (atomic_add(&fReferenceCount, 1) > fLimit) {
210 		ReleaseReference();
211 		return false;
212 	}
213 
214 	return true;
215 }
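
// A minimal usage sketch (hypothetical caller; Signal::CreateQueuable() below
// is the actual in-tree user): the counter is obtained from the owning team,
// and the acquired reference is released again when the queued signal is
// destroyed.
//
//	QueuedSignalsCounter* counter
//		= thread_get_current_thread()->team->QueuedSignalsCounter();
//	if (!counter->Increment())
//		return EAGAIN;	// queuing limit reached
//	// ... queue the signal; its destructor releases the counter reference ...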
216 
217 
218 // #pragma mark - Signal
219 
220 
221 Signal::Signal()
222 	:
223 	fCounter(NULL),
224 	fPending(false)
225 {
226 }
227 
228 
229 Signal::Signal(const Signal& other)
230 	:
231 	fCounter(NULL),
232 	fNumber(other.fNumber),
233 	fSignalCode(other.fSignalCode),
234 	fErrorCode(other.fErrorCode),
235 	fSendingProcess(other.fSendingProcess),
236 	fSendingUser(other.fSendingUser),
237 	fStatus(other.fStatus),
238 	fPollBand(other.fPollBand),
239 	fAddress(other.fAddress),
240 	fUserValue(other.fUserValue),
241 	fPending(false)
242 {
243 }
244 
245 
246 Signal::Signal(uint32 number, int32 signalCode, int32 errorCode,
247 	pid_t sendingProcess)
248 	:
249 	fCounter(NULL),
250 	fNumber(number),
251 	fSignalCode(signalCode),
252 	fErrorCode(errorCode),
253 	fSendingProcess(sendingProcess),
254 	fSendingUser(getuid()),
255 	fStatus(0),
256 	fPollBand(0),
257 	fAddress(NULL),
258 	fPending(false)
259 {
260 	fUserValue.sival_ptr = NULL;
261 }
262 
263 
264 Signal::~Signal()
265 {
266 	if (fCounter != NULL)
267 		fCounter->ReleaseReference();
268 }
269 
270 
271 /*!	Creates a queuable clone of the given signal.
272 	Also enforces the current team's signal queuing limit.
273 
274 	\param signal The signal to clone.
275 	\param queuingRequired If \c true, the function will return an error code
276 		when creating the clone fails for any reason. Otherwise, the function
277 		will set \a _signalToQueue to \c NULL, but still return \c B_OK.
278 	\param _signalToQueue Return parameter. Set to the clone of the signal.
279 	\return When \c queuingRequired is \c false, always \c B_OK. Otherwise
280 		\c B_OK, when creating the signal clone succeeds, another error code,
281 		when it fails.
282 */
283 /*static*/ status_t
284 Signal::CreateQueuable(const Signal& signal, bool queuingRequired,
285 	Signal*& _signalToQueue)
286 {
287 	_signalToQueue = NULL;
288 
289 	// If interrupts are disabled, we can't allocate a signal.
290 	if (!are_interrupts_enabled())
291 		return queuingRequired ? B_BAD_VALUE : B_OK;
292 
293 	// increment the queued signals counter
294 	QueuedSignalsCounter* counter
295 		= thread_get_current_thread()->team->QueuedSignalsCounter();
296 	if (!counter->Increment())
297 		return queuingRequired ? EAGAIN : B_OK;
298 
299 	// allocate the signal
300 	Signal* signalToQueue = new(std::nothrow) Signal(signal);
301 	if (signalToQueue == NULL) {
302 		counter->Decrement();
303 		return queuingRequired ? B_NO_MEMORY : B_OK;
304 	}
305 
306 	signalToQueue->fCounter = counter;
307 
308 	_signalToQueue = signalToQueue;
309 	return B_OK;
310 }
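
// A minimal usage sketch (assuming an existing Signal 'signal'; compare
// send_signal_to_thread() further below): the clone, if any, is handed over
// to one of the send_signal_to_*_locked() functions, which take over the
// reference.
//
//	Signal* signalToQueue = NULL;
//	status_t error = Signal::CreateQueuable(signal, false, signalToQueue);
//	if (error != B_OK)
//		return error;
//	// pass 'signalToQueue' (possibly NULL) on, transferring ownership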
311 
312 void
313 Signal::SetTo(uint32 number)
314 {
315 	Team* team = thread_get_current_thread()->team;
316 
317 	fNumber = number;
318 	fSignalCode = SI_USER;
319 	fErrorCode = 0;
320 	fSendingProcess = team->id;
321 	fSendingUser = team->effective_uid;
322 	fStatus = 0;
323 	fPollBand = 0;
324 	fAddress = NULL;
325 	fUserValue.sival_ptr = NULL;
326 }
327 
328 
329 int32
330 Signal::Priority() const
331 {
332 	return kSignalInfos[fNumber].priority;
333 }
334 
335 
336 void
337 Signal::Handled()
338 {
339 	ReleaseReference();
340 }
341 
342 
343 void
344 Signal::LastReferenceReleased()
345 {
346 	if (are_interrupts_enabled())
347 		delete this;
348 	else
349 		deferred_delete(this);
350 }
351 
352 
353 // #pragma mark - PendingSignals
354 
355 
356 PendingSignals::PendingSignals()
357 	:
358 	fQueuedSignalsMask(0),
359 	fUnqueuedSignalsMask(0)
360 {
361 }
362 
363 
364 PendingSignals::~PendingSignals()
365 {
366 	Clear();
367 }
368 
369 
370 /*!	Of the signals in \a nonBlocked, returns the priority of the one with
371 	the highest priority.
372 	\param nonBlocked The mask with the non-blocked signals.
373 	\return The priority of the highest priority non-blocked signal, or, if all
374 		signals are blocked, \c -1.
375 */
376 int32
377 PendingSignals::HighestSignalPriority(sigset_t nonBlocked) const
378 {
379 	Signal* queuedSignal;
380 	int32 unqueuedSignal;
381 	return _GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal);
382 }
383 
384 
385 void
386 PendingSignals::Clear()
387 {
388 	// release references of all queued signals
389 	while (Signal* signal = fQueuedSignals.RemoveHead())
390 		signal->Handled();
391 
392 	fQueuedSignalsMask = 0;
393 	fUnqueuedSignalsMask = 0;
394 }
395 
396 
397 /*!	Adds a signal.
398 	Takes over the reference to the signal from the caller.
399 */
400 void
401 PendingSignals::AddSignal(Signal* signal)
402 {
403 	// queue according to priority
404 	int32 priority = signal->Priority();
405 	Signal* otherSignal = NULL;
406 	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
407 			(otherSignal = it.Next()) != NULL;) {
408 		if (priority > otherSignal->Priority())
409 			break;
410 	}
411 
412 	fQueuedSignals.InsertBefore(otherSignal, signal);
413 	signal->SetPending(true);
414 
415 	fQueuedSignalsMask |= SIGNAL_TO_MASK(signal->Number());
416 }
417 
418 
419 void
420 PendingSignals::RemoveSignal(Signal* signal)
421 {
422 	signal->SetPending(false);
423 	fQueuedSignals.Remove(signal);
424 	_UpdateQueuedSignalMask();
425 }
426 
427 
428 void
429 PendingSignals::RemoveSignals(sigset_t mask)
430 {
431 	// remove from queued signals
432 	if ((fQueuedSignalsMask & mask) != 0) {
433 		for (SignalList::Iterator it = fQueuedSignals.GetIterator();
434 				Signal* signal = it.Next();) {
435 			// remove signal, if in mask
436 			if ((SIGNAL_TO_MASK(signal->Number()) & mask) != 0) {
437 				it.Remove();
438 				signal->SetPending(false);
439 				signal->Handled();
440 			}
441 		}
442 
443 		fQueuedSignalsMask &= ~mask;
444 	}
445 
446 	// remove from unqueued signals
447 	fUnqueuedSignalsMask &= ~mask;
448 }
449 
450 
451 /*!	Removes and returns a signal in \a nonBlocked that has the highest priority.
452 	The caller gets a reference to the returned signal, if any.
453 	\param nonBlocked The mask of non-blocked signals.
454 	\param buffer If the signal is not queued this buffer is returned. In this
455 		case the method acquires a reference to \a buffer, so that the caller
456 		gets a reference also in this case.
457 	\return The removed signal or \c NULL, if all signals are blocked.
458 */
459 Signal*
460 PendingSignals::DequeueSignal(sigset_t nonBlocked, Signal& buffer)
461 {
462 	// find the signal with the highest priority
463 	Signal* queuedSignal;
464 	int32 unqueuedSignal;
465 	if (_GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal) < 0)
466 		return NULL;
467 
468 	// if it is a queued signal, dequeue it
469 	if (queuedSignal != NULL) {
470 		fQueuedSignals.Remove(queuedSignal);
471 		queuedSignal->SetPending(false);
472 		_UpdateQueuedSignalMask();
473 		return queuedSignal;
474 	}
475 
476 	// it is unqueued -- remove from mask
477 	fUnqueuedSignalsMask &= ~SIGNAL_TO_MASK(unqueuedSignal);
478 
479 	// init buffer
480 	buffer.SetTo(unqueuedSignal);
481 	buffer.AcquireReference();
482 	return &buffer;
483 }
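
// A minimal usage sketch (hypothetical caller; dequeue_thread_or_team_signal()
// below shows the in-tree pattern): the returned signal carries a reference in
// either case, which is released via Signal::Handled() once processed.
//
//	Signal stackSignal;
//	Signal* signal = pendingSignals.DequeueSignal(nonBlockedMask, stackSignal);
//	if (signal != NULL) {
//		// ... act on the signal ...
//		signal->Handled();
//	}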
484 
485 
486 /*!	Of the signals in \a nonBlocked, returns the priority of the one with
487 	the highest priority.
488 	\param nonBlocked The mask of non-blocked signals.
489 	\param _queuedSignal If the found signal is a queued signal, the variable
490 		will be set to that signal, otherwise to \c NULL.
491 	\param _unqueuedSignal If the found signal is an unqueued signal, the
492 		variable is set to that signal's number, otherwise to \c -1.
493 	\return The priority of the highest priority non-blocked signal, or, if all
494 		signals are blocked, \c -1.
495 */
496 int32
497 PendingSignals::_GetHighestPrioritySignal(sigset_t nonBlocked,
498 	Signal*& _queuedSignal, int32& _unqueuedSignal) const
499 {
500 	// check queued signals
501 	Signal* queuedSignal = NULL;
502 	int32 queuedPriority = -1;
503 
504 	if ((fQueuedSignalsMask & nonBlocked) != 0) {
505 		for (SignalList::ConstIterator it = fQueuedSignals.GetIterator();
506 				Signal* signal = it.Next();) {
507 			if ((SIGNAL_TO_MASK(signal->Number()) & nonBlocked) != 0) {
508 				queuedPriority = signal->Priority();
509 				queuedSignal = signal;
510 				break;
511 			}
512 		}
513 	}
514 
515 	// check unqueued signals
516 	int32 unqueuedSignal = -1;
517 	int32 unqueuedPriority = -1;
518 
519 	sigset_t unqueuedSignals = fUnqueuedSignalsMask & nonBlocked;
520 	if (unqueuedSignals != 0) {
521 		int32 signal = 1;
522 		while (unqueuedSignals != 0) {
523 			sigset_t mask = SIGNAL_TO_MASK(signal);
524 			if ((unqueuedSignals & mask) != 0) {
525 				int32 priority = kSignalInfos[signal].priority;
526 				if (priority > unqueuedPriority) {
527 					unqueuedSignal = signal;
528 					unqueuedPriority = priority;
529 				}
530 				unqueuedSignals &= ~mask;
531 			}
532 
533 			signal++;
534 		}
535 	}
536 
537 	// Return found queued or unqueued signal, whichever has the higher
538 	// priority.
539 	if (queuedPriority >= unqueuedPriority) {
540 		_queuedSignal = queuedSignal;
541 		_unqueuedSignal = -1;
542 		return queuedPriority;
543 	}
544 
545 	_queuedSignal = NULL;
546 	_unqueuedSignal = unqueuedSignal;
547 	return unqueuedPriority;
548 }
549 
550 
551 void
552 PendingSignals::_UpdateQueuedSignalMask()
553 {
554 	sigset_t mask = 0;
555 	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
556 			Signal* signal = it.Next();) {
557 		mask |= SIGNAL_TO_MASK(signal->Number());
558 	}
559 
560 	fQueuedSignalsMask = mask;
561 }
562 
563 
564 // #pragma mark - signal tracing
565 
566 
567 #if SIGNAL_TRACING
568 
569 namespace SignalTracing {
570 
571 
572 class HandleSignal : public AbstractTraceEntry {
573 	public:
574 		HandleSignal(uint32 signal)
575 			:
576 			fSignal(signal)
577 		{
578 			Initialized();
579 		}
580 
581 		virtual void AddDump(TraceOutput& out)
582 		{
583 			out.Print("signal handle:  %" B_PRIu32 " (%s)" , fSignal,
584 				signal_name(fSignal));
585 		}
586 
587 	private:
588 		uint32		fSignal;
589 };
590 
591 
592 class ExecuteSignalHandler : public AbstractTraceEntry {
593 	public:
594 		ExecuteSignalHandler(uint32 signal, struct sigaction* handler)
595 			:
596 			fSignal(signal),
597 			fHandler((void*)handler->sa_handler)
598 		{
599 			Initialized();
600 		}
601 
602 		virtual void AddDump(TraceOutput& out)
603 		{
604 			out.Print("signal exec handler: signal: %" B_PRIu32 " (%s), "
605 				"handler: %p", fSignal, signal_name(fSignal), fHandler);
606 		}
607 
608 	private:
609 		uint32	fSignal;
610 		void*	fHandler;
611 };
612 
613 
614 class SendSignal : public AbstractTraceEntry {
615 	public:
616 		SendSignal(pid_t target, uint32 signal, uint32 flags)
617 			:
618 			fTarget(target),
619 			fSignal(signal),
620 			fFlags(flags)
621 		{
622 			Initialized();
623 		}
624 
625 		virtual void AddDump(TraceOutput& out)
626 		{
627 			out.Print("signal send: target: %" B_PRId32 ", signal: %" B_PRIu32
628 				" (%s), flags: %#" B_PRIx32, fTarget, fSignal,
629 				signal_name(fSignal), fFlags);
630 		}
631 
632 	private:
633 		pid_t	fTarget;
634 		uint32	fSignal;
635 		uint32	fFlags;
636 };
637 
638 
639 class SigAction : public AbstractTraceEntry {
640 	public:
641 		SigAction(uint32 signal, const struct sigaction* act)
642 			:
643 			fSignal(signal),
644 			fAction(*act)
645 		{
646 			Initialized();
647 		}
648 
649 		virtual void AddDump(TraceOutput& out)
650 		{
651 			out.Print("signal action: signal: %" B_PRIu32 " (%s), "
652 				"action: {handler: %p, flags: %#x, mask: %#" B_PRIx64 "}",
653 				fSignal, signal_name(fSignal), fAction.sa_handler,
654 				fAction.sa_flags, (uint64)fAction.sa_mask);
655 		}
656 
657 	private:
658 		uint32				fSignal;
659 		struct sigaction	fAction;
660 };
661 
662 
663 class SigProcMask : public AbstractTraceEntry {
664 	public:
665 		SigProcMask(int how, sigset_t mask)
666 			:
667 			fHow(how),
668 			fMask(mask),
669 			fOldMask(thread_get_current_thread()->sig_block_mask)
670 		{
671 			Initialized();
672 		}
673 
674 		virtual void AddDump(TraceOutput& out)
675 		{
676 			const char* how = "invalid";
677 			switch (fHow) {
678 				case SIG_BLOCK:
679 					how = "block";
680 					break;
681 				case SIG_UNBLOCK:
682 					how = "unblock";
683 					break;
684 				case SIG_SETMASK:
685 					how = "set";
686 					break;
687 			}
688 
689 			out.Print("signal proc mask: %s 0x%llx, old mask: 0x%llx", how,
690 				(long long)fMask, (long long)fOldMask);
691 		}
692 
693 	private:
694 		int			fHow;
695 		sigset_t	fMask;
696 		sigset_t	fOldMask;
697 };
698 
699 
700 class SigSuspend : public AbstractTraceEntry {
701 	public:
702 		SigSuspend(sigset_t mask)
703 			:
704 			fMask(mask),
705 			fOldMask(thread_get_current_thread()->sig_block_mask)
706 		{
707 			Initialized();
708 		}
709 
710 		virtual void AddDump(TraceOutput& out)
711 		{
712 			out.Print("signal suspend: %#llx, old mask: %#llx",
713 				(long long)fMask, (long long)fOldMask);
714 		}
715 
716 	private:
717 		sigset_t	fMask;
718 		sigset_t	fOldMask;
719 };
720 
721 
722 class SigSuspendDone : public AbstractTraceEntry {
723 	public:
724 		SigSuspendDone()
725 			:
726 			fSignals(thread_get_current_thread()->ThreadPendingSignals())
727 		{
728 			Initialized();
729 		}
730 
731 		virtual void AddDump(TraceOutput& out)
732 		{
733 			out.Print("signal suspend done: %#" B_PRIx32, fSignals);
734 		}
735 
736 	private:
737 		uint32		fSignals;
738 };
739 
740 }	// namespace SignalTracing
741 
742 #	define T(x)	new(std::nothrow) SignalTracing::x
743 
744 #else
745 #	define T(x)
746 #endif	// SIGNAL_TRACING
747 
748 
749 // #pragma mark -
750 
751 
752 /*!	Updates the given thread's Thread::flags field according to what signals are
753 	pending.
754 	The caller must hold \c team->signal_lock.
755 */
756 static void
757 update_thread_signals_flag(Thread* thread)
758 {
759 	sigset_t mask = ~thread->sig_block_mask;
760 	if ((thread->AllPendingSignals() & mask) != 0)
761 		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
762 	else
763 		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
764 }
765 
766 
767 /*!	Updates the current thread's Thread::flags field according to what signals
768 	are pending.
769 	The caller must hold \c team->signal_lock.
770 */
771 static void
772 update_current_thread_signals_flag()
773 {
774 	update_thread_signals_flag(thread_get_current_thread());
775 }
776 
777 
778 /*!	Updates all of the given team's threads' Thread::flags fields according to
779 	what signals are pending.
780 	The caller must hold \c signal_lock.
781 */
782 static void
783 update_team_threads_signal_flag(Team* team)
784 {
785 	for (Thread* thread = team->thread_list; thread != NULL;
786 			thread = thread->team_next) {
787 		update_thread_signals_flag(thread);
788 	}
789 }
790 
791 
792 /*!	Notifies the user debugger about a signal to be handled.
793 
794 	The caller must not hold any locks.
795 
796 	\param thread The current thread.
797 	\param signal The signal to be handled.
798 	\param handler The installed signal handler for the signal.
799 	\param deadly Indicates whether the signal is deadly.
800 	\return \c true, if the signal shall be handled, \c false, if it shall be
801 		ignored.
802 */
803 static bool
804 notify_debugger(Thread* thread, Signal* signal, struct sigaction& handler,
805 	bool deadly)
806 {
807 	uint64 signalMask = SIGNAL_TO_MASK(signal->Number());
808 
809 	// first check the ignore signal masks the debugger specified for the thread
810 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
811 
812 	if ((thread->debug_info.ignore_signals_once & signalMask) != 0) {
813 		thread->debug_info.ignore_signals_once &= ~signalMask;
814 		return true;
815 	}
816 
817 	if ((thread->debug_info.ignore_signals & signalMask) != 0)
818 		return true;
819 
820 	threadDebugInfoLocker.Unlock();
821 
822 	// deliver the event
823 	return user_debug_handle_signal(signal->Number(), &handler, deadly);
824 }
825 
826 
827 /*!	Removes and returns a signal with the highest priority in \a nonBlocked that
828 	is pending in the given thread or its team.
829 	After dequeuing the signal, the Thread::flags fields of the affected
830 	threads are updated.
831 	The caller gets a reference to the returned signal, if any.
832 	The caller must hold \c team->signal_lock.
833 	\param thread The thread.
834 	\param nonBlocked The mask of non-blocked signals.
835 	\param buffer If the signal is not queued this buffer is returned. In this
836 		case the method acquires a reference to \a buffer, so that the caller
837 		gets a reference also in this case.
838 	\return The removed signal or \c NULL, if all signals are blocked.
839 */
840 static Signal*
841 dequeue_thread_or_team_signal(Thread* thread, sigset_t nonBlocked,
842 	Signal& buffer)
843 {
844 	Team* team = thread->team;
845 	Signal* signal;
846 	if (team->HighestPendingSignalPriority(nonBlocked)
847 			> thread->HighestPendingSignalPriority(nonBlocked)) {
848 		signal = team->DequeuePendingSignal(nonBlocked, buffer);
849 		update_team_threads_signal_flag(team);
850 	} else {
851 		signal = thread->DequeuePendingSignal(nonBlocked, buffer);
852 		update_thread_signals_flag(thread);
853 	}
854 
855 	return signal;
856 }
857 
858 
859 static status_t
860 setup_signal_frame(Thread* thread, struct sigaction* action, Signal* signal,
861 	sigset_t signalMask)
862 {
863 	// prepare the data we need to copy onto the user stack
864 	signal_frame_data frameData;
865 
866 	// signal info
867 	frameData.info.si_signo = signal->Number();
868 	frameData.info.si_code = signal->SignalCode();
869 	frameData.info.si_errno = signal->ErrorCode();
870 	frameData.info.si_pid = signal->SendingProcess();
871 	frameData.info.si_uid = signal->SendingUser();
872 	frameData.info.si_addr = signal->Address();
873 	frameData.info.si_status = signal->Status();
874 	frameData.info.si_band = signal->PollBand();
875 	frameData.info.si_value = signal->UserValue();
876 
877 	// context
878 	frameData.context.uc_link = thread->user_signal_context;
879 	frameData.context.uc_sigmask = signalMask;
880 	// uc_stack and uc_mcontext are filled in by the architecture specific code.
881 
882 	// user data
883 	frameData.user_data = action->sa_userdata;
884 
885 	// handler function
886 	frameData.siginfo_handler = (action->sa_flags & SA_SIGINFO) != 0;
887 	frameData.handler = frameData.siginfo_handler
888 		? (void*)action->sa_sigaction : (void*)action->sa_handler;
889 
890 	// thread flags -- save and clear the thread's syscall restart related
891 	// flags
892 	frameData.thread_flags = atomic_and(&thread->flags,
893 		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
894 
895 	// syscall restart related fields
896 	memcpy(frameData.syscall_restart_parameters,
897 		thread->syscall_restart.parameters,
898 		sizeof(frameData.syscall_restart_parameters));
899 
900 	// commpage address
901 	frameData.commpage_address = thread->team->commpage_address;
902 
903 	// syscall_restart_return_value is filled in by the architecture specific
904 	// code.
905 
906 	return arch_setup_signal_frame(thread, action, &frameData);
907 }
908 
909 
910 /*! Actually handles pending signals -- i.e. the thread will exit, a custom
911 	signal handler is prepared, or whatever the signal demands.
912 	The function will not return when a deadly signal is encountered. The
913 	function will suspend the thread indefinitely when a stop signal is
914 	encountered.
915 	Interrupts must be enabled.
916 	\param thread The current thread.
917 */
918 void
919 handle_signals(Thread* thread)
920 {
921 	Team* team = thread->team;
922 
923 	TeamLocker teamLocker(team);
924 	InterruptsSpinLocker locker(thread->team->signal_lock);
925 
926 	// If userland requested to defer signals, check now whether this is
927 	// possible.
928 	sigset_t nonBlockedMask = ~thread->sig_block_mask;
929 	sigset_t signalMask = thread->AllPendingSignals() & nonBlockedMask;
930 
931 	if (thread->user_thread->defer_signals > 0
932 		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0
933 		&& thread->sigsuspend_original_unblocked_mask == 0) {
934 		thread->user_thread->pending_signals = signalMask;
935 		return;
936 	}
937 
938 	thread->user_thread->pending_signals = 0;
939 
940 	// determine syscall restart behavior
941 	uint32 restartFlags = atomic_and(&thread->flags,
942 		~THREAD_FLAGS_DONT_RESTART_SYSCALL);
943 	bool alwaysRestart
944 		= (restartFlags & THREAD_FLAGS_ALWAYS_RESTART_SYSCALL) != 0;
945 	bool restart = alwaysRestart
946 		|| (restartFlags & THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;
947 
948 	// Loop until we've handled all signals.
949 	bool initialIteration = true;
950 	while (true) {
951 		if (initialIteration) {
952 			initialIteration = false;
953 		} else {
954 			teamLocker.Lock();
955 			locker.Lock();
956 
957 			signalMask = thread->AllPendingSignals() & nonBlockedMask;
958 		}
959 
960 		// Unless SIGKILL[THR] are pending, check whether the thread shall stop
961 		// for a core dump or for debugging.
962 		if ((signalMask & KILL_SIGNALS) == 0) {
963 			if ((atomic_get(&thread->flags) & THREAD_FLAGS_TRAP_FOR_CORE_DUMP)
964 					!= 0) {
965 				locker.Unlock();
966 				teamLocker.Unlock();
967 
968 				core_dump_trap_thread();
969 				continue;
970 			}
971 
972 			if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP)
973 					!= 0) {
974 				locker.Unlock();
975 				teamLocker.Unlock();
976 
977 				user_debug_stop_thread();
978 				continue;
979 			}
980 		}
981 
982 		// We're done, if there aren't any pending signals anymore.
983 		if ((signalMask & nonBlockedMask) == 0)
984 			break;
985 
986 		// get pending non-blocked thread or team signal with the highest
987 		// priority
988 		Signal stackSignal;
989 		Signal* signal = dequeue_thread_or_team_signal(thread, nonBlockedMask,
990 			stackSignal);
991 		ASSERT(signal != NULL);
992 		SignalHandledCaller signalHandledCaller(signal);
993 
994 		locker.Unlock();
995 
996 		// get the action for the signal
997 		struct sigaction handler;
998 		if (signal->Number() <= MAX_SIGNAL_NUMBER) {
999 			handler = team->SignalActionFor(signal->Number());
1000 		} else {
1001 			handler.sa_handler = SIG_DFL;
1002 			handler.sa_flags = 0;
1003 		}
1004 
1005 		if ((handler.sa_flags & SA_ONESHOT) != 0
1006 			&& handler.sa_handler != SIG_IGN && handler.sa_handler != SIG_DFL) {
1007 			team->SignalActionFor(signal->Number()).sa_handler = SIG_DFL;
1008 		}
1009 
1010 		T(HandleSignal(signal->Number()));
1011 
1012 		teamLocker.Unlock();
1013 
1014 		// debug the signal, if a debugger is installed and the signal debugging
1015 		// flag is set
1016 		bool debugSignal = (~atomic_get(&team->debug_info.flags)
1017 				& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS))
1018 			== 0;
1019 
1020 		// handle the signal
1021 		TRACE(("Thread %" B_PRId32 " received signal %s\n", thread->id,
1022 			kSignalInfos[signal->Number()].name));
1023 
1024 		if (handler.sa_handler == SIG_IGN) {
1025 			// signal is to be ignored
1026 			// TODO: apply zombie cleaning on SIGCHLD
1027 
1028 			// notify the debugger
1029 			if (debugSignal)
1030 				notify_debugger(thread, signal, handler, false);
1031 			continue;
1032 		} else if (handler.sa_handler == SIG_DFL) {
1033 			// default signal behaviour
1034 
1035 			// realtime signals are ignored by default
1036 			if (signal->Number() >= SIGNAL_REALTIME_MIN
1037 				&& signal->Number() <= SIGNAL_REALTIME_MAX) {
1038 				// notify the debugger
1039 				if (debugSignal)
1040 					notify_debugger(thread, signal, handler, false);
1041 				continue;
1042 			}
1043 
1044 			bool killTeam = false;
1045 			switch (signal->Number()) {
1046 				case SIGCHLD:
1047 				case SIGWINCH:
1048 				case SIGURG:
1049 					// notify the debugger
1050 					if (debugSignal)
1051 						notify_debugger(thread, signal, handler, false);
1052 					continue;
1053 
1054 				case SIGNAL_DEBUG_THREAD:
1055 					// ignore -- used together with B_THREAD_DEBUG_STOP, which
1056 					// is handled above
1057 					continue;
1058 
1059 				case SIGNAL_CANCEL_THREAD:
1060 					// set up the signal handler
1061 					handler.sa_handler = thread->cancel_function;
1062 					handler.sa_flags = 0;
1063 					handler.sa_mask = 0;
1064 					handler.sa_userdata = NULL;
1065 
1066 					restart = false;
1067 						// we always want to interrupt
1068 					break;
1069 
1070 				case SIGNAL_CONTINUE_THREAD:
1071 					// prevent syscall restart, but otherwise ignore
1072 					restart = false;
1073 					atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1074 					continue;
1075 
1076 				case SIGCONT:
1077 					// notify the debugger
1078 					if (debugSignal
1079 						&& !notify_debugger(thread, signal, handler, false))
1080 						continue;
1081 
1082 					// notify threads waiting for team state changes
1083 					if (thread == team->main_thread) {
1084 						team->LockTeamAndParent(false);
1085 
1086 						team_set_job_control_state(team,
1087 							JOB_CONTROL_STATE_CONTINUED, signal);
1088 
1089 						team->UnlockTeamAndParent();
1090 
1091 						// The standard states that the system *may* send a
1092 						// SIGCHLD when a child is continued. I haven't found
1093 						// a good reason why we would want to, though.
1094 					}
1095 					continue;
1096 
1097 				case SIGSTOP:
1098 				case SIGTSTP:
1099 				case SIGTTIN:
1100 				case SIGTTOU:
1101 				{
1102 					// notify the debugger
1103 					if (debugSignal
1104 						&& !notify_debugger(thread, signal, handler, false))
1105 						continue;
1106 
1107 					// The terminal-sent stop signals are allowed to stop the
1108 					// process only if it doesn't belong to an orphaned process
1109 					// group. Otherwise the signal must be discarded.
1110 					team->LockProcessGroup();
1111 					AutoLocker<ProcessGroup> groupLocker(team->group, true);
1112 					if (signal->Number() != SIGSTOP
1113 						&& team->group->IsOrphaned()) {
1114 						continue;
1115 					}
1116 
1117 					// notify threads waiting for team state changes
1118 					if (thread == team->main_thread) {
1119 						team->LockTeamAndParent(false);
1120 
1121 						team_set_job_control_state(team,
1122 							JOB_CONTROL_STATE_STOPPED, signal);
1123 
1124 						// send a SIGCHLD to the parent (unless it has
1125 						// SA_NOCLDSTOP set)
1126 						Team* parentTeam = team->parent;
1127 
1128 						struct sigaction& parentHandler
1129 							= parentTeam->SignalActionFor(SIGCHLD);
1130 						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
1131 							Signal childSignal(SIGCHLD, CLD_STOPPED, B_OK,
1132 								team->id);
1133 							childSignal.SetStatus(signal->Number());
1134 							childSignal.SetSendingUser(signal->SendingUser());
1135 							send_signal_to_team(parentTeam, childSignal, 0);
1136 						}
1137 
1138 						team->UnlockTeamAndParent();
1139 					}
1140 
1141 					groupLocker.Unlock();
1142 
1143 					// Suspend the thread, unless there's already a signal to
1144 					// continue or kill pending.
1145 					locker.Lock();
1146 					bool resume = (thread->AllPendingSignals()
1147 								& (CONTINUE_SIGNALS | KILL_SIGNALS)) != 0;
1148 					locker.Unlock();
1149 
1150 					if (!resume)
1151 						thread_suspend();
1152 
1153 					continue;
1154 				}
1155 
1156 				case SIGSEGV:
1157 				case SIGBUS:
1158 				case SIGFPE:
1159 				case SIGILL:
1160 				case SIGTRAP:
1161 				case SIGABRT:
1162 				case SIGKILL:
1163 				case SIGQUIT:
1164 				case SIGPOLL:
1165 				case SIGPROF:
1166 				case SIGSYS:
1167 				case SIGVTALRM:
1168 				case SIGXCPU:
1169 				case SIGXFSZ:
1170 				default:
1171 					TRACE(("Shutting down team %" B_PRId32 " due to signal %"
1172 						B_PRIu32 " received in thread %" B_PRId32 "\n",
1173 						team->id, signal->Number(), thread->id));
1174 
1175 					// This signal kills the team regardless of which thread
1176 					// received it.
1177 					killTeam = true;
1178 
1179 					// fall through
1180 				case SIGKILLTHR:
1181 					// notify the debugger
1182 					if (debugSignal && signal->Number() != SIGKILL
1183 						&& signal->Number() != SIGKILLTHR
1184 						&& !notify_debugger(thread, signal, handler, true)) {
1185 						continue;
1186 					}
1187 
1188 					if (killTeam || thread == team->main_thread) {
1189 						// The signal is terminal for the team or the thread is
1190 						// the main thread. In either case the team is going
1191 						// down. Set its exit status, if that didn't happen yet.
1192 						teamLocker.Lock();
1193 
1194 						if (!team->exit.initialized) {
1195 							team->exit.reason = CLD_KILLED;
1196 							team->exit.signal = signal->Number();
1197 							team->exit.signaling_user = signal->SendingUser();
1198 							team->exit.status = 0;
1199 							team->exit.initialized = true;
1200 						}
1201 
1202 						teamLocker.Unlock();
1203 
1204 						// If this is not the main thread, send the main
1205 						// thread a SIGKILLTHR so that the team terminates.
1206 						if (thread != team->main_thread) {
1207 							Signal childSignal(SIGKILLTHR, SI_USER, B_OK,
1208 								team->id);
1209 							send_signal_to_thread_id(team->id, childSignal, 0);
1210 						}
1211 					}
1212 
1213 					// explicitly get rid of the signal reference, since
1214 					// thread_exit() won't return
1215 					signalHandledCaller.Done();
1216 
1217 					thread_exit();
1218 						// won't return
1219 			}
1220 		}
1221 
1222 		// User defined signal handler
1223 
1224 		// notify the debugger
1225 		if (debugSignal && !notify_debugger(thread, signal, handler, false))
1226 			continue;
1227 
1228 		if (!restart
1229 				|| (!alwaysRestart && (handler.sa_flags & SA_RESTART) == 0)) {
1230 			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1231 		}
1232 
1233 		T(ExecuteSignalHandler(signal->Number(), &handler));
1234 
1235 		TRACE(("### Setting up custom signal handler frame...\n"));
1236 
1237 		// save the old block mask -- we may need to adjust it for the handler
1238 		locker.Lock();
1239 
1240 		sigset_t oldBlockMask = thread->sigsuspend_original_unblocked_mask != 0
1241 			? ~thread->sigsuspend_original_unblocked_mask
1242 			: thread->sig_block_mask;
1243 
1244 		// Update the block mask while the signal handler is running -- it
1245 		// will be automatically restored when the signal frame is left.
1246 		thread->sig_block_mask |= handler.sa_mask & BLOCKABLE_SIGNALS;
1247 
1248 		if ((handler.sa_flags & SA_NOMASK) == 0) {
1249 			thread->sig_block_mask
1250 				|= SIGNAL_TO_MASK(signal->Number()) & BLOCKABLE_SIGNALS;
1251 		}
1252 
1253 		update_current_thread_signals_flag();
1254 
1255 		locker.Unlock();
1256 
1257 		setup_signal_frame(thread, &handler, signal, oldBlockMask);
1258 
1259 		// Reset sigsuspend_original_unblocked_mask. It would have been set by
1260 		// sigsuspend_internal(). In that case, above we set oldBlockMask
1261 		// accordingly so that after the handler returns the thread's signal
1262 		// mask is reset.
1263 		thread->sigsuspend_original_unblocked_mask = 0;
1264 
1265 		return;
1266 	}
1267 
1268 	// We have not handled any signal (or only ignored ones).
1269 
1270 	// If sigsuspend_original_unblocked_mask is nonzero, we came from a
1271 	// sigsuspend_internal(). Not having handled any signal, we should restart
1272 	// the syscall.
1273 	if (thread->sigsuspend_original_unblocked_mask != 0) {
1274 		restart = true;
1275 		atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
1276 	} else if (!restart) {
1277 		// clear syscall restart thread flag, if we're not supposed to restart
1278 		// the syscall
1279 		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1280 	}
1281 }
1282 
1283 
1284 /*!	Checks whether the given signal is blocked for the given team (i.e. all of
1285 	its threads).
1286 	The caller must hold the team's lock and \c signal_lock.
1287 */
1288 bool
1289 is_team_signal_blocked(Team* team, int signal)
1290 {
1291 	sigset_t mask = SIGNAL_TO_MASK(signal);
1292 
1293 	for (Thread* thread = team->thread_list; thread != NULL;
1294 			thread = thread->team_next) {
1295 		if ((thread->sig_block_mask & mask) == 0)
1296 			return false;
1297 	}
1298 
1299 	return true;
1300 }
1301 
1302 
1303 /*!	Gets (guesses) the current thread's currently used stack from the given
1304 	stack pointer.
1305 	Fills in \a stack with either the signal stack or the thread's user stack.
1306 	\param address A stack pointer address to be used to determine the used
1307 		stack.
1308 	\param stack Filled in by the function.
1309 */
1310 void
1311 signal_get_user_stack(addr_t address, stack_t* stack)
1312 {
1313 	// If a signal stack is enabled for the stack and the address is within it,
1314 	// return the signal stack. In all other cases return the thread's user
1315 	// stack, even if the address doesn't lie within it.
1316 	Thread* thread = thread_get_current_thread();
1317 	if (thread->signal_stack_enabled && address >= thread->signal_stack_base
1318 		&& address < thread->signal_stack_base + thread->signal_stack_size) {
1319 		stack->ss_sp = (void*)thread->signal_stack_base;
1320 		stack->ss_size = thread->signal_stack_size;
1321 	} else {
1322 		stack->ss_sp = (void*)thread->user_stack_base;
1323 		stack->ss_size = thread->user_stack_size;
1324 	}
1325 
1326 	stack->ss_flags = 0;
1327 }
1328 
1329 
1330 /*!	Checks whether any non-blocked signal is pending for the current thread.
1331 	The caller must hold \c team->signal_lock.
1332 	\param thread The current thread.
1333 */
1334 static bool
1335 has_signals_pending(Thread* thread)
1336 {
1337 	return (thread->AllPendingSignals() & ~thread->sig_block_mask) != 0;
1338 }
1339 
1340 
1341 /*!	Checks whether the current user has permission to send a signal to the given
1342 	target team.
1343 
1344 	\param team The target team.
1345 */
1346 static bool
1347 has_permission_to_signal(Team* team)
1348 {
1349 	// get the current user
1350 	uid_t currentUser = thread_get_current_thread()->team->effective_uid;
1351 
1352 	// root is omnipotent -- in the other cases the current user must match the
1353 	// target team's
1354 	return currentUser == 0 || currentUser == team->effective_uid;
1355 }
1356 
1357 
1358 /*!	Delivers a signal to the \a thread, but doesn't handle the signal -- it just
1359 	makes sure the thread gets the signal, i.e. unblocks it if needed.
1360 
1361 	The caller must hold \c team->signal_lock.
1362 
1363 	\param thread The thread the signal shall be delivered to.
1364 	\param signalNumber The number of the signal to be delivered. If \c 0, no
1365 		actual signal will be delivered. Only delivery checks will be performed.
1366 	\param signal If non-NULL the signal to be queued (has number
1367 		\a signalNumber in this case). The caller transfers an object reference
1368 		to this function. If \c NULL an unqueued signal will be delivered to the
1369 		thread.
1370 	\param flags A bitwise combination of any number of the following:
1371 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1372 			target thread the signal.
1373 	\return \c B_OK, when the signal was delivered successfully, another error
1374 		code otherwise.
1375 */
1376 status_t
1377 send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
1378 	Signal* signal, uint32 flags)
1379 {
1380 	ASSERT(signal == NULL || signalNumber == signal->Number());
1381 
1382 	T(SendSignal(thread->id, signalNumber, flags));
1383 
1384 	// The caller transferred a reference to the signal to us.
1385 	BReference<Signal> signalReference(signal, true);
1386 
1387 	if ((flags & B_CHECK_PERMISSION) != 0) {
1388 		if (!has_permission_to_signal(thread->team))
1389 			return EPERM;
1390 	}
1391 
1392 	if (signalNumber == 0)
1393 		return B_OK;
1394 
1395 	if (thread->team == team_get_kernel_team()) {
1396 		// Signals to kernel threads will only wake them up
1397 		thread_continue(thread);
1398 		return B_OK;
1399 	}
1400 
1401 	if (signal != NULL)
1402 		thread->AddPendingSignal(signal);
1403 	else
1404 		thread->AddPendingSignal(signalNumber);
1405 
1406 	// the thread has the signal reference, now
1407 	signalReference.Detach();
1408 
1409 	switch (signalNumber) {
1410 		case SIGKILL:
1411 		{
1412 			// If sent to a thread other than the team's main thread, also send
1413 			// a SIGKILLTHR to the main thread to kill the team.
1414 			Thread* mainThread = thread->team->main_thread;
1415 			if (mainThread != NULL && mainThread != thread) {
1416 				mainThread->AddPendingSignal(SIGKILLTHR);
1417 
1418 				// wake up main thread
1419 				thread->going_to_suspend = false;
1420 				mainThread->going_to_suspend = false;
1421 				SpinLocker locker(mainThread->scheduler_lock);
1422 				if (mainThread->state == B_THREAD_SUSPENDED)
1423 					scheduler_enqueue_in_run_queue(mainThread);
1424 				else
1425 					thread_interrupt(mainThread, true);
1426 				locker.Unlock();
1427 
1428 				update_thread_signals_flag(mainThread);
1429 			}
1430 
1431 			// supposed to fall through
1432 		}
1433 		case SIGKILLTHR:
1434 		{
1435 			// Wake up suspended threads and interrupt waiting ones
1436 			thread->going_to_suspend = false;
1437 
1438 			SpinLocker locker(thread->scheduler_lock);
1439 			if (thread->state == B_THREAD_SUSPENDED)
1440 				scheduler_enqueue_in_run_queue(thread);
1441 			else
1442 				thread_interrupt(thread, true);
1443 
1444 			break;
1445 		}
1446 		case SIGNAL_DEBUG_THREAD:
1447 		{
1448 			// Wake up thread if it was suspended, otherwise interrupt it.
1449 			thread->going_to_suspend = false;
1450 
1451 			SpinLocker locker(thread->scheduler_lock);
1452 			if (thread->state == B_THREAD_SUSPENDED)
1453 				scheduler_enqueue_in_run_queue(thread);
1454 			else
1455 				thread_interrupt(thread, false);
1456 
1457 			break;
1458 		}
1459 		case SIGNAL_CONTINUE_THREAD:
1460 		{
1461 			// wake up thread, and interrupt its current syscall
1462 			thread->going_to_suspend = false;
1463 
1464 			SpinLocker locker(thread->scheduler_lock);
1465 			if (thread->state == B_THREAD_SUSPENDED)
1466 				scheduler_enqueue_in_run_queue(thread);
1467 
1468 			atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
1469 			break;
1470 		}
1471 		case SIGCONT:
1472 		{
1473 			// Wake up thread if it was suspended, otherwise interrupt it, if
1474 			// the signal isn't blocked.
1475 			thread->going_to_suspend = false;
1476 
1477 			SpinLocker locker(thread->scheduler_lock);
1478 			if (thread->state == B_THREAD_SUSPENDED)
1479 				scheduler_enqueue_in_run_queue(thread);
1480 			else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask) != 0)
1481 				thread_interrupt(thread, false);
1482 
1483 			// remove any pending stop signals
1484 			thread->RemovePendingSignals(STOP_SIGNALS);
1485 			break;
1486 		}
1487 		default:
1488 			// If the signal is not masked, interrupt the thread, if it is
1489 			// currently waiting (interruptibly).
1490 			if ((thread->AllPendingSignals()
1491 						& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD)))
1492 					!= 0) {
1493 				// Interrupt thread if it was waiting
1494 				SpinLocker locker(thread->scheduler_lock);
1495 				thread_interrupt(thread, false);
1496 			}
1497 			break;
1498 	}
1499 
1500 	update_thread_signals_flag(thread);
1501 
1502 	return B_OK;
1503 }
1504 
1505 
1506 /*!	Sends the given signal to the given thread.
1507 
1508 	\param thread The thread the signal shall be sent to.
1509 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1510 		actual signal will be delivered. Only delivery checks will be performed.
1511 		The given object will be copied. The caller retains ownership.
1512 	\param flags A bitwise combination of any number of the following:
1513 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1514 			target thread the signal.
1515 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1516 			woken up, the scheduler will be invoked. If set that will not be
1517 			done explicitly, but rescheduling can still happen, e.g. when the
1518 			current thread's time slice runs out.
1519 	\return \c B_OK, when the signal was delivered successfully, another error
1520 		code otherwise.
1521 */
1522 status_t
1523 send_signal_to_thread(Thread* thread, const Signal& signal, uint32 flags)
1524 {
1525 	// Clone the signal -- the clone will be queued. If something fails and the
1526 	// caller doesn't require queuing, we will add an unqueued signal.
1527 	Signal* signalToQueue = NULL;
1528 	status_t error = Signal::CreateQueuable(signal,
1529 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1530 	if (error != B_OK)
1531 		return error;
1532 
1533 	InterruptsReadSpinLocker teamLocker(thread->team_lock);
1534 	SpinLocker locker(thread->team->signal_lock);
1535 
1536 	error = send_signal_to_thread_locked(thread, signal.Number(), signalToQueue,
1537 		flags);
1538 	if (error != B_OK)
1539 		return error;
1540 
1541 	locker.Unlock();
1542 	teamLocker.Unlock();
1543 
1544 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1545 		scheduler_reschedule_if_necessary();
1546 
1547 	return B_OK;
1548 }
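
// A minimal usage sketch (hypothetical sender; compare send_signal_internal()
// near the end of this file): the Signal object is copied by the send
// functions, so a stack-allocated signal suffices and the caller retains
// ownership. 'targetThread' is an assumed, valid thread_id.
//
//	Signal signal(SIGUSR1, SI_USER, B_OK,
//		thread_get_current_thread()->team->id);
//	status_t error = send_signal_to_thread_id(targetThread, signal, 0);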
1549 
1550 
1551 /*!	Sends the given signal to the thread with the given ID.
1552 
1553 	\param threadID The ID of the thread the signal shall be sent to.
1554 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1555 		actual signal will be delivered. Only delivery checks will be performed.
1556 		The given object will be copied. The caller retains ownership.
1557 	\param flags A bitwise combination of any number of the following:
1558 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1559 			target thread the signal.
1560 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1561 			woken up, the scheduler will be invoked. If set that will not be
1562 			done explicitly, but rescheduling can still happen, e.g. when the
1563 			current thread's time slice runs out.
1564 	\return \c B_OK, when the signal was delivered successfully, another error
1565 		code otherwise.
1566 */
1567 status_t
1568 send_signal_to_thread_id(thread_id threadID, const Signal& signal, uint32 flags)
1569 {
1570 	Thread* thread = Thread::Get(threadID);
1571 	if (thread == NULL)
1572 		return B_BAD_THREAD_ID;
1573 	BReference<Thread> threadReference(thread, true);
1574 
1575 	return send_signal_to_thread(thread, signal, flags);
1576 }
1577 
1578 
1579 /*!	Sends the given signal to the given team.
1580 
1581 	The caller must hold \c signal_lock.
1582 
1583 	\param team The team the signal shall be sent to.
1584 	\param signalNumber The number of the signal to be delivered. If \c 0, no
1585 		actual signal will be delivered. Only delivery checks will be performed.
1586 	\param signal If non-NULL the signal to be queued (has number
1587 		\a signalNumber in this case). The caller transfers an object reference
1588 		to this function. If \c NULL an unqueued signal will be delivered to the
1589 		thread.
1590 	\param flags A bitwise combination of any number of the following:
1591 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1592 			target thread the signal.
1593 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1594 			woken up, the scheduler will be invoked. If set that will not be
1595 			done explicitly, but rescheduling can still happen, e.g. when the
1596 			current thread's time slice runs out.
1597 	\return \c B_OK, when the signal was delivered successfully, another error
1598 		code otherwise.
1599 */
1600 status_t
1601 send_signal_to_team_locked(Team* team, uint32 signalNumber, Signal* signal,
1602 	uint32 flags)
1603 {
1604 	ASSERT(signal == NULL || signalNumber == signal->Number());
1605 
1606 	T(SendSignal(team->id, signalNumber, flags));
1607 
1608 	// The caller transferred a reference to the signal to us.
1609 	BReference<Signal> signalReference(signal, true);
1610 
1611 	if ((flags & B_CHECK_PERMISSION) != 0) {
1612 		if (!has_permission_to_signal(team))
1613 			return EPERM;
1614 	}
1615 
1616 	if (signalNumber == 0)
1617 		return B_OK;
1618 
1619 	if (team == team_get_kernel_team()) {
1620 		// signals to the kernel team are not allowed
1621 		return EPERM;
1622 	}
1623 
1624 	if (signal != NULL)
1625 		team->AddPendingSignal(signal);
1626 	else
1627 		team->AddPendingSignal(signalNumber);
1628 
1629 	// the team has the signal reference, now
1630 	signalReference.Detach();
1631 
1632 	switch (signalNumber) {
1633 		case SIGKILL:
1634 		case SIGKILLTHR:
1635 		{
1636 			// Also add a SIGKILLTHR to the main thread's signals and wake it
1637 			// up/interrupt it, so we get this over with as soon as possible
1638 			// (only the main thread shuts down the team).
1639 			Thread* mainThread = team->main_thread;
1640 			if (mainThread != NULL) {
1641 				mainThread->AddPendingSignal(SIGKILLTHR);
1642 
1643 				// wake up main thread
1644 				mainThread->going_to_suspend = false;
1645 
1646 				SpinLocker _(mainThread->scheduler_lock);
1647 				if (mainThread->state == B_THREAD_SUSPENDED)
1648 					scheduler_enqueue_in_run_queue(mainThread);
1649 				else
1650 					thread_interrupt(mainThread, true);
1651 			}
1652 			break;
1653 		}
1654 
1655 		case SIGCONT:
1656 			// Wake up any suspended threads, interrupt the others, if they
1657 			// don't block the signal.
1658 			for (Thread* thread = team->thread_list; thread != NULL;
1659 					thread = thread->team_next) {
1660 				thread->going_to_suspend = false;
1661 
1662 				SpinLocker _(thread->scheduler_lock);
1663 				if (thread->state == B_THREAD_SUSPENDED) {
1664 					scheduler_enqueue_in_run_queue(thread);
1665 				} else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask)
1666 						!= 0) {
1667 					thread_interrupt(thread, false);
1668 				}
1669 
1670 				// remove any pending stop signals
1671 				thread->RemovePendingSignals(STOP_SIGNALS);
1672 			}
1673 
1674 			// remove any pending team stop signals
1675 			team->RemovePendingSignals(STOP_SIGNALS);
1676 			break;
1677 
1678 		case SIGSTOP:
1679 		case SIGTSTP:
1680 		case SIGTTIN:
1681 		case SIGTTOU:
1682 			// send the stop signal to all threads
1683 			// TODO: Is that correct or should we only target the main thread?
1684 			for (Thread* thread = team->thread_list; thread != NULL;
1685 					thread = thread->team_next) {
1686 				thread->AddPendingSignal(signalNumber);
1687 			}
1688 
1689 			// remove the stop signal from the team again
1690 			if (signal != NULL) {
1691 				team->RemovePendingSignal(signal);
1692 				signalReference.SetTo(signal, true);
1693 			} else
1694 				team->RemovePendingSignal(signalNumber);
1695 
1696 			// fall through to interrupt threads
1697 		default:
1698 			// Interrupt all interruptibly waiting threads, if the signal is
1699 			// not masked.
1700 			for (Thread* thread = team->thread_list; thread != NULL;
1701 					thread = thread->team_next) {
1702 				sigset_t nonBlocked = ~thread->sig_block_mask
1703 					| SIGNAL_TO_MASK(SIGCHLD);
1704 				if ((thread->AllPendingSignals() & nonBlocked) != 0) {
1705 					SpinLocker _(thread->scheduler_lock);
1706 					thread_interrupt(thread, false);
1707 				}
1708 			}
1709 			break;
1710 	}
1711 
1712 	update_team_threads_signal_flag(team);
1713 
1714 	return B_OK;
1715 }
1716 
1717 
1718 /*!	Sends the given signal to the given team.
1719 
1720 	\param team The team the signal shall be sent to.
1721 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1722 		actual signal will be delivered. Only delivery checks will be performed.
1723 		The given object will be copied. The caller retains ownership.
1724 	\param flags A bitwise combination of any number of the following:
1725 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1726 			target thread the signal.
1727 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1728 			woken up, the scheduler will be invoked. If set that will not be
1729 			done explicitly, but rescheduling can still happen, e.g. when the
1730 			current thread's time slice runs out.
1731 	\return \c B_OK, when the signal was delivered successfully, another error
1732 		code otherwise.
1733 */
1734 status_t
1735 send_signal_to_team(Team* team, const Signal& signal, uint32 flags)
1736 {
1737 	// Clone the signal -- the clone will be queued. If something fails and the
1738 	// caller doesn't require queuing, we will add an unqueued signal.
1739 	Signal* signalToQueue = NULL;
1740 	status_t error = Signal::CreateQueuable(signal,
1741 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1742 	if (error != B_OK)
1743 		return error;
1744 
1745 	InterruptsSpinLocker locker(team->signal_lock);
1746 
1747 	error = send_signal_to_team_locked(team, signal.Number(), signalToQueue,
1748 			flags);
1749 
1750 	locker.Unlock();
1751 
1752 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1753 		scheduler_reschedule_if_necessary();
1754 
1755 	return error;
1756 }
1757 
1758 
1759 /*!	Sends the given signal to the team with the given ID.
1760 
1761 	\param teamID The ID of the team the signal shall be sent to.
1762 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1763 		actual signal will be delivered. Only delivery checks will be performed.
1764 		The given object will be copied. The caller retains ownership.
1765 	\param flags A bitwise combination of any number of the following:
1766 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1767 			target thread the signal.
1768 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1769 			woken up, the scheduler will be invoked. If set that will not be
1770 			done explicitly, but rescheduling can still happen, e.g. when the
1771 			current thread's time slice runs out.
1772 	\return \c B_OK, when the signal was delivered successfully, another error
1773 		code otherwise.
1774 */
1775 status_t
1776 send_signal_to_team_id(team_id teamID, const Signal& signal, uint32 flags)
1777 {
1778 	// get the team
1779 	Team* team = Team::Get(teamID);
1780 	if (team == NULL)
1781 		return B_BAD_TEAM_ID;
1782 	BReference<Team> teamReference(team, true);
1783 
1784 	return send_signal_to_team(team, signal, flags);
1785 }
1786 
1787 
1788 /*!	Sends the given signal to the given process group.
1789 
1790 	The caller must hold the process group's lock. Interrupts must be enabled.
1791 
1792 	\param group The process group the signal shall be sent to.
1793 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1794 		actual signal will be delivered. Only delivery checks will be performed.
1795 		The given object will be copied. The caller retains ownership.
1796 	\param flags A bitwise combination of any number of the following:
1797 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1798 			target thread the signal.
1799 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1800 			woken up, the scheduler will be invoked. If set that will not be
1801 			done explicitly, but rescheduling can still happen, e.g. when the
1802 			current thread's time slice runs out.
1803 	\return \c B_OK, when the signal was delivered successfully, another error
1804 		code otherwise.
1805 */
1806 status_t
1807 send_signal_to_process_group_locked(ProcessGroup* group, const Signal& signal,
1808 	uint32 flags)
1809 {
1810 	T(SendSignal(-group->id, signal.Number(), flags));
1811 
1812 	bool firstTeam = true;
1813 
1814 	for (Team* team = group->teams; team != NULL; team = team->group_next) {
1815 		status_t error = send_signal_to_team(team, signal,
1816 			flags | B_DO_NOT_RESCHEDULE);
1817 		// If sending to the first team in the group failed, let the whole call
1818 		// fail.
1819 		if (firstTeam) {
1820 			if (error != B_OK)
1821 				return error;
1822 			firstTeam = false;
1823 		}
1824 	}
1825 
1826 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1827 		scheduler_reschedule_if_necessary();
1828 
1829 	return B_OK;
1830 }
1831 
1832 
1833 /*!	Sends the given signal to the process group specified by the given ID.
1834 
1835 	The caller must not hold any process group, team, or thread lock. Interrupts
1836 	must be enabled.
1837 
1838 	\param groupID The ID of the process group the signal shall be sent to.
1839 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1840 		actual signal will be delivered. Only delivery checks will be performed.
1841 		The given object will be copied. The caller retains ownership.
1842 	\param flags A bitwise combination of any number of the following:
1843 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1844 			target process group the signal.
1845 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher-priority thread has
1846 			been woken up, the scheduler will be invoked. If set, that will not
1847 			be done explicitly, but rescheduling can still happen, e.g. when the
1848 			current thread's time slice runs out.
1849 	\return \c B_OK when the signal was delivered successfully, another error
1850 		code otherwise.
1851 */
1852 status_t
1853 send_signal_to_process_group(pid_t groupID, const Signal& signal, uint32 flags)
1854 {
1855 	ProcessGroup* group = ProcessGroup::Get(groupID);
1856 	if (group == NULL)
1857 		return B_BAD_TEAM_ID;
1858 	BReference<ProcessGroup> groupReference(group, true);
1859 
1860 	T(SendSignal(-group->id, signal.Number(), flags));
1861 
1862 	AutoLocker<ProcessGroup> groupLocker(group);
1863 
1864 	status_t error = send_signal_to_process_group_locked(group, signal,
1865 		flags | B_DO_NOT_RESCHEDULE);
1866 	if (error != B_OK)
1867 		return error;
1868 
1869 	groupLocker.Unlock();
1870 
1871 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1872 		scheduler_reschedule_if_necessary();
1873 
1874 	return B_OK;
1875 }
1876 
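// Illustrative sketch (not part of the original sources, hence disabled):
// delivering SIGHUP to a whole process group by ID, e.g. on terminal hangup.
// The helper name is made up. send_signal_to_process_group() acquires the
// group reference and lock itself, so the caller must not hold any of the
// locks mentioned in the documentation above.
#if 0
static status_t
example_hangup_process_group(pid_t groupID)
{
	Signal signal(SIGHUP, SI_USER, B_OK,
		thread_get_current_thread()->team->id);
	return send_signal_to_process_group(groupID, signal, 0);
}
#endif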
1877 
1878 static status_t
1879 send_signal_internal(pid_t id, uint signalNumber, union sigval userValue,
1880 	uint32 flags)
1881 {
1882 	if (signalNumber > MAX_SIGNAL_NUMBER)
1883 		return B_BAD_VALUE;
1884 
1885 	Thread* thread = thread_get_current_thread();
1886 
1887 	Signal signal(signalNumber,
1888 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
1889 		B_OK, thread->team->id);
1890 		// Note: SI_USER/SI_QUEUE is not correct if called from within the
1891 		// kernel (or a driver), but we don't have any info here.
1892 	signal.SetUserValue(userValue);
1893 
1894 	// If id is > 0, send the signal to the respective thread.
1895 	if (id > 0)
1896 		return send_signal_to_thread_id(id, signal, flags);
1897 
1898 	// If id == 0, send the signal to the current thread.
1899 	if (id == 0)
1900 		return send_signal_to_thread(thread, signal, flags);
1901 
1902 	// If id == -1, send the signal to all teams the calling team has permission
1903 	// to send signals to.
1904 	if (id == -1) {
1905 		// TODO: Implement correctly!
1906 		// Currently the signal is only sent to the current team.
1907 		return send_signal_to_team_id(thread->team->id, signal, flags);
1908 	}
1909 
1910 	// Send a signal to the specified process group (the absolute value of the
1911 	// id).
1912 	return send_signal_to_process_group(-id, signal, flags);
1913 }
1914 
1915 
1916 int
1917 send_signal_etc(pid_t id, uint signalNumber, uint32 flags)
1918 {
1919 	// a dummy user value
1920 	union sigval userValue;
1921 	userValue.sival_ptr = NULL;
1922 
1923 	return send_signal_internal(id, signalNumber, userValue, flags);
1924 }
1925 
1926 
1927 int
1928 send_signal(pid_t threadID, uint signal)
1929 {
1930 	// The BeBook states that this function isn't exported for drivers,
1931 	// but, of course, that is wrong.
1932 	return send_signal_etc(threadID, signal, 0);
1933 }
1934 
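// Illustrative sketch (not part of the original sources, hence disabled):
// send_signal()/send_signal_etc() are the wrappers drivers typically use,
// since no Signal object has to be constructed. Thread ID and signal number
// are placeholders.
#if 0
static void
example_notify_waiting_thread(pid_t threadID)
{
	// B_DO_NOT_RESCHEDULE: don't invoke the scheduler right away (cf. the
	// flag documentation above).
	send_signal_etc(threadID, SIGCONT, B_DO_NOT_RESCHEDULE);
}
#endif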
1935 
1936 static int
1937 sigprocmask_internal(int how, const sigset_t* set, sigset_t* oldSet)
1938 {
1939 	Thread* thread = thread_get_current_thread();
1940 
1941 	InterruptsSpinLocker _(thread->team->signal_lock);
1942 
1943 	sigset_t oldMask = thread->sig_block_mask;
1944 
1945 	if (set != NULL) {
1946 		T(SigProcMask(how, *set));
1947 
1948 		switch (how) {
1949 			case SIG_BLOCK:
1950 				thread->sig_block_mask |= *set & BLOCKABLE_SIGNALS;
1951 				break;
1952 			case SIG_UNBLOCK:
1953 				thread->sig_block_mask &= ~*set;
1954 				break;
1955 			case SIG_SETMASK:
1956 				thread->sig_block_mask = *set & BLOCKABLE_SIGNALS;
1957 				break;
1958 			default:
1959 				return B_BAD_VALUE;
1960 		}
1961 
1962 		update_current_thread_signals_flag();
1963 	}
1964 
1965 	if (oldSet != NULL)
1966 		*oldSet = oldMask;
1967 
1968 	return B_OK;
1969 }
1970 
1971 
1972 int
1973 sigprocmask(int how, const sigset_t* set, sigset_t* oldSet)
1974 {
1975 	RETURN_AND_SET_ERRNO(sigprocmask_internal(how, set, oldSet));
1976 }
1977 
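// Illustrative sketch (not part of the original sources, hence disabled):
// temporarily block SIGUSR1 for the calling thread via the exported
// sigprocmask() and restore the previous mask afterwards. SIGNAL_TO_MASK() is
// the mask helper already used throughout this file.
#if 0
static void
example_block_sigusr1_temporarily()
{
	sigset_t blockSet = SIGNAL_TO_MASK(SIGUSR1);
	sigset_t oldSet;
	sigprocmask(SIG_BLOCK, &blockSet, &oldSet);

	// ... code that must not be interrupted by a SIGUSR1 handler ...

	sigprocmask(SIG_SETMASK, &oldSet, NULL);
}
#endif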
1978 
1979 /*!	\brief Like sigaction(), but returning the error instead of setting errno.
1980 */
1981 static status_t
1982 sigaction_internal(int signal, const struct sigaction* act,
1983 	struct sigaction* oldAction)
1984 {
1985 	if (signal < 1 || signal > MAX_SIGNAL_NUMBER
1986 		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
1987 		return B_BAD_VALUE;
1988 
1989 	// get and lock the team
1990 	Team* team = thread_get_current_thread()->team;
1991 	TeamLocker teamLocker(team);
1992 
1993 	struct sigaction& teamHandler = team->SignalActionFor(signal);
1994 	if (oldAction) {
1995 		// save previous sigaction structure
1996 		*oldAction = teamHandler;
1997 	}
1998 
1999 	if (act) {
2000 		T(SigAction(signal, act));
2001 
2002 		// set new sigaction structure
2003 		teamHandler = *act;
2004 		teamHandler.sa_mask &= BLOCKABLE_SIGNALS;
2005 	}
2006 
2007 	// Remove the pending signal, if it is now ignored, i.e. the handler is
2008 	// SIG_IGN, or it is SIG_DFL and the default action is to ignore it.
2009 	if ((act && act->sa_handler == SIG_IGN)
2010 		|| (act && act->sa_handler == SIG_DFL
2011 			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0)) {
2012 		InterruptsSpinLocker locker(team->signal_lock);
2013 
2014 		team->RemovePendingSignal(signal);
2015 
2016 		for (Thread* thread = team->thread_list; thread != NULL;
2017 				thread = thread->team_next) {
2018 			thread->RemovePendingSignal(signal);
2019 		}
2020 	}
2021 
2022 	return B_OK;
2023 }
2024 
2025 
2026 int
2027 sigaction(int signal, const struct sigaction* act, struct sigaction* oldAction)
2028 {
2029 	RETURN_AND_SET_ERRNO(sigaction_internal(signal, act, oldAction));
2030 }
2031 
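// Illustrative sketch (not part of the original sources, hence disabled):
// ignore SIGCHLD for the current team via the exported sigaction(), keeping
// the previous action so that it could be restored later. The helper name is
// made up.
#if 0
static void
example_ignore_sigchld(struct sigaction* previousAction)
{
	struct sigaction action;
	memset(&action, 0, sizeof(action));
	action.sa_handler = SIG_IGN;

	sigaction(SIGCHLD, &action, previousAction);
}
#endif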
2032 
2033 /*!	Wait for the specified signals, and return the information for the retrieved
2034 	signal in \a info.
2035 	The \c flags and \c timeout combination must either define an infinite
2036 	timeout (no timeout flags set), an absolute timeout (\c B_ABSOLUTE_TIMEOUT
2037 	set), or a relative timeout \code <= 0 \endcode (\c B_RELATIVE_TIMEOUT set).
2038 */
2039 static status_t
2040 sigwait_internal(const sigset_t* set, siginfo_t* info, uint32 flags,
2041 	bigtime_t timeout)
2042 {
2043 	// restrict mask to blockable signals
2044 	sigset_t requestedSignals = *set & BLOCKABLE_SIGNALS;
2045 
2046 	// always make the wait interruptible
2047 	flags |= B_CAN_INTERRUPT;
2048 
2049 	// check whether we are allowed to wait at all
2050 	bool canWait = (flags & B_RELATIVE_TIMEOUT) == 0 || timeout > 0;
2051 
2052 	Thread* thread = thread_get_current_thread();
2053 
2054 	InterruptsSpinLocker locker(thread->team->signal_lock);
2055 
2056 	bool timedOut = false;
2057 	status_t error = B_OK;
2058 
2059 	while (!timedOut) {
2060 		sigset_t pendingSignals = thread->AllPendingSignals();
2061 
2062 		// If a kill signal is pending, just bail out.
2063 		if ((pendingSignals & KILL_SIGNALS) != 0)
2064 			return B_INTERRUPTED;
2065 
2066 		if ((pendingSignals & requestedSignals) != 0) {
2067 			// get signal with the highest priority
2068 			Signal stackSignal;
2069 			Signal* signal = dequeue_thread_or_team_signal(thread,
2070 				requestedSignals, stackSignal);
2071 			ASSERT(signal != NULL);
2072 
2073 			SignalHandledCaller signalHandledCaller(signal);
2074 			locker.Unlock();
2075 
2076 			info->si_signo = signal->Number();
2077 			info->si_code = signal->SignalCode();
2078 			info->si_errno = signal->ErrorCode();
2079 			info->si_pid = signal->SendingProcess();
2080 			info->si_uid = signal->SendingUser();
2081 			info->si_addr = signal->Address();
2082 			info->si_status = signal->Status();
2083 			info->si_band = signal->PollBand();
2084 			info->si_value = signal->UserValue();
2085 
2086 			return B_OK;
2087 		}
2088 
2089 		if (!canWait)
2090 			return B_WOULD_BLOCK;
2091 
2092 		sigset_t blockedSignals = thread->sig_block_mask;
2093 		if ((pendingSignals & ~blockedSignals) != 0) {
2094 			// Non-blocked signals are pending -- return to let them be handled.
2095 			return B_INTERRUPTED;
2096 		}
2097 
2098 		// No signals yet. Set the signal block mask to not include the
2099 		// requested signals and wait until we're interrupted.
2100 		thread->sig_block_mask = blockedSignals & ~requestedSignals;
2101 
2102 		while (!has_signals_pending(thread)) {
2103 			thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SIGNAL,
2104 				NULL);
2105 
2106 			locker.Unlock();
2107 
2108 			if ((flags & B_ABSOLUTE_TIMEOUT) != 0) {
2109 				error = thread_block_with_timeout(flags, timeout);
2110 				if (error == B_WOULD_BLOCK || error == B_TIMED_OUT) {
2111 					error = B_WOULD_BLOCK;
2112 						// POSIX requires EAGAIN (B_WOULD_BLOCK) on timeout
2113 					timedOut = true;
2114 
2115 					locker.Lock();
2116 					break;
2117 				}
2118 			} else
2119 				thread_block();
2120 
2121 			locker.Lock();
2122 		}
2123 
2124 		// restore the original block mask
2125 		thread->sig_block_mask = blockedSignals;
2126 
2127 		update_current_thread_signals_flag();
2128 	}
2129 
2130 	// we get here only when timed out
2131 	return error;
2132 }
2133 
2134 
2135 /*!	Replace the current signal block mask and wait for any event to happen.
2136 	Before returning, the original signal block mask is restored.
2137 */
2138 static status_t
2139 sigsuspend_internal(const sigset_t* _mask)
2140 {
2141 	sigset_t mask = *_mask & BLOCKABLE_SIGNALS;
2142 
2143 	T(SigSuspend(mask));
2144 
2145 	Thread* thread = thread_get_current_thread();
2146 
2147 	InterruptsSpinLocker locker(thread->team->signal_lock);
2148 
2149 	// Set the new block mask and block until interrupted. We might be here
2150 	// after a syscall restart, in which case sigsuspend_original_unblocked_mask
2151 	// will still be set.
2152 	sigset_t oldMask = thread->sigsuspend_original_unblocked_mask != 0
2153 		? ~thread->sigsuspend_original_unblocked_mask : thread->sig_block_mask;
2154 	thread->sig_block_mask = mask & BLOCKABLE_SIGNALS;
2155 
2156 	update_current_thread_signals_flag();
2157 
2158 	while (!has_signals_pending(thread)) {
2159 		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
2160 			THREAD_BLOCK_TYPE_SIGNAL, NULL);
2161 
2162 		locker.Unlock();
2163 		thread_block();
2164 		locker.Lock();
2165 	}
2166 
2167 	// Set sigsuspend_original_unblocked_mask (guaranteed to be non-0 due to
2168 	// BLOCKABLE_SIGNALS). This will indicate to handle_signals() that it is
2169 	// called after a _user_sigsuspend(). It will reset the field after invoking
2170 	// a signal handler, or restart the syscall, if there wasn't anything to
2171 	// handle anymore (e.g. because another thread was faster).
2172 	thread->sigsuspend_original_unblocked_mask = ~oldMask;
2173 
2174 	T(SigSuspendDone());
2175 
2176 	// we're not supposed to actually succeed
2177 	return B_INTERRUPTED;
2178 }
2179 
2180 
2181 static status_t
2182 sigpending_internal(sigset_t* set)
2183 {
2184 	Thread* thread = thread_get_current_thread();
2185 
2186 	if (set == NULL)
2187 		return B_BAD_VALUE;
2188 
2189 	InterruptsSpinLocker locker(thread->team->signal_lock);
2190 
2191 	*set = thread->AllPendingSignals() & thread->sig_block_mask;
2192 
2193 	return B_OK;
2194 }
2195 
2196 
2197 // #pragma mark - syscalls
2198 
2199 
2200 /*!	Sends a signal to a thread, process, or process group.
2201 	\param id Specifies the ID of the target:
2202 		- \code id > 0 \endcode: If \c SIGNAL_FLAG_SEND_TO_THREAD is set, the
2203 			target is the thread with ID \a id, otherwise the team with that ID.
2204 		- \code id == 0 \endcode: If \c SIGNAL_FLAG_SEND_TO_THREAD is set, the
2205 			target is the current thread, otherwise the current team.
2206 		- \code id == -1 \endcode: The targets are all teams the current team
2207 			has permission to send signals to. Currently not implemented
2208 			correctly; see the TODO in send_signal_internal().
2209 		- \code id < -1 \endcode: The target is the process group with ID \c -id.
2210 	\param signalNumber The signal number. \c 0 to just perform checks, but not
2211 		actually send any signal.
2212 	\param userUserValue A user value to be associated with the signal. Might be
2213 		ignored unless signal queuing is forced. Can be \c NULL.
2214 	\param flags A bitwise or of any number of the following:
2215 		- \c SIGNAL_FLAG_QUEUING_REQUIRED: Signal queuing is required. Fail
2216 			instead of falling back to unqueued signals, when queuing isn't
2217 			possible.
2218 		- \c SIGNAL_FLAG_SEND_TO_THREAD: Interpret the given ID as a
2219 			\c thread_id rather than a \c team_id. Ignored when the \a id is
2220 			\code < 0 \endcode -- then the target is a process group.
2221 	\return \c B_OK on success, another error code otherwise.
2222 */
2223 status_t
2224 _user_send_signal(int32 id, uint32 signalNumber,
2225 	const union sigval* userUserValue, uint32 flags)
2226 {
2227 	// restrict flags to the allowed ones and add B_CHECK_PERMISSION
2228 	flags &= SIGNAL_FLAG_QUEUING_REQUIRED | SIGNAL_FLAG_SEND_TO_THREAD;
2229 	flags |= B_CHECK_PERMISSION;
2230 
2231 	// Copy the user value from userland. If not given, use a dummy value.
2232 	union sigval userValue;
2233 	if (userUserValue != NULL) {
2234 		if (!IS_USER_ADDRESS(userUserValue)
2235 			|| user_memcpy(&userValue, userUserValue, sizeof(userValue))
2236 				!= B_OK) {
2237 			return B_BAD_ADDRESS;
2238 		}
2239 	} else
2240 		userValue.sival_ptr = NULL;
2241 
2242 	// If to be sent to a thread, delegate to send_signal_internal(). Also do
2243 	// that when id < 0, since in this case the semantics are the same.
2244 	if ((flags & SIGNAL_FLAG_SEND_TO_THREAD) != 0 || id < 0)
2245 		return send_signal_internal(id, signalNumber, userValue, flags);
2246 
2247 	// kill() semantics for id >= 0
2248 	if (signalNumber > MAX_SIGNAL_NUMBER)
2249 		return B_BAD_VALUE;
2250 
2251 	Thread* thread = thread_get_current_thread();
2252 
2253 	Signal signal(signalNumber,
2254 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
2255 		B_OK, thread->team->id);
2256 	signal.SetUserValue(userValue);
2257 
2258 	// send to current team for id == 0, otherwise to the respective team
2259 	return send_signal_to_team_id(id == 0 ? team_get_current_team_id() : id,
2260 		signal, flags);
2261 }
2262 
2263 
2264 status_t
2265 _user_set_signal_mask(int how, const sigset_t *userSet, sigset_t *userOldSet)
2266 {
2267 	sigset_t set, oldSet;
2268 	status_t status;
2269 
2270 	if ((userSet != NULL && (!IS_USER_ADDRESS(userSet)
2271 			|| user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK))
2272 		|| (userOldSet != NULL && (!IS_USER_ADDRESS(userOldSet)
2273 			|| user_memcpy(&oldSet, userOldSet, sizeof(sigset_t)) < B_OK)))
2274 		return B_BAD_ADDRESS;
2275 
2276 	status = sigprocmask_internal(how, userSet ? &set : NULL,
2277 		userOldSet ? &oldSet : NULL);
2278 
2279 	// copy old set if asked for
2280 	if (status >= B_OK && userOldSet != NULL
2281 		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
2282 		return B_BAD_ADDRESS;
2283 
2284 	return status;
2285 }
2286 
2287 
2288 status_t
2289 _user_sigaction(int signal, const struct sigaction *userAction,
2290 	struct sigaction *userOldAction)
2291 {
2292 	struct sigaction act, oact;
2293 	status_t status;
2294 
2295 	if ((userAction != NULL && (!IS_USER_ADDRESS(userAction)
2296 			|| user_memcpy(&act, userAction, sizeof(struct sigaction)) < B_OK))
2297 		|| (userOldAction != NULL && (!IS_USER_ADDRESS(userOldAction)
2298 			|| user_memcpy(&oact, userOldAction, sizeof(struct sigaction))
2299 				< B_OK)))
2300 		return B_BAD_ADDRESS;
2301 
2302 	status = sigaction_internal(signal, userAction ? &act : NULL,
2303 		userOldAction ? &oact : NULL);
2304 
2305 	// only copy the old action if a pointer has been given
2306 	if (status >= B_OK && userOldAction != NULL
2307 		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
2308 		return B_BAD_ADDRESS;
2309 
2310 	return status;
2311 }
2312 
2313 
2314 status_t
2315 _user_sigwait(const sigset_t *userSet, siginfo_t *userInfo, uint32 flags,
2316 	bigtime_t timeout)
2317 {
2318 	// copy userSet to stack
2319 	sigset_t set;
2320 	if (userSet == NULL || !IS_USER_ADDRESS(userSet)
2321 		|| user_memcpy(&set, userSet, sizeof(sigset_t)) != B_OK) {
2322 		return B_BAD_ADDRESS;
2323 	}
2324 
2325 	// userInfo is optional, but must be a user address when given
2326 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
2327 		return B_BAD_ADDRESS;
2328 
2329 	syscall_restart_handle_timeout_pre(flags, timeout);
2330 
2331 	flags |= B_CAN_INTERRUPT;
2332 
2333 	siginfo_t info;
2334 	status_t status = sigwait_internal(&set, &info, flags, timeout);
2335 	if (status == B_OK) {
2336 		// copy the info back to userland, if userInfo is non-NULL
2337 		if (userInfo != NULL)
2338 			status = user_memcpy(userInfo, &info, sizeof(info));
2339 	} else if (status == B_INTERRUPTED) {
2340 		// make sure we'll be restarted
2341 		Thread* thread = thread_get_current_thread();
2342 		atomic_or(&thread->flags, THREAD_FLAGS_ALWAYS_RESTART_SYSCALL);
2343 	}
2344 
2345 	return syscall_restart_handle_timeout_post(status, timeout);
2346 }
2347 
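// Illustrative userland sketch (not part of the original sources, hence
// disabled): the POSIX sigwait() family, presumably backed by _user_sigwait()
// above, is typically used like this to receive SIGUSR1 synchronously. This
// is application code, not kernel code; <signal.h> declarations are assumed.
#if 0
void
example_wait_for_sigusr1()
{
	sigset_t set;
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);

	// The waited-for signal must be blocked before calling sigwait().
	sigprocmask(SIG_BLOCK, &set, NULL);

	int signal;
	sigwait(&set, &signal);
}
#endif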
2348 
2349 status_t
2350 _user_sigsuspend(const sigset_t *userMask)
2351 {
2352 	sigset_t mask;
2353 
2354 	if (userMask == NULL)
2355 		return B_BAD_VALUE;
2356 	if (!IS_USER_ADDRESS(userMask)
2357 		|| user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK) {
2358 		return B_BAD_ADDRESS;
2359 	}
2360 
2361 	return sigsuspend_internal(&mask);
2362 }
2363 
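// Illustrative userland sketch (not part of the original sources, hence
// disabled): the classic race-free wait pattern that sigsuspend() (presumably
// backed by _user_sigsuspend() above) exists for. gGotSignal would be set by
// a SIGUSR1 handler that is not shown; this is application code, not kernel
// code.
#if 0
volatile sig_atomic_t gGotSignal = 0;

void
example_wait_with_sigsuspend()
{
	// Block SIGUSR1, so it can only arrive inside sigsuspend().
	sigset_t blockSet;
	sigemptyset(&blockSet);
	sigaddset(&blockSet, SIGUSR1);

	sigset_t oldSet;
	sigprocmask(SIG_BLOCK, &blockSet, &oldSet);

	while (!gGotSignal) {
		// Atomically unblocks SIGUSR1 and waits until a handler ran.
		sigsuspend(&oldSet);
	}

	sigprocmask(SIG_SETMASK, &oldSet, NULL);
}
#endif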
2364 
2365 status_t
2366 _user_sigpending(sigset_t *userSet)
2367 {
2368 	sigset_t set;
2369 	status_t status;
2370 
2371 	if (userSet == NULL)
2372 		return B_BAD_VALUE;
2373 	if (!IS_USER_ADDRESS(userSet))
2374 		return B_BAD_ADDRESS;
2375 
2376 	status = sigpending_internal(&set);
2377 	if (status == B_OK
2378 		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
2379 		return B_BAD_ADDRESS;
2380 
2381 	return status;
2382 }
2383 
2384 
2385 status_t
2386 _user_set_signal_stack(const stack_t* newUserStack, stack_t* oldUserStack)
2387 {
2388 	Thread *thread = thread_get_current_thread();
2389 	struct stack_t newStack, oldStack;
2390 	bool onStack = false;
2391 
2392 	if ((newUserStack != NULL && (!IS_USER_ADDRESS(newUserStack)
2393 			|| user_memcpy(&newStack, newUserStack, sizeof(stack_t)) < B_OK))
2394 		|| (oldUserStack != NULL && (!IS_USER_ADDRESS(oldUserStack)
2395 			|| user_memcpy(&oldStack, oldUserStack, sizeof(stack_t)) < B_OK)))
2396 		return B_BAD_ADDRESS;
2397 
2398 	if (thread->signal_stack_enabled) {
2399 		// determine whether or not the user thread is currently
2400 		// on the active signal stack
2401 		onStack = arch_on_signal_stack(thread);
2402 	}
2403 
2404 	if (oldUserStack != NULL) {
2405 		oldStack.ss_sp = (void *)thread->signal_stack_base;
2406 		oldStack.ss_size = thread->signal_stack_size;
2407 		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
2408 			| (onStack ? SS_ONSTACK : 0);
2409 	}
2410 
2411 	if (newUserStack != NULL) {
2412 		// no flags other than SS_DISABLE are allowed
2413 		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
2414 			return B_BAD_VALUE;
2415 
2416 		if ((newStack.ss_flags & SS_DISABLE) == 0) {
2417 			// check if the size is valid
2418 			if (newStack.ss_size < MINSIGSTKSZ)
2419 				return B_NO_MEMORY;
2420 			if (onStack)
2421 				return B_NOT_ALLOWED;
2422 			if (!IS_USER_ADDRESS(newStack.ss_sp))
2423 				return B_BAD_VALUE;
2424 
2425 			thread->signal_stack_base = (addr_t)newStack.ss_sp;
2426 			thread->signal_stack_size = newStack.ss_size;
2427 			thread->signal_stack_enabled = true;
2428 		} else
2429 			thread->signal_stack_enabled = false;
2430 	}
2431 
2432 	// only copy the old stack info if a pointer has been given
2433 	if (oldUserStack != NULL
2434 		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
2435 		return B_BAD_ADDRESS;
2436 
2437 	return B_OK;
2438 }
2439 
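// Illustrative userland sketch (not part of the original sources, hence
// disabled): installing an alternate signal stack with sigaltstack(), which
// is presumably backed by _user_set_signal_stack() above. A handler must then
// be installed with SA_ONSTACK to actually run on that stack. This is
// application code, not kernel code.
#if 0
static char sAlternateStack[MINSIGSTKSZ];

void
example_install_alternate_signal_stack()
{
	stack_t stack;
	stack.ss_sp = sAlternateStack;
	stack.ss_size = sizeof(sAlternateStack);
	stack.ss_flags = 0;
	sigaltstack(&stack, NULL);
}
#endif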
2440 
2441 /*!	Restores the environment of a function that was interrupted by a signal
2442 	handler call.
2443 	This syscall is invoked when a signal handler function returns. It
2444 	deconstructs the signal handler frame and restores the stack and register
2445 	state of the function that was interrupted by a signal. The syscall is
2446 	therefore somewhat unusual, since it does not return to the calling
2447 	function, but to someplace else. In case the signal interrupted a syscall,
2448 	it will appear as if the syscall just returned. That is also the reason why
2449 	this syscall returns an int64, since it needs to return the value the
2450 	interrupted syscall returns, which is potentially 64 bits wide.
2451 
2452 	\param userSignalFrameData The signal frame data created for the signal
2453 		handler. Potentially some data (e.g. registers) have been modified by
2454 		the signal handler.
2455 	\return In case the signal interrupted a syscall, the return value of that
2456 		syscall. Otherwise (i.e. in case of a (hardware) interrupt/exception)
2457 		the value might need to be tailored such that after a return to userland
2458 		the restored environment is identical to the interrupted one (unless
2459 		explicitly modified). E.g. for x86 to achieve that, the return value
2460 		must contain the eax|edx values of the interrupted environment.
2461 */
2462 int64
2463 _user_restore_signal_frame(struct signal_frame_data* userSignalFrameData)
2464 {
2465 	syscall_64_bit_return_value();
2466 
2467 	Thread *thread = thread_get_current_thread();
2468 
2469 	// copy the signal frame data from userland
2470 	signal_frame_data signalFrameData;
2471 	if (userSignalFrameData == NULL || !IS_USER_ADDRESS(userSignalFrameData)
2472 		|| user_memcpy(&signalFrameData, userSignalFrameData,
2473 			sizeof(signalFrameData)) != B_OK) {
2474 		// We failed to copy the signal frame data from userland. This is a
2475 		// serious problem. Kill the thread.
2476 		dprintf("_user_restore_signal_frame(): thread %" B_PRId32 ": Failed to "
2477 			"copy signal frame data (%p) from userland. Killing thread...\n",
2478 			thread->id, userSignalFrameData);
2479 		kill_thread(thread->id);
2480 		return B_BAD_ADDRESS;
2481 	}
2482 
2483 	// restore the signal block mask
2484 	InterruptsSpinLocker locker(thread->team->signal_lock);
2485 
2486 	thread->sig_block_mask
2487 		= signalFrameData.context.uc_sigmask & BLOCKABLE_SIGNALS;
2488 	update_current_thread_signals_flag();
2489 
2490 	locker.Unlock();
2491 
2492 	// restore the syscall restart related thread flags and the syscall restart
2493 	// parameters
2494 	atomic_and(&thread->flags,
2495 		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
2496 	atomic_or(&thread->flags, signalFrameData.thread_flags
2497 		& (THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
2498 
2499 	memcpy(thread->syscall_restart.parameters,
2500 		signalFrameData.syscall_restart_parameters,
2501 		sizeof(thread->syscall_restart.parameters));
2502 
2503 	// restore the previously stored Thread::user_signal_context
2504 	thread->user_signal_context = signalFrameData.context.uc_link;
2505 	if (thread->user_signal_context != NULL
2506 		&& !IS_USER_ADDRESS(thread->user_signal_context)) {
2507 		thread->user_signal_context = NULL;
2508 	}
2509 
2510 	// let the architecture specific code restore the registers
2511 	return arch_restore_signal_frame(&signalFrameData);
2512 }
2513