xref: /haiku/src/system/kernel/signal.cpp (revision f5821a1aee77d3b9a979b42c68a79e50b5ebaefe)
1 /*
2  * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
5  *
6  * Distributed under the terms of the MIT License.
7  */
8 
9 
10 /*! POSIX signals handling routines */
11 
12 
13 #include <ksignal.h>
14 
15 #include <errno.h>
16 #include <stddef.h>
17 #include <string.h>
18 
19 #include <OS.h>
20 #include <KernelExport.h>
21 
22 #include <cpu.h>
23 #include <debug.h>
24 #include <kernel.h>
25 #include <kscheduler.h>
26 #include <sem.h>
27 #include <syscall_restart.h>
28 #include <syscall_utils.h>
29 #include <team.h>
30 #include <thread.h>
31 #include <tracing.h>
32 #include <user_debugger.h>
33 #include <user_thread.h>
34 #include <util/AutoLock.h>
35 
36 
37 //#define TRACE_SIGNAL
38 #ifdef TRACE_SIGNAL
39 #	define TRACE(x) dprintf x
40 #else
41 #	define TRACE(x) ;
42 #endif
43 
44 
45 #define BLOCKABLE_SIGNALS	\
46 	(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)	\
47 	| SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD)	\
48 	| SIGNAL_TO_MASK(SIGNAL_CANCEL_THREAD)))
49 #define STOP_SIGNALS \
50 	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
51 	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
52 #define CONTINUE_SIGNALS \
53 	(SIGNAL_TO_MASK(SIGCONT) | SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD))
54 #define DEFAULT_IGNORE_SIGNALS \
55 	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
56 	| SIGNAL_TO_MASK(SIGCONT) \
57 	| SIGNAL_RANGE_TO_MASK(SIGNAL_REALTIME_MIN, SIGNAL_REALTIME_MAX))
58 #define NON_DEFERRABLE_SIGNALS	\
59 	(KILL_SIGNALS				\
60 	| SIGNAL_TO_MASK(SIGILL)	\
61 	| SIGNAL_TO_MASK(SIGFPE)	\
62 	| SIGNAL_TO_MASK(SIGSEGV))
63 
64 
// Per-signal metadata, indexed by signal number (0 == no signal).
// `name` is the short human-readable name used in trace/debug output;
// `priority` determines delivery order when several signals are pending:
// higher values are dequeued first. The kill signals (KILL, KILLTHR) get
// the highest priority (100), and the realtime signals get descending
// priorities so lower-numbered realtime signals are delivered first.
// NOTE(review): entries 30-40 carry a "SIG" prefix unlike the other names
// -- presumably intentional; verify against consumers of signal_name().
static const struct {
	const char*	name;
	int32		priority;
} kSignalInfos[__MAX_SIGNO + 1] = {
	{"NONE",			-1},
	{"HUP",				0},
	{"INT",				0},
	{"QUIT",			0},
	{"ILL",				0},
	{"CHLD",			0},
	{"ABRT",			0},
	{"PIPE",			0},
	{"FPE",				0},
	{"KILL",			100},
	{"STOP",			0},
	{"SEGV",			0},
	{"CONT",			0},
	{"TSTP",			0},
	{"ALRM",			0},
	{"TERM",			0},
	{"TTIN",			0},
	{"TTOU",			0},
	{"USR1",			0},
	{"USR2",			0},
	{"WINCH",			0},
	{"KILLTHR",			100},
	{"TRAP",			0},
	{"POLL",			0},
	{"PROF",			0},
	{"SYS",				0},
	{"URG",				0},
	{"VTALRM",			0},
	{"XCPU",			0},
	{"XFSZ",			0},
	{"SIGBUS",			0},
	{"SIGRESERVED1",	0},
	{"SIGRESERVED2",	0},
	{"SIGRT1",			8},
	{"SIGRT2",			7},
	{"SIGRT3",			6},
	{"SIGRT4",			5},
	{"SIGRT5",			4},
	{"SIGRT6",			3},
	{"SIGRT7",			2},
	{"SIGRT8",			1},
	{"invalid 41",		0},
	{"invalid 42",		0},
	{"invalid 43",		0},
	{"invalid 44",		0},
	{"invalid 45",		0},
	{"invalid 46",		0},
	{"invalid 47",		0},
	{"invalid 48",		0},
	{"invalid 49",		0},
	{"invalid 50",		0},
	{"invalid 51",		0},
	{"invalid 52",		0},
	{"invalid 53",		0},
	{"invalid 54",		0},
	{"invalid 55",		0},
	{"invalid 56",		0},
	{"invalid 57",		0},
	{"invalid 58",		0},
	{"invalid 59",		0},
	{"invalid 60",		0},
	{"invalid 61",		0},
	{"invalid 62",		0},
	{"CANCEL_THREAD",	0},
	{"CONTINUE_THREAD",	0}	// priority must be <= that of SIGSTOP
};
135 
136 
137 static inline const char*
138 signal_name(uint32 number)
139 {
140 	return number <= __MAX_SIGNO ? kSignalInfos[number].name : "invalid";
141 }
142 
143 
144 // #pragma mark - SignalHandledCaller
145 
146 
147 struct SignalHandledCaller {
148 	SignalHandledCaller(Signal* signal)
149 		:
150 		fSignal(signal)
151 	{
152 	}
153 
154 	~SignalHandledCaller()
155 	{
156 		Done();
157 	}
158 
159 	void Done()
160 	{
161 		if (fSignal != NULL) {
162 			fSignal->Handled();
163 			fSignal = NULL;
164 		}
165 	}
166 
167 private:
168 	Signal*	fSignal;
169 };
170 
171 
172 // #pragma mark - QueuedSignalsCounter
173 
174 
175 /*!	Creates a counter with the given limit.
176 	The limit defines the maximum the counter may reach. Since the
177 	BReferenceable's reference count is used, it is assumed that the owning
178 	team holds a reference and the reference count is one greater than the
179 	counter value.
180 	\param limit The maximum allowed value the counter may have. When
181 		\code < 0 \endcode, the value is not limited.
182 */
QueuedSignalsCounter::QueuedSignalsCounter(int32 limit)
	:
	fLimit(limit)
{
	// The BReferenceable base starts with reference count 1 -- that is the
	// owning team's reference, so the effective counter value starts at 0.
}
188 
189 
190 /*!	Increments the counter, if the limit allows that.
191 	\return \c true, if incrementing the counter succeeded, \c false otherwise.
192 */
bool
QueuedSignalsCounter::Increment()
{
	// no limit => no problem
	if (fLimit < 0) {
		AcquireReference();
		return true;
	}

	// Increment the reference count manually, so we can check atomically. We
	// compare the old value > fLimit, assuming that our (primary) owner has a
	// reference, we don't want to count.
	if (atomic_add(&fReferenceCount, 1) > fLimit) {
		// over the limit -- undo the increment (this may also release the
		// counter, should the owner have dropped its reference meanwhile)
		ReleaseReference();
		return false;
	}

	return true;
}
212 
213 
214 // #pragma mark - Signal
215 
216 
/*!	Creates an empty signal.
	No payload fields are initialized here; the object is presumably meant
	to be initialized via SetTo() before use -- TODO confirm. It is not
	queuing-capable (no counter) and not pending.
*/
Signal::Signal()
	:
	fCounter(NULL),
	fPending(false)
{
}
223 
224 
/*!	Copy constructor.
	Copies the signal's payload (number, codes, sender, status, value,
	etc.), but not its queuing state: the clone starts without a
	queued-signals counter and with the pending flag cleared.
*/
Signal::Signal(const Signal& other)
	:
	fCounter(NULL),
	fNumber(other.fNumber),
	fSignalCode(other.fSignalCode),
	fErrorCode(other.fErrorCode),
	fSendingProcess(other.fSendingProcess),
	fSendingUser(other.fSendingUser),
	fStatus(other.fStatus),
	fPollBand(other.fPollBand),
	fAddress(other.fAddress),
	fUserValue(other.fUserValue),
	fPending(false)
{
}
240 
241 
/*!	Creates a signal with the given number, signal code, and error code.
	The sending user is set to the caller's current user ID (getuid());
	status, poll band, address, and the user value are zero-initialized.
	\param number The signal number.
	\param signalCode The signal code (e.g. \c SI_USER).
	\param errorCode The error code associated with the signal.
	\param sendingProcess The ID of the sending process/team.
*/
Signal::Signal(uint32 number, int32 signalCode, int32 errorCode,
	pid_t sendingProcess)
	:
	fCounter(NULL),
	fNumber(number),
	fSignalCode(signalCode),
	fErrorCode(errorCode),
	fSendingProcess(sendingProcess),
	fSendingUser(getuid()),
	fStatus(0),
	fPollBand(0),
	fAddress(NULL),
	fPending(false)
{
	fUserValue.sival_ptr = NULL;
}
258 
259 
/*!	Destructor.
	Releases the reference to the queued-signals counter, if the signal was
	made queuable (the counter is set in CreateQueuable()).
*/
Signal::~Signal()
{
	if (fCounter != NULL)
		fCounter->ReleaseReference();
}
265 
266 
267 /*!	Creates a queuable clone of the given signal.
268 	Also enforces the current team's signal queuing limit.
269 
270 	\param signal The signal to clone.
271 	\param queuingRequired If \c true, the function will return an error code
272 		when creating the clone fails for any reason. Otherwise, the function
273 		will set \a _signalToQueue to \c NULL, but still return \c B_OK.
274 	\param _signalToQueue Return parameter. Set to the clone of the signal.
275 	\return When \c queuingRequired is \c false, always \c B_OK. Otherwise
276 		\c B_OK, when creating the signal clone succeeds, another error code,
277 		when it fails.
278 */
/*static*/ status_t
Signal::CreateQueuable(const Signal& signal, bool queuingRequired,
	Signal*& _signalToQueue)
{
	_signalToQueue = NULL;

	// If interrupts are disabled, we can't allocate a signal.
	if (!are_interrupts_enabled())
		return queuingRequired ? B_BAD_VALUE : B_OK;

	// increment the queued signals counter
	QueuedSignalsCounter* counter
		= thread_get_current_thread()->team->QueuedSignalsCounter();
	if (!counter->Increment())
		return queuingRequired ? EAGAIN : B_OK;

	// allocate the signal
	Signal* signalToQueue = new(std::nothrow) Signal(signal);
	if (signalToQueue == NULL) {
		counter->Decrement();
		return queuingRequired ? B_NO_MEMORY : B_OK;
	}

	// The clone takes over the counter reference acquired by Increment();
	// it is released again in ~Signal().
	signalToQueue->fCounter = counter;

	_signalToQueue = signalToQueue;
	return B_OK;
}
307 
/*!	(Re)initializes the signal as an unqueued \c SI_USER signal with the
	given number, sent by the current team under its effective user ID.
	All other payload fields are cleared.
	\param number The signal number.
*/
void
Signal::SetTo(uint32 number)
{
	Team* team = thread_get_current_thread()->team;

	fNumber = number;
	fSignalCode = SI_USER;
	fErrorCode = 0;
	fSendingProcess = team->id;
	fSendingUser = team->effective_uid;
		// assuming scheduler lock is being held
	fStatus = 0;
	fPollBand = 0;
	fAddress = NULL;
	fUserValue.sival_ptr = NULL;
}
324 
325 
/*!	Returns the delivery priority of this signal, as defined by the
	kSignalInfos table.
	NOTE(review): fNumber is not range-checked here -- assumes it is
	<= __MAX_SIGNO; presumably guaranteed by all code paths setting the
	number -- TODO confirm.
*/
int32
Signal::Priority() const
{
	return kSignalInfos[fNumber].priority;
}
331 
332 
/*!	To be called when the signal has been handled (or discarded).
	Releases the reference the pending-signal bookkeeping held on this
	signal.
*/
void
Signal::Handled()
{
	ReleaseReference();
}
338 
339 
/*!	BReferenceable hook, invoked when the last reference is released.
	Deletes the object immediately when interrupts are enabled; otherwise
	the deletion is deferred, presumably because freeing memory is not safe
	with interrupts disabled.
*/
void
Signal::LastReferenceReleased()
{
	if (are_interrupts_enabled())
		delete this;
	else
		deferred_delete(this);
}
348 
349 
350 // #pragma mark - PendingSignals
351 
352 
/*!	Creates an empty set of pending signals (no queued and no unqueued
	signals).
*/
PendingSignals::PendingSignals()
	:
	fQueuedSignalsMask(0),
	fUnqueuedSignalsMask(0)
{
}
359 
360 
/*!	Destructor. Releases the references to all still-queued signals via
	Clear().
*/
PendingSignals::~PendingSignals()
{
	Clear();
}
365 
366 
367 /*!	Of the signals in \a nonBlocked returns the priority of that with the
368 	highest priority.
369 	\param nonBlocked The mask with the non-blocked signals.
370 	\return The priority of the highest priority non-blocked signal, or, if all
371 		signals are blocked, \c -1.
372 */
int32
PendingSignals::HighestSignalPriority(sigset_t nonBlocked) const
{
	// We only need the priority; the signal out-parameters are discarded.
	Signal* queuedSignal;
	int32 unqueuedSignal;
	return _GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal);
}
380 
381 
382 void
383 PendingSignals::Clear()
384 {
385 	// release references of all queued signals
386 	while (Signal* signal = fQueuedSignals.RemoveHead())
387 		signal->Handled();
388 
389 	fQueuedSignalsMask = 0;
390 	fUnqueuedSignalsMask = 0;
391 }
392 
393 
394 /*!	Adds a signal.
395 	Takes over the reference to the signal from the caller.
396 */
397 void
398 PendingSignals::AddSignal(Signal* signal)
399 {
400 	// queue according to priority
401 	int32 priority = signal->Priority();
402 	Signal* otherSignal = NULL;
403 	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
404 			(otherSignal = it.Next()) != NULL;) {
405 		if (priority > otherSignal->Priority())
406 			break;
407 	}
408 
409 	fQueuedSignals.InsertBefore(otherSignal, signal);
410 	signal->SetPending(true);
411 
412 	fQueuedSignalsMask |= SIGNAL_TO_MASK(signal->Number());
413 }
414 
415 
/*!	Removes the given signal from the queue and updates the queued-signals
	mask. Unlike RemoveSignals(), no reference is released here -- the
	caller keeps its reference to \a signal.
*/
void
PendingSignals::RemoveSignal(Signal* signal)
{
	signal->SetPending(false);
	fQueuedSignals.Remove(signal);
	_UpdateQueuedSignalMask();
}
423 
424 
/*!	Removes all pending signals in \a mask, both queued and unqueued.
	The references to the removed queued signals are released.
	\param mask The mask of the signals to remove.
*/
void
PendingSignals::RemoveSignals(sigset_t mask)
{
	// remove from queued signals
	if ((fQueuedSignalsMask & mask) != 0) {
		for (SignalList::Iterator it = fQueuedSignals.GetIterator();
				Signal* signal = it.Next();) {
			// remove signal, if in mask
			if ((SIGNAL_TO_MASK(signal->Number()) & mask) != 0) {
				it.Remove();
				signal->SetPending(false);
				signal->Handled();
			}
		}

		fQueuedSignalsMask &= ~mask;
	}

	// remove from unqueued signals
	fUnqueuedSignalsMask &= ~mask;
}
446 
447 
448 /*!	Removes and returns a signal in \a nonBlocked that has the highest priority.
449 	The caller gets a reference to the returned signal, if any.
450 	\param nonBlocked The mask of non-blocked signals.
451 	\param buffer If the signal is not queued this buffer is returned. In this
452 		case the method acquires a reference to \a buffer, so that the caller
453 		gets a reference also in this case.
454 	\return The removed signal or \c NULL, if all signals are blocked.
455 */
Signal*
PendingSignals::DequeueSignal(sigset_t nonBlocked, Signal& buffer)
{
	// find the signal with the highest priority
	Signal* queuedSignal;
	int32 unqueuedSignal;
	if (_GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal) < 0)
		return NULL;

	// if it is a queued signal, dequeue it
	if (queuedSignal != NULL) {
		// the reference previously held by the queue is passed to the caller
		fQueuedSignals.Remove(queuedSignal);
		queuedSignal->SetPending(false);
		_UpdateQueuedSignalMask();
		return queuedSignal;
	}

	// it is unqueued -- remove from mask
	fUnqueuedSignalsMask &= ~SIGNAL_TO_MASK(unqueuedSignal);

	// init buffer; acquire a reference on it, so the caller gets one in
	// this case, too
	buffer.SetTo(unqueuedSignal);
	buffer.AcquireReference();
	return &buffer;
}
481 
482 
/*!	Of the signals in \a nonBlocked returns the priority of that with the
	highest priority.
	\param nonBlocked The mask with the non-blocked signals.
486 	\param _queuedSignal If the found signal is a queued signal, the variable
487 		will be set to that signal, otherwise to \c NULL.
488 	\param _unqueuedSignal If the found signal is an unqueued signal, the
489 		variable is set to that signal's number, otherwise to \c -1.
490 	\return The priority of the highest priority non-blocked signal, or, if all
491 		signals are blocked, \c -1.
492 */
int32
PendingSignals::_GetHighestPrioritySignal(sigset_t nonBlocked,
	Signal*& _queuedSignal, int32& _unqueuedSignal) const
{
	// Check queued signals. The queue is kept sorted by descending priority
	// (see AddSignal()), so the first non-blocked match already is the
	// highest-priority queued candidate.
	Signal* queuedSignal = NULL;
	int32 queuedPriority = -1;

	if ((fQueuedSignalsMask & nonBlocked) != 0) {
		for (SignalList::ConstIterator it = fQueuedSignals.GetIterator();
				Signal* signal = it.Next();) {
			if ((SIGNAL_TO_MASK(signal->Number()) & nonBlocked) != 0) {
				queuedPriority = signal->Priority();
				queuedSignal = signal;
				break;
			}
		}
	}

	// Check unqueued signals: scan signal numbers upward, clearing each
	// examined bit, and remember the best priority seen.
	int32 unqueuedSignal = -1;
	int32 unqueuedPriority = -1;

	sigset_t unqueuedSignals = fUnqueuedSignalsMask & nonBlocked;
	if (unqueuedSignals != 0) {
		int32 signal = 1;
		while (unqueuedSignals != 0) {
			sigset_t mask = SIGNAL_TO_MASK(signal);
			if ((unqueuedSignals & mask) != 0) {
				int32 priority = kSignalInfos[signal].priority;
				if (priority > unqueuedPriority) {
					unqueuedSignal = signal;
					unqueuedPriority = priority;
				}
				unqueuedSignals &= ~mask;
			}

			signal++;
		}
	}

	// Return found queued or unqueued signal, whichever has the higher
	// priority. Note: on a tie the queued signal wins (">=").
	if (queuedPriority >= unqueuedPriority) {
		_queuedSignal = queuedSignal;
		_unqueuedSignal = -1;
		return queuedPriority;
	}

	_queuedSignal = NULL;
	_unqueuedSignal = unqueuedSignal;
	return unqueuedPriority;
}
546 
547 
548 void
549 PendingSignals::_UpdateQueuedSignalMask()
550 {
551 	sigset_t mask = 0;
552 	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
553 			Signal* signal = it.Next();) {
554 		mask |= SIGNAL_TO_MASK(signal->Number());
555 	}
556 
557 	fQueuedSignalsMask = mask;
558 }
559 
560 
561 // #pragma mark - signal tracing
562 
563 
564 #if SIGNAL_TRACING
565 
566 namespace SignalTracing {
567 
568 
/*!	Trace entry recording that a signal is about to be handled by the
	current thread.
*/
class HandleSignal : public AbstractTraceEntry {
	public:
		HandleSignal(uint32 signal)
			:
			fSignal(signal)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal handle:  %" B_PRIu32 " (%s)" , fSignal,
				signal_name(fSignal));
		}

	private:
		uint32		fSignal;
};
587 
588 
/*!	Trace entry recording that a userland signal handler is about to be
	executed. Only the sa_handler pointer of the action is captured.
*/
class ExecuteSignalHandler : public AbstractTraceEntry {
	public:
		ExecuteSignalHandler(uint32 signal, struct sigaction* handler)
			:
			fSignal(signal),
			fHandler((void*)handler->sa_handler)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal exec handler: signal: %" B_PRIu32 " (%s), "
				"handler: %p", fSignal, signal_name(fSignal), fHandler);
		}

	private:
		uint32	fSignal;
		void*	fHandler;
};
609 
610 
611 class SendSignal : public AbstractTraceEntry {
612 	public:
613 		SendSignal(pid_t target, uint32 signal, uint32 flags)
614 			:
615 			fTarget(target),
616 			fSignal(signal),
617 			fFlags(flags)
618 		{
619 			Initialized();
620 		}
621 
622 		virtual void AddDump(TraceOutput& out)
623 		{
624 			out.Print("signal send: target: %ld, signal: %lu (%s), "
625 				"flags: 0x%lx", fTarget, fSignal, signal_name(fSignal), fFlags);
626 		}
627 
628 	private:
629 		pid_t	fTarget;
630 		uint32	fSignal;
631 		uint32	fFlags;
632 };
633 
634 
635 class SigAction : public AbstractTraceEntry {
636 	public:
637 		SigAction(uint32 signal, const struct sigaction* act)
638 			:
639 			fSignal(signal),
640 			fAction(*act)
641 		{
642 			Initialized();
643 		}
644 
645 		virtual void AddDump(TraceOutput& out)
646 		{
647 			out.Print("signal action: signal: %lu (%s), "
648 				"action: {handler: %p, flags: 0x%x, mask: 0x%llx}", fSignal,
649 				signal_name(fSignal), fAction.sa_handler, fAction.sa_flags,
650 				(long long)fAction.sa_mask);
651 		}
652 
653 	private:
654 		uint32				fSignal;
655 		struct sigaction	fAction;
656 };
657 
658 
/*!	Trace entry recording a sigprocmask()-style change of the current
	thread's signal block mask. The old mask is read from the current
	thread at construction time.
*/
class SigProcMask : public AbstractTraceEntry {
	public:
		SigProcMask(int how, sigset_t mask)
			:
			fHow(how),
			fMask(mask),
			fOldMask(thread_get_current_thread()->sig_block_mask)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			// an unrecognized "how" keeps the "invalid" label
			const char* how = "invalid";
			switch (fHow) {
				case SIG_BLOCK:
					how = "block";
					break;
				case SIG_UNBLOCK:
					how = "unblock";
					break;
				case SIG_SETMASK:
					how = "set";
					break;
			}

			out.Print("signal proc mask: %s 0x%llx, old mask: 0x%llx", how,
				(long long)fMask, (long long)fOldMask);
		}

	private:
		int			fHow;
		sigset_t	fMask;
		sigset_t	fOldMask;
};
694 
695 
/*!	Trace entry recording the start of a sigsuspend(); captures the
	requested temporary mask and the current thread's previous block mask.
*/
class SigSuspend : public AbstractTraceEntry {
	public:
		SigSuspend(sigset_t mask)
			:
			fMask(mask),
			fOldMask(thread_get_current_thread()->sig_block_mask)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal suspend: %#llx, old mask: %#llx",
				(long long)fMask, (long long)fOldMask);
		}

	private:
		sigset_t	fMask;
		sigset_t	fOldMask;
};
716 
717 
/*!	Trace entry recording the end of a sigsuspend(); captures the current
	thread's then-pending signals.
*/
class SigSuspendDone : public AbstractTraceEntry {
	public:
		SigSuspendDone()
			:
			fSignals(thread_get_current_thread()->ThreadPendingSignals())
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal suspend done: %#" B_PRIx32, fSignals);
		}

	private:
		uint32		fSignals;
};
735 
736 }	// namespace SignalTracing
737 
738 #	define T(x)	new(std::nothrow) SignalTracing::x
739 
740 #else
741 #	define T(x)
742 #endif	// SIGNAL_TRACING
743 
744 
745 // #pragma mark -
746 
747 
748 /*!	Updates the given thread's Thread::flags field according to what signals are
749 	pending.
750 	The caller must hold the scheduler lock.
751 */
752 static void
753 update_thread_signals_flag(Thread* thread)
754 {
755 	sigset_t mask = ~thread->sig_block_mask;
756 	if ((thread->AllPendingSignals() & mask) != 0)
757 		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
758 	else
759 		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
760 }
761 
762 
763 /*!	Updates the current thread's Thread::flags field according to what signals
764 	are pending.
765 	The caller must hold the scheduler lock.
766 */
static void
update_current_thread_signals_flag()
{
	// convenience wrapper for the current thread
	update_thread_signals_flag(thread_get_current_thread());
}
772 
773 
774 /*!	Updates all of the given team's threads' Thread::flags fields according to
775 	what signals are pending.
776 	The caller must hold the scheduler lock.
777 */
778 static void
779 update_team_threads_signal_flag(Team* team)
780 {
781 	for (Thread* thread = team->thread_list; thread != NULL;
782 			thread = thread->team_next) {
783 		update_thread_signals_flag(thread);
784 	}
785 }
786 
787 
788 /*!	Notifies the user debugger about a signal to be handled.
789 
790 	The caller must not hold any locks.
791 
792 	\param thread The current thread.
793 	\param signal The signal to be handled.
794 	\param handler The installed signal handler for the signal.
795 	\param deadly Indicates whether the signal is deadly.
796 	\return \c true, if the signal shall be handled, \c false, if it shall be
797 		ignored.
798 */
static bool
notify_debugger(Thread* thread, Signal* signal, struct sigaction& handler,
	bool deadly)
{
	uint64 signalMask = SIGNAL_TO_MASK(signal->Number());

	// first check the ignore signal masks the debugger specified for the thread
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// one-shot ignore: consume the bit and deliver the signal unseen
	if ((thread->debug_info.ignore_signals_once & signalMask) != 0) {
		thread->debug_info.ignore_signals_once &= ~signalMask;
		return true;
	}

	// permanent ignore: deliver without notifying the debugger
	// (the locker's destructor unlocks on this early return)
	if ((thread->debug_info.ignore_signals & signalMask) != 0)
		return true;

	threadDebugInfoLocker.Unlock();

	// deliver the event -- must not hold the debug info lock here
	return user_debug_handle_signal(signal->Number(), &handler, deadly);
}
821 
822 
823 /*!	Removes and returns a signal with the highest priority in \a nonBlocked that
824 	is pending in the given thread or its team.
825 	After dequeuing the signal the Thread::flags field of the affected threads
826 	are updated.
827 	The caller gets a reference to the returned signal, if any.
828 	The caller must hold the scheduler lock.
829 	\param thread The thread.
830 	\param nonBlocked The mask of non-blocked signals.
831 	\param buffer If the signal is not queued this buffer is returned. In this
832 		case the method acquires a reference to \a buffer, so that the caller
833 		gets a reference also in this case.
834 	\return The removed signal or \c NULL, if all signals are blocked.
835 */
static Signal*
dequeue_thread_or_team_signal(Thread* thread, sigset_t nonBlocked,
	Signal& buffer)
{
	Team* team = thread->team;
	Signal* signal;
	// Dequeue from whichever set -- team-wide or thread-specific -- holds
	// the higher-priority pending signal. Note: on a tie the thread's
	// signal wins (strict ">"), and the corresponding flags are updated.
	if (team->HighestPendingSignalPriority(nonBlocked)
			> thread->HighestPendingSignalPriority(nonBlocked)) {
		signal = team->DequeuePendingSignal(nonBlocked, buffer);
		update_team_threads_signal_flag(team);
	} else {
		signal = thread->DequeuePendingSignal(nonBlocked, buffer);
		update_thread_signals_flag(thread);
	}

	return signal;
}
853 
854 
/*!	Gathers all data a userland signal handler invocation needs and hands it
	to the architecture specific code to set up the userland stack frame.
	\param thread The current thread.
	\param action The signal action installed for the signal.
	\param signal The signal to be handled.
	\param signalMask The signal mask stored in the userland context
		(presumably restored when the handler returns -- TODO confirm
		against arch_setup_signal_frame()).
	\return \c B_OK on success, another error code otherwise.
*/
static status_t
setup_signal_frame(Thread* thread, struct sigaction* action, Signal* signal,
	sigset_t signalMask)
{
	// prepare the data, we need to copy onto the user stack
	signal_frame_data frameData;

	// signal info
	frameData.info.si_signo = signal->Number();
	frameData.info.si_code = signal->SignalCode();
	frameData.info.si_errno = signal->ErrorCode();
	frameData.info.si_pid = signal->SendingProcess();
	frameData.info.si_uid = signal->SendingUser();
	frameData.info.si_addr = signal->Address();
	frameData.info.si_status = signal->Status();
	frameData.info.si_band = signal->PollBand();
	frameData.info.si_value = signal->UserValue();

	// context
	frameData.context.uc_link = thread->user_signal_context;
	frameData.context.uc_sigmask = signalMask;
	// uc_stack and uc_mcontext are filled in by the architecture specific code.

	// user data
	frameData.user_data = action->sa_userdata;

	// handler function: SA_SIGINFO selects the three-argument sa_sigaction
	// form over the plain sa_handler
	frameData.siginfo_handler = (action->sa_flags & SA_SIGINFO) != 0;
	frameData.handler = frameData.siginfo_handler
		? (void*)action->sa_sigaction : (void*)action->sa_handler;

	// thread flags -- save and clear the thread's syscall restart related
	// flags
	frameData.thread_flags = atomic_and(&thread->flags,
		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));

	// syscall restart related fields
	memcpy(frameData.syscall_restart_parameters,
		thread->syscall_restart.parameters,
		sizeof(frameData.syscall_restart_parameters));

	// commpage address
	frameData.commpage_address = thread->team->commpage_address;

	// syscall_restart_return_value is filled in by the architecture specific
	// code.

	return arch_setup_signal_frame(thread, action, &frameData);
}
904 
905 
906 /*! Actually handles pending signals -- i.e. the thread will exit, a custom
907 	signal handler is prepared, or whatever the signal demands.
908 	The function will not return, when a deadly signal is encountered. The
909 	function will suspend the thread indefinitely, when a stop signal is
910 	encountered.
911 	Interrupts must be enabled.
912 	\param thread The current thread.
913 */
914 void
915 handle_signals(Thread* thread)
916 {
917 	Team* team = thread->team;
918 
919 	TeamLocker teamLocker(team);
920 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
921 
922 	// If userland requested to defer signals, we check now, if this is
923 	// possible.
924 	sigset_t nonBlockedMask = ~thread->sig_block_mask;
925 	sigset_t signalMask = thread->AllPendingSignals() & nonBlockedMask;
926 
927 	if (thread->user_thread->defer_signals > 0
928 		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0
929 		&& thread->sigsuspend_original_unblocked_mask == 0) {
930 		thread->user_thread->pending_signals = signalMask;
931 		return;
932 	}
933 
934 	thread->user_thread->pending_signals = 0;
935 
936 	// determine syscall restart behavior
937 	uint32 restartFlags = atomic_and(&thread->flags,
938 		~THREAD_FLAGS_DONT_RESTART_SYSCALL);
939 	bool alwaysRestart
940 		= (restartFlags & THREAD_FLAGS_ALWAYS_RESTART_SYSCALL) != 0;
941 	bool restart = alwaysRestart
942 		|| (restartFlags & THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;
943 
944 	// Loop until we've handled all signals.
945 	bool initialIteration = true;
946 	while (true) {
947 		if (initialIteration) {
948 			initialIteration = false;
949 		} else {
950 			teamLocker.Lock();
951 			schedulerLocker.Lock();
952 
953 			signalMask = thread->AllPendingSignals() & nonBlockedMask;
954 		}
955 
956 		// Unless SIGKILL[THR] are pending, check, if the thread shall stop for
957 		// debugging.
958 		if ((signalMask & KILL_SIGNALS) == 0
959 			&& (atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP)
960 				!= 0) {
961 			schedulerLocker.Unlock();
962 			teamLocker.Unlock();
963 
964 			user_debug_stop_thread();
965 			continue;
966 		}
967 
968 		// We're done, if there aren't any pending signals anymore.
969 		if ((signalMask & nonBlockedMask) == 0)
970 			break;
971 
972 		// get pending non-blocked thread or team signal with the highest
973 		// priority
974 		Signal stackSignal;
975 		Signal* signal = dequeue_thread_or_team_signal(thread, nonBlockedMask,
976 			stackSignal);
977 		ASSERT(signal != NULL);
978 		SignalHandledCaller signalHandledCaller(signal);
979 
980 		schedulerLocker.Unlock();
981 
982 		// get the action for the signal
983 		struct sigaction handler;
984 		if (signal->Number() <= MAX_SIGNAL_NUMBER) {
985 			handler = team->SignalActionFor(signal->Number());
986 		} else {
987 			handler.sa_handler = SIG_DFL;
988 			handler.sa_flags = 0;
989 		}
990 
991 		if ((handler.sa_flags & SA_ONESHOT) != 0
992 			&& handler.sa_handler != SIG_IGN && handler.sa_handler != SIG_DFL) {
993 			team->SignalActionFor(signal->Number()).sa_handler = SIG_DFL;
994 		}
995 
996 		T(HandleSignal(signal->Number()));
997 
998 		teamLocker.Unlock();
999 
1000 		// debug the signal, if a debugger is installed and the signal debugging
1001 		// flag is set
1002 		bool debugSignal = (~atomic_get(&team->debug_info.flags)
1003 				& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS))
1004 			== 0;
1005 
1006 		// handle the signal
1007 		TRACE(("Thread %" B_PRId32 " received signal %s\n", thread->id,
1008 			kSignalInfos[signal->Number()].name));
1009 
1010 		if (handler.sa_handler == SIG_IGN) {
1011 			// signal is to be ignored
1012 			// TODO: apply zombie cleaning on SIGCHLD
1013 
1014 			// notify the debugger
1015 			if (debugSignal)
1016 				notify_debugger(thread, signal, handler, false);
1017 			continue;
1018 		} else if (handler.sa_handler == SIG_DFL) {
1019 			// default signal behaviour
1020 
1021 			// realtime signals are ignored by default
1022 			if (signal->Number() >= SIGNAL_REALTIME_MIN
1023 				&& signal->Number() <= SIGNAL_REALTIME_MAX) {
1024 				// notify the debugger
1025 				if (debugSignal)
1026 					notify_debugger(thread, signal, handler, false);
1027 				continue;
1028 			}
1029 
1030 			bool killTeam = false;
1031 			switch (signal->Number()) {
1032 				case SIGCHLD:
1033 				case SIGWINCH:
1034 				case SIGURG:
1035 					// notify the debugger
1036 					if (debugSignal)
1037 						notify_debugger(thread, signal, handler, false);
1038 					continue;
1039 
1040 				case SIGNAL_CANCEL_THREAD:
1041 					// set up the signal handler
1042 					handler.sa_handler = thread->cancel_function;
1043 					handler.sa_flags = 0;
1044 					handler.sa_mask = 0;
1045 					handler.sa_userdata = NULL;
1046 
1047 					restart = false;
1048 						// we always want to interrupt
1049 					break;
1050 
1051 				case SIGNAL_CONTINUE_THREAD:
1052 					// prevent syscall restart, but otherwise ignore
1053 					restart = false;
1054 					atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1055 					continue;
1056 
1057 				case SIGCONT:
1058 					// notify the debugger
1059 					if (debugSignal
1060 						&& !notify_debugger(thread, signal, handler, false))
1061 						continue;
1062 
1063 					// notify threads waiting for team state changes
1064 					if (thread == team->main_thread) {
1065 						team->LockTeamAndParent(false);
1066 
1067 						team_set_job_control_state(team,
1068 							JOB_CONTROL_STATE_CONTINUED, signal, false);
1069 
1070 						team->UnlockTeamAndParent();
1071 
1072 						// The standard states that the system *may* send a
1073 						// SIGCHLD when a child is continued. I haven't found
1074 						// a good reason why we would want to, though.
1075 					}
1076 					continue;
1077 
1078 				case SIGSTOP:
1079 				case SIGTSTP:
1080 				case SIGTTIN:
1081 				case SIGTTOU:
1082 				{
1083 					// notify the debugger
1084 					if (debugSignal
1085 						&& !notify_debugger(thread, signal, handler, false))
1086 						continue;
1087 
1088 					// The terminal-sent stop signals are allowed to stop the
1089 					// process only, if it doesn't belong to an orphaned process
1090 					// group. Otherwise the signal must be discarded.
1091 					team->LockProcessGroup();
1092 					AutoLocker<ProcessGroup> groupLocker(team->group, true);
1093 					if (signal->Number() != SIGSTOP
1094 						&& team->group->IsOrphaned()) {
1095 						continue;
1096 					}
1097 
1098 					// notify threads waiting for team state changes
1099 					if (thread == team->main_thread) {
1100 						team->LockTeamAndParent(false);
1101 
1102 						team_set_job_control_state(team,
1103 							JOB_CONTROL_STATE_STOPPED, signal, false);
1104 
1105 						// send a SIGCHLD to the parent (if it does have
1106 						// SA_NOCLDSTOP defined)
1107 						Team* parentTeam = team->parent;
1108 
1109 						struct sigaction& parentHandler
1110 							= parentTeam->SignalActionFor(SIGCHLD);
1111 						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
1112 							Signal childSignal(SIGCHLD, CLD_STOPPED, B_OK,
1113 								team->id);
1114 							childSignal.SetStatus(signal->Number());
1115 							childSignal.SetSendingUser(signal->SendingUser());
1116 							send_signal_to_team(parentTeam, childSignal, 0);
1117 						}
1118 
1119 						team->UnlockTeamAndParent();
1120 					}
1121 
1122 					groupLocker.Unlock();
1123 
1124 					// Suspend the thread, unless there's already a signal to
1125 					// continue or kill pending.
1126 					InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1127 					if ((thread->AllPendingSignals()
1128 							& (CONTINUE_SIGNALS | KILL_SIGNALS)) == 0) {
1129 						thread->next_state = B_THREAD_SUSPENDED;
1130 						scheduler_reschedule();
1131 					}
1132 					schedulerLocker.Unlock();
1133 
1134 					continue;
1135 				}
1136 
1137 				case SIGSEGV:
1138 				case SIGBUS:
1139 				case SIGFPE:
1140 				case SIGILL:
1141 				case SIGTRAP:
1142 				case SIGABRT:
1143 				case SIGKILL:
1144 				case SIGQUIT:
1145 				case SIGPOLL:
1146 				case SIGPROF:
1147 				case SIGSYS:
1148 				case SIGVTALRM:
1149 				case SIGXCPU:
1150 				case SIGXFSZ:
1151 				default:
1152 					TRACE(("Shutting down team %" B_PRId32 " due to signal %"
1153 						B_PRIu32 " received in thread %" B_PRIu32 " \n",
1154 						team->id, signal->Number(), thread->id));
1155 
1156 					// This signal kills the team regardless which thread
1157 					// received it.
1158 					killTeam = true;
1159 
1160 					// fall through
1161 				case SIGKILLTHR:
1162 					// notify the debugger
1163 					if (debugSignal && signal->Number() != SIGKILL
1164 						&& signal->Number() != SIGKILLTHR
1165 						&& !notify_debugger(thread, signal, handler, true)) {
1166 						continue;
1167 					}
1168 
1169 					if (killTeam || thread == team->main_thread) {
1170 						// The signal is terminal for the team or the thread is
1171 						// the main thread. In either case the team is going
1172 						// down. Set its exit status, if that didn't happen yet.
1173 						teamLocker.Lock();
1174 
1175 						if (!team->exit.initialized) {
1176 							team->exit.reason = CLD_KILLED;
1177 							team->exit.signal = signal->Number();
1178 							team->exit.signaling_user = signal->SendingUser();
1179 							team->exit.status = 0;
1180 							team->exit.initialized = true;
1181 						}
1182 
1183 						teamLocker.Unlock();
1184 
1185 						// If this is not the main thread, send it a SIGKILLTHR
1186 						// so that the team terminates.
1187 						if (thread != team->main_thread) {
1188 							Signal childSignal(SIGKILLTHR, SI_USER, B_OK,
1189 								team->id);
1190 							send_signal_to_thread_id(team->id, childSignal, 0);
1191 						}
1192 					}
1193 
1194 					// explicitly get rid of the signal reference, since
1195 					// thread_exit() won't return
1196 					signalHandledCaller.Done();
1197 
1198 					thread_exit();
1199 						// won't return
1200 			}
1201 		}
1202 
1203 		// User defined signal handler
1204 
1205 		// notify the debugger
1206 		if (debugSignal && !notify_debugger(thread, signal, handler, false))
1207 			continue;
1208 
1209 		if (!restart
1210 				|| (!alwaysRestart && (handler.sa_flags & SA_RESTART) == 0)) {
1211 			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1212 		}
1213 
1214 		T(ExecuteSignalHandler(signal->Number(), &handler));
1215 
1216 		TRACE(("### Setting up custom signal handler frame...\n"));
1217 
1218 		// save the old block mask -- we may need to adjust it for the handler
1219 		schedulerLocker.Lock();
1220 
1221 		sigset_t oldBlockMask = thread->sigsuspend_original_unblocked_mask != 0
1222 			? ~thread->sigsuspend_original_unblocked_mask
1223 			: thread->sig_block_mask;
1224 
1225 		// Update the block mask while the signal handler is running -- it
1226 		// will be automatically restored when the signal frame is left.
1227 		thread->sig_block_mask |= handler.sa_mask & BLOCKABLE_SIGNALS;
1228 
1229 		if ((handler.sa_flags & SA_NOMASK) == 0) {
1230 			thread->sig_block_mask
1231 				|= SIGNAL_TO_MASK(signal->Number()) & BLOCKABLE_SIGNALS;
1232 		}
1233 
1234 		update_current_thread_signals_flag();
1235 
1236 		schedulerLocker.Unlock();
1237 
1238 		setup_signal_frame(thread, &handler, signal, oldBlockMask);
1239 
1240 		// Reset sigsuspend_original_unblocked_mask. It would have been set by
1241 		// sigsuspend_internal(). In that case, above we set oldBlockMask
1242 		// accordingly so that after the handler returns the thread's signal
1243 		// mask is reset.
1244 		thread->sigsuspend_original_unblocked_mask = 0;
1245 
1246 		return;
1247 	}
1248 
1249 	// We have not handled any signal (respectively only ignored ones).
1250 
1251 	// If sigsuspend_original_unblocked_mask is non-null, we came from a
1252 	// sigsuspend_internal(). Not having handled any signal, we should restart
1253 	// the syscall.
1254 	if (thread->sigsuspend_original_unblocked_mask != 0) {
1255 		restart = true;
1256 		atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
1257 	} else if (!restart) {
1258 		// clear syscall restart thread flag, if we're not supposed to restart
1259 		// the syscall
1260 		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1261 	}
1262 }
1263 
1264 
1265 /*!	Checks whether the given signal is blocked for the given team (i.e. all of
1266 	its threads).
1267 	The caller must hold the team's lock and the scheduler lock.
1268 */
1269 bool
1270 is_team_signal_blocked(Team* team, int signal)
1271 {
1272 	sigset_t mask = SIGNAL_TO_MASK(signal);
1273 
1274 	for (Thread* thread = team->thread_list; thread != NULL;
1275 			thread = thread->team_next) {
1276 		if ((thread->sig_block_mask & mask) == 0)
1277 			return false;
1278 	}
1279 
1280 	return true;
1281 }
1282 
1283 
1284 /*!	Gets (guesses) the current thread's currently used stack from the given
1285 	stack pointer.
1286 	Fills in \a stack with either the signal stack or the thread's user stack.
1287 	\param address A stack pointer address to be used to determine the used
1288 		stack.
1289 	\param stack Filled in by the function.
1290 */
1291 void
1292 signal_get_user_stack(addr_t address, stack_t* stack)
1293 {
1294 	// If a signal stack is enabled for the stack and the address is within it,
1295 	// return the signal stack. In all other cases return the thread's user
1296 	// stack, even if the address doesn't lie within it.
1297 	Thread* thread = thread_get_current_thread();
1298 	if (thread->signal_stack_enabled && address >= thread->signal_stack_base
1299 		&& address < thread->signal_stack_base + thread->signal_stack_size) {
1300 		stack->ss_sp = (void*)thread->signal_stack_base;
1301 		stack->ss_size = thread->signal_stack_size;
1302 	} else {
1303 		stack->ss_sp = (void*)thread->user_stack_base;
1304 		stack->ss_size = thread->user_stack_size;
1305 	}
1306 
1307 	stack->ss_flags = 0;
1308 }
1309 
1310 
1311 /*!	Checks whether any non-blocked signal is pending for the current thread.
1312 	The caller must hold the scheduler lock.
1313 	\param thread The current thread.
1314 */
1315 static bool
1316 has_signals_pending(Thread* thread)
1317 {
1318 	return (thread->AllPendingSignals() & ~thread->sig_block_mask) != 0;
1319 }
1320 
1321 
1322 /*!	Checks whether the current user has permission to send a signal to the given
1323 	target team.
1324 
1325 	The caller must hold the scheduler lock or \a team's lock.
1326 
1327 	\param team The target team.
1328 	\param schedulerLocked \c true, if the caller holds the scheduler lock,
1329 		\c false otherwise.
1330 */
1331 static bool
1332 has_permission_to_signal(Team* team, bool schedulerLocked)
1333 {
1334 	// get the current user
1335 	uid_t currentUser = schedulerLocked
1336 		? thread_get_current_thread()->team->effective_uid
1337 		: geteuid();
1338 
1339 	// root is omnipotent -- in the other cases the current user must match the
1340 	// target team's
1341 	return currentUser == 0 || currentUser == team->effective_uid;
1342 }
1343 
1344 
/*!	Delivers a signal to the \a thread, but doesn't handle the signal -- it just
	makes sure the thread gets the signal, i.e. unblocks it if needed.

	The caller must hold the scheduler lock.

	\param thread The thread the signal shall be delivered to.
	\param signalNumber The number of the signal to be delivered. If \c 0, no
		actual signal will be delivered. Only delivery checks will be performed.
	\param signal If non-NULL the signal to be queued (has number
		\a signalNumber in this case). The caller transfers an object reference
		to this function. If \c NULL an unqueued signal will be delivered to the
		thread.
	\param flags A bitwise combination of any number of the following:
		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
			target thread the signal.
	\return \c B_OK, when the signal was delivered successfully, another error
		code otherwise.
*/
status_t
send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
	Signal* signal, uint32 flags)
{
	ASSERT(signal == NULL || signalNumber == signal->Number());

	T(SendSignal(thread->id, signalNumber, flags));

	// The caller transferred a reference to the signal to us.
	// If we return early, the reference is released automatically.
	BReference<Signal> signalReference(signal, true);

	if ((flags & B_CHECK_PERMISSION) != 0) {
		if (!has_permission_to_signal(thread->team, true))
			return EPERM;
	}

	// signal number 0 means: perform the checks only, deliver nothing
	if (signalNumber == 0)
		return B_OK;

	if (thread->team == team_get_kernel_team()) {
		// Signals to kernel threads will only wake them up
		if (thread->state == B_THREAD_SUSPENDED)
			scheduler_enqueue_in_run_queue(thread);
		return B_OK;
	}

	// add the signal (queued or unqueued) to the thread's pending set
	if (signal != NULL)
		thread->AddPendingSignal(signal);
	else
		thread->AddPendingSignal(signalNumber);

	// the thread has the signal reference, now
	signalReference.Detach();

	switch (signalNumber) {
		case SIGKILL:
		{
			// If sent to a thread other than the team's main thread, also send
			// a SIGKILLTHR to the main thread to kill the team.
			Thread* mainThread = thread->team->main_thread;
			if (mainThread != NULL && mainThread != thread) {
				mainThread->AddPendingSignal(SIGKILLTHR);

				// wake up main thread
				if (mainThread->state == B_THREAD_SUSPENDED)
					scheduler_enqueue_in_run_queue(mainThread);
				else
					thread_interrupt(mainThread, true);

				update_thread_signals_flag(mainThread);
			}

			// supposed to fall through
		}
		case SIGKILLTHR:
			// Wake up suspended threads and interrupt waiting ones
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);
			else
				thread_interrupt(thread, true);
			break;

		case SIGNAL_CONTINUE_THREAD:
			// wake up thread, and interrupt its current syscall
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);

			atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
			break;

		case SIGCONT:
			// Wake up thread if it was suspended, otherwise interrupt it, if
			// the signal isn't blocked.
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);
			else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask) != 0)
				thread_interrupt(thread, false);

			// remove any pending stop signals
			thread->RemovePendingSignals(STOP_SIGNALS);
			break;

		default:
			// If the signal is not masked, interrupt the thread, if it is
			// currently waiting (interruptibly).
			// NOTE(review): SIGCHLD is treated as interrupting even when
			// blocked here -- presumably so a waiting parent wakes up; confirm
			// against the wait*() implementation.
			if ((thread->AllPendingSignals()
						& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD)))
					!= 0) {
				// Interrupt thread if it was waiting
				thread_interrupt(thread, false);
			}
			break;
	}

	// let the thread know it has signals to look at
	update_thread_signals_flag(thread);

	return B_OK;
}
1461 
1462 
1463 /*!	Sends the given signal to the given thread.
1464 
1465 	The caller must not hold the scheduler lock.
1466 
1467 	\param thread The thread the signal shall be sent to.
1468 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1469 		actual signal will be delivered. Only delivery checks will be performed.
1470 		The given object will be copied. The caller retains ownership.
1471 	\param flags A bitwise combination of any number of the following:
1472 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1473 			target thread the signal.
1474 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1475 			woken up, the scheduler will be invoked. If set that will not be
1476 			done explicitly, but rescheduling can still happen, e.g. when the
1477 			current thread's time slice runs out.
1478 	\return \c B_OK, when the signal was delivered successfully, another error
1479 		code otherwise.
1480 */
1481 status_t
1482 send_signal_to_thread(Thread* thread, const Signal& signal, uint32 flags)
1483 {
1484 	// Clone the signal -- the clone will be queued. If something fails and the
1485 	// caller doesn't require queuing, we will add an unqueued signal.
1486 	Signal* signalToQueue = NULL;
1487 	status_t error = Signal::CreateQueuable(signal,
1488 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1489 	if (error != B_OK)
1490 		return error;
1491 
1492 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1493 
1494 	error = send_signal_to_thread_locked(thread, signal.Number(), signalToQueue,
1495 		flags);
1496 	if (error != B_OK)
1497 		return error;
1498 
1499 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1500 		scheduler_reschedule_if_necessary_locked();
1501 
1502 	return B_OK;
1503 }
1504 
1505 
1506 /*!	Sends the given signal to the thread with the given ID.
1507 
1508 	The caller must not hold the scheduler lock.
1509 
1510 	\param threadID The ID of the thread the signal shall be sent to.
1511 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1512 		actual signal will be delivered. Only delivery checks will be performed.
1513 		The given object will be copied. The caller retains ownership.
1514 	\param flags A bitwise combination of any number of the following:
1515 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1516 			target thread the signal.
1517 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1518 			woken up, the scheduler will be invoked. If set that will not be
1519 			done explicitly, but rescheduling can still happen, e.g. when the
1520 			current thread's time slice runs out.
1521 	\return \c B_OK, when the signal was delivered successfully, another error
1522 		code otherwise.
1523 */
1524 status_t
1525 send_signal_to_thread_id(thread_id threadID, const Signal& signal, uint32 flags)
1526 {
1527 	Thread* thread = Thread::Get(threadID);
1528 	if (thread == NULL)
1529 		return B_BAD_THREAD_ID;
1530 	BReference<Thread> threadReference(thread, true);
1531 
1532 	return send_signal_to_thread(thread, signal, flags);
1533 }
1534 
1535 
/*!	Sends the given signal to the given team.

	The caller must hold the scheduler lock.

	\param team The team the signal shall be sent to.
	\param signalNumber The number of the signal to be delivered. If \c 0, no
		actual signal will be delivered. Only delivery checks will be performed.
	\param signal If non-NULL the signal to be queued (has number
		\a signalNumber in this case). The caller transfers an object reference
		to this function. If \c NULL an unqueued signal will be delivered to the
		thread.
	\param flags A bitwise combination of any number of the following:
		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
			target thread the signal.
		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
			woken up, the scheduler will be invoked. If set that will not be
			done explicitly, but rescheduling can still happen, e.g. when the
			current thread's time slice runs out.
	\return \c B_OK, when the signal was delivered successfully, another error
		code otherwise.
*/
status_t
send_signal_to_team_locked(Team* team, uint32 signalNumber, Signal* signal,
	uint32 flags)
{
	ASSERT(signal == NULL || signalNumber == signal->Number());

	T(SendSignal(team->id, signalNumber, flags));

	// The caller transferred a reference to the signal to us.
	// If we return early, the reference is released automatically.
	BReference<Signal> signalReference(signal, true);

	if ((flags & B_CHECK_PERMISSION) != 0) {
		if (!has_permission_to_signal(team, true))
			return EPERM;
	}

	// signal number 0 means: perform the checks only, deliver nothing
	if (signalNumber == 0)
		return B_OK;

	if (team == team_get_kernel_team()) {
		// signals to the kernel team are not allowed
		return EPERM;
	}

	// add the signal (queued or unqueued) to the team's pending set
	if (signal != NULL)
		team->AddPendingSignal(signal);
	else
		team->AddPendingSignal(signalNumber);

	// the team has the signal reference, now
	signalReference.Detach();

	switch (signalNumber) {
		case SIGKILL:
		case SIGKILLTHR:
		{
			// Also add a SIGKILLTHR to the main thread's signals and wake it
			// up/interrupt it, so we get this over with as soon as possible
			// (only the main thread shuts down the team).
			Thread* mainThread = team->main_thread;
			if (mainThread != NULL) {
				mainThread->AddPendingSignal(SIGKILLTHR);

				// wake up main thread
				if (mainThread->state == B_THREAD_SUSPENDED)
					scheduler_enqueue_in_run_queue(mainThread);
				else
					thread_interrupt(mainThread, true);
			}
			break;
		}

		case SIGCONT:
			// Wake up any suspended threads, interrupt the others, if they
			// don't block the signal.
			for (Thread* thread = team->thread_list; thread != NULL;
					thread = thread->team_next) {
				if (thread->state == B_THREAD_SUSPENDED) {
					scheduler_enqueue_in_run_queue(thread);
				} else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask)
						!= 0) {
					thread_interrupt(thread, false);
				}

				// remove any pending stop signals
				thread->RemovePendingSignals(STOP_SIGNALS);
			}

			// remove any pending team stop signals
			team->RemovePendingSignals(STOP_SIGNALS);
			break;

		case SIGSTOP:
		case SIGTSTP:
		case SIGTTIN:
		case SIGTTOU:
			// send the stop signal to all threads
			// TODO: Is that correct or should we only target the main thread?
			for (Thread* thread = team->thread_list; thread != NULL;
					thread = thread->team_next) {
				thread->AddPendingSignal(signalNumber);
			}

			// remove the stop signal from the team again
			if (signal != NULL) {
				team->RemovePendingSignal(signal);
				// RemovePendingSignal() dropped the team's reference to the
				// signal; re-acquire ownership so it is released on return.
				signalReference.SetTo(signal, true);
			} else
				team->RemovePendingSignal(signalNumber);

			// fall through to interrupt threads
		default:
			// Interrupt all interruptibly waiting threads, if the signal is
			// not masked.
			// NOTE(review): SIGCHLD is treated as interrupting even when
			// blocked -- presumably so a waiting parent wakes up; confirm
			// against the wait*() implementation.
			for (Thread* thread = team->thread_list; thread != NULL;
					thread = thread->team_next) {
				sigset_t nonBlocked = ~thread->sig_block_mask
					| SIGNAL_TO_MASK(SIGCHLD);
				if ((thread->AllPendingSignals() & nonBlocked) != 0)
					thread_interrupt(thread, false);
			}
			break;
	}

	// let all of the team's threads re-evaluate their pending signals
	update_team_threads_signal_flag(team);

	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
		scheduler_reschedule_if_necessary_locked();

	return B_OK;
}
1668 
1669 
1670 /*!	Sends the given signal to the given team.
1671 
1672 	\param team The team the signal shall be sent to.
1673 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1674 		actual signal will be delivered. Only delivery checks will be performed.
1675 		The given object will be copied. The caller retains ownership.
1676 	\param flags A bitwise combination of any number of the following:
1677 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1678 			target thread the signal.
1679 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1680 			woken up, the scheduler will be invoked. If set that will not be
1681 			done explicitly, but rescheduling can still happen, e.g. when the
1682 			current thread's time slice runs out.
1683 	\return \c B_OK, when the signal was delivered successfully, another error
1684 		code otherwise.
1685 */
1686 status_t
1687 send_signal_to_team(Team* team, const Signal& signal, uint32 flags)
1688 {
1689 	// Clone the signal -- the clone will be queued. If something fails and the
1690 	// caller doesn't require queuing, we will add an unqueued signal.
1691 	Signal* signalToQueue = NULL;
1692 	status_t error = Signal::CreateQueuable(signal,
1693 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1694 	if (error != B_OK)
1695 		return error;
1696 
1697 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1698 
1699 	return send_signal_to_team_locked(team, signal.Number(), signalToQueue,
1700 		flags);
1701 }
1702 
1703 
1704 /*!	Sends the given signal to the team with the given ID.
1705 
1706 	\param teamID The ID of the team the signal shall be sent to.
1707 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1708 		actual signal will be delivered. Only delivery checks will be performed.
1709 		The given object will be copied. The caller retains ownership.
1710 	\param flags A bitwise combination of any number of the following:
1711 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1712 			target thread the signal.
1713 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1714 			woken up, the scheduler will be invoked. If set that will not be
1715 			done explicitly, but rescheduling can still happen, e.g. when the
1716 			current thread's time slice runs out.
1717 	\return \c B_OK, when the signal was delivered successfully, another error
1718 		code otherwise.
1719 */
1720 status_t
1721 send_signal_to_team_id(team_id teamID, const Signal& signal, uint32 flags)
1722 {
1723 	// get the team
1724 	Team* team = Team::Get(teamID);
1725 	if (team == NULL)
1726 		return B_BAD_TEAM_ID;
1727 	BReference<Team> teamReference(team, true);
1728 
1729 	return send_signal_to_team(team, signal, flags);
1730 }
1731 
1732 
1733 /*!	Sends the given signal to the given process group.
1734 
1735 	The caller must hold the process group's lock. Interrupts must be enabled.
1736 
1737 	\param group The the process group the signal shall be sent to.
1738 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1739 		actual signal will be delivered. Only delivery checks will be performed.
1740 		The given object will be copied. The caller retains ownership.
1741 	\param flags A bitwise combination of any number of the following:
1742 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1743 			target thread the signal.
1744 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1745 			woken up, the scheduler will be invoked. If set that will not be
1746 			done explicitly, but rescheduling can still happen, e.g. when the
1747 			current thread's time slice runs out.
1748 	\return \c B_OK, when the signal was delivered successfully, another error
1749 		code otherwise.
1750 */
1751 status_t
1752 send_signal_to_process_group_locked(ProcessGroup* group, const Signal& signal,
1753 	uint32 flags)
1754 {
1755 	T(SendSignal(-group->id, signal.Number(), flags));
1756 
1757 	bool firstTeam = true;
1758 
1759 	for (Team* team = group->teams; team != NULL; team = team->group_next) {
1760 		status_t error = send_signal_to_team(team, signal,
1761 			flags | B_DO_NOT_RESCHEDULE);
1762 		// If sending to the first team in the group failed, let the whole call
1763 		// fail.
1764 		if (firstTeam) {
1765 			if (error != B_OK)
1766 				return error;
1767 			firstTeam = false;
1768 		}
1769 	}
1770 
1771 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1772 		scheduler_reschedule_if_necessary();
1773 
1774 	return B_OK;
1775 }
1776 
1777 
1778 /*!	Sends the given signal to the process group specified by the given ID.
1779 
1780 	The caller must not hold any process group, team, or thread lock. Interrupts
1781 	must be enabled.
1782 
1783 	\param groupID The ID of the process group the signal shall be sent to.
1784 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1785 		actual signal will be delivered. Only delivery checks will be performed.
1786 		The given object will be copied. The caller retains ownership.
1787 	\param flags A bitwise combination of any number of the following:
1788 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1789 			target thread the signal.
1790 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1791 			woken up, the scheduler will be invoked. If set that will not be
1792 			done explicitly, but rescheduling can still happen, e.g. when the
1793 			current thread's time slice runs out.
1794 	\return \c B_OK, when the signal was delivered successfully, another error
1795 		code otherwise.
1796 */
1797 status_t
1798 send_signal_to_process_group(pid_t groupID, const Signal& signal, uint32 flags)
1799 {
1800 	ProcessGroup* group = ProcessGroup::Get(groupID);
1801 	if (group == NULL)
1802 		return B_BAD_TEAM_ID;
1803 	BReference<ProcessGroup> groupReference(group);
1804 
1805 	T(SendSignal(-group->id, signal.Number(), flags));
1806 
1807 	AutoLocker<ProcessGroup> groupLocker(group);
1808 
1809 	status_t error = send_signal_to_process_group_locked(group, signal,
1810 		flags | B_DO_NOT_RESCHEDULE);
1811 	if (error != B_OK)
1812 		return error;
1813 
1814 	groupLocker.Unlock();
1815 
1816 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1817 		scheduler_reschedule_if_necessary();
1818 
1819 	return B_OK;
1820 }
1821 
1822 
1823 static status_t
1824 send_signal_internal(pid_t id, uint signalNumber, union sigval userValue,
1825 	uint32 flags)
1826 {
1827 	if (signalNumber > MAX_SIGNAL_NUMBER)
1828 		return B_BAD_VALUE;
1829 
1830 	Thread* thread = thread_get_current_thread();
1831 
1832 	Signal signal(signalNumber,
1833 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
1834 		B_OK, thread->team->id);
1835 		// Note: SI_USER/SI_QUEUE is not correct, if called from within the
1836 		// kernel (or a driver), but we don't have any info here.
1837 	signal.SetUserValue(userValue);
1838 
1839 	// If id is > 0, send the signal to the respective thread.
1840 	if (id > 0)
1841 		return send_signal_to_thread_id(id, signal, flags);
1842 
1843 	// If id == 0, send the signal to the current thread.
1844 	if (id == 0)
1845 		return send_signal_to_thread(thread, signal, flags);
1846 
1847 	// If id == -1, send the signal to all teams the calling team has permission
1848 	// to send signals to.
1849 	if (id == -1) {
1850 		// TODO: Implement correctly!
1851 		// currently only send to the current team
1852 		return send_signal_to_team_id(thread->team->id, signal, flags);
1853 	}
1854 
1855 	// Send a signal to the specified process group (the absolute value of the
1856 	// id).
1857 	return send_signal_to_process_group(-id, signal, flags);
1858 }
1859 
1860 
1861 int
1862 send_signal_etc(pid_t id, uint signalNumber, uint32 flags)
1863 {
1864 	// a dummy user value
1865 	union sigval userValue;
1866 	userValue.sival_ptr = NULL;
1867 
1868 	return send_signal_internal(id, signalNumber, userValue, flags);
1869 }
1870 
1871 
1872 int
1873 send_signal(pid_t threadID, uint signal)
1874 {
1875 	// The BeBook states that this function wouldn't be exported
1876 	// for drivers, but, of course, it's wrong.
1877 	return send_signal_etc(threadID, signal, 0);
1878 }
1879 
1880 
1881 static int
1882 sigprocmask_internal(int how, const sigset_t* set, sigset_t* oldSet)
1883 {
1884 	Thread* thread = thread_get_current_thread();
1885 
1886 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1887 
1888 	sigset_t oldMask = thread->sig_block_mask;
1889 
1890 	if (set != NULL) {
1891 		T(SigProcMask(how, *set));
1892 
1893 		switch (how) {
1894 			case SIG_BLOCK:
1895 				thread->sig_block_mask |= *set & BLOCKABLE_SIGNALS;
1896 				break;
1897 			case SIG_UNBLOCK:
1898 				thread->sig_block_mask &= ~*set;
1899 				break;
1900 			case SIG_SETMASK:
1901 				thread->sig_block_mask = *set & BLOCKABLE_SIGNALS;
1902 				break;
1903 			default:
1904 				return B_BAD_VALUE;
1905 		}
1906 
1907 		update_current_thread_signals_flag();
1908 	}
1909 
1910 	if (oldSet != NULL)
1911 		*oldSet = oldMask;
1912 
1913 	return B_OK;
1914 }
1915 
1916 
1917 int
1918 sigprocmask(int how, const sigset_t* set, sigset_t* oldSet)
1919 {
1920 	RETURN_AND_SET_ERRNO(sigprocmask_internal(how, set, oldSet));
1921 }
1922 
1923 
1924 /*!	\brief Like sigaction(), but returning the error instead of setting errno.
1925 */
1926 static status_t
1927 sigaction_internal(int signal, const struct sigaction* act,
1928 	struct sigaction* oldAction)
1929 {
1930 	if (signal < 1 || signal > MAX_SIGNAL_NUMBER
1931 		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
1932 		return B_BAD_VALUE;
1933 
1934 	// get and lock the team
1935 	Team* team = thread_get_current_thread()->team;
1936 	TeamLocker teamLocker(team);
1937 
1938 	struct sigaction& teamHandler = team->SignalActionFor(signal);
1939 	if (oldAction) {
1940 		// save previous sigaction structure
1941 		*oldAction = teamHandler;
1942 	}
1943 
1944 	if (act) {
1945 		T(SigAction(signal, act));
1946 
1947 		// set new sigaction structure
1948 		teamHandler = *act;
1949 		teamHandler.sa_mask &= BLOCKABLE_SIGNALS;
1950 	}
1951 
1952 	// Remove pending signal if it should now be ignored and remove pending
1953 	// signal for those signals whose default action is to ignore them.
1954 	if ((act && act->sa_handler == SIG_IGN)
1955 		|| (act && act->sa_handler == SIG_DFL
1956 			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0)) {
1957 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1958 
1959 		team->RemovePendingSignal(signal);
1960 
1961 		for (Thread* thread = team->thread_list; thread != NULL;
1962 				thread = thread->team_next) {
1963 			thread->RemovePendingSignal(signal);
1964 		}
1965 	}
1966 
1967 	return B_OK;
1968 }
1969 
1970 
1971 int
1972 sigaction(int signal, const struct sigaction* act, struct sigaction* oldAction)
1973 {
1974 	RETURN_AND_SET_ERRNO(sigaction_internal(signal, act, oldAction));
1975 }
1976 
1977 
/*!	Wait for the specified signals, and return the information for the retrieved
	signal in \a info.
	The \c flags and \c timeout combination must either define an infinite
	timeout (no timeout flags set), an absolute timeout (\c B_ABSOLUTE_TIMEOUT
	set), or a relative timeout \code <= 0 \endcode (\c B_RELATIVE_TIMEOUT set).
*/
static status_t
sigwait_internal(const sigset_t* set, siginfo_t* info, uint32 flags,
	bigtime_t timeout)
{
	// restrict mask to blockable signals
	sigset_t requestedSignals = *set & BLOCKABLE_SIGNALS;

	// make always interruptable
	flags |= B_CAN_INTERRUPT;

	// check whether we are allowed to wait at all
	bool canWait = (flags & B_RELATIVE_TIMEOUT) == 0 || timeout > 0;

	Thread* thread = thread_get_current_thread();

	InterruptsSpinLocker schedulerLocker(gSchedulerLock);

	bool timedOut = false;
	status_t error = B_OK;

	while (!timedOut) {
		sigset_t pendingSignals = thread->AllPendingSignals();

		// If a kill signal is pending, just bail out.
		if ((pendingSignals & KILL_SIGNALS) != 0)
			return B_INTERRUPTED;

		if ((pendingSignals & requestedSignals) != 0) {
			// get signal with the highest priority
			Signal stackSignal;
			Signal* signal = dequeue_thread_or_team_signal(thread,
				requestedSignals, stackSignal);
			ASSERT(signal != NULL);

			// SignalHandledCaller notifies interested parties (e.g. the
			// debugger) that the signal was handled, when it goes out of
			// scope. The scheduler lock must not be held for that.
			SignalHandledCaller signalHandledCaller(signal);
			schedulerLocker.Unlock();

			// copy the signal's data into the caller's siginfo_t
			info->si_signo = signal->Number();
			info->si_code = signal->SignalCode();
			info->si_errno = signal->ErrorCode();
			info->si_pid = signal->SendingProcess();
			info->si_uid = signal->SendingUser();
			info->si_addr = signal->Address();
			info->si_status = signal->Status();
			info->si_band = signal->PollBand();
			info->si_value = signal->UserValue();

			return B_OK;
		}

		if (!canWait)
			return B_WOULD_BLOCK;

		sigset_t blockedSignals = thread->sig_block_mask;
		if ((pendingSignals & ~blockedSignals) != 0) {
			// Non-blocked signals are pending -- return to let them be handled.
			return B_INTERRUPTED;
		}

		// No signals yet. Set the signal block mask to not include the
		// requested mask and wait until we're interrupted.
		thread->sig_block_mask = blockedSignals & ~requestedSignals;

		while (!has_signals_pending(thread)) {
			thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SIGNAL,
				NULL);

			// Relative timeouts are <= 0 here (see canWait above), so only an
			// absolute timeout requires the timed block variant.
			if ((flags & B_ABSOLUTE_TIMEOUT) != 0) {
				error = thread_block_with_timeout_locked(flags, timeout);
				if (error == B_WOULD_BLOCK || error == B_TIMED_OUT) {
					error = B_WOULD_BLOCK;
						// POSIX requires EAGAIN (B_WOULD_BLOCK) on timeout
					timedOut = true;
					break;
				}
			} else
				thread_block_locked(thread);
		}

		// restore the original block mask
		thread->sig_block_mask = blockedSignals;

		update_current_thread_signals_flag();
	}

	// we get here only when timed out
	return error;
}
2072 
2073 
2074 /*!	Replace the current signal block mask and wait for any event to happen.
2075 	Before returning, the original signal block mask is reinstantiated.
2076 */
static status_t
sigsuspend_internal(const sigset_t* _mask)
{
	// only blockable signals can actually be masked
	sigset_t mask = *_mask & BLOCKABLE_SIGNALS;

	T(SigSuspend(mask));

	Thread* thread = thread_get_current_thread();

	InterruptsSpinLocker schedulerLocker(gSchedulerLock);

	// Set the new block mask and block until interrupted. We might be here
	// after a syscall restart, in which case sigsuspend_original_unblocked_mask
	// will still be set.
	sigset_t oldMask = thread->sigsuspend_original_unblocked_mask != 0
		? ~thread->sigsuspend_original_unblocked_mask : thread->sig_block_mask;
	thread->sig_block_mask = mask & BLOCKABLE_SIGNALS;

	update_current_thread_signals_flag();

	// block (interruptibly) until a signal is pending for this thread
	while (!has_signals_pending(thread)) {
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SIGNAL, NULL);
		thread_block_locked(thread);
	}

	// Set sigsuspend_original_unblocked_mask (guaranteed to be non-0 due to
	// BLOCKABLE_SIGNALS). This will indicate to handle_signals() that it is
	// called after a _user_sigsuspend(). It will reset the field after invoking
	// a signal handler, or restart the syscall, if there wasn't anything to
	// handle anymore (e.g. because another thread was faster).
	thread->sigsuspend_original_unblocked_mask = ~oldMask;

	T(SigSuspendDone());

	// we're not supposed to actually succeed
	return B_INTERRUPTED;
}
2115 
2116 
2117 static status_t
2118 sigpending_internal(sigset_t* set)
2119 {
2120 	Thread* thread = thread_get_current_thread();
2121 
2122 	if (set == NULL)
2123 		return B_BAD_VALUE;
2124 
2125 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2126 
2127 	*set = thread->AllPendingSignals() & thread->sig_block_mask;
2128 
2129 	return B_OK;
2130 }
2131 
2132 
2133 // #pragma mark - syscalls
2134 
2135 
2136 /*!	Sends a signal to a thread, process, or process group.
2137 	\param id Specifies the ID of the target:
2138 		- \code id > 0 \endcode: If \a toThread is \c true, the target is the
2139 			thread with ID \a id, otherwise the team with the ID \a id.
2140 		- \code id == 0 \endcode: If toThread is \c true, the target is the
2141 			current thread, otherwise the current team.
2142 		- \code id == -1 \endcode: The target are all teams the current team has
2143 			permission to send signals to. Currently not implemented correctly.
2144 		- \code id < -1 \endcode: The target are is the process group with ID
2145 			\c -id.
2146 	\param signalNumber The signal number. \c 0 to just perform checks, but not
2147 		actually send any signal.
2148 	\param userUserValue A user value to be associated with the signal. Might be
2149 		ignored unless signal queuing is forced. Can be \c NULL.
2150 	\param flags A bitwise or of any number of the following:
2151 		- \c SIGNAL_FLAG_QUEUING_REQUIRED: Signal queuing is required. Fail
2152 			instead of falling back to unqueued signals, when queuing isn't
2153 			possible.
2154 		- \c SIGNAL_FLAG_SEND_TO_THREAD: Interpret the the given ID as a
2155 			\c thread_id rather than a \c team_id. Ignored when the \a id is
2156 			\code < 0 \endcode -- then the target is a process group.
2157 	\return \c B_OK on success, another error code otherwise.
2158 */
status_t
_user_send_signal(int32 id, uint32 signalNumber,
	const union sigval* userUserValue, uint32 flags)
{
	// restrict flags to the allowed ones and add B_CHECK_PERMISSION
	flags &= SIGNAL_FLAG_QUEUING_REQUIRED | SIGNAL_FLAG_SEND_TO_THREAD;
	flags |= B_CHECK_PERMISSION;

	// Copy the user value from userland. If not given, use a dummy value.
	union sigval userValue;
	if (userUserValue != NULL) {
		if (!IS_USER_ADDRESS(userUserValue)
			|| user_memcpy(&userValue, userUserValue, sizeof(userValue))
				!= B_OK) {
			return B_BAD_ADDRESS;
		}
	} else
		userValue.sival_ptr = NULL;

	// If to be sent to a thread, delegate to send_signal_internal(). Also do
	// that when id < 0, since in this case the semantics is the same as well.
	if ((flags & SIGNAL_FLAG_SEND_TO_THREAD) != 0 || id < 0)
		return send_signal_internal(id, signalNumber, userValue, flags);

	// kill() semantics for id >= 0
	if (signalNumber > MAX_SIGNAL_NUMBER)
		return B_BAD_VALUE;

	Thread* thread = thread_get_current_thread();

	// Build the signal; SI_QUEUE when queuing was explicitly requested,
	// plain SI_USER (kill()) otherwise. The sender is the current team.
	Signal signal(signalNumber,
		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
		B_OK, thread->team->id);
	signal.SetUserValue(userValue);

	// send to current team for id == 0, otherwise to the respective team
	return send_signal_to_team_id(id == 0 ? team_get_current_team_id() : id,
		signal, flags);
}
2198 
2199 
2200 status_t
2201 _user_set_signal_mask(int how, const sigset_t *userSet, sigset_t *userOldSet)
2202 {
2203 	sigset_t set, oldSet;
2204 	status_t status;
2205 
2206 	if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
2207 		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
2208 				sizeof(sigset_t)) < B_OK))
2209 		return B_BAD_ADDRESS;
2210 
2211 	status = sigprocmask_internal(how, userSet ? &set : NULL,
2212 		userOldSet ? &oldSet : NULL);
2213 
2214 	// copy old set if asked for
2215 	if (status >= B_OK && userOldSet != NULL
2216 		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
2217 		return B_BAD_ADDRESS;
2218 
2219 	return status;
2220 }
2221 
2222 
2223 status_t
2224 _user_sigaction(int signal, const struct sigaction *userAction,
2225 	struct sigaction *userOldAction)
2226 {
2227 	struct sigaction act, oact;
2228 	status_t status;
2229 
2230 	if ((userAction != NULL && user_memcpy(&act, userAction,
2231 				sizeof(struct sigaction)) < B_OK)
2232 		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction,
2233 				sizeof(struct sigaction)) < B_OK))
2234 		return B_BAD_ADDRESS;
2235 
2236 	status = sigaction_internal(signal, userAction ? &act : NULL,
2237 		userOldAction ? &oact : NULL);
2238 
2239 	// only copy the old action if a pointer has been given
2240 	if (status >= B_OK && userOldAction != NULL
2241 		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
2242 		return B_BAD_ADDRESS;
2243 
2244 	return status;
2245 }
2246 
2247 
/*!	\brief sigtimedwait()/sigwaitinfo() syscall: waits for one of the signals
	in \a userSet and returns its info in \a userInfo, honoring \a flags and
	\a timeout. Delegates to sigwait_internal().
*/
status_t
_user_sigwait(const sigset_t *userSet, siginfo_t *userInfo, uint32 flags,
	bigtime_t timeout)
{
	// copy userSet to stack
	sigset_t set;
	if (userSet == NULL || !IS_USER_ADDRESS(userSet)
		|| user_memcpy(&set, userSet, sizeof(sigset_t)) != B_OK) {
		return B_BAD_ADDRESS;
	}

	// userInfo is optional, but must be a user address when given
	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	// record restart parameters and normalize the timeout for a possible
	// syscall restart
	syscall_restart_handle_timeout_pre(flags, timeout);

	flags |= B_CAN_INTERRUPT;

	siginfo_t info;
	status_t status = sigwait_internal(&set, &info, flags, timeout);
	if (status == B_OK) {
		// copy the info back to userland, if userInfo is non-NULL
		if (userInfo != NULL)
			status = user_memcpy(userInfo, &info, sizeof(info));
	} else if (status == B_INTERRUPTED) {
		// make sure we'll be restarted
		Thread* thread = thread_get_current_thread();
		atomic_or(&thread->flags, THREAD_FLAGS_ALWAYS_RESTART_SYSCALL);
	}

	return syscall_restart_handle_timeout_post(status, timeout);
}
2281 
2282 
2283 status_t
2284 _user_sigsuspend(const sigset_t *userMask)
2285 {
2286 	sigset_t mask;
2287 
2288 	if (userMask == NULL)
2289 		return B_BAD_VALUE;
2290 	if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
2291 		return B_BAD_ADDRESS;
2292 
2293 	return sigsuspend_internal(&mask);
2294 }
2295 
2296 
2297 status_t
2298 _user_sigpending(sigset_t *userSet)
2299 {
2300 	sigset_t set;
2301 	int status;
2302 
2303 	if (userSet == NULL)
2304 		return B_BAD_VALUE;
2305 	if (!IS_USER_ADDRESS(userSet))
2306 		return B_BAD_ADDRESS;
2307 
2308 	status = sigpending_internal(&set);
2309 	if (status == B_OK
2310 		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
2311 		return B_BAD_ADDRESS;
2312 
2313 	return status;
2314 }
2315 
2316 
2317 status_t
2318 _user_set_signal_stack(const stack_t* newUserStack, stack_t* oldUserStack)
2319 {
2320 	Thread *thread = thread_get_current_thread();
2321 	struct stack_t newStack, oldStack;
2322 	bool onStack = false;
2323 
2324 	if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
2325 				sizeof(stack_t)) < B_OK)
2326 		|| (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
2327 				sizeof(stack_t)) < B_OK))
2328 		return B_BAD_ADDRESS;
2329 
2330 	if (thread->signal_stack_enabled) {
2331 		// determine whether or not the user thread is currently
2332 		// on the active signal stack
2333 		onStack = arch_on_signal_stack(thread);
2334 	}
2335 
2336 	if (oldUserStack != NULL) {
2337 		oldStack.ss_sp = (void *)thread->signal_stack_base;
2338 		oldStack.ss_size = thread->signal_stack_size;
2339 		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
2340 			| (onStack ? SS_ONSTACK : 0);
2341 	}
2342 
2343 	if (newUserStack != NULL) {
2344 		// no flags other than SS_DISABLE are allowed
2345 		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
2346 			return B_BAD_VALUE;
2347 
2348 		if ((newStack.ss_flags & SS_DISABLE) == 0) {
2349 			// check if the size is valid
2350 			if (newStack.ss_size < MINSIGSTKSZ)
2351 				return B_NO_MEMORY;
2352 			if (onStack)
2353 				return B_NOT_ALLOWED;
2354 			if (!IS_USER_ADDRESS(newStack.ss_sp))
2355 				return B_BAD_VALUE;
2356 
2357 			thread->signal_stack_base = (addr_t)newStack.ss_sp;
2358 			thread->signal_stack_size = newStack.ss_size;
2359 			thread->signal_stack_enabled = true;
2360 		} else
2361 			thread->signal_stack_enabled = false;
2362 	}
2363 
2364 	// only copy the old stack info if a pointer has been given
2365 	if (oldUserStack != NULL
2366 		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
2367 		return B_BAD_ADDRESS;
2368 
2369 	return B_OK;
2370 }
2371 
2372 
2373 /*!	Restores the environment of a function that was interrupted by a signal
2374 	handler call.
2375 	This syscall is invoked when a signal handler function returns. It
2376 	deconstructs the signal handler frame and restores the stack and register
2377 	state of the function that was interrupted by a signal. The syscall is
2378 	therefore somewhat unusual, since it does not return to the calling
2379 	function, but to someplace else. In case the signal interrupted a syscall,
2380 	it will appear as if the syscall just returned. That is also the reason, why
2381 	this syscall returns an int64, since it needs to return the value the
2382 	interrupted syscall returns, which is potentially 64 bits wide.
2383 
2384 	\param userSignalFrameData The signal frame data created for the signal
2385 		handler. Potentially some data (e.g. registers) have been modified by
2386 		the signal handler.
2387 	\return In case the signal interrupted a syscall, the return value of that
2388 		syscall. Otherwise (i.e. in case of a (hardware) interrupt/exception)
2389 		the value might need to be tailored such that after a return to userland
2390 		the interrupted environment is identical to the interrupted one (unless
2391 		explicitly modified). E.g. for x86 to achieve that, the return value
2392 		must contain the eax|edx values of the interrupted environment.
2393 */
int64
_user_restore_signal_frame(struct signal_frame_data* userSignalFrameData)
{
	// announce that this syscall returns a 64 bit value
	syscall_64_bit_return_value();

	Thread *thread = thread_get_current_thread();

	// copy the signal frame data from userland
	signal_frame_data signalFrameData;
	if (userSignalFrameData == NULL || !IS_USER_ADDRESS(userSignalFrameData)
		|| user_memcpy(&signalFrameData, userSignalFrameData,
			sizeof(signalFrameData)) != B_OK) {
		// We failed to copy the signal frame data from userland. This is a
		// serious problem. Kill the thread.
		dprintf("_user_restore_signal_frame(): thread %" B_PRId32 ": Failed to "
			"copy signal frame data (%p) from userland. Killing thread...\n",
			thread->id, userSignalFrameData);
		kill_thread(thread->id);
		return B_BAD_ADDRESS;
	}

	// restore the signal block mask
	InterruptsSpinLocker schedulerLocker(gSchedulerLock);

	thread->sig_block_mask
		= signalFrameData.context.uc_sigmask & BLOCKABLE_SIGNALS;
	update_current_thread_signals_flag();

	schedulerLocker.Unlock();

	// restore the syscall restart related thread flags and the syscall restart
	// parameters
	atomic_and(&thread->flags,
		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
	atomic_or(&thread->flags, signalFrameData.thread_flags
		& (THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));

	memcpy(thread->syscall_restart.parameters,
		signalFrameData.syscall_restart_parameters,
		sizeof(thread->syscall_restart.parameters));

	// restore the previously stored Thread::user_signal_context
	thread->user_signal_context = signalFrameData.context.uc_link;
	if (thread->user_signal_context != NULL
		&& !IS_USER_ADDRESS(thread->user_signal_context)) {
		// discard a bogus (non-userland) context link rather than storing it
		thread->user_signal_context = NULL;
	}

	// let the architecture specific code restore the registers
	return arch_restore_signal_frame(&signalFrameData);
}
2445