xref: /haiku/src/system/kernel/signal.cpp (revision 1026b0a1a76dc88927bb8175c470f638dc5464ee)
1 /*
2  * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
5  *
6  * Distributed under the terms of the MIT License.
7  */
8 
9 
10 /*! POSIX signals handling routines */
11 
12 
13 #include <ksignal.h>
14 
15 #include <errno.h>
16 #include <stddef.h>
17 #include <string.h>
18 
19 #include <OS.h>
20 #include <KernelExport.h>
21 
22 #include <cpu.h>
23 #include <debug.h>
24 #include <kernel.h>
25 #include <kscheduler.h>
26 #include <sem.h>
27 #include <syscall_restart.h>
28 #include <syscall_utils.h>
29 #include <team.h>
30 #include <thread.h>
31 #include <tracing.h>
32 #include <user_debugger.h>
33 #include <user_thread.h>
34 #include <util/AutoLock.h>
35 
36 
// Define TRACE_SIGNAL to get verbose signal debug output via dprintf().
// Usage: TRACE(("format", args...)) -- note the double parentheses.
//#define TRACE_SIGNAL
#ifdef TRACE_SIGNAL
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
43 
44 
// Signals userland is allowed to block via the signal mask. The kill
// signals, SIGSTOP, and the internal continue/cancel signals must always
// get through.
#define BLOCKABLE_SIGNALS	\
	(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)	\
	| SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD)	\
	| SIGNAL_TO_MASK(SIGNAL_CANCEL_THREAD)))
// The job control stop signals.
#define STOP_SIGNALS \
	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
// Signals that resume a stopped thread.
#define CONTINUE_SIGNALS \
	(SIGNAL_TO_MASK(SIGCONT) | SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD))
// Signals whose default action is to ignore them (cf. the SIG_DFL handling
// in handle_signals()).
#define DEFAULT_IGNORE_SIGNALS \
	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
	| SIGNAL_TO_MASK(SIGCONT) \
	| SIGNAL_RANGE_TO_MASK(SIGNAL_REALTIME_MIN, SIGNAL_REALTIME_MAX))
// Signals that are delivered even while userland has requested deferral
// (user_thread::defer_signals): kill signals and hardware fault signals.
#define NON_DEFERRABLE_SIGNALS	\
	(KILL_SIGNALS				\
	| SIGNAL_TO_MASK(SIGILL)	\
	| SIGNAL_TO_MASK(SIGFPE)	\
	| SIGNAL_TO_MASK(SIGSEGV))
63 
64 
// Static per-signal information: a short name and the delivery priority.
// Higher priority signals are delivered first (cf.
// PendingSignals::_GetHighestPrioritySignal()); the kill signals have the
// highest priority (100) and the realtime signals are ordered so that lower
// signal numbers are delivered first. Indexed by signal number; must stay in
// sync with the signal numbering (up to __MAX_SIGNO).
// NOTE(review): the naming is inconsistent -- most entries omit the "SIG"
// prefix while SIGBUS, the reserved, and the realtime entries include it;
// verify nothing depends on the exact strings before normalizing.
static const struct {
	const char*	name;
	int32		priority;
} kSignalInfos[__MAX_SIGNO + 1] = {
	{"NONE",			-1},
	{"HUP",				0},
	{"INT",				0},
	{"QUIT",			0},
	{"ILL",				0},
	{"CHLD",			0},
	{"ABRT",			0},
	{"PIPE",			0},
	{"FPE",				0},
	{"KILL",			100},
	{"STOP",			0},
	{"SEGV",			0},
	{"CONT",			0},
	{"TSTP",			0},
	{"ALRM",			0},
	{"TERM",			0},
	{"TTIN",			0},
	{"TTOU",			0},
	{"USR1",			0},
	{"USR2",			0},
	{"WINCH",			0},
	{"KILLTHR",			100},
	{"TRAP",			0},
	{"POLL",			0},
	{"PROF",			0},
	{"SYS",				0},
	{"URG",				0},
	{"VTALRM",			0},
	{"XCPU",			0},
	{"XFSZ",			0},
	{"SIGBUS",			0},
	{"SIGRESERVED1",	0},
	{"SIGRESERVED2",	0},
	{"SIGRT1",			8},
	{"SIGRT2",			7},
	{"SIGRT3",			6},
	{"SIGRT4",			5},
	{"SIGRT5",			4},
	{"SIGRT6",			3},
	{"SIGRT7",			2},
	{"SIGRT8",			1},
	// signal numbers 41-62 are not assigned
	{"invalid 41",		0},
	{"invalid 42",		0},
	{"invalid 43",		0},
	{"invalid 44",		0},
	{"invalid 45",		0},
	{"invalid 46",		0},
	{"invalid 47",		0},
	{"invalid 48",		0},
	{"invalid 49",		0},
	{"invalid 50",		0},
	{"invalid 51",		0},
	{"invalid 52",		0},
	{"invalid 53",		0},
	{"invalid 54",		0},
	{"invalid 55",		0},
	{"invalid 56",		0},
	{"invalid 57",		0},
	{"invalid 58",		0},
	{"invalid 59",		0},
	{"invalid 60",		0},
	{"invalid 61",		0},
	{"invalid 62",		0},
	{"CANCEL_THREAD",	0},
	{"CONTINUE_THREAD",	0}	// priority must be <= that of SIGSTOP
};
135 
136 
137 static inline const char*
138 signal_name(uint32 number)
139 {
140 	return number <= __MAX_SIGNO ? kSignalInfos[number].name : "invalid";
141 }
142 
143 
144 // #pragma mark - SignalHandledCaller
145 
146 
147 struct SignalHandledCaller {
148 	SignalHandledCaller(Signal* signal)
149 		:
150 		fSignal(signal)
151 	{
152 	}
153 
154 	~SignalHandledCaller()
155 	{
156 		Done();
157 	}
158 
159 	void Done()
160 	{
161 		if (fSignal != NULL) {
162 			fSignal->Handled();
163 			fSignal = NULL;
164 		}
165 	}
166 
167 private:
168 	Signal*	fSignal;
169 };
170 
171 
172 // #pragma mark - QueuedSignalsCounter
173 
174 
/*!	Creates a counter with the given limit.
	The limit defines the maximum the counter may reach. Since the
	BReferenceable's reference count is used, it is assumed that the owning
	team holds a reference and the reference count is one greater than the
	counter value.
	\param limit The maximum allowed value the counter may have. When
		\code < 0 \endcode, the value is not limited.
*/
QueuedSignalsCounter::QueuedSignalsCounter(int32 limit)
	:
	fLimit(limit)
{
}
188 
189 
/*!	Increments the counter, if the limit allows that.
	On success the caller effectively holds one more reference to this
	object; it is given back via Decrement() (resp. ReleaseReference()).
	\return \c true, if incrementing the counter succeeded, \c false otherwise.
*/
bool
QueuedSignalsCounter::Increment()
{
	// no limit => no problem
	if (fLimit < 0) {
		AcquireReference();
		return true;
	}

	// Increment the reference count manually, so we can check atomically. We
	// compare the old value > fLimit, assuming that our (primary) owner has a
	// reference, we don't want to count.
	if (atomic_add(&fReferenceCount, 1) > fLimit) {
		// limit reached -- undo the increment
		ReleaseReference();
		return false;
	}

	return true;
}
212 
213 
214 // #pragma mark - Signal
215 
216 
/*!	Default constructor. Only fCounter and fPending are initialized; the
	remaining members are set up later via SetTo().
*/
Signal::Signal()
	:
	fCounter(NULL),
	fPending(false)
{
}
223 
224 
/*!	Copy constructor. Copies all signal data, but the clone starts out
	without a queuing counter reference (fCounter) and not pending -- cf.
	CreateQueuable(), which sets the counter on the clone.
*/
Signal::Signal(const Signal& other)
	:
	fCounter(NULL),
	fNumber(other.fNumber),
	fSignalCode(other.fSignalCode),
	fErrorCode(other.fErrorCode),
	fSendingProcess(other.fSendingProcess),
	fSendingUser(other.fSendingUser),
	fStatus(other.fStatus),
	fPollBand(other.fPollBand),
	fAddress(other.fAddress),
	fUserValue(other.fUserValue),
	fPending(false)
{
}
240 
241 
/*!	Constructor initializing all signal data.
	\param number The signal number.
	\param signalCode The signal code (e.g. \c SI_USER).
	\param errorCode The error code associated with the signal.
	\param sendingProcess The ID of the sending process.
	The sending user is initialized from the caller's getuid(); the
	remaining optional fields (status, poll band, address, user value) are
	zeroed.
*/
Signal::Signal(uint32 number, int32 signalCode, int32 errorCode,
	pid_t sendingProcess)
	:
	fCounter(NULL),
	fNumber(number),
	fSignalCode(signalCode),
	fErrorCode(errorCode),
	fSendingProcess(sendingProcess),
	fSendingUser(getuid()),
	fStatus(0),
	fPollBand(0),
	fAddress(NULL),
	fPending(false)
{
	fUserValue.sival_ptr = NULL;
}
258 
259 
260 Signal::~Signal()
261 {
262 	if (fCounter != NULL)
263 		fCounter->ReleaseReference();
264 }
265 
266 
/*!	Creates a queuable clone of the given signal.
	Also enforces the current team's signal queuing limit.

	\param signal The signal to clone.
	\param queuingRequired If \c true, the function will return an error code
		when creating the clone fails for any reason. Otherwise, the function
		will set \a _signalToQueue to \c NULL, but still return \c B_OK.
	\param _signalToQueue Return parameter. Set to the clone of the signal.
	\return When \c queuingRequired is \c false, always \c B_OK. Otherwise
		\c B_OK, when creating the signal clone succeeds, another error code,
		when it fails.
*/
/*static*/ status_t
Signal::CreateQueuable(const Signal& signal, bool queuingRequired,
	Signal*& _signalToQueue)
{
	_signalToQueue = NULL;

	// If interrupts are disabled, we can't allocate a signal.
	if (!are_interrupts_enabled())
		return queuingRequired ? B_BAD_VALUE : B_OK;

	// increment the queued signals counter
	QueuedSignalsCounter* counter
		= thread_get_current_thread()->team->QueuedSignalsCounter();
	if (!counter->Increment())
		return queuingRequired ? EAGAIN : B_OK;

	// allocate the signal
	Signal* signalToQueue = new(std::nothrow) Signal(signal);
	if (signalToQueue == NULL) {
		// undo the counter increment
		counter->Decrement();
		return queuingRequired ? B_NO_MEMORY : B_OK;
	}

	// The clone takes over the counter reference acquired by Increment();
	// it is released again in ~Signal().
	signalToQueue->fCounter = counter;

	_signalToQueue = signalToQueue;
	return B_OK;
}
307 
/*!	(Re)initializes the signal as an unqueued \c SI_USER signal sent by the
	current team, resetting all optional data fields.
	\param number The signal number.
*/
void
Signal::SetTo(uint32 number)
{
	Team* team = thread_get_current_thread()->team;

	fNumber = number;
	fSignalCode = SI_USER;
	fErrorCode = 0;
	fSendingProcess = team->id;
	fSendingUser = team->effective_uid;
		// assuming scheduler lock is being held
	fStatus = 0;
	fPollBand = 0;
	fAddress = NULL;
	fUserValue.sival_ptr = NULL;
}
324 
325 
326 int32
327 Signal::Priority() const
328 {
329 	return kSignalInfos[fNumber].priority;
330 }
331 
332 
/*!	Called when the signal has been handled (or discarded): releases the
	reference its current owner holds. Cf. SignalHandledCaller and
	PendingSignals::AddSignal(), which takes over a reference.
*/
void
Signal::Handled()
{
	ReleaseReference();
}
338 
339 
/*!	Hook invoked when the last reference to the signal is released.
	Deletes the object -- via deferred_delete(), when interrupts are
	disabled, since directly deleting presumably isn't safe in that context.
*/
void
Signal::LastReferenceReleased()
{
	if (are_interrupts_enabled())
		delete this;
	else
		deferred_delete(this);
}
348 
349 
350 // #pragma mark - PendingSignals
351 
352 
/*!	Creates an empty pending-signals container (no queued or unqueued
	signals).
*/
PendingSignals::PendingSignals()
	:
	fQueuedSignalsMask(0),
	fUnqueuedSignalsMask(0)
{
}
359 
360 
/*!	Destructor. Releases the references to all still-queued signals via
	Clear().
*/
PendingSignals::~PendingSignals()
{
	Clear();
}
365 
366 
367 /*!	Of the signals in \a nonBlocked returns the priority of that with the
368 	highest priority.
369 	\param nonBlocked The mask with the non-blocked signals.
370 	\return The priority of the highest priority non-blocked signal, or, if all
371 		signals are blocked, \c -1.
372 */
373 int32
374 PendingSignals::HighestSignalPriority(sigset_t nonBlocked) const
375 {
376 	Signal* queuedSignal;
377 	int32 unqueuedSignal;
378 	return _GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal);
379 }
380 
381 
382 void
383 PendingSignals::Clear()
384 {
385 	// release references of all queued signals
386 	while (Signal* signal = fQueuedSignals.RemoveHead())
387 		signal->Handled();
388 
389 	fQueuedSignalsMask = 0;
390 	fUnqueuedSignalsMask = 0;
391 }
392 
393 
/*!	Adds a signal.
	Takes over the reference to the signal from the caller.
	The queue is kept sorted by descending priority; a new signal is inserted
	after already-queued signals of the same priority (FIFO among equals).
*/
void
PendingSignals::AddSignal(Signal* signal)
{
	// queue according to priority
	int32 priority = signal->Priority();
	Signal* otherSignal = NULL;
	// find the first queued signal with a strictly lower priority
	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
			(otherSignal = it.Next()) != NULL;) {
		if (priority > otherSignal->Priority())
			break;
	}

	// otherSignal == NULL means append at the tail
	fQueuedSignals.InsertBefore(otherSignal, signal);
	signal->SetPending(true);

	fQueuedSignalsMask |= SIGNAL_TO_MASK(signal->Number());
}
414 
415 
/*!	Removes the given signal from the queue.
	Unlike RemoveSignals(), the queue's reference is not released -- the
	caller takes it over.
*/
void
PendingSignals::RemoveSignal(Signal* signal)
{
	signal->SetPending(false);
	fQueuedSignals.Remove(signal);
	_UpdateQueuedSignalMask();
}
423 
424 
/*!	Removes all pending signals -- queued and unqueued -- contained in the
	given mask, releasing the references to the removed queued signals.
	\param mask The mask of signals to remove.
*/
void
PendingSignals::RemoveSignals(sigset_t mask)
{
	// remove from queued signals
	if ((fQueuedSignalsMask & mask) != 0) {
		for (SignalList::Iterator it = fQueuedSignals.GetIterator();
				Signal* signal = it.Next();) {
			// remove signal, if in mask
			if ((SIGNAL_TO_MASK(signal->Number()) & mask) != 0) {
				it.Remove();
				signal->SetPending(false);
				signal->Handled();
			}
		}

		fQueuedSignalsMask &= ~mask;
	}

	// remove from unqueued signals
	fUnqueuedSignalsMask &= ~mask;
}
446 
447 
/*!	Removes and returns a signal in \a nonBlocked that has the highest priority.
	The caller gets a reference to the returned signal, if any.
	\param nonBlocked The mask of non-blocked signals.
	\param buffer If the signal is not queued this buffer is returned. In this
		case the method acquires a reference to \a buffer, so that the caller
		gets a reference also in this case.
	\return The removed signal or \c NULL, if all signals are blocked.
*/
Signal*
PendingSignals::DequeueSignal(sigset_t nonBlocked, Signal& buffer)
{
	// find the signal with the highest priority
	Signal* queuedSignal;
	int32 unqueuedSignal;
	if (_GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal) < 0)
		return NULL;

	// if it is a queued signal, dequeue it
	if (queuedSignal != NULL) {
		// the queue's reference is passed on to the caller
		fQueuedSignals.Remove(queuedSignal);
		queuedSignal->SetPending(false);
		_UpdateQueuedSignalMask();
		return queuedSignal;
	}

	// it is unqueued -- remove from mask
	fUnqueuedSignalsMask &= ~SIGNAL_TO_MASK(unqueuedSignal);

	// init buffer; the acquired reference is for the caller
	buffer.SetTo(unqueuedSignal);
	buffer.AcquireReference();
	return &buffer;
}
481 
482 
/*!	Of the signals in \a nonBlocked returns the priority of that with the
	highest priority.
	\param nonBlocked The mask with the non-blocked signals.
	\param _queuedSignal If the found signal is a queued signal, the variable
		will be set to that signal, otherwise to \c NULL.
	\param _unqueuedSignal If the found signal is an unqueued signal, the
		variable is set to that signal's number, otherwise to \c -1.
	\return The priority of the highest priority non-blocked signal, or, if all
		signals are blocked, \c -1.
*/
int32
PendingSignals::_GetHighestPrioritySignal(sigset_t nonBlocked,
	Signal*& _queuedSignal, int32& _unqueuedSignal) const
{
	// check queued signals
	Signal* queuedSignal = NULL;
	int32 queuedPriority = -1;

	if ((fQueuedSignalsMask & nonBlocked) != 0) {
		// The queue is sorted by descending priority (cf. AddSignal()), so
		// the first matching signal is the highest priority one.
		for (SignalList::ConstIterator it = fQueuedSignals.GetIterator();
				Signal* signal = it.Next();) {
			if ((SIGNAL_TO_MASK(signal->Number()) & nonBlocked) != 0) {
				queuedPriority = signal->Priority();
				queuedSignal = signal;
				break;
			}
		}
	}

	// check unqueued signals
	int32 unqueuedSignal = -1;
	int32 unqueuedPriority = -1;

	sigset_t unqueuedSignals = fUnqueuedSignalsMask & nonBlocked;
	if (unqueuedSignals != 0) {
		// scan the mask bit by bit, clearing each examined bit until none
		// remain
		int32 signal = 1;
		while (unqueuedSignals != 0) {
			sigset_t mask = SIGNAL_TO_MASK(signal);
			if ((unqueuedSignals & mask) != 0) {
				int32 priority = kSignalInfos[signal].priority;
				if (priority > unqueuedPriority) {
					unqueuedSignal = signal;
					unqueuedPriority = priority;
				}
				unqueuedSignals &= ~mask;
			}

			signal++;
		}
	}

	// Return found queued or unqueued signal, whichever has the higher
	// priority. On a tie the queued signal wins.
	if (queuedPriority >= unqueuedPriority) {
		_queuedSignal = queuedSignal;
		_unqueuedSignal = -1;
		return queuedPriority;
	}

	_queuedSignal = NULL;
	_unqueuedSignal = unqueuedSignal;
	return unqueuedPriority;
}
546 
547 
548 void
549 PendingSignals::_UpdateQueuedSignalMask()
550 {
551 	sigset_t mask = 0;
552 	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
553 			Signal* signal = it.Next();) {
554 		mask |= SIGNAL_TO_MASK(signal->Number());
555 	}
556 
557 	fQueuedSignalsMask = mask;
558 }
559 
560 
561 // #pragma mark - signal tracing
562 
563 
564 #if SIGNAL_TRACING
565 
566 namespace SignalTracing {
567 
568 
// Trace entry recording that a signal is about to be handled by
// handle_signals().
class HandleSignal : public AbstractTraceEntry {
	public:
		HandleSignal(uint32 signal)
			:
			fSignal(signal)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal handle:  %" B_PRIu32 " (%s)" , fSignal,
				signal_name(fSignal));
		}

	private:
		uint32		fSignal;
};
587 
588 
// Trace entry recording the invocation of a userland signal handler.
class ExecuteSignalHandler : public AbstractTraceEntry {
	public:
		ExecuteSignalHandler(uint32 signal, struct sigaction* handler)
			:
			fSignal(signal),
			fHandler((void*)handler->sa_handler)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal exec handler: signal: %" B_PRIu32 " (%s), "
				"handler: %p", fSignal, signal_name(fSignal), fHandler);
		}

	private:
		uint32	fSignal;
		void*	fHandler;
};
609 
610 
611 class SendSignal : public AbstractTraceEntry {
612 	public:
613 		SendSignal(pid_t target, uint32 signal, uint32 flags)
614 			:
615 			fTarget(target),
616 			fSignal(signal),
617 			fFlags(flags)
618 		{
619 			Initialized();
620 		}
621 
622 		virtual void AddDump(TraceOutput& out)
623 		{
624 			out.Print("signal send: target: %ld, signal: %lu (%s), "
625 				"flags: 0x%lx", fTarget, fSignal, signal_name(fSignal), fFlags);
626 		}
627 
628 	private:
629 		pid_t	fTarget;
630 		uint32	fSignal;
631 		uint32	fFlags;
632 };
633 
634 
635 class SigAction : public AbstractTraceEntry {
636 	public:
637 		SigAction(uint32 signal, const struct sigaction* act)
638 			:
639 			fSignal(signal),
640 			fAction(*act)
641 		{
642 			Initialized();
643 		}
644 
645 		virtual void AddDump(TraceOutput& out)
646 		{
647 			out.Print("signal action: signal: %lu (%s), "
648 				"action: {handler: %p, flags: 0x%x, mask: 0x%llx}", fSignal,
649 				signal_name(fSignal), fAction.sa_handler, fAction.sa_flags,
650 				(long long)fAction.sa_mask);
651 		}
652 
653 	private:
654 		uint32				fSignal;
655 		struct sigaction	fAction;
656 };
657 
658 
// Trace entry recording a sigprocmask() style change of the current
// thread's signal block mask.
class SigProcMask : public AbstractTraceEntry {
	public:
		SigProcMask(int how, sigset_t mask)
			:
			fHow(how),
			fMask(mask),
			fOldMask(thread_get_current_thread()->sig_block_mask)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			// translate the "how" constant into a readable verb
			const char* how = "invalid";
			switch (fHow) {
				case SIG_BLOCK:
					how = "block";
					break;
				case SIG_UNBLOCK:
					how = "unblock";
					break;
				case SIG_SETMASK:
					how = "set";
					break;
			}

			out.Print("signal proc mask: %s 0x%llx, old mask: 0x%llx", how,
				(long long)fMask, (long long)fOldMask);
		}

	private:
		int			fHow;
		sigset_t	fMask;
		sigset_t	fOldMask;
};
694 
695 
// Trace entry recording entry into sigsuspend() with the requested and the
// previous signal mask.
class SigSuspend : public AbstractTraceEntry {
	public:
		SigSuspend(sigset_t mask)
			:
			fMask(mask),
			fOldMask(thread_get_current_thread()->sig_block_mask)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal suspend: %#llx, old mask: %#llx",
				(long long)fMask, (long long)fOldMask);
		}

	private:
		sigset_t	fMask;
		sigset_t	fOldMask;
};
716 
717 
// Trace entry recording the return from sigsuspend() with the thread's then
// pending signals.
// NOTE(review): fSignals is uint32 while sigset_t elsewhere in this file is
// printed as 64 bit -- confirm ThreadPendingSignals() fits in 32 bits or
// widen the member.
class SigSuspendDone : public AbstractTraceEntry {
	public:
		SigSuspendDone()
			:
			fSignals(thread_get_current_thread()->ThreadPendingSignals())
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal suspend done: %#" B_PRIx32, fSignals);
		}

	private:
		uint32		fSignals;
};
735 
736 }	// namespace SignalTracing
737 
738 #	define T(x)	new(std::nothrow) SignalTracing::x
739 
740 #else
741 #	define T(x)
742 #endif	// SIGNAL_TRACING
743 
744 
745 // #pragma mark -
746 
747 
748 /*!	Updates the given thread's Thread::flags field according to what signals are
749 	pending.
750 	The caller must hold the scheduler lock.
751 */
752 static void
753 update_thread_signals_flag(Thread* thread)
754 {
755 	sigset_t mask = ~thread->sig_block_mask;
756 	if ((thread->AllPendingSignals() & mask) != 0)
757 		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
758 	else
759 		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
760 }
761 
762 
763 /*!	Updates the current thread's Thread::flags field according to what signals
764 	are pending.
765 	The caller must hold the scheduler lock.
766 */
767 static void
768 update_current_thread_signals_flag()
769 {
770 	update_thread_signals_flag(thread_get_current_thread());
771 }
772 
773 
774 /*!	Updates all of the given team's threads' Thread::flags fields according to
775 	what signals are pending.
776 	The caller must hold the scheduler lock.
777 */
778 static void
779 update_team_threads_signal_flag(Team* team)
780 {
781 	for (Thread* thread = team->thread_list; thread != NULL;
782 			thread = thread->team_next) {
783 		update_thread_signals_flag(thread);
784 	}
785 }
786 
787 
/*!	Notifies the user debugger about a signal to be handled.

	The caller must not hold any locks.

	\param thread The current thread.
	\param signal The signal to be handled.
	\param handler The installed signal handler for the signal.
	\param deadly Indicates whether the signal is deadly.
	\return \c true, if the signal shall be handled, \c false, if it shall be
		ignored.
*/
static bool
notify_debugger(Thread* thread, Signal* signal, struct sigaction& handler,
	bool deadly)
{
	uint64 signalMask = SIGNAL_TO_MASK(signal->Number());

	// first check the ignore signal masks the debugger specified for the thread
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// The "once" mask is consumed on first use. Note: early returns unlock
	// via the locker's destructor.
	if ((thread->debug_info.ignore_signals_once & signalMask) != 0) {
		thread->debug_info.ignore_signals_once &= ~signalMask;
		return true;
	}

	if ((thread->debug_info.ignore_signals & signalMask) != 0)
		return true;

	threadDebugInfoLocker.Unlock();

	// deliver the event
	return user_debug_handle_signal(signal->Number(), &handler, deadly);
}
821 
822 
/*!	Removes and returns a signal with the highest priority in \a nonBlocked that
	is pending in the given thread or its team.
	After dequeuing the signal the Thread::flags field of the affected threads
	are updated.
	The caller gets a reference to the returned signal, if any.
	The caller must hold the scheduler lock.
	\param thread The thread.
	\param nonBlocked The mask of non-blocked signals.
	\param buffer If the signal is not queued this buffer is returned. In this
		case the method acquires a reference to \a buffer, so that the caller
		gets a reference also in this case.
	\return The removed signal or \c NULL, if all signals are blocked.
*/
static Signal*
dequeue_thread_or_team_signal(Thread* thread, sigset_t nonBlocked,
	Signal& buffer)
{
	Team* team = thread->team;
	Signal* signal;
	// On equal priorities the thread's signal wins (strict '>' comparison).
	if (team->HighestPendingSignalPriority(nonBlocked)
			> thread->HighestPendingSignalPriority(nonBlocked)) {
		signal = team->DequeuePendingSignal(nonBlocked, buffer);
		update_team_threads_signal_flag(team);
	} else {
		signal = thread->DequeuePendingSignal(nonBlocked, buffer);
		update_thread_signals_flag(thread);
	}

	return signal;
}
853 
854 
/*!	Prepares delivery of a signal to a userland handler by filling in the
	signal frame data and handing it to the architecture specific code.
	\param thread The current thread.
	\param action The signal action (handler address, flags, user data).
	\param signal The signal to be delivered.
	\param signalMask The signal mask to be stored in the user context
		(\c uc_sigmask).
	\return The result of arch_setup_signal_frame().
*/
static status_t
setup_signal_frame(Thread* thread, struct sigaction* action, Signal* signal,
	sigset_t signalMask)
{
	// prepare the data, we need to copy onto the user stack
	signal_frame_data frameData;

	// signal info
	frameData.info.si_signo = signal->Number();
	frameData.info.si_code = signal->SignalCode();
	frameData.info.si_errno = signal->ErrorCode();
	frameData.info.si_pid = signal->SendingProcess();
	frameData.info.si_uid = signal->SendingUser();
	frameData.info.si_addr = signal->Address();
	frameData.info.si_status = signal->Status();
	frameData.info.si_band = signal->PollBand();
	frameData.info.si_value = signal->UserValue();

	// context
	frameData.context.uc_link = thread->user_signal_context;
	frameData.context.uc_sigmask = signalMask;
	// uc_stack and uc_mcontext are filled in by the architecture specific code.

	// user data
	frameData.user_data = action->sa_userdata;

	// handler function -- SA_SIGINFO selects the three-argument handler
	frameData.siginfo_handler = (action->sa_flags & SA_SIGINFO) != 0;
	frameData.handler = frameData.siginfo_handler
		? (void*)action->sa_sigaction : (void*)action->sa_handler;

	// thread flags -- save and clear the thread's syscall restart related
	// flags
	frameData.thread_flags = atomic_and(&thread->flags,
		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));

	// syscall restart related fields
	memcpy(frameData.syscall_restart_parameters,
		thread->syscall_restart.parameters,
		sizeof(frameData.syscall_restart_parameters));
	// syscall_restart_return_value is filled in by the architecture specific
	// code.

	return arch_setup_signal_frame(thread, action, &frameData);
}
900 
901 
902 /*! Actually handles pending signals -- i.e. the thread will exit, a custom
903 	signal handler is prepared, or whatever the signal demands.
904 	The function will not return, when a deadly signal is encountered. The
905 	function will suspend the thread indefinitely, when a stop signal is
906 	encountered.
907 	Interrupts must be enabled.
908 	\param thread The current thread.
909 */
910 void
911 handle_signals(Thread* thread)
912 {
913 	Team* team = thread->team;
914 
915 	TeamLocker teamLocker(team);
916 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
917 
918 	// If userland requested to defer signals, we check now, if this is
919 	// possible.
920 	sigset_t nonBlockedMask = ~thread->sig_block_mask;
921 	sigset_t signalMask = thread->AllPendingSignals() & nonBlockedMask;
922 
923 	if (thread->user_thread->defer_signals > 0
924 		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0
925 		&& thread->sigsuspend_original_unblocked_mask == 0) {
926 		thread->user_thread->pending_signals = signalMask;
927 		return;
928 	}
929 
930 	thread->user_thread->pending_signals = 0;
931 
932 	// determine syscall restart behavior
933 	uint32 restartFlags = atomic_and(&thread->flags,
934 		~THREAD_FLAGS_DONT_RESTART_SYSCALL);
935 	bool alwaysRestart
936 		= (restartFlags & THREAD_FLAGS_ALWAYS_RESTART_SYSCALL) != 0;
937 	bool restart = alwaysRestart
938 		|| (restartFlags & THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;
939 
940 	// Loop until we've handled all signals.
941 	bool initialIteration = true;
942 	while (true) {
943 		if (initialIteration) {
944 			initialIteration = false;
945 		} else {
946 			teamLocker.Lock();
947 			schedulerLocker.Lock();
948 
949 			signalMask = thread->AllPendingSignals() & nonBlockedMask;
950 		}
951 
952 		// Unless SIGKILL[THR] are pending, check, if the thread shall stop for
953 		// debugging.
954 		if ((signalMask & KILL_SIGNALS) == 0
955 			&& (atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP)
956 				!= 0) {
957 			schedulerLocker.Unlock();
958 			teamLocker.Unlock();
959 
960 			user_debug_stop_thread();
961 			continue;
962 		}
963 
964 		// We're done, if there aren't any pending signals anymore.
965 		if ((signalMask & nonBlockedMask) == 0)
966 			break;
967 
968 		// get pending non-blocked thread or team signal with the highest
969 		// priority
970 		Signal stackSignal;
971 		Signal* signal = dequeue_thread_or_team_signal(thread, nonBlockedMask,
972 			stackSignal);
973 		ASSERT(signal != NULL);
974 		SignalHandledCaller signalHandledCaller(signal);
975 
976 		schedulerLocker.Unlock();
977 
978 		// get the action for the signal
979 		struct sigaction handler;
980 		if (signal->Number() <= MAX_SIGNAL_NUMBER) {
981 			handler = team->SignalActionFor(signal->Number());
982 		} else {
983 			handler.sa_handler = SIG_DFL;
984 			handler.sa_flags = 0;
985 		}
986 
987 		if ((handler.sa_flags & SA_ONESHOT) != 0
988 			&& handler.sa_handler != SIG_IGN && handler.sa_handler != SIG_DFL) {
989 			team->SignalActionFor(signal->Number()).sa_handler = SIG_DFL;
990 		}
991 
992 		T(HandleSignal(signal->Number()));
993 
994 		teamLocker.Unlock();
995 
996 		// debug the signal, if a debugger is installed and the signal debugging
997 		// flag is set
998 		bool debugSignal = (~atomic_get(&team->debug_info.flags)
999 				& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS))
1000 			== 0;
1001 
1002 		// handle the signal
1003 		TRACE(("Thread %" B_PRId32 " received signal %s\n", thread->id,
1004 			kSignalInfos[signal->Number()].name));
1005 
1006 		if (handler.sa_handler == SIG_IGN) {
1007 			// signal is to be ignored
1008 			// TODO: apply zombie cleaning on SIGCHLD
1009 
1010 			// notify the debugger
1011 			if (debugSignal)
1012 				notify_debugger(thread, signal, handler, false);
1013 			continue;
1014 		} else if (handler.sa_handler == SIG_DFL) {
1015 			// default signal behaviour
1016 
1017 			// realtime signals are ignored by default
1018 			if (signal->Number() >= SIGNAL_REALTIME_MIN
1019 				&& signal->Number() <= SIGNAL_REALTIME_MAX) {
1020 				// notify the debugger
1021 				if (debugSignal)
1022 					notify_debugger(thread, signal, handler, false);
1023 				continue;
1024 			}
1025 
1026 			bool killTeam = false;
1027 			switch (signal->Number()) {
1028 				case SIGCHLD:
1029 				case SIGWINCH:
1030 				case SIGURG:
1031 					// notify the debugger
1032 					if (debugSignal)
1033 						notify_debugger(thread, signal, handler, false);
1034 					continue;
1035 
1036 				case SIGNAL_CANCEL_THREAD:
1037 					// set up the signal handler
1038 					handler.sa_handler = thread->cancel_function;
1039 					handler.sa_flags = 0;
1040 					handler.sa_mask = 0;
1041 					handler.sa_userdata = NULL;
1042 
1043 					restart = false;
1044 						// we always want to interrupt
1045 					break;
1046 
1047 				case SIGNAL_CONTINUE_THREAD:
1048 					// prevent syscall restart, but otherwise ignore
1049 					restart = false;
1050 					atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1051 					continue;
1052 
1053 				case SIGCONT:
1054 					// notify the debugger
1055 					if (debugSignal
1056 						&& !notify_debugger(thread, signal, handler, false))
1057 						continue;
1058 
1059 					// notify threads waiting for team state changes
1060 					if (thread == team->main_thread) {
1061 						team->LockTeamAndParent(false);
1062 
1063 						team_set_job_control_state(team,
1064 							JOB_CONTROL_STATE_CONTINUED, signal, false);
1065 
1066 						team->UnlockTeamAndParent();
1067 
1068 						// The standard states that the system *may* send a
1069 						// SIGCHLD when a child is continued. I haven't found
1070 						// a good reason why we would want to, though.
1071 					}
1072 					continue;
1073 
1074 				case SIGSTOP:
1075 				case SIGTSTP:
1076 				case SIGTTIN:
1077 				case SIGTTOU:
1078 				{
1079 					// notify the debugger
1080 					if (debugSignal
1081 						&& !notify_debugger(thread, signal, handler, false))
1082 						continue;
1083 
1084 					// The terminal-sent stop signals are allowed to stop the
1085 					// process only, if it doesn't belong to an orphaned process
1086 					// group. Otherwise the signal must be discarded.
1087 					team->LockProcessGroup();
1088 					AutoLocker<ProcessGroup> groupLocker(team->group, true);
1089 					if (signal->Number() != SIGSTOP
1090 						&& team->group->IsOrphaned()) {
1091 						continue;
1092 					}
1093 
1094 					// notify threads waiting for team state changes
1095 					if (thread == team->main_thread) {
1096 						team->LockTeamAndParent(false);
1097 
1098 						team_set_job_control_state(team,
1099 							JOB_CONTROL_STATE_STOPPED, signal, false);
1100 
1101 						// send a SIGCHLD to the parent (if it does have
1102 						// SA_NOCLDSTOP defined)
1103 						Team* parentTeam = team->parent;
1104 
1105 						struct sigaction& parentHandler
1106 							= parentTeam->SignalActionFor(SIGCHLD);
1107 						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
1108 							Signal childSignal(SIGCHLD, CLD_STOPPED, B_OK,
1109 								team->id);
1110 							childSignal.SetStatus(signal->Number());
1111 							childSignal.SetSendingUser(signal->SendingUser());
1112 							send_signal_to_team(parentTeam, childSignal, 0);
1113 						}
1114 
1115 						team->UnlockTeamAndParent();
1116 					}
1117 
1118 					groupLocker.Unlock();
1119 
1120 					// Suspend the thread, unless there's already a signal to
1121 					// continue or kill pending.
1122 					InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1123 					if ((thread->AllPendingSignals()
1124 							& (CONTINUE_SIGNALS | KILL_SIGNALS)) == 0) {
1125 						thread->next_state = B_THREAD_SUSPENDED;
1126 						scheduler_reschedule();
1127 					}
1128 					schedulerLocker.Unlock();
1129 
1130 					continue;
1131 				}
1132 
1133 				case SIGSEGV:
1134 				case SIGBUS:
1135 				case SIGFPE:
1136 				case SIGILL:
1137 				case SIGTRAP:
1138 				case SIGABRT:
1139 				case SIGKILL:
1140 				case SIGQUIT:
1141 				case SIGPOLL:
1142 				case SIGPROF:
1143 				case SIGSYS:
1144 				case SIGVTALRM:
1145 				case SIGXCPU:
1146 				case SIGXFSZ:
1147 				default:
1148 					TRACE(("Shutting down team %" B_PRId32 " due to signal %"
1149 						B_PRIu32 " received in thread %" B_PRIu32 " \n",
1150 						team->id, signal->Number(), thread->id));
1151 
1152 					// This signal kills the team regardless which thread
1153 					// received it.
1154 					killTeam = true;
1155 
1156 					// fall through
1157 				case SIGKILLTHR:
1158 					// notify the debugger
1159 					if (debugSignal && signal->Number() != SIGKILL
1160 						&& signal->Number() != SIGKILLTHR
1161 						&& !notify_debugger(thread, signal, handler, true)) {
1162 						continue;
1163 					}
1164 
1165 					if (killTeam || thread == team->main_thread) {
1166 						// The signal is terminal for the team or the thread is
1167 						// the main thread. In either case the team is going
1168 						// down. Set its exit status, if that didn't happen yet.
1169 						teamLocker.Lock();
1170 
1171 						if (!team->exit.initialized) {
1172 							team->exit.reason = CLD_KILLED;
1173 							team->exit.signal = signal->Number();
1174 							team->exit.signaling_user = signal->SendingUser();
1175 							team->exit.status = 0;
1176 							team->exit.initialized = true;
1177 						}
1178 
1179 						teamLocker.Unlock();
1180 
1181 						// If this is not the main thread, send it a SIGKILLTHR
1182 						// so that the team terminates.
1183 						if (thread != team->main_thread) {
1184 							Signal childSignal(SIGKILLTHR, SI_USER, B_OK,
1185 								team->id);
1186 							send_signal_to_thread_id(team->id, childSignal, 0);
1187 						}
1188 					}
1189 
1190 					// explicitly get rid of the signal reference, since
1191 					// thread_exit() won't return
1192 					signalHandledCaller.Done();
1193 
1194 					thread_exit();
1195 						// won't return
1196 			}
1197 		}
1198 
1199 		// User defined signal handler
1200 
1201 		// notify the debugger
1202 		if (debugSignal && !notify_debugger(thread, signal, handler, false))
1203 			continue;
1204 
1205 		if (!restart
1206 				|| (!alwaysRestart && (handler.sa_flags & SA_RESTART) == 0)) {
1207 			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1208 		}
1209 
1210 		T(ExecuteSignalHandler(signal->Number(), &handler));
1211 
1212 		TRACE(("### Setting up custom signal handler frame...\n"));
1213 
1214 		// save the old block mask -- we may need to adjust it for the handler
1215 		schedulerLocker.Lock();
1216 
1217 		sigset_t oldBlockMask = thread->sigsuspend_original_unblocked_mask != 0
1218 			? ~thread->sigsuspend_original_unblocked_mask
1219 			: thread->sig_block_mask;
1220 
1221 		// Update the block mask while the signal handler is running -- it
1222 		// will be automatically restored when the signal frame is left.
1223 		thread->sig_block_mask |= handler.sa_mask & BLOCKABLE_SIGNALS;
1224 
1225 		if ((handler.sa_flags & SA_NOMASK) == 0) {
1226 			thread->sig_block_mask
1227 				|= SIGNAL_TO_MASK(signal->Number()) & BLOCKABLE_SIGNALS;
1228 		}
1229 
1230 		update_current_thread_signals_flag();
1231 
1232 		schedulerLocker.Unlock();
1233 
1234 		setup_signal_frame(thread, &handler, signal, oldBlockMask);
1235 
1236 		// Reset sigsuspend_original_unblocked_mask. It would have been set by
1237 		// sigsuspend_internal(). In that case, above we set oldBlockMask
1238 		// accordingly so that after the handler returns the thread's signal
1239 		// mask is reset.
1240 		thread->sigsuspend_original_unblocked_mask = 0;
1241 
1242 		return;
1243 	}
1244 
1245 	// We have not handled any signal (respectively only ignored ones).
1246 
1247 	// If sigsuspend_original_unblocked_mask is non-null, we came from a
1248 	// sigsuspend_internal(). Not having handled any signal, we should restart
1249 	// the syscall.
1250 	if (thread->sigsuspend_original_unblocked_mask != 0) {
1251 		restart = true;
1252 		atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
1253 	} else if (!restart) {
1254 		// clear syscall restart thread flag, if we're not supposed to restart
1255 		// the syscall
1256 		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1257 	}
1258 }
1259 
1260 
1261 /*!	Checks whether the given signal is blocked for the given team (i.e. all of
1262 	its threads).
1263 	The caller must hold the team's lock and the scheduler lock.
1264 */
1265 bool
1266 is_team_signal_blocked(Team* team, int signal)
1267 {
1268 	sigset_t mask = SIGNAL_TO_MASK(signal);
1269 
1270 	for (Thread* thread = team->thread_list; thread != NULL;
1271 			thread = thread->team_next) {
1272 		if ((thread->sig_block_mask & mask) == 0)
1273 			return false;
1274 	}
1275 
1276 	return true;
1277 }
1278 
1279 
1280 /*!	Gets (guesses) the current thread's currently used stack from the given
1281 	stack pointer.
1282 	Fills in \a stack with either the signal stack or the thread's user stack.
1283 	\param address A stack pointer address to be used to determine the used
1284 		stack.
1285 	\param stack Filled in by the function.
1286 */
1287 void
1288 signal_get_user_stack(addr_t address, stack_t* stack)
1289 {
1290 	// If a signal stack is enabled for the stack and the address is within it,
1291 	// return the signal stack. In all other cases return the thread's user
1292 	// stack, even if the address doesn't lie within it.
1293 	Thread* thread = thread_get_current_thread();
1294 	if (thread->signal_stack_enabled && address >= thread->signal_stack_base
1295 		&& address < thread->signal_stack_base + thread->signal_stack_size) {
1296 		stack->ss_sp = (void*)thread->signal_stack_base;
1297 		stack->ss_size = thread->signal_stack_size;
1298 	} else {
1299 		stack->ss_sp = (void*)thread->user_stack_base;
1300 		stack->ss_size = thread->user_stack_size;
1301 	}
1302 
1303 	stack->ss_flags = 0;
1304 }
1305 
1306 
1307 /*!	Checks whether any non-blocked signal is pending for the current thread.
1308 	The caller must hold the scheduler lock.
1309 	\param thread The current thread.
1310 */
1311 static bool
1312 has_signals_pending(Thread* thread)
1313 {
1314 	return (thread->AllPendingSignals() & ~thread->sig_block_mask) != 0;
1315 }
1316 
1317 
1318 /*!	Checks whether the current user has permission to send a signal to the given
1319 	target team.
1320 
1321 	The caller must hold the scheduler lock or \a team's lock.
1322 
1323 	\param team The target team.
1324 	\param schedulerLocked \c true, if the caller holds the scheduler lock,
1325 		\c false otherwise.
1326 */
1327 static bool
1328 has_permission_to_signal(Team* team, bool schedulerLocked)
1329 {
1330 	// get the current user
1331 	uid_t currentUser = schedulerLocked
1332 		? thread_get_current_thread()->team->effective_uid
1333 		: geteuid();
1334 
1335 	// root is omnipotent -- in the other cases the current user must match the
1336 	// target team's
1337 	return currentUser == 0 || currentUser == team->effective_uid;
1338 }
1339 
1340 
/*!	Delivers a signal to the \a thread, but doesn't handle the signal -- it just
	makes sure the thread gets the signal, i.e. unblocks it if needed.

	The caller must hold the scheduler lock.

	\param thread The thread the signal shall be delivered to.
	\param signalNumber The number of the signal to be delivered. If \c 0, no
		actual signal will be delivered. Only delivery checks will be performed.
	\param signal If non-NULL the signal to be queued (has number
		\a signalNumber in this case). The caller transfers an object reference
		to this function. If \c NULL an unqueued signal will be delivered to the
		thread.
	\param flags A bitwise combination of any number of the following:
		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
			target thread the signal.
	\return \c B_OK, when the signal was delivered successfully, another error
		code otherwise.
*/
status_t
send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
	Signal* signal, uint32 flags)
{
	ASSERT(signal == NULL || signalNumber == signal->Number());

	T(SendSignal(thread->id, signalNumber, flags));

	// The caller transferred a reference to the signal to us. Until Detach()
	// below, any early return releases that reference automatically.
	BReference<Signal> signalReference(signal, true);

	if ((flags & B_CHECK_PERMISSION) != 0) {
		if (!has_permission_to_signal(thread->team, true))
			return EPERM;
	}

	// signal number 0 means: only perform the delivery checks above
	if (signalNumber == 0)
		return B_OK;

	if (thread->team == team_get_kernel_team()) {
		// Signals to kernel threads will only wake them up
		if (thread->state == B_THREAD_SUSPENDED)
			scheduler_enqueue_in_run_queue(thread);
		return B_OK;
	}

	// queue the signal object, if we got one, otherwise just mark the number
	// pending
	if (signal != NULL)
		thread->AddPendingSignal(signal);
	else
		thread->AddPendingSignal(signalNumber);

	// the thread has the signal reference, now
	signalReference.Detach();

	switch (signalNumber) {
		case SIGKILL:
		{
			// If sent to a thread other than the team's main thread, also send
			// a SIGKILLTHR to the main thread to kill the team.
			Thread* mainThread = thread->team->main_thread;
			if (mainThread != NULL && mainThread != thread) {
				mainThread->AddPendingSignal(SIGKILLTHR);

				// wake up main thread
				if (mainThread->state == B_THREAD_SUSPENDED)
					scheduler_enqueue_in_run_queue(mainThread);
				else
					thread_interrupt(mainThread, true);

				update_thread_signals_flag(mainThread);
			}

			// supposed to fall through
		}
		case SIGKILLTHR:
			// Wake up suspended threads and interrupt waiting ones
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);
			else
				thread_interrupt(thread, true);
			break;

		case SIGNAL_CONTINUE_THREAD:
			// wake up thread, and interrupt its current syscall
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);

			atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
			break;

		case SIGCONT:
			// Wake up thread if it was suspended, otherwise interrupt it, if
			// the signal isn't blocked.
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);
			else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask) != 0)
				thread_interrupt(thread, false);

			// remove any pending stop signals
			thread->RemovePendingSignals(STOP_SIGNALS);
			break;

		default:
			// If the signal is not masked, interrupt the thread, if it is
			// currently waiting (interruptibly).
			// NOTE(review): SIGCHLD is treated as interrupting even when
			// masked here -- presumably so waiting parents wake up; confirm.
			if ((thread->AllPendingSignals()
						& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD)))
					!= 0) {
				// Interrupt thread if it was waiting
				thread_interrupt(thread, false);
			}
			break;
	}

	update_thread_signals_flag(thread);

	return B_OK;
}
1457 
1458 
1459 /*!	Sends the given signal to the given thread.
1460 
1461 	The caller must not hold the scheduler lock.
1462 
1463 	\param thread The thread the signal shall be sent to.
1464 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1465 		actual signal will be delivered. Only delivery checks will be performed.
1466 		The given object will be copied. The caller retains ownership.
1467 	\param flags A bitwise combination of any number of the following:
1468 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1469 			target thread the signal.
1470 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1471 			woken up, the scheduler will be invoked. If set that will not be
1472 			done explicitly, but rescheduling can still happen, e.g. when the
1473 			current thread's time slice runs out.
1474 	\return \c B_OK, when the signal was delivered successfully, another error
1475 		code otherwise.
1476 */
1477 status_t
1478 send_signal_to_thread(Thread* thread, const Signal& signal, uint32 flags)
1479 {
1480 	// Clone the signal -- the clone will be queued. If something fails and the
1481 	// caller doesn't require queuing, we will add an unqueued signal.
1482 	Signal* signalToQueue = NULL;
1483 	status_t error = Signal::CreateQueuable(signal,
1484 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1485 	if (error != B_OK)
1486 		return error;
1487 
1488 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1489 
1490 	error = send_signal_to_thread_locked(thread, signal.Number(), signalToQueue,
1491 		flags);
1492 	if (error != B_OK)
1493 		return error;
1494 
1495 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1496 		scheduler_reschedule_if_necessary_locked();
1497 
1498 	return B_OK;
1499 }
1500 
1501 
1502 /*!	Sends the given signal to the thread with the given ID.
1503 
1504 	The caller must not hold the scheduler lock.
1505 
1506 	\param threadID The ID of the thread the signal shall be sent to.
1507 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1508 		actual signal will be delivered. Only delivery checks will be performed.
1509 		The given object will be copied. The caller retains ownership.
1510 	\param flags A bitwise combination of any number of the following:
1511 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1512 			target thread the signal.
1513 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1514 			woken up, the scheduler will be invoked. If set that will not be
1515 			done explicitly, but rescheduling can still happen, e.g. when the
1516 			current thread's time slice runs out.
1517 	\return \c B_OK, when the signal was delivered successfully, another error
1518 		code otherwise.
1519 */
1520 status_t
1521 send_signal_to_thread_id(thread_id threadID, const Signal& signal, uint32 flags)
1522 {
1523 	Thread* thread = Thread::Get(threadID);
1524 	if (thread == NULL)
1525 		return B_BAD_THREAD_ID;
1526 	BReference<Thread> threadReference(thread, true);
1527 
1528 	return send_signal_to_thread(thread, signal, flags);
1529 }
1530 
1531 
/*!	Sends the given signal to the given team.

	The caller must hold the scheduler lock.

	\param team The team the signal shall be sent to.
	\param signalNumber The number of the signal to be delivered. If \c 0, no
		actual signal will be delivered. Only delivery checks will be performed.
	\param signal If non-NULL the signal to be queued (has number
		\a signalNumber in this case). The caller transfers an object reference
		to this function. If \c NULL an unqueued signal will be delivered to the
		thread.
	\param flags A bitwise combination of any number of the following:
		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
			target thread the signal.
		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
			woken up, the scheduler will be invoked. If set that will not be
			done explicitly, but rescheduling can still happen, e.g. when the
			current thread's time slice runs out.
	\return \c B_OK, when the signal was delivered successfully, another error
		code otherwise.
*/
status_t
send_signal_to_team_locked(Team* team, uint32 signalNumber, Signal* signal,
	uint32 flags)
{
	ASSERT(signal == NULL || signalNumber == signal->Number());

	T(SendSignal(team->id, signalNumber, flags));

	// The caller transferred a reference to the signal to us. Early returns
	// below release it automatically; after Detach() the team owns it.
	BReference<Signal> signalReference(signal, true);

	if ((flags & B_CHECK_PERMISSION) != 0) {
		if (!has_permission_to_signal(team, true))
			return EPERM;
	}

	// signal number 0 means: only perform the delivery checks above
	if (signalNumber == 0)
		return B_OK;

	if (team == team_get_kernel_team()) {
		// signals to the kernel team are not allowed
		return EPERM;
	}

	if (signal != NULL)
		team->AddPendingSignal(signal);
	else
		team->AddPendingSignal(signalNumber);

	// the team has the signal reference, now
	signalReference.Detach();

	switch (signalNumber) {
		case SIGKILL:
		case SIGKILLTHR:
		{
			// Also add a SIGKILLTHR to the main thread's signals and wake it
			// up/interrupt it, so we get this over with as soon as possible
			// (only the main thread shuts down the team).
			Thread* mainThread = team->main_thread;
			if (mainThread != NULL) {
				mainThread->AddPendingSignal(SIGKILLTHR);

				// wake up main thread
				if (mainThread->state == B_THREAD_SUSPENDED)
					scheduler_enqueue_in_run_queue(mainThread);
				else
					thread_interrupt(mainThread, true);
			}
			break;
		}

		case SIGCONT:
			// Wake up any suspended threads, interrupt the others, if they
			// don't block the signal.
			for (Thread* thread = team->thread_list; thread != NULL;
					thread = thread->team_next) {
				if (thread->state == B_THREAD_SUSPENDED) {
					scheduler_enqueue_in_run_queue(thread);
				} else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask)
						!= 0) {
					thread_interrupt(thread, false);
				}

				// remove any pending stop signals
				thread->RemovePendingSignals(STOP_SIGNALS);
			}

			// remove any pending team stop signals
			team->RemovePendingSignals(STOP_SIGNALS);
			break;

		case SIGSTOP:
		case SIGTSTP:
		case SIGTTIN:
		case SIGTTOU:
			// send the stop signal to all threads
			// TODO: Is that correct or should we only target the main thread?
			for (Thread* thread = team->thread_list; thread != NULL;
					thread = thread->team_next) {
				thread->AddPendingSignal(signalNumber);
			}

			// remove the stop signal from the team again
			if (signal != NULL) {
				team->RemovePendingSignal(signal);
				// re-assume ownership of the signal object, so it is
				// released when we return
				signalReference.SetTo(signal, true);
			} else
				team->RemovePendingSignal(signalNumber);

			// fall through to interrupt threads
		default:
			// Interrupt all interruptibly waiting threads, if the signal is
			// not masked.
			// NOTE(review): SIGCHLD counts as non-blocked here even when
			// masked -- mirrors send_signal_to_thread_locked(); confirm.
			for (Thread* thread = team->thread_list; thread != NULL;
					thread = thread->team_next) {
				sigset_t nonBlocked = ~thread->sig_block_mask
					| SIGNAL_TO_MASK(SIGCHLD);
				if ((thread->AllPendingSignals() & nonBlocked) != 0)
					thread_interrupt(thread, false);
			}
			break;
	}

	update_team_threads_signal_flag(team);

	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
		scheduler_reschedule_if_necessary_locked();

	return B_OK;
}
1664 
1665 
1666 /*!	Sends the given signal to the given team.
1667 
1668 	\param team The team the signal shall be sent to.
1669 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1670 		actual signal will be delivered. Only delivery checks will be performed.
1671 		The given object will be copied. The caller retains ownership.
1672 	\param flags A bitwise combination of any number of the following:
1673 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1674 			target thread the signal.
1675 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1676 			woken up, the scheduler will be invoked. If set that will not be
1677 			done explicitly, but rescheduling can still happen, e.g. when the
1678 			current thread's time slice runs out.
1679 	\return \c B_OK, when the signal was delivered successfully, another error
1680 		code otherwise.
1681 */
1682 status_t
1683 send_signal_to_team(Team* team, const Signal& signal, uint32 flags)
1684 {
1685 	// Clone the signal -- the clone will be queued. If something fails and the
1686 	// caller doesn't require queuing, we will add an unqueued signal.
1687 	Signal* signalToQueue = NULL;
1688 	status_t error = Signal::CreateQueuable(signal,
1689 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1690 	if (error != B_OK)
1691 		return error;
1692 
1693 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1694 
1695 	return send_signal_to_team_locked(team, signal.Number(), signalToQueue,
1696 		flags);
1697 }
1698 
1699 
1700 /*!	Sends the given signal to the team with the given ID.
1701 
1702 	\param teamID The ID of the team the signal shall be sent to.
1703 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1704 		actual signal will be delivered. Only delivery checks will be performed.
1705 		The given object will be copied. The caller retains ownership.
1706 	\param flags A bitwise combination of any number of the following:
1707 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1708 			target thread the signal.
1709 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1710 			woken up, the scheduler will be invoked. If set that will not be
1711 			done explicitly, but rescheduling can still happen, e.g. when the
1712 			current thread's time slice runs out.
1713 	\return \c B_OK, when the signal was delivered successfully, another error
1714 		code otherwise.
1715 */
1716 status_t
1717 send_signal_to_team_id(team_id teamID, const Signal& signal, uint32 flags)
1718 {
1719 	// get the team
1720 	Team* team = Team::Get(teamID);
1721 	if (team == NULL)
1722 		return B_BAD_TEAM_ID;
1723 	BReference<Team> teamReference(team, true);
1724 
1725 	return send_signal_to_team(team, signal, flags);
1726 }
1727 
1728 
1729 /*!	Sends the given signal to the given process group.
1730 
1731 	The caller must hold the process group's lock. Interrupts must be enabled.
1732 
1733 	\param group The the process group the signal shall be sent to.
1734 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1735 		actual signal will be delivered. Only delivery checks will be performed.
1736 		The given object will be copied. The caller retains ownership.
1737 	\param flags A bitwise combination of any number of the following:
1738 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1739 			target thread the signal.
1740 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1741 			woken up, the scheduler will be invoked. If set that will not be
1742 			done explicitly, but rescheduling can still happen, e.g. when the
1743 			current thread's time slice runs out.
1744 	\return \c B_OK, when the signal was delivered successfully, another error
1745 		code otherwise.
1746 */
1747 status_t
1748 send_signal_to_process_group_locked(ProcessGroup* group, const Signal& signal,
1749 	uint32 flags)
1750 {
1751 	T(SendSignal(-group->id, signal.Number(), flags));
1752 
1753 	bool firstTeam = true;
1754 
1755 	for (Team* team = group->teams; team != NULL; team = team->group_next) {
1756 		status_t error = send_signal_to_team(team, signal,
1757 			flags | B_DO_NOT_RESCHEDULE);
1758 		// If sending to the first team in the group failed, let the whole call
1759 		// fail.
1760 		if (firstTeam) {
1761 			if (error != B_OK)
1762 				return error;
1763 			firstTeam = false;
1764 		}
1765 	}
1766 
1767 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1768 		scheduler_reschedule_if_necessary();
1769 
1770 	return B_OK;
1771 }
1772 
1773 
1774 /*!	Sends the given signal to the process group specified by the given ID.
1775 
1776 	The caller must not hold any process group, team, or thread lock. Interrupts
1777 	must be enabled.
1778 
1779 	\param groupID The ID of the process group the signal shall be sent to.
1780 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1781 		actual signal will be delivered. Only delivery checks will be performed.
1782 		The given object will be copied. The caller retains ownership.
1783 	\param flags A bitwise combination of any number of the following:
1784 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1785 			target thread the signal.
1786 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1787 			woken up, the scheduler will be invoked. If set that will not be
1788 			done explicitly, but rescheduling can still happen, e.g. when the
1789 			current thread's time slice runs out.
1790 	\return \c B_OK, when the signal was delivered successfully, another error
1791 		code otherwise.
1792 */
1793 status_t
1794 send_signal_to_process_group(pid_t groupID, const Signal& signal, uint32 flags)
1795 {
1796 	ProcessGroup* group = ProcessGroup::Get(groupID);
1797 	if (group == NULL)
1798 		return B_BAD_TEAM_ID;
1799 	BReference<ProcessGroup> groupReference(group);
1800 
1801 	T(SendSignal(-group->id, signal.Number(), flags));
1802 
1803 	AutoLocker<ProcessGroup> groupLocker(group);
1804 
1805 	status_t error = send_signal_to_process_group_locked(group, signal,
1806 		flags | B_DO_NOT_RESCHEDULE);
1807 	if (error != B_OK)
1808 		return error;
1809 
1810 	groupLocker.Unlock();
1811 
1812 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1813 		scheduler_reschedule_if_necessary();
1814 
1815 	return B_OK;
1816 }
1817 
1818 
1819 static status_t
1820 send_signal_internal(pid_t id, uint signalNumber, union sigval userValue,
1821 	uint32 flags)
1822 {
1823 	if (signalNumber > MAX_SIGNAL_NUMBER)
1824 		return B_BAD_VALUE;
1825 
1826 	Thread* thread = thread_get_current_thread();
1827 
1828 	Signal signal(signalNumber,
1829 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
1830 		B_OK, thread->team->id);
1831 		// Note: SI_USER/SI_QUEUE is not correct, if called from within the
1832 		// kernel (or a driver), but we don't have any info here.
1833 	signal.SetUserValue(userValue);
1834 
1835 	// If id is > 0, send the signal to the respective thread.
1836 	if (id > 0)
1837 		return send_signal_to_thread_id(id, signal, flags);
1838 
1839 	// If id == 0, send the signal to the current thread.
1840 	if (id == 0)
1841 		return send_signal_to_thread(thread, signal, flags);
1842 
1843 	// If id == -1, send the signal to all teams the calling team has permission
1844 	// to send signals to.
1845 	if (id == -1) {
1846 		// TODO: Implement correctly!
1847 		// currently only send to the current team
1848 		return send_signal_to_team_id(thread->team->id, signal, flags);
1849 	}
1850 
1851 	// Send a signal to the specified process group (the absolute value of the
1852 	// id).
1853 	return send_signal_to_process_group(-id, signal, flags);
1854 }
1855 
1856 
1857 int
1858 send_signal_etc(pid_t id, uint signalNumber, uint32 flags)
1859 {
1860 	// a dummy user value
1861 	union sigval userValue;
1862 	userValue.sival_ptr = NULL;
1863 
1864 	return send_signal_internal(id, signalNumber, userValue, flags);
1865 }
1866 
1867 
1868 int
1869 send_signal(pid_t threadID, uint signal)
1870 {
1871 	// The BeBook states that this function wouldn't be exported
1872 	// for drivers, but, of course, it's wrong.
1873 	return send_signal_etc(threadID, signal, 0);
1874 }
1875 
1876 
1877 static int
1878 sigprocmask_internal(int how, const sigset_t* set, sigset_t* oldSet)
1879 {
1880 	Thread* thread = thread_get_current_thread();
1881 
1882 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1883 
1884 	sigset_t oldMask = thread->sig_block_mask;
1885 
1886 	if (set != NULL) {
1887 		T(SigProcMask(how, *set));
1888 
1889 		switch (how) {
1890 			case SIG_BLOCK:
1891 				thread->sig_block_mask |= *set & BLOCKABLE_SIGNALS;
1892 				break;
1893 			case SIG_UNBLOCK:
1894 				thread->sig_block_mask &= ~*set;
1895 				break;
1896 			case SIG_SETMASK:
1897 				thread->sig_block_mask = *set & BLOCKABLE_SIGNALS;
1898 				break;
1899 			default:
1900 				return B_BAD_VALUE;
1901 		}
1902 
1903 		update_current_thread_signals_flag();
1904 	}
1905 
1906 	if (oldSet != NULL)
1907 		*oldSet = oldMask;
1908 
1909 	return B_OK;
1910 }
1911 
1912 
/*!	\brief POSIX sigprocmask(); on error sets \c errno and returns \c -1. */
int
sigprocmask(int how, const sigset_t* set, sigset_t* oldSet)
{
	RETURN_AND_SET_ERRNO(sigprocmask_internal(how, set, oldSet));
}
1918 
1919 
/*!	\brief Like sigaction(), but returning the error instead of setting errno.
	Installs/queries the team-wide handler for \a signal. Signals that cannot
	be blocked (e.g. the kill signals) cannot have their action changed either.
*/
static status_t
sigaction_internal(int signal, const struct sigaction* act,
	struct sigaction* oldAction)
{
	// reject invalid numbers as well as the unblockable/unhandleable signals
	if (signal < 1 || signal > MAX_SIGNAL_NUMBER
		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
		return B_BAD_VALUE;

	// get and lock the team
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	struct sigaction& teamHandler = team->SignalActionFor(signal);
	if (oldAction) {
		// save previous sigaction structure
		*oldAction = teamHandler;
	}

	if (act) {
		T(SigAction(signal, act));

		// set new sigaction structure; the handler's mask may only contain
		// blockable signals
		teamHandler = *act;
		teamHandler.sa_mask &= BLOCKABLE_SIGNALS;
	}

	// Remove pending signal if it should now be ignored and remove pending
	// signal for those signals whose default action is to ignore them.
	if ((act && act->sa_handler == SIG_IGN)
		|| (act && act->sa_handler == SIG_DFL
			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0)) {
		// scheduler lock nested inside the team lock
		InterruptsSpinLocker schedulerLocker(gSchedulerLock);

		team->RemovePendingSignal(signal);

		// also drop it from every thread of the team
		for (Thread* thread = team->thread_list; thread != NULL;
				thread = thread->team_next) {
			thread->RemovePendingSignal(signal);
		}
	}

	return B_OK;
}
1965 
1966 
/*!	\brief POSIX sigaction(); on error sets \c errno and returns \c -1. */
int
sigaction(int signal, const struct sigaction* act, struct sigaction* oldAction)
{
	RETURN_AND_SET_ERRNO(sigaction_internal(signal, act, oldAction));
}
1972 
1973 
/*!	Wait for the specified signals, and return the information for the retrieved
	signal in \a info.
	The \c flags and \c timeout combination must either define an infinite
	timeout (no timeout flags set), an absolute timeout (\c B_ABSOLUTE_TIMEOUT
	set), or a relative timeout \code <= 0 \endcode (\c B_RELATIVE_TIMEOUT set).

	\return \c B_OK when a requested signal was retrieved, \c B_INTERRUPTED
		when a kill or other non-blocked signal arrived instead,
		\c B_WOULD_BLOCK on timeout or when waiting isn't allowed.
*/
static status_t
sigwait_internal(const sigset_t* set, siginfo_t* info, uint32 flags,
	bigtime_t timeout)
{
	// restrict mask to blockable signals
	sigset_t requestedSignals = *set & BLOCKABLE_SIGNALS;

	// make always interruptable
	flags |= B_CAN_INTERRUPT;

	// check whether we are allowed to wait at all
	bool canWait = (flags & B_RELATIVE_TIMEOUT) == 0 || timeout > 0;

	Thread* thread = thread_get_current_thread();

	InterruptsSpinLocker schedulerLocker(gSchedulerLock);

	bool timedOut = false;
	status_t error = B_OK;

	while (!timedOut) {
		sigset_t pendingSignals = thread->AllPendingSignals();

		// If a kill signal is pending, just bail out.
		if ((pendingSignals & KILL_SIGNALS) != 0)
			return B_INTERRUPTED;

		if ((pendingSignals & requestedSignals) != 0) {
			// get signal with the highest priority
			Signal stackSignal;
			Signal* signal = dequeue_thread_or_team_signal(thread,
				requestedSignals, stackSignal);
			ASSERT(signal != NULL);

			// notify the sender (if requested) when we're done with the signal
			SignalHandledCaller signalHandledCaller(signal);
			// drop the scheduler lock before touching the caller's buffer
			schedulerLocker.Unlock();

			info->si_signo = signal->Number();
			info->si_code = signal->SignalCode();
			info->si_errno = signal->ErrorCode();
			info->si_pid = signal->SendingProcess();
			info->si_uid = signal->SendingUser();
			info->si_addr = signal->Address();
			info->si_status = signal->Status();
			info->si_band = signal->PollBand();
			info->si_value = signal->UserValue();

			return B_OK;
		}

		if (!canWait)
			return B_WOULD_BLOCK;

		sigset_t blockedSignals = thread->sig_block_mask;
		if ((pendingSignals & ~blockedSignals) != 0) {
			// Non-blocked signals are pending -- return to let them be handled.
			return B_INTERRUPTED;
		}

		// No signals yet. Set the signal block mask to not include the
		// requested mask and wait until we're interrupted.
		thread->sig_block_mask = blockedSignals & ~requestedSignals;

		// block until a signal becomes pending or the timeout fires
		while (!has_signals_pending(thread)) {
			thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SIGNAL,
				NULL);

			if ((flags & B_ABSOLUTE_TIMEOUT) != 0) {
				error = thread_block_with_timeout_locked(flags, timeout);
				if (error == B_WOULD_BLOCK || error == B_TIMED_OUT) {
					error = B_WOULD_BLOCK;
						// POSIX requires EAGAIN (B_WOULD_BLOCK) on timeout
					timedOut = true;
					break;
				}
			} else
				thread_block_locked(thread);
		}

		// restore the original block mask
		thread->sig_block_mask = blockedSignals;

		update_current_thread_signals_flag();
	}

	// we get here only when timed out
	return error;
}
2068 
2069 
/*!	Replace the current signal block mask and wait for any event to happen.
	Before returning, the original signal block mask is reinstantiated.
	\param _mask The temporary signal block mask to install while waiting.
		Restricted to blockable signals.
	\return Always \c B_INTERRUPTED -- sigsuspend() never succeeds; the
		pending signal is handled on the way back to userland.
*/
static status_t
sigsuspend_internal(const sigset_t* _mask)
{
	sigset_t mask = *_mask & BLOCKABLE_SIGNALS;

	T(SigSuspend(mask));

	Thread* thread = thread_get_current_thread();

	InterruptsSpinLocker schedulerLocker(gSchedulerLock);

	// Set the new block mask and block until interrupted. We might be here
	// after a syscall restart, in which case sigsuspend_original_unblocked_mask
	// will still be set. In that case the original mask has to be recovered
	// from it (inverted) rather than from the thread's current block mask.
	sigset_t oldMask = thread->sigsuspend_original_unblocked_mask != 0
		? ~thread->sigsuspend_original_unblocked_mask : thread->sig_block_mask;
	thread->sig_block_mask = mask & BLOCKABLE_SIGNALS;

	update_current_thread_signals_flag();

	while (!has_signals_pending(thread)) {
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SIGNAL, NULL);
		thread_block_locked(thread);
	}

	// Set sigsuspend_original_unblocked_mask (guaranteed to be non-0 due to
	// BLOCKABLE_SIGNALS). This will indicate to handle_signals() that it is
	// called after a _user_sigsuspend(). It will reset the field after invoking
	// a signal handler, or restart the syscall, if there wasn't anything to
	// handle anymore (e.g. because another thread was faster).
	thread->sigsuspend_original_unblocked_mask = ~oldMask;

	T(SigSuspendDone());

	// we're not supposed to actually succeed
	return B_INTERRUPTED;
}
2111 
2112 
2113 static status_t
2114 sigpending_internal(sigset_t* set)
2115 {
2116 	Thread* thread = thread_get_current_thread();
2117 
2118 	if (set == NULL)
2119 		return B_BAD_VALUE;
2120 
2121 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2122 
2123 	*set = thread->AllPendingSignals() & thread->sig_block_mask;
2124 
2125 	return B_OK;
2126 }
2127 
2128 
2129 // #pragma mark - syscalls
2130 
2131 
2132 /*!	Sends a signal to a thread, process, or process group.
2133 	\param id Specifies the ID of the target:
2134 		- \code id > 0 \endcode: If \a toThread is \c true, the target is the
2135 			thread with ID \a id, otherwise the team with the ID \a id.
2136 		- \code id == 0 \endcode: If toThread is \c true, the target is the
2137 			current thread, otherwise the current team.
2138 		- \code id == -1 \endcode: The target are all teams the current team has
2139 			permission to send signals to. Currently not implemented correctly.
2140 		- \code id < -1 \endcode: The target are is the process group with ID
2141 			\c -id.
2142 	\param signalNumber The signal number. \c 0 to just perform checks, but not
2143 		actually send any signal.
2144 	\param userUserValue A user value to be associated with the signal. Might be
2145 		ignored unless signal queuing is forced. Can be \c NULL.
2146 	\param flags A bitwise or of any number of the following:
2147 		- \c SIGNAL_FLAG_QUEUING_REQUIRED: Signal queuing is required. Fail
2148 			instead of falling back to unqueued signals, when queuing isn't
2149 			possible.
2150 		- \c SIGNAL_FLAG_SEND_TO_THREAD: Interpret the the given ID as a
2151 			\c thread_id rather than a \c team_id. Ignored when the \a id is
2152 			\code < 0 \endcode -- then the target is a process group.
2153 	\return \c B_OK on success, another error code otherwise.
2154 */
2155 status_t
2156 _user_send_signal(int32 id, uint32 signalNumber,
2157 	const union sigval* userUserValue, uint32 flags)
2158 {
2159 	// restrict flags to the allowed ones and add B_CHECK_PERMISSION
2160 	flags &= SIGNAL_FLAG_QUEUING_REQUIRED | SIGNAL_FLAG_SEND_TO_THREAD;
2161 	flags |= B_CHECK_PERMISSION;
2162 
2163 	// Copy the user value from userland. If not given, use a dummy value.
2164 	union sigval userValue;
2165 	if (userUserValue != NULL) {
2166 		if (!IS_USER_ADDRESS(userUserValue)
2167 			|| user_memcpy(&userValue, userUserValue, sizeof(userValue))
2168 				!= B_OK) {
2169 			return B_BAD_ADDRESS;
2170 		}
2171 	} else
2172 		userValue.sival_ptr = NULL;
2173 
2174 	// If to be sent to a thread, delegate to send_signal_internal(). Also do
2175 	// that when id < 0, since in this case the semantics is the same as well.
2176 	if ((flags & SIGNAL_FLAG_SEND_TO_THREAD) != 0 || id < 0)
2177 		return send_signal_internal(id, signalNumber, userValue, flags);
2178 
2179 	// kill() semantics for id >= 0
2180 	if (signalNumber > MAX_SIGNAL_NUMBER)
2181 		return B_BAD_VALUE;
2182 
2183 	Thread* thread = thread_get_current_thread();
2184 
2185 	Signal signal(signalNumber,
2186 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
2187 		B_OK, thread->team->id);
2188 	signal.SetUserValue(userValue);
2189 
2190 	// send to current team for id == 0, otherwise to the respective team
2191 	return send_signal_to_team_id(id == 0 ? team_get_current_team_id() : id,
2192 		signal, flags);
2193 }
2194 
2195 
2196 status_t
2197 _user_set_signal_mask(int how, const sigset_t *userSet, sigset_t *userOldSet)
2198 {
2199 	sigset_t set, oldSet;
2200 	status_t status;
2201 
2202 	if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
2203 		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
2204 				sizeof(sigset_t)) < B_OK))
2205 		return B_BAD_ADDRESS;
2206 
2207 	status = sigprocmask_internal(how, userSet ? &set : NULL,
2208 		userOldSet ? &oldSet : NULL);
2209 
2210 	// copy old set if asked for
2211 	if (status >= B_OK && userOldSet != NULL
2212 		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
2213 		return B_BAD_ADDRESS;
2214 
2215 	return status;
2216 }
2217 
2218 
2219 status_t
2220 _user_sigaction(int signal, const struct sigaction *userAction,
2221 	struct sigaction *userOldAction)
2222 {
2223 	struct sigaction act, oact;
2224 	status_t status;
2225 
2226 	if ((userAction != NULL && user_memcpy(&act, userAction,
2227 				sizeof(struct sigaction)) < B_OK)
2228 		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction,
2229 				sizeof(struct sigaction)) < B_OK))
2230 		return B_BAD_ADDRESS;
2231 
2232 	status = sigaction_internal(signal, userAction ? &act : NULL,
2233 		userOldAction ? &oact : NULL);
2234 
2235 	// only copy the old action if a pointer has been given
2236 	if (status >= B_OK && userOldAction != NULL
2237 		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
2238 		return B_BAD_ADDRESS;
2239 
2240 	return status;
2241 }
2242 
2243 
/*!	Syscall backing sigwait(), sigwaitinfo(), and sigtimedwait().
	Waits for one of the signals in \a userSet and optionally returns its
	details in \a userInfo. Restartable on interruption by other signals.
	\param userSet Userland pointer to the set of signals to wait for.
	\param userInfo Optional userland pointer for the received signal's info.
	\param flags Timeout flags (see sigwait_internal()).
	\param timeout The timeout matching \a flags.
*/
status_t
_user_sigwait(const sigset_t *userSet, siginfo_t *userInfo, uint32 flags,
	bigtime_t timeout)
{
	// copy userSet to stack
	sigset_t set;
	if (userSet == NULL || !IS_USER_ADDRESS(userSet)
		|| user_memcpy(&set, userSet, sizeof(sigset_t)) != B_OK) {
		return B_BAD_ADDRESS;
	}

	// userInfo is optional, but must be a user address when given
	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	// translate a restarted syscall's relative timeout back to absolute
	syscall_restart_handle_timeout_pre(flags, timeout);

	flags |= B_CAN_INTERRUPT;

	siginfo_t info;
	status_t status = sigwait_internal(&set, &info, flags, timeout);
	if (status == B_OK) {
		// copy the info back to userland, if userInfo is non-NULL
		if (userInfo != NULL)
			status = user_memcpy(userInfo, &info, sizeof(info));
	} else if (status == B_INTERRUPTED) {
		// make sure we'll be restarted, even after the interrupting signal
		// has been handled
		Thread* thread = thread_get_current_thread();
		atomic_or(&thread->flags, THREAD_FLAGS_ALWAYS_RESTART_SYSCALL);
	}

	return syscall_restart_handle_timeout_post(status, timeout);
}
2277 
2278 
2279 status_t
2280 _user_sigsuspend(const sigset_t *userMask)
2281 {
2282 	sigset_t mask;
2283 
2284 	if (userMask == NULL)
2285 		return B_BAD_VALUE;
2286 	if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
2287 		return B_BAD_ADDRESS;
2288 
2289 	return sigsuspend_internal(&mask);
2290 }
2291 
2292 
2293 status_t
2294 _user_sigpending(sigset_t *userSet)
2295 {
2296 	sigset_t set;
2297 	int status;
2298 
2299 	if (userSet == NULL)
2300 		return B_BAD_VALUE;
2301 	if (!IS_USER_ADDRESS(userSet))
2302 		return B_BAD_ADDRESS;
2303 
2304 	status = sigpending_internal(&set);
2305 	if (status == B_OK
2306 		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
2307 		return B_BAD_ADDRESS;
2308 
2309 	return status;
2310 }
2311 
2312 
2313 status_t
2314 _user_set_signal_stack(const stack_t* newUserStack, stack_t* oldUserStack)
2315 {
2316 	Thread *thread = thread_get_current_thread();
2317 	struct stack_t newStack, oldStack;
2318 	bool onStack = false;
2319 
2320 	if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
2321 				sizeof(stack_t)) < B_OK)
2322 		|| (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
2323 				sizeof(stack_t)) < B_OK))
2324 		return B_BAD_ADDRESS;
2325 
2326 	if (thread->signal_stack_enabled) {
2327 		// determine whether or not the user thread is currently
2328 		// on the active signal stack
2329 		onStack = arch_on_signal_stack(thread);
2330 	}
2331 
2332 	if (oldUserStack != NULL) {
2333 		oldStack.ss_sp = (void *)thread->signal_stack_base;
2334 		oldStack.ss_size = thread->signal_stack_size;
2335 		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
2336 			| (onStack ? SS_ONSTACK : 0);
2337 	}
2338 
2339 	if (newUserStack != NULL) {
2340 		// no flags other than SS_DISABLE are allowed
2341 		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
2342 			return B_BAD_VALUE;
2343 
2344 		if ((newStack.ss_flags & SS_DISABLE) == 0) {
2345 			// check if the size is valid
2346 			if (newStack.ss_size < MINSIGSTKSZ)
2347 				return B_NO_MEMORY;
2348 			if (onStack)
2349 				return B_NOT_ALLOWED;
2350 			if (!IS_USER_ADDRESS(newStack.ss_sp))
2351 				return B_BAD_VALUE;
2352 
2353 			thread->signal_stack_base = (addr_t)newStack.ss_sp;
2354 			thread->signal_stack_size = newStack.ss_size;
2355 			thread->signal_stack_enabled = true;
2356 		} else
2357 			thread->signal_stack_enabled = false;
2358 	}
2359 
2360 	// only copy the old stack info if a pointer has been given
2361 	if (oldUserStack != NULL
2362 		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
2363 		return B_BAD_ADDRESS;
2364 
2365 	return B_OK;
2366 }
2367 
2368 
/*!	Restores the environment of a function that was interrupted by a signal
	handler call.
	This syscall is invoked when a signal handler function returns. It
	deconstructs the signal handler frame and restores the stack and register
	state of the function that was interrupted by a signal. The syscall is
	therefore somewhat unusual, since it does not return to the calling
	function, but to someplace else. In case the signal interrupted a syscall,
	it will appear as if the syscall just returned. That is also the reason, why
	this syscall returns an int64, since it needs to return the value the
	interrupted syscall returns, which is potentially 64 bits wide.

	\param userSignalFrameData The signal frame data created for the signal
		handler. Potentially some data (e.g. registers) have been modified by
		the signal handler.
	\return In case the signal interrupted a syscall, the return value of that
		syscall. Otherwise (i.e. in case of a (hardware) interrupt/exception)
		the value might need to be tailored such that after a return to userland
		the interrupted environment is identical to the interrupted one (unless
		explicitly modified). E.g. for x86 to achieve that, the return value
		must contain the eax|edx values of the interrupted environment.
*/
int64
_user_restore_signal_frame(struct signal_frame_data* userSignalFrameData)
{
	// announce that both halves of the 64 bit return value are significant
	syscall_64_bit_return_value();

	Thread *thread = thread_get_current_thread();

	// copy the signal frame data from userland
	signal_frame_data signalFrameData;
	if (userSignalFrameData == NULL || !IS_USER_ADDRESS(userSignalFrameData)
		|| user_memcpy(&signalFrameData, userSignalFrameData,
			sizeof(signalFrameData)) != B_OK) {
		// We failed to copy the signal frame data from userland. This is a
		// serious problem. Kill the thread.
		dprintf("_user_restore_signal_frame(): thread %" B_PRId32 ": Failed to "
			"copy signal frame data (%p) from userland. Killing thread...\n",
			thread->id, userSignalFrameData);
		kill_thread(thread->id);
		return B_BAD_ADDRESS;
	}

	// restore the signal block mask (never allowing unblockable signals to be
	// masked, even if the handler tampered with the context)
	InterruptsSpinLocker schedulerLocker(gSchedulerLock);

	thread->sig_block_mask
		= signalFrameData.context.uc_sigmask & BLOCKABLE_SIGNALS;
	update_current_thread_signals_flag();

	schedulerLocker.Unlock();

	// restore the syscall restart related thread flags and the syscall restart
	// parameters
	atomic_and(&thread->flags,
		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
	atomic_or(&thread->flags, signalFrameData.thread_flags
		& (THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));

	memcpy(thread->syscall_restart.parameters,
		signalFrameData.syscall_restart_parameters,
		sizeof(thread->syscall_restart.parameters));

	// restore the previously stored Thread::user_signal_context, dropping it
	// if the (handler-writable) link no longer points into userland
	thread->user_signal_context = signalFrameData.context.uc_link;
	if (thread->user_signal_context != NULL
		&& !IS_USER_ADDRESS(thread->user_signal_context)) {
		thread->user_signal_context = NULL;
	}

	// let the architecture specific code restore the registers
	return arch_restore_signal_frame(&signalFrameData);
}
2441