xref: /haiku/src/system/kernel/signal.cpp (revision 899e0ef82b5624ace2ccfa5f5a58c8ebee54aaef)
1 /*
2  * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
3  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
4  * Copyright 2011-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
5  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
6  * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
7  *
8  * Distributed under the terms of the MIT License.
9  */
10 
11 
12 /*! POSIX signal handling routines */
13 
14 
15 #include <ksignal.h>
16 
17 #include <errno.h>
18 #include <stddef.h>
19 #include <string.h>
20 
21 #include <OS.h>
22 #include <KernelExport.h>
23 
24 #include <cpu.h>
25 #include <core_dump.h>
26 #include <debug.h>
27 #include <kernel.h>
28 #include <kscheduler.h>
29 #include <sem.h>
30 #include <syscall_restart.h>
31 #include <syscall_utils.h>
32 #include <team.h>
33 #include <thread.h>
34 #include <tracing.h>
35 #include <user_debugger.h>
36 #include <user_thread.h>
37 #include <util/AutoLock.h>
38 #include <util/ThreadAutoLock.h>
39 
40 
41 //#define TRACE_SIGNAL
42 #ifdef TRACE_SIGNAL
43 #	define TRACE(x) dprintf x
44 #else
45 #	define TRACE(x) ;
46 #endif
47 
48 
49 #define BLOCKABLE_SIGNALS	\
50 	(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)	\
51 	| SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD)	\
52 	| SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD)	\
53 	| SIGNAL_TO_MASK(SIGNAL_CANCEL_THREAD)))
54 #define STOP_SIGNALS \
55 	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
56 	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
57 #define CONTINUE_SIGNALS \
58 	(SIGNAL_TO_MASK(SIGCONT) | SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD) \
59 	| SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD))
60 #define DEFAULT_IGNORE_SIGNALS \
61 	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
62 	| SIGNAL_TO_MASK(SIGCONT) \
63 	| SIGNAL_RANGE_TO_MASK(SIGNAL_REALTIME_MIN, SIGNAL_REALTIME_MAX))
64 #define NON_DEFERRABLE_SIGNALS	\
65 	(KILL_SIGNALS				\
66 	| SIGNAL_TO_MASK(SIGILL)	\
67 	| SIGNAL_TO_MASK(SIGFPE)	\
68 	| SIGNAL_TO_MASK(SIGSEGV))
69 
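// A hedged illustration of how these masks are meant to be combined; the
// helper below is hypothetical and only shows the bit test used throughout
// this file.
static inline bool
example_is_signal_blockable(uint32 signal)
{
	// KILL_SIGNALS, SIGSTOP, and the internal control signals can never be
	// masked via a thread's sig_block_mask
	return (SIGNAL_TO_MASK(signal) & BLOCKABLE_SIGNALS) != 0;
}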
70 
71 static const struct {
72 	const char*	name;
73 	int32		priority;
74 } kSignalInfos[__MAX_SIGNO + 1] = {
75 	{"NONE",			-1},
76 	{"HUP",				0},
77 	{"INT",				0},
78 	{"QUIT",			0},
79 	{"ILL",				0},
80 	{"CHLD",			0},
81 	{"ABRT",			0},
82 	{"PIPE",			0},
83 	{"FPE",				0},
84 	{"KILL",			100},
85 	{"STOP",			0},
86 	{"SEGV",			0},
87 	{"CONT",			0},
88 	{"TSTP",			0},
89 	{"ALRM",			0},
90 	{"TERM",			0},
91 	{"TTIN",			0},
92 	{"TTOU",			0},
93 	{"USR1",			0},
94 	{"USR2",			0},
95 	{"WINCH",			0},
96 	{"KILLTHR",			100},
97 	{"TRAP",			0},
98 	{"POLL",			0},
99 	{"PROF",			0},
100 	{"SYS",				0},
101 	{"URG",				0},
102 	{"VTALRM",			0},
103 	{"XCPU",			0},
104 	{"XFSZ",			0},
105 	{"SIGBUS",			0},
106 	{"SIGRESERVED1",	0},
107 	{"SIGRESERVED2",	0},
108 	{"SIGRT1",			8},
109 	{"SIGRT2",			7},
110 	{"SIGRT3",			6},
111 	{"SIGRT4",			5},
112 	{"SIGRT5",			4},
113 	{"SIGRT6",			3},
114 	{"SIGRT7",			2},
115 	{"SIGRT8",			1},
116 	{"invalid 41",		0},
117 	{"invalid 42",		0},
118 	{"invalid 43",		0},
119 	{"invalid 44",		0},
120 	{"invalid 45",		0},
121 	{"invalid 46",		0},
122 	{"invalid 47",		0},
123 	{"invalid 48",		0},
124 	{"invalid 49",		0},
125 	{"invalid 50",		0},
126 	{"invalid 51",		0},
127 	{"invalid 52",		0},
128 	{"invalid 53",		0},
129 	{"invalid 54",		0},
130 	{"invalid 55",		0},
131 	{"invalid 56",		0},
132 	{"invalid 57",		0},
133 	{"invalid 58",		0},
134 	{"invalid 59",		0},
135 	{"invalid 60",		0},
136 	{"invalid 61",		0},
137 	{"invalid 62",		0},
138 	{"CANCEL_THREAD",	0},
139 	{"CONTINUE_THREAD",	0}	// priority must be <= that of SIGSTOP
140 };
141 
142 
143 static inline const char*
144 signal_name(uint32 number)
145 {
146 	return number <= __MAX_SIGNO ? kSignalInfos[number].name : "invalid";
147 }
148 
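// A hedged companion sketch to signal_name(): the priority column above is
// what orders pending signals (see PendingSignals below). The helper name is
// hypothetical.
static inline int32
example_signal_priority(uint32 number)
{
	return number <= __MAX_SIGNO ? kSignalInfos[number].priority : -1;
}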
149 
150 // #pragma mark - SignalHandledCaller
151 
152 
153 struct SignalHandledCaller {
154 	SignalHandledCaller(Signal* signal)
155 		:
156 		fSignal(signal)
157 	{
158 	}
159 
160 	~SignalHandledCaller()
161 	{
162 		Done();
163 	}
164 
165 	void Done()
166 	{
167 		if (fSignal != NULL) {
168 			fSignal->Handled();
169 			fSignal = NULL;
170 		}
171 	}
172 
173 private:
174 	Signal*	fSignal;
175 };
176 
177 
178 // #pragma mark - QueuedSignalsCounter
179 
180 
181 /*!	Creates a counter with the given limit.
182 	The limit defines the maximum the counter may reach. Since the
183 	BReferenceable's reference count is used, it is assumed that the owning
184 	team holds a reference and the reference count is one greater than the
185 	counter value.
186 	\param limit The maximum allowed value the counter may have. When
187 		\code < 0 \endcode, the value is not limited.
188 */
189 QueuedSignalsCounter::QueuedSignalsCounter(int32 limit)
190 	:
191 	fLimit(limit)
192 {
193 }
194 
195 
196 /*!	Increments the counter, if the limit allows that.
197 	\return \c true, if incrementing the counter succeeded, \c false otherwise.
198 */
199 bool
200 QueuedSignalsCounter::Increment()
201 {
202 	// no limit => no problem
203 	if (fLimit < 0) {
204 		AcquireReference();
205 		return true;
206 	}
207 
208 	// Increment the reference count manually, so we can check atomically. We
209 	// compare the old value against fLimit; the reference held by our
210 	// (primary) owner is not counted toward the limit.
211 	if (atomic_add(&fReferenceCount, 1) > fLimit) {
212 		ReleaseReference();
213 		return false;
214 	}
215 
216 	return true;
217 }
218 
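// A small sketch of the limit semantics, assuming the counter was constructed
// with limit 1 and only the owner's implicit reference is held: the first
// queued signal fits, a second one is rejected until the slot is released
// again. The helper name is hypothetical.
static bool
example_try_reserve_queue_slot(QueuedSignalsCounter& counter)
{
	if (!counter.Increment())
		return false;

	// The queued Signal now holds this reference; its destructor -- or an
	// explicit Decrement() on failure, as in Signal::CreateQueuable() below --
	// gives the slot back.
	return true;
}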
219 
220 // #pragma mark - Signal
221 
222 
223 Signal::Signal()
224 	:
225 	fCounter(NULL),
226 	fPending(false)
227 {
228 }
229 
230 
231 Signal::Signal(const Signal& other)
232 	:
233 	fCounter(NULL),
234 	fNumber(other.fNumber),
235 	fSignalCode(other.fSignalCode),
236 	fErrorCode(other.fErrorCode),
237 	fSendingProcess(other.fSendingProcess),
238 	fSendingUser(other.fSendingUser),
239 	fStatus(other.fStatus),
240 	fPollBand(other.fPollBand),
241 	fAddress(other.fAddress),
242 	fUserValue(other.fUserValue),
243 	fPending(false)
244 {
245 }
246 
247 
248 Signal::Signal(uint32 number, int32 signalCode, int32 errorCode,
249 	pid_t sendingProcess)
250 	:
251 	fCounter(NULL),
252 	fNumber(number),
253 	fSignalCode(signalCode),
254 	fErrorCode(errorCode),
255 	fSendingProcess(sendingProcess),
256 	fSendingUser(getuid()),
257 	fStatus(0),
258 	fPollBand(0),
259 	fAddress(NULL),
260 	fPending(false)
261 {
262 	fUserValue.sival_ptr = NULL;
263 }
264 
265 
266 Signal::~Signal()
267 {
268 	if (fCounter != NULL)
269 		fCounter->ReleaseReference();
270 }
271 
272 
273 /*!	Creates a queuable clone of the given signal.
274 	Also enforces the current team's signal queuing limit.
275 
276 	\param signal The signal to clone.
277 	\param queuingRequired If \c true, the function will return an error code
278 		when creating the clone fails for any reason. Otherwise, the function
279 		will set \a _signalToQueue to \c NULL, but still return \c B_OK.
280 	\param _signalToQueue Return parameter. Set to the clone of the signal.
281 	\return When \c queuingRequired is \c false, always \c B_OK. Otherwise
282 		\c B_OK when creating the signal clone succeeds, another error code
283 		when it fails.
284 */
285 /*static*/ status_t
286 Signal::CreateQueuable(const Signal& signal, bool queuingRequired,
287 	Signal*& _signalToQueue)
288 {
289 	_signalToQueue = NULL;
290 
291 	// If interrupts are disabled, we can't allocate a signal.
292 	if (!are_interrupts_enabled())
293 		return queuingRequired ? B_BAD_VALUE : B_OK;
294 
295 	// increment the queued signals counter
296 	QueuedSignalsCounter* counter
297 		= thread_get_current_thread()->team->QueuedSignalsCounter();
298 	if (!counter->Increment())
299 		return queuingRequired ? EAGAIN : B_OK;
300 
301 	// allocate the signal
302 	Signal* signalToQueue = new(std::nothrow) Signal(signal);
303 	if (signalToQueue == NULL) {
304 		counter->Decrement();
305 		return queuingRequired ? B_NO_MEMORY : B_OK;
306 	}
307 
308 	signalToQueue->fCounter = counter;
309 
310 	_signalToQueue = signalToQueue;
311 	return B_OK;
312 }
313 
314 void
315 Signal::SetTo(uint32 number)
316 {
317 	Team* team = thread_get_current_thread()->team;
318 
319 	fNumber = number;
320 	fSignalCode = SI_USER;
321 	fErrorCode = 0;
322 	fSendingProcess = team->id;
323 	fSendingUser = team->effective_uid;
324 	fStatus = 0;
325 	fPollBand = 0;
326 	fAddress = NULL;
327 	fUserValue.sival_ptr = NULL;
328 }
329 
330 
331 int32
332 Signal::Priority() const
333 {
334 	return kSignalInfos[fNumber].priority;
335 }
336 
337 
338 void
339 Signal::Handled()
340 {
341 	ReleaseReference();
342 }
343 
344 
345 void
346 Signal::LastReferenceReleased()
347 {
348 	if (are_interrupts_enabled())
349 		delete this;
350 	else
351 		deferred_delete(this);
352 }
353 
354 
355 // #pragma mark - PendingSignals
356 
357 
358 PendingSignals::PendingSignals()
359 	:
360 	fQueuedSignalsMask(0),
361 	fUnqueuedSignalsMask(0)
362 {
363 }
364 
365 
366 PendingSignals::~PendingSignals()
367 {
368 	Clear();
369 }
370 
371 
372 /*!	Of the signals in \a nonBlocked, returns the priority of the one with
373 	the highest priority.
374 	\param nonBlocked The mask with the non-blocked signals.
375 	\return The priority of the highest priority non-blocked signal, or, if all
376 		signals are blocked, \c -1.
377 */
378 int32
379 PendingSignals::HighestSignalPriority(sigset_t nonBlocked) const
380 {
381 	Signal* queuedSignal;
382 	int32 unqueuedSignal;
383 	return _GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal);
384 }
385 
386 
387 void
388 PendingSignals::Clear()
389 {
390 	// release references of all queued signals
391 	while (Signal* signal = fQueuedSignals.RemoveHead())
392 		signal->Handled();
393 
394 	fQueuedSignalsMask = 0;
395 	fUnqueuedSignalsMask = 0;
396 }
397 
398 
399 /*!	Adds a signal.
400 	Takes over the reference to the signal from the caller.
401 */
402 void
403 PendingSignals::AddSignal(Signal* signal)
404 {
405 	// queue according to priority
406 	int32 priority = signal->Priority();
407 	Signal* otherSignal = NULL;
408 	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
409 			(otherSignal = it.Next()) != NULL;) {
410 		if (priority > otherSignal->Priority())
411 			break;
412 	}
413 
414 	fQueuedSignals.InsertBefore(otherSignal, signal);
415 	signal->SetPending(true);
416 
417 	fQueuedSignalsMask |= SIGNAL_TO_MASK(signal->Number());
418 }
419 
420 
421 void
422 PendingSignals::RemoveSignal(Signal* signal)
423 {
424 	signal->SetPending(false);
425 	fQueuedSignals.Remove(signal);
426 	_UpdateQueuedSignalMask();
427 }
428 
429 
430 void
431 PendingSignals::RemoveSignals(sigset_t mask)
432 {
433 	// remove from queued signals
434 	if ((fQueuedSignalsMask & mask) != 0) {
435 		for (SignalList::Iterator it = fQueuedSignals.GetIterator();
436 				Signal* signal = it.Next();) {
437 			// remove signal, if in mask
438 			if ((SIGNAL_TO_MASK(signal->Number()) & mask) != 0) {
439 				it.Remove();
440 				signal->SetPending(false);
441 				signal->Handled();
442 			}
443 		}
444 
445 		fQueuedSignalsMask &= ~mask;
446 	}
447 
448 	// remove from unqueued signals
449 	fUnqueuedSignalsMask &= ~mask;
450 }
451 
452 
453 /*!	Removes and returns a signal in \a nonBlocked that has the highest priority.
454 	The caller gets a reference to the returned signal, if any.
455 	\param nonBlocked The mask of non-blocked signals.
456 	\param buffer If the signal is not queued this buffer is returned. In this
457 		case the method acquires a reference to \a buffer, so that the caller
458 		gets a reference also in this case.
459 	\return The removed signal or \c NULL, if all signals are blocked.
460 */
461 Signal*
462 PendingSignals::DequeueSignal(sigset_t nonBlocked, Signal& buffer)
463 {
464 	// find the signal with the highest priority
465 	Signal* queuedSignal;
466 	int32 unqueuedSignal;
467 	if (_GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal) < 0)
468 		return NULL;
469 
470 	// if it is a queued signal, dequeue it
471 	if (queuedSignal != NULL) {
472 		fQueuedSignals.Remove(queuedSignal);
473 		queuedSignal->SetPending(false);
474 		_UpdateQueuedSignalMask();
475 		return queuedSignal;
476 	}
477 
478 	// it is unqueued -- remove from mask
479 	fUnqueuedSignalsMask &= ~SIGNAL_TO_MASK(unqueuedSignal);
480 
481 	// init buffer
482 	buffer.SetTo(unqueuedSignal);
483 	buffer.AcquireReference();
484 	return &buffer;
485 }
486 
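// A hedged usage sketch: signals are selected by the priority column of
// kSignalInfos, and the caller always ends up owning one reference, whether a
// queued signal or the stack buffer is returned. The unqueued AddSignal()
// overload used here is the one Thread::AddPendingSignal() forwards to
// elsewhere in this file; the function name is illustrative.
static void
example_pending_signals_usage(PendingSignals& pending)
{
	pending.AddSignal(SIGUSR1);
	pending.AddSignal(SIGKILL);
		// unqueued signals -- only their mask bits are recorded

	Signal buffer;
	Signal* signal = pending.DequeueSignal(~(sigset_t)0, buffer);
		// picks SIGKILL first (priority 100 vs. 0); since neither signal was
		// queued, &buffer is returned carrying the number and a reference
	signal->Handled();
		// releases the reference acquired on the buffer by DequeueSignal()
}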
487 
488 /*!	Of the signals in \a nonBlocked, returns the priority of the one with
489 	the highest priority.
490 	\param nonBlocked The mask with the non-blocked signals.
491 	\param _queuedSignal If the found signal is a queued signal, the variable
492 		will be set to that signal, otherwise to \c NULL.
493 	\param _unqueuedSignal If the found signal is an unqueued signal, the
494 		variable is set to that signal's number, otherwise to \c -1.
495 	\return The priority of the highest priority non-blocked signal, or, if all
496 		signals are blocked, \c -1.
497 */
498 int32
499 PendingSignals::_GetHighestPrioritySignal(sigset_t nonBlocked,
500 	Signal*& _queuedSignal, int32& _unqueuedSignal) const
501 {
502 	// check queued signals
503 	Signal* queuedSignal = NULL;
504 	int32 queuedPriority = -1;
505 
506 	if ((fQueuedSignalsMask & nonBlocked) != 0) {
507 		for (SignalList::ConstIterator it = fQueuedSignals.GetIterator();
508 				Signal* signal = it.Next();) {
509 			if ((SIGNAL_TO_MASK(signal->Number()) & nonBlocked) != 0) {
510 				queuedPriority = signal->Priority();
511 				queuedSignal = signal;
512 				break;
513 			}
514 		}
515 	}
516 
517 	// check unqueued signals
518 	int32 unqueuedSignal = -1;
519 	int32 unqueuedPriority = -1;
520 
521 	sigset_t unqueuedSignals = fUnqueuedSignalsMask & nonBlocked;
522 	if (unqueuedSignals != 0) {
523 		int32 signal = 1;
524 		while (unqueuedSignals != 0) {
525 			sigset_t mask = SIGNAL_TO_MASK(signal);
526 			if ((unqueuedSignals & mask) != 0) {
527 				int32 priority = kSignalInfos[signal].priority;
528 				if (priority > unqueuedPriority) {
529 					unqueuedSignal = signal;
530 					unqueuedPriority = priority;
531 				}
532 				unqueuedSignals &= ~mask;
533 			}
534 
535 			signal++;
536 		}
537 	}
538 
539 	// Return found queued or unqueued signal, whichever has the higher
540 	// priority.
541 	if (queuedPriority >= unqueuedPriority) {
542 		_queuedSignal = queuedSignal;
543 		_unqueuedSignal = -1;
544 		return queuedPriority;
545 	}
546 
547 	_queuedSignal = NULL;
548 	_unqueuedSignal = unqueuedSignal;
549 	return unqueuedPriority;
550 }
551 
552 
553 void
554 PendingSignals::_UpdateQueuedSignalMask()
555 {
556 	sigset_t mask = 0;
557 	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
558 			Signal* signal = it.Next();) {
559 		mask |= SIGNAL_TO_MASK(signal->Number());
560 	}
561 
562 	fQueuedSignalsMask = mask;
563 }
564 
565 
566 // #pragma mark - signal tracing
567 
568 
569 #if SIGNAL_TRACING
570 
571 namespace SignalTracing {
572 
573 
574 class HandleSignal : public AbstractTraceEntry {
575 	public:
576 		HandleSignal(uint32 signal)
577 			:
578 			fSignal(signal)
579 		{
580 			Initialized();
581 		}
582 
583 		virtual void AddDump(TraceOutput& out)
584 		{
585 			out.Print("signal handle:  %" B_PRIu32 " (%s)" , fSignal,
586 				signal_name(fSignal));
587 		}
588 
589 	private:
590 		uint32		fSignal;
591 };
592 
593 
594 class ExecuteSignalHandler : public AbstractTraceEntry {
595 	public:
596 		ExecuteSignalHandler(uint32 signal, struct sigaction* handler)
597 			:
598 			fSignal(signal),
599 			fHandler((void*)handler->sa_handler)
600 		{
601 			Initialized();
602 		}
603 
604 		virtual void AddDump(TraceOutput& out)
605 		{
606 			out.Print("signal exec handler: signal: %" B_PRIu32 " (%s), "
607 				"handler: %p", fSignal, signal_name(fSignal), fHandler);
608 		}
609 
610 	private:
611 		uint32	fSignal;
612 		void*	fHandler;
613 };
614 
615 
616 class SendSignal : public AbstractTraceEntry {
617 	public:
618 		SendSignal(pid_t target, uint32 signal, uint32 flags)
619 			:
620 			fTarget(target),
621 			fSignal(signal),
622 			fFlags(flags)
623 		{
624 			Initialized();
625 		}
626 
627 		virtual void AddDump(TraceOutput& out)
628 		{
629 			out.Print("signal send: target: %" B_PRId32 ", signal: %" B_PRIu32
630 				" (%s), flags: %#" B_PRIx32, fTarget, fSignal,
631 				signal_name(fSignal), fFlags);
632 		}
633 
634 	private:
635 		pid_t	fTarget;
636 		uint32	fSignal;
637 		uint32	fFlags;
638 };
639 
640 
641 class SigAction : public AbstractTraceEntry {
642 	public:
643 		SigAction(uint32 signal, const struct sigaction* act)
644 			:
645 			fSignal(signal),
646 			fAction(*act)
647 		{
648 			Initialized();
649 		}
650 
651 		virtual void AddDump(TraceOutput& out)
652 		{
653 			out.Print("signal action: signal: %" B_PRIu32 " (%s), "
654 				"action: {handler: %p, flags: %#x, mask: %#" B_PRIx64 "}",
655 				fSignal, signal_name(fSignal), fAction.sa_handler,
656 				fAction.sa_flags, (uint64)fAction.sa_mask);
657 		}
658 
659 	private:
660 		uint32				fSignal;
661 		struct sigaction	fAction;
662 };
663 
664 
665 class SigProcMask : public AbstractTraceEntry {
666 	public:
667 		SigProcMask(int how, sigset_t mask)
668 			:
669 			fHow(how),
670 			fMask(mask),
671 			fOldMask(thread_get_current_thread()->sig_block_mask)
672 		{
673 			Initialized();
674 		}
675 
676 		virtual void AddDump(TraceOutput& out)
677 		{
678 			const char* how = "invalid";
679 			switch (fHow) {
680 				case SIG_BLOCK:
681 					how = "block";
682 					break;
683 				case SIG_UNBLOCK:
684 					how = "unblock";
685 					break;
686 				case SIG_SETMASK:
687 					how = "set";
688 					break;
689 			}
690 
691 			out.Print("signal proc mask: %s 0x%llx, old mask: 0x%llx", how,
692 				(long long)fMask, (long long)fOldMask);
693 		}
694 
695 	private:
696 		int			fHow;
697 		sigset_t	fMask;
698 		sigset_t	fOldMask;
699 };
700 
701 
702 class SigSuspend : public AbstractTraceEntry {
703 	public:
704 		SigSuspend(sigset_t mask)
705 			:
706 			fMask(mask),
707 			fOldMask(thread_get_current_thread()->sig_block_mask)
708 		{
709 			Initialized();
710 		}
711 
712 		virtual void AddDump(TraceOutput& out)
713 		{
714 			out.Print("signal suspend: %#llx, old mask: %#llx",
715 				(long long)fMask, (long long)fOldMask);
716 		}
717 
718 	private:
719 		sigset_t	fMask;
720 		sigset_t	fOldMask;
721 };
722 
723 
724 class SigSuspendDone : public AbstractTraceEntry {
725 	public:
726 		SigSuspendDone()
727 			:
728 			fSignals(thread_get_current_thread()->ThreadPendingSignals())
729 		{
730 			Initialized();
731 		}
732 
733 		virtual void AddDump(TraceOutput& out)
734 		{
735 			out.Print("signal suspend done: %#" B_PRIx32, fSignals);
736 		}
737 
738 	private:
739 		uint32		fSignals;
740 };
741 
742 }	// namespace SignalTracing
743 
744 #	define T(x)	new(std::nothrow) SignalTracing::x
745 
746 #else
747 #	define T(x)
748 #endif	// SIGNAL_TRACING
749 
750 
751 // #pragma mark -
752 
753 
754 /*!	Updates the given thread's Thread::flags field according to what signals are
755 	pending.
756 	The caller must hold \c team->signal_lock.
757 */
758 static void
759 update_thread_signals_flag(Thread* thread)
760 {
761 	sigset_t mask = ~thread->sig_block_mask;
762 	if ((thread->AllPendingSignals() & mask) != 0)
763 		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
764 	else
765 		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
766 }
767 
768 
769 /*!	Updates the current thread's Thread::flags field according to what signals
770 	are pending.
771 	The caller must hold \c team->signal_lock.
772 */
773 static void
774 update_current_thread_signals_flag()
775 {
776 	update_thread_signals_flag(thread_get_current_thread());
777 }
778 
779 
780 /*!	Updates all of the given team's threads' Thread::flags fields according to
781 	what signals are pending.
782 	The caller must hold \c signal_lock.
783 */
784 static void
785 update_team_threads_signal_flag(Team* team)
786 {
787 	for (Thread* thread = team->thread_list; thread != NULL;
788 			thread = thread->team_next) {
789 		update_thread_signals_flag(thread);
790 	}
791 }
792 
793 
794 /*!	Notifies the user debugger about a signal to be handled.
795 
796 	The caller must not hold any locks.
797 
798 	\param thread The current thread.
799 	\param signal The signal to be handled.
800 	\param handler The installed signal handler for the signal.
801 	\param deadly Indicates whether the signal is deadly.
802 	\return \c true, if the signal shall be handled, \c false, if it shall be
803 		ignored.
804 */
805 static bool
806 notify_debugger(Thread* thread, Signal* signal, struct sigaction& handler,
807 	bool deadly)
808 {
809 	uint64 signalMask = SIGNAL_TO_MASK(signal->Number());
810 
811 	// first check the ignore signal masks the debugger specified for the thread
812 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
813 
814 	if ((thread->debug_info.ignore_signals_once & signalMask) != 0) {
815 		thread->debug_info.ignore_signals_once &= ~signalMask;
816 		return true;
817 	}
818 
819 	if ((thread->debug_info.ignore_signals & signalMask) != 0)
820 		return true;
821 
822 	threadDebugInfoLocker.Unlock();
823 
824 	siginfo_t info;
825 	info.si_signo = signal->Number();
826 	info.si_code = signal->SignalCode();
827 	info.si_errno = signal->ErrorCode();
828 	info.si_pid = signal->SendingProcess();
829 	info.si_uid = signal->SendingUser();
830 	info.si_addr = signal->Address();
831 	info.si_status = signal->Status();
832 	info.si_band = signal->PollBand();
833 	info.si_value = signal->UserValue();
834 
835 	// deliver the event
836 	return user_debug_handle_signal(signal->Number(), &handler, &info, deadly);
837 }
838 
839 
840 /*!	Removes and returns a signal with the highest priority in \a nonBlocked that
841 	is pending in the given thread or its team.
842 	After dequeuing the signal, the Thread::flags fields of the affected
843 	threads are updated.
844 	The caller gets a reference to the returned signal, if any.
845 	The caller must hold \c team->signal_lock.
846 	\param thread The thread.
847 	\param nonBlocked The mask of non-blocked signals.
848 	\param buffer If the signal is not queued this buffer is returned. In this
849 		case the method acquires a reference to \a buffer, so that the caller
850 		gets a reference also in this case.
851 	\return The removed signal or \c NULL, if all signals are blocked.
852 */
853 static Signal*
854 dequeue_thread_or_team_signal(Thread* thread, sigset_t nonBlocked,
855 	Signal& buffer)
856 {
857 	Team* team = thread->team;
858 	Signal* signal;
859 	if (team->HighestPendingSignalPriority(nonBlocked)
860 			> thread->HighestPendingSignalPriority(nonBlocked)) {
861 		signal = team->DequeuePendingSignal(nonBlocked, buffer);
862 		update_team_threads_signal_flag(team);
863 	} else {
864 		signal = thread->DequeuePendingSignal(nonBlocked, buffer);
865 		update_thread_signals_flag(thread);
866 	}
867 
868 	return signal;
869 }
870 
871 
872 static status_t
873 setup_signal_frame(Thread* thread, struct sigaction* action, Signal* signal,
874 	sigset_t signalMask)
875 {
876 	// prepare the data we need to copy onto the user stack
877 	signal_frame_data frameData;
878 
879 	// signal info
880 	frameData.info.si_signo = signal->Number();
881 	frameData.info.si_code = signal->SignalCode();
882 	frameData.info.si_errno = signal->ErrorCode();
883 	frameData.info.si_pid = signal->SendingProcess();
884 	frameData.info.si_uid = signal->SendingUser();
885 	frameData.info.si_addr = signal->Address();
886 	frameData.info.si_status = signal->Status();
887 	frameData.info.si_band = signal->PollBand();
888 	frameData.info.si_value = signal->UserValue();
889 
890 	// context
891 	frameData.context.uc_link = thread->user_signal_context;
892 	frameData.context.uc_sigmask = signalMask;
893 	// uc_stack and uc_mcontext are filled in by the architecture specific code.
894 
895 	// user data
896 	frameData.user_data = action->sa_userdata;
897 
898 	// handler function
899 	frameData.siginfo_handler = (action->sa_flags & SA_SIGINFO) != 0;
900 	frameData.handler = frameData.siginfo_handler
901 		? (void*)action->sa_sigaction : (void*)action->sa_handler;
902 	// thread flags -- save and clear the thread's syscall restart related
903 	// flags
904 	// flags
905 	frameData.thread_flags = atomic_and(&thread->flags,
906 		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
907 
908 	// syscall restart related fields
909 	memcpy(frameData.syscall_restart_parameters,
910 		thread->syscall_restart.parameters,
911 		sizeof(frameData.syscall_restart_parameters));
912 
913 	// commpage address
914 	frameData.commpage_address = thread->team->commpage_address;
915 
916 	// syscall_restart_return_value is filled in by the architecture specific
917 	// code.
918 
919 	return arch_setup_signal_frame(thread, action, &frameData);
920 }
921 
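// For reference, a hedged sketch of the userland code this frame ultimately
// serves: a plain POSIX SA_SIGINFO handler installed via sigaction(). The
// names are illustrative, and the snippet is userland-only, hence not
// compiled here.
#if 0
static void
example_handler(int signal, siginfo_t* info, void* context)
{
	// info carries the fields filled into frameData.info above; context
	// points to the ucontext_t whose uc_sigmask was set from signalMask
}

static void
example_install_handler()
{
	struct sigaction action;
	memset(&action, 0, sizeof(action));
	action.sa_sigaction = example_handler;
	action.sa_flags = SA_SIGINFO;
	sigemptyset(&action.sa_mask);
	sigaction(SIGUSR1, &action, NULL);
}
#endif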
922 
923 /*! Actually handles pending signals -- i.e. the thread will exit, a custom
924 	signal handler is prepared, or whatever the signal demands.
925 	The function does not return when a deadly signal is encountered, and it
926 	suspends the thread indefinitely when a stop signal is
927 	encountered.
928 	Interrupts must be enabled.
929 	\param thread The current thread.
930 */
931 void
932 handle_signals(Thread* thread)
933 {
934 	Team* team = thread->team;
935 
936 	TeamLocker teamLocker(team);
937 	InterruptsSpinLocker locker(thread->team->signal_lock);
938 
939 	// If userland requested to defer signals, check now whether that is
940 	// possible.
941 	sigset_t nonBlockedMask = ~thread->sig_block_mask;
942 	sigset_t signalMask = thread->AllPendingSignals() & nonBlockedMask;
943 
944 	set_ac();
945 	if (thread->user_thread->defer_signals > 0
946 		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0
947 		&& thread->sigsuspend_original_unblocked_mask == 0) {
948 		thread->user_thread->pending_signals = signalMask;
949 		clear_ac();
950 		return;
951 	}
952 
953 	thread->user_thread->pending_signals = 0;
954 	clear_ac();
955 
956 	// determine syscall restart behavior
957 	uint32 restartFlags = atomic_and(&thread->flags,
958 		~THREAD_FLAGS_DONT_RESTART_SYSCALL);
959 	bool alwaysRestart
960 		= (restartFlags & THREAD_FLAGS_ALWAYS_RESTART_SYSCALL) != 0;
961 	bool restart = alwaysRestart
962 		|| (restartFlags & THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;
963 
964 	// Loop until we've handled all signals.
965 	bool initialIteration = true;
966 	while (true) {
967 		if (initialIteration) {
968 			initialIteration = false;
969 		} else {
970 			teamLocker.Lock();
971 			locker.Lock();
972 
973 			signalMask = thread->AllPendingSignals() & nonBlockedMask;
974 		}
975 
976 		// Unless SIGKILL[THR] are pending, check whether the thread shall stop
977 		// for a core dump or for debugging.
978 		if ((signalMask & KILL_SIGNALS) == 0) {
979 			if ((atomic_get(&thread->flags) & THREAD_FLAGS_TRAP_FOR_CORE_DUMP)
980 					!= 0) {
981 				locker.Unlock();
982 				teamLocker.Unlock();
983 
984 				core_dump_trap_thread();
985 				continue;
986 			}
987 
988 			if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP)
989 					!= 0) {
990 				locker.Unlock();
991 				teamLocker.Unlock();
992 
993 				user_debug_stop_thread();
994 				continue;
995 			}
996 		}
997 
998 		// We're done if there aren't any pending signals anymore.
999 		if ((signalMask & nonBlockedMask) == 0)
1000 			break;
1001 
1002 		// get pending non-blocked thread or team signal with the highest
1003 		// priority
1004 		Signal stackSignal;
1005 		Signal* signal = dequeue_thread_or_team_signal(thread, nonBlockedMask,
1006 			stackSignal);
1007 		ASSERT(signal != NULL);
1008 		SignalHandledCaller signalHandledCaller(signal);
1009 
1010 		locker.Unlock();
1011 
1012 		// get the action for the signal
1013 		struct sigaction handler;
1014 		if (signal->Number() <= MAX_SIGNAL_NUMBER) {
1015 			handler = team->SignalActionFor(signal->Number());
1016 		} else {
1017 			handler.sa_handler = SIG_DFL;
1018 			handler.sa_flags = 0;
1019 		}
1020 
1021 		if ((handler.sa_flags & SA_ONESHOT) != 0
1022 			&& handler.sa_handler != SIG_IGN && handler.sa_handler != SIG_DFL) {
1023 			team->SignalActionFor(signal->Number()).sa_handler = SIG_DFL;
1024 		}
1025 
1026 		T(HandleSignal(signal->Number()));
1027 
1028 		teamLocker.Unlock();
1029 
1030 		// debug the signal, if a debugger is installed and the signal debugging
1031 		// flag is set
1032 		bool debugSignal = (~atomic_get(&team->debug_info.flags)
1033 				& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS))
1034 			== 0;
1035 
1036 		// handle the signal
1037 		TRACE(("Thread %" B_PRId32 " received signal %s\n", thread->id,
1038 			kSignalInfos[signal->Number()].name));
1039 
1040 		if (handler.sa_handler == SIG_IGN) {
1041 			// signal is to be ignored
1042 			// TODO: apply zombie cleaning on SIGCHLD
1043 
1044 			// notify the debugger
1045 			if (debugSignal)
1046 				notify_debugger(thread, signal, handler, false);
1047 			continue;
1048 		} else if (handler.sa_handler == SIG_DFL) {
1049 			// default signal behaviour
1050 
1051 			// realtime signals are ignored by default
1052 			if (signal->Number() >= SIGNAL_REALTIME_MIN
1053 				&& signal->Number() <= SIGNAL_REALTIME_MAX) {
1054 				// notify the debugger
1055 				if (debugSignal)
1056 					notify_debugger(thread, signal, handler, false);
1057 				continue;
1058 			}
1059 
1060 			bool killTeam = false;
1061 			switch (signal->Number()) {
1062 				case SIGCHLD:
1063 				case SIGWINCH:
1064 				case SIGURG:
1065 					// notify the debugger
1066 					if (debugSignal)
1067 						notify_debugger(thread, signal, handler, false);
1068 					continue;
1069 
1070 				case SIGNAL_DEBUG_THREAD:
1071 					// ignore -- used together with B_THREAD_DEBUG_STOP, which
1072 					// is handled above
1073 					continue;
1074 
1075 				case SIGNAL_CANCEL_THREAD:
1076 					// set up the signal handler
1077 					handler.sa_handler = thread->cancel_function;
1078 					handler.sa_flags = 0;
1079 					handler.sa_mask = 0;
1080 					handler.sa_userdata = NULL;
1081 
1082 					restart = false;
1083 						// we always want to interrupt
1084 					break;
1085 
1086 				case SIGNAL_CONTINUE_THREAD:
1087 					// prevent syscall restart, but otherwise ignore
1088 					restart = false;
1089 					atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1090 					continue;
1091 
1092 				case SIGCONT:
1093 					// notify the debugger
1094 					if (debugSignal
1095 						&& !notify_debugger(thread, signal, handler, false))
1096 						continue;
1097 
1098 					// notify threads waiting for team state changes
1099 					if (thread == team->main_thread) {
1100 						team->LockTeamAndParent(false);
1101 
1102 						team_set_job_control_state(team,
1103 							JOB_CONTROL_STATE_CONTINUED, signal);
1104 
1105 						team->UnlockTeamAndParent();
1106 
1107 						// The standard states that the system *may* send a
1108 						// SIGCHLD when a child is continued. I haven't found
1109 						// a good reason why we would want to, though.
1110 					}
1111 					continue;
1112 
1113 				case SIGSTOP:
1114 				case SIGTSTP:
1115 				case SIGTTIN:
1116 				case SIGTTOU:
1117 				{
1118 					// notify the debugger
1119 					if (debugSignal
1120 						&& !notify_debugger(thread, signal, handler, false))
1121 						continue;
1122 
1123 					// The terminal-sent stop signals are allowed to stop the
1124 					// process only if it doesn't belong to an orphaned process
1125 					// group. Otherwise the signal must be discarded.
1126 					team->LockProcessGroup();
1127 					AutoLocker<ProcessGroup> groupLocker(team->group, true);
1128 					if (signal->Number() != SIGSTOP
1129 						&& team->group->IsOrphaned()) {
1130 						continue;
1131 					}
1132 
1133 					// notify threads waiting for team state changes
1134 					if (thread == team->main_thread) {
1135 						team->LockTeamAndParent(false);
1136 
1137 						team_set_job_control_state(team,
1138 							JOB_CONTROL_STATE_STOPPED, signal);
1139 
1140 						// send a SIGCHLD to the parent (unless it has
1141 						// SA_NOCLDSTOP set)
1142 						Team* parentTeam = team->parent;
1143 
1144 						struct sigaction& parentHandler
1145 							= parentTeam->SignalActionFor(SIGCHLD);
1146 						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
1147 							Signal childSignal(SIGCHLD, CLD_STOPPED, B_OK,
1148 								team->id);
1149 							childSignal.SetStatus(signal->Number());
1150 							childSignal.SetSendingUser(signal->SendingUser());
1151 							send_signal_to_team(parentTeam, childSignal, 0);
1152 						}
1153 
1154 						team->UnlockTeamAndParent();
1155 					}
1156 
1157 					groupLocker.Unlock();
1158 
1159 					// Suspend the thread, unless there's already a signal to
1160 					// continue or kill pending.
1161 					locker.Lock();
1162 					bool resume = (thread->AllPendingSignals()
1163 								& (CONTINUE_SIGNALS | KILL_SIGNALS)) != 0;
1164 					locker.Unlock();
1165 
1166 					if (!resume)
1167 						thread_suspend();
1168 
1169 					continue;
1170 				}
1171 
1172 				case SIGSEGV:
1173 				case SIGBUS:
1174 				case SIGFPE:
1175 				case SIGILL:
1176 				case SIGTRAP:
1177 				case SIGABRT:
1178 				case SIGKILL:
1179 				case SIGQUIT:
1180 				case SIGPOLL:
1181 				case SIGPROF:
1182 				case SIGSYS:
1183 				case SIGVTALRM:
1184 				case SIGXCPU:
1185 				case SIGXFSZ:
1186 				default:
1187 					TRACE(("Shutting down team %" B_PRId32 " due to signal %"
1188 						B_PRIu32 " received in thread %" B_PRId32 "\n",
1189 						team->id, signal->Number(), thread->id));
1190 
1191 					// This signal kills the team regardless of which thread
1192 					// received it.
1193 					killTeam = true;
1194 
1195 					// fall through
1196 				case SIGKILLTHR:
1197 					// notify the debugger
1198 					if (debugSignal && signal->Number() != SIGKILL
1199 						&& signal->Number() != SIGKILLTHR
1200 						&& !notify_debugger(thread, signal, handler, true)) {
1201 						continue;
1202 					}
1203 
1204 					if (killTeam || thread == team->main_thread) {
1205 						// The signal is terminal for the team or the thread is
1206 						// the main thread. In either case the team is going
1207 						// down. Set its exit status, if that didn't happen yet.
1208 						teamLocker.Lock();
1209 
1210 						if (!team->exit.initialized) {
1211 							team->exit.reason = CLD_KILLED;
1212 							team->exit.signal = signal->Number();
1213 							team->exit.signaling_user = signal->SendingUser();
1214 							team->exit.status = 0;
1215 							team->exit.initialized = true;
1216 						}
1217 
1218 						teamLocker.Unlock();
1219 
1220 						// If this is not the main thread, send the main thread
1221 						// a SIGKILLTHR so that the team terminates.
1222 						if (thread != team->main_thread) {
1223 							Signal childSignal(SIGKILLTHR, SI_USER, B_OK,
1224 								team->id);
1225 							send_signal_to_thread_id(team->id, childSignal, 0);
1226 						}
1227 					}
1228 
1229 					// explicitly get rid of the signal reference, since
1230 					// thread_exit() won't return
1231 					signalHandledCaller.Done();
1232 
1233 					thread_exit();
1234 						// won't return
1235 			}
1236 		}
1237 
1238 		// User defined signal handler
1239 
1240 		// notify the debugger
1241 		if (debugSignal && !notify_debugger(thread, signal, handler, false))
1242 			continue;
1243 
1244 		if (!restart
1245 				|| (!alwaysRestart && (handler.sa_flags & SA_RESTART) == 0)) {
1246 			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1247 		}
1248 
1249 		T(ExecuteSignalHandler(signal->Number(), &handler));
1250 
1251 		TRACE(("### Setting up custom signal handler frame...\n"));
1252 
1253 		// save the old block mask -- we may need to adjust it for the handler
1254 		locker.Lock();
1255 
1256 		sigset_t oldBlockMask = thread->sigsuspend_original_unblocked_mask != 0
1257 			? ~thread->sigsuspend_original_unblocked_mask
1258 			: thread->sig_block_mask;
1259 
1260 		// Update the block mask while the signal handler is running -- it
1261 		// will be automatically restored when the signal frame is left.
1262 		thread->sig_block_mask |= handler.sa_mask & BLOCKABLE_SIGNALS;
1263 
1264 		if ((handler.sa_flags & SA_NOMASK) == 0) {
1265 			thread->sig_block_mask
1266 				|= SIGNAL_TO_MASK(signal->Number()) & BLOCKABLE_SIGNALS;
1267 		}
1268 
1269 		update_current_thread_signals_flag();
1270 
1271 		locker.Unlock();
1272 
1273 		setup_signal_frame(thread, &handler, signal, oldBlockMask);
1274 
1275 		// Reset sigsuspend_original_unblocked_mask. It would have been set by
1276 		// sigsuspend_internal(). In that case, above we set oldBlockMask
1277 		// accordingly so that after the handler returns the thread's signal
1278 		// mask is reset.
1279 		thread->sigsuspend_original_unblocked_mask = 0;
1280 
1281 		return;
1282 	}
1283 
1284 	// We have not handled any signal (or only ignored ones).
1285 
1286 	// If sigsuspend_original_unblocked_mask is non-zero, we came from a
1287 	// sigsuspend_internal(). Not having handled any signal, we should restart
1288 	// the syscall.
1289 	if (thread->sigsuspend_original_unblocked_mask != 0) {
1290 		restart = true;
1291 		atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
1292 	} else if (!restart) {
1293 		// clear syscall restart thread flag, if we're not supposed to restart
1294 		// the syscall
1295 		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1296 	}
1297 }
1298 
1299 
1300 /*!	Checks whether the given signal is blocked for the given team (i.e. all of
1301 	its threads).
1302 	The caller must hold the team's lock and \c signal_lock.
1303 */
1304 bool
1305 is_team_signal_blocked(Team* team, int signal)
1306 {
1307 	sigset_t mask = SIGNAL_TO_MASK(signal);
1308 
1309 	for (Thread* thread = team->thread_list; thread != NULL;
1310 			thread = thread->team_next) {
1311 		if ((thread->sig_block_mask & mask) == 0)
1312 			return false;
1313 	}
1314 
1315 	return true;
1316 }
1317 
1318 
1319 /*!	Gets (guesses) the current thread's currently used stack from the given
1320 	stack pointer.
1321 	Fills in \a stack with either the signal stack or the thread's user stack.
1322 	\param address A stack pointer address to be used to determine the used
1323 		stack.
1324 	\param stack Filled in by the function.
1325 */
1326 void
1327 signal_get_user_stack(addr_t address, stack_t* stack)
1328 {
1329 	// If a signal stack is enabled for the stack and the address is within it,
1330 	// return the signal stack. In all other cases return the thread's user
1331 	// stack, even if the address doesn't lie within it.
1332 	Thread* thread = thread_get_current_thread();
1333 	if (thread->signal_stack_enabled && address >= thread->signal_stack_base
1334 		&& address < thread->signal_stack_base + thread->signal_stack_size) {
1335 		stack->ss_sp = (void*)thread->signal_stack_base;
1336 		stack->ss_size = thread->signal_stack_size;
1337 	} else {
1338 		stack->ss_sp = (void*)thread->user_stack_base;
1339 		stack->ss_size = thread->user_stack_size;
1340 	}
1341 
1342 	stack->ss_flags = 0;
1343 }
1344 
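// A hedged sketch of one way the architecture specific code (e.g.
// arch_setup_signal_frame()) can use this: decide whether the interrupted
// code was already running on the alternate signal stack. The helper name is
// hypothetical.
static bool
example_on_signal_stack(addr_t stackPointer)
{
	Thread* thread = thread_get_current_thread();

	stack_t stack;
	signal_get_user_stack(stackPointer, &stack);

	return thread->signal_stack_enabled
		&& (addr_t)stack.ss_sp == thread->signal_stack_base;
}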
1345 
1346 /*!	Checks whether any non-blocked signal is pending for the current thread.
1347 	The caller must hold \c team->signal_lock.
1348 	\param thread The current thread.
1349 */
1350 static bool
1351 has_signals_pending(Thread* thread)
1352 {
1353 	return (thread->AllPendingSignals() & ~thread->sig_block_mask) != 0;
1354 }
1355 
1356 
1357 /*!	Checks whether the current user has permission to send a signal to the given
1358 	target team.
1359 
1360 	\param team The target team.
1361 */
1362 static bool
1363 has_permission_to_signal(Team* team)
1364 {
1365 	// get the current user
1366 	uid_t currentUser = thread_get_current_thread()->team->effective_uid;
1367 
1368 	// root is omnipotent -- in the other cases the current user must match the
1369 	// target team's
1370 	return currentUser == 0 || currentUser == team->effective_uid;
1371 }
1372 
1373 
1374 /*!	Delivers a signal to the \a thread, but doesn't handle the signal -- it just
1375 	makes sure the thread gets the signal, i.e. unblocks it if needed.
1376 
1377 	The caller must hold \c team->signal_lock.
1378 
1379 	\param thread The thread the signal shall be delivered to.
1380 	\param signalNumber The number of the signal to be delivered. If \c 0, no
1381 		actual signal will be delivered. Only delivery checks will be performed.
1382 	\param signal If non-NULL the signal to be queued (has number
1383 		\a signalNumber in this case). The caller transfers an object reference
1384 		to this function. If \c NULL an unqueued signal will be delivered to the
1385 		thread.
1386 	\param flags A bitwise combination of any number of the following:
1387 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1388 			target thread the signal.
1389 	\return \c B_OK, when the signal was delivered successfully, another error
1390 		code otherwise.
1391 */
1392 status_t
1393 send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
1394 	Signal* signal, uint32 flags)
1395 {
1396 	ASSERT(signal == NULL || signalNumber == signal->Number());
1397 
1398 	T(SendSignal(thread->id, signalNumber, flags));
1399 
1400 	// The caller transferred a reference to the signal to us.
1401 	BReference<Signal> signalReference(signal, true);
1402 
1403 	if ((flags & B_CHECK_PERMISSION) != 0) {
1404 		if (!has_permission_to_signal(thread->team))
1405 			return EPERM;
1406 	}
1407 
1408 	if (signalNumber == 0)
1409 		return B_OK;
1410 
1411 	if (thread->team == team_get_kernel_team()) {
1412 		// Signals to kernel threads will only wake them up
1413 		thread_continue(thread);
1414 		return B_OK;
1415 	}
1416 
1417 	if (signal != NULL)
1418 		thread->AddPendingSignal(signal);
1419 	else
1420 		thread->AddPendingSignal(signalNumber);
1421 
1422 	// the thread has the signal reference, now
1423 	signalReference.Detach();
1424 
1425 	switch (signalNumber) {
1426 		case SIGKILL:
1427 		{
1428 			// If sent to a thread other than the team's main thread, also send
1429 			// a SIGKILLTHR to the main thread to kill the team.
1430 			Thread* mainThread = thread->team->main_thread;
1431 			if (mainThread != NULL && mainThread != thread) {
1432 				mainThread->AddPendingSignal(SIGKILLTHR);
1433 
1434 				// wake up main thread
1435 				mainThread->going_to_suspend = false;
1436 
1437 				SpinLocker locker(mainThread->scheduler_lock);
1438 				if (mainThread->state == B_THREAD_SUSPENDED)
1439 					scheduler_enqueue_in_run_queue(mainThread);
1440 				else
1441 					thread_interrupt(mainThread, true);
1442 				locker.Unlock();
1443 
1444 				update_thread_signals_flag(mainThread);
1445 			}
1446 
1447 			// supposed to fall through
1448 		}
1449 		case SIGKILLTHR:
1450 		{
1451 			// Wake up suspended threads and interrupt waiting ones
1452 			thread->going_to_suspend = false;
1453 
1454 			SpinLocker locker(thread->scheduler_lock);
1455 			if (thread->state == B_THREAD_SUSPENDED)
1456 				scheduler_enqueue_in_run_queue(thread);
1457 			else
1458 				thread_interrupt(thread, true);
1459 
1460 			break;
1461 		}
1462 		case SIGNAL_DEBUG_THREAD:
1463 		{
1464 			// Wake up thread if it was suspended, otherwise interrupt it.
1465 			thread->going_to_suspend = false;
1466 
1467 			SpinLocker locker(thread->scheduler_lock);
1468 			if (thread->state == B_THREAD_SUSPENDED)
1469 				scheduler_enqueue_in_run_queue(thread);
1470 			else
1471 				thread_interrupt(thread, false);
1472 
1473 			break;
1474 		}
1475 		case SIGNAL_CONTINUE_THREAD:
1476 		{
1477 			// wake up thread, and interrupt its current syscall
1478 			thread->going_to_suspend = false;
1479 
1480 			SpinLocker locker(thread->scheduler_lock);
1481 			if (thread->state == B_THREAD_SUSPENDED)
1482 				scheduler_enqueue_in_run_queue(thread);
1483 
1484 			atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
1485 			break;
1486 		}
1487 		case SIGCONT:
1488 		{
1489 			// Wake up thread if it was suspended, otherwise interrupt it, if
1490 			// the signal isn't blocked.
1491 			thread->going_to_suspend = false;
1492 
1493 			SpinLocker locker(thread->scheduler_lock);
1494 			if (thread->state == B_THREAD_SUSPENDED)
1495 				scheduler_enqueue_in_run_queue(thread);
1496 			else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask) != 0)
1497 				thread_interrupt(thread, false);
1498 
1499 			// remove any pending stop signals
1500 			thread->RemovePendingSignals(STOP_SIGNALS);
1501 			break;
1502 		}
1503 		default:
1504 			// If the signal is not masked, interrupt the thread if it is
1505 			// currently waiting (interruptibly).
1506 			if ((thread->AllPendingSignals()
1507 						& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD)))
1508 					!= 0) {
1509 				// Interrupt thread if it was waiting
1510 				SpinLocker locker(thread->scheduler_lock);
1511 				thread_interrupt(thread, false);
1512 			}
1513 			break;
1514 	}
1515 
1516 	update_thread_signals_flag(thread);
1517 
1518 	return B_OK;
1519 }
1520 
1521 
1522 /*!	Sends the given signal to the given thread.
1523 
1524 	\param thread The thread the signal shall be sent to.
1525 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1526 		actual signal will be delivered. Only delivery checks will be performed.
1527 		The given object will be copied. The caller retains ownership.
1528 	\param flags A bitwise combination of any number of the following:
1529 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1530 			target thread the signal.
1531 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1532 			woken up, the scheduler will be invoked. If set that will not be
1533 			done explicitly, but rescheduling can still happen, e.g. when the
1534 			current thread's time slice runs out.
1535 	\return \c B_OK, when the signal was delivered successfully, another error
1536 		code otherwise.
1537 */
1538 status_t
1539 send_signal_to_thread(Thread* thread, const Signal& signal, uint32 flags)
1540 {
1541 	// Clone the signal -- the clone will be queued. If something fails and the
1542 	// caller doesn't require queuing, we will add an unqueued signal.
1543 	Signal* signalToQueue = NULL;
1544 	status_t error = Signal::CreateQueuable(signal,
1545 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1546 	if (error != B_OK)
1547 		return error;
1548 
1549 	InterruptsReadSpinLocker teamLocker(thread->team_lock);
1550 	SpinLocker locker(thread->team->signal_lock);
1551 
1552 	error = send_signal_to_thread_locked(thread, signal.Number(), signalToQueue,
1553 		flags);
1554 	if (error != B_OK)
1555 		return error;
1556 
1557 	locker.Unlock();
1558 	teamLocker.Unlock();
1559 
1560 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1561 		scheduler_reschedule_if_necessary();
1562 
1563 	return B_OK;
1564 }
1565 
1566 
1567 /*!	Sends the given signal to the thread with the given ID.
1568 
1569 	\param threadID The ID of the thread the signal shall be sent to.
1570 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1571 		actual signal will be delivered. Only delivery checks will be performed.
1572 		The given object will be copied. The caller retains ownership.
1573 	\param flags A bitwise combination of any number of the following:
1574 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1575 			target thread the signal.
1576 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1577 			woken up, the scheduler will be invoked. If set that will not be
1578 			done explicitly, but rescheduling can still happen, e.g. when the
1579 			current thread's time slice runs out.
1580 	\return \c B_OK, when the signal was delivered successfully, another error
1581 		code otherwise.
1582 */
1583 status_t
1584 send_signal_to_thread_id(thread_id threadID, const Signal& signal, uint32 flags)
1585 {
1586 	Thread* thread = Thread::Get(threadID);
1587 	if (thread == NULL)
1588 		return B_BAD_THREAD_ID;
1589 	BReference<Thread> threadReference(thread, true);
1590 
1591 	return send_signal_to_thread(thread, signal, flags);
1592 }
1593 
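// A minimal sending sketch, mirroring the SIGKILLTHR delivery in
// handle_signals() above; the helper name and the chosen signal are
// illustrative only.
static status_t
example_send_sigint(thread_id id)
{
	Signal signal(SIGINT, SI_USER, B_OK,
		thread_get_current_thread()->team->id);
	return send_signal_to_thread_id(id, signal, B_CHECK_PERMISSION);
}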
1594 
1595 /*!	Sends the given signal to the given team.
1596 
1597 	The caller must hold \c signal_lock.
1598 
1599 	\param team The team the signal shall be sent to.
1600 	\param signalNumber The number of the signal to be delivered. If \c 0, no
1601 		actual signal will be delivered. Only delivery checks will be performed.
1602 	\param signal If non-NULL the signal to be queued (has number
1603 		\a signalNumber in this case). The caller transfers an object reference
1604 		to this function. If \c NULL an unqueued signal will be delivered to the
1605 		team.
1606 	\param flags A bitwise combination of any number of the following:
1607 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1608 			target thread the signal.
1609 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1610 			woken up, the scheduler will be invoked. If set that will not be
1611 			done explicitly, but rescheduling can still happen, e.g. when the
1612 			current thread's time slice runs out.
1613 	\return \c B_OK, when the signal was delivered successfully, another error
1614 		code otherwise.
1615 */
1616 status_t
1617 send_signal_to_team_locked(Team* team, uint32 signalNumber, Signal* signal,
1618 	uint32 flags)
1619 {
1620 	ASSERT(signal == NULL || signalNumber == signal->Number());
1621 
1622 	T(SendSignal(team->id, signalNumber, flags));
1623 
1624 	// The caller transferred a reference to the signal to us.
1625 	BReference<Signal> signalReference(signal, true);
1626 
1627 	if ((flags & B_CHECK_PERMISSION) != 0) {
1628 		if (!has_permission_to_signal(team))
1629 			return EPERM;
1630 	}
1631 
1632 	if (signalNumber == 0)
1633 		return B_OK;
1634 
1635 	if (team == team_get_kernel_team()) {
1636 		// signals to the kernel team are not allowed
1637 		return EPERM;
1638 	}
1639 
1640 	if (signal != NULL)
1641 		team->AddPendingSignal(signal);
1642 	else
1643 		team->AddPendingSignal(signalNumber);
1644 
1645 	// the team has the signal reference, now
1646 	signalReference.Detach();
1647 
1648 	switch (signalNumber) {
1649 		case SIGKILL:
1650 		case SIGKILLTHR:
1651 		{
1652 			// Also add a SIGKILLTHR to the main thread's signals and wake it
1653 			// up/interrupt it, so we get this over with as soon as possible
1654 			// (only the main thread shuts down the team).
1655 			Thread* mainThread = team->main_thread;
1656 			if (mainThread != NULL) {
1657 				mainThread->AddPendingSignal(SIGKILLTHR);
1658 
1659 				// wake up main thread
1660 				mainThread->going_to_suspend = false;
1661 
1662 				SpinLocker _(mainThread->scheduler_lock);
1663 				if (mainThread->state == B_THREAD_SUSPENDED)
1664 					scheduler_enqueue_in_run_queue(mainThread);
1665 				else
1666 					thread_interrupt(mainThread, true);
1667 			}
1668 			break;
1669 		}
1670 
1671 		case SIGCONT:
1672 			// Wake up any suspended threads, interrupt the others, if they
1673 			// don't block the signal.
1674 			for (Thread* thread = team->thread_list; thread != NULL;
1675 					thread = thread->team_next) {
1676 				thread->going_to_suspend = false;
1677 
1678 				SpinLocker _(thread->scheduler_lock);
1679 				if (thread->state == B_THREAD_SUSPENDED) {
1680 					scheduler_enqueue_in_run_queue(thread);
1681 				} else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask)
1682 						!= 0) {
1683 					thread_interrupt(thread, false);
1684 				}
1685 
1686 				// remove any pending stop signals
1687 				thread->RemovePendingSignals(STOP_SIGNALS);
1688 			}
1689 
1690 			// remove any pending team stop signals
1691 			team->RemovePendingSignals(STOP_SIGNALS);
1692 			break;
1693 
1694 		case SIGSTOP:
1695 		case SIGTSTP:
1696 		case SIGTTIN:
1697 		case SIGTTOU:
1698 			// send the stop signal to all threads
1699 			// TODO: Is that correct or should we only target the main thread?
1700 			for (Thread* thread = team->thread_list; thread != NULL;
1701 					thread = thread->team_next) {
1702 				thread->AddPendingSignal(signalNumber);
1703 			}
1704 
1705 			// remove the stop signal from the team again
1706 			if (signal != NULL) {
1707 				team->RemovePendingSignal(signal);
1708 				signalReference.SetTo(signal, true);
1709 			} else
1710 				team->RemovePendingSignal(signalNumber);
1711 
1712 			// fall through to interrupt threads
1713 		default:
1714 			// Interrupt all interruptibly waiting threads, if the signal is
1715 			// not masked.
1716 			for (Thread* thread = team->thread_list; thread != NULL;
1717 					thread = thread->team_next) {
1718 				sigset_t nonBlocked = ~thread->sig_block_mask
1719 					| SIGNAL_TO_MASK(SIGCHLD);
1720 				if ((thread->AllPendingSignals() & nonBlocked) != 0) {
1721 					SpinLocker _(thread->scheduler_lock);
1722 					thread_interrupt(thread, false);
1723 				}
1724 			}
1725 			break;
1726 	}
1727 
1728 	update_team_threads_signal_flag(team);
1729 
1730 	return B_OK;
1731 }
1732 
1733 
1734 /*!	Sends the given signal to the given team.
1735 
1736 	\param team The team the signal shall be sent to.
1737 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1738 		actual signal will be delivered. Only delivery checks will be performed.
1739 		The given object will be copied. The caller retains ownership.
1740 	\param flags A bitwise combination of any number of the following:
1741 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1742 			target thread the signal.
1743 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1744 			woken up, the scheduler will be invoked. If set that will not be
1745 			done explicitly, but rescheduling can still happen, e.g. when the
1746 			current thread's time slice runs out.
1747 	\return \c B_OK, when the signal was delivered successfully, another error
1748 		code otherwise.
1749 */
1750 status_t
1751 send_signal_to_team(Team* team, const Signal& signal, uint32 flags)
1752 {
1753 	// Clone the signal -- the clone will be queued. If something fails and the
1754 	// caller doesn't require queuing, we will add an unqueued signal.
1755 	Signal* signalToQueue = NULL;
1756 	status_t error = Signal::CreateQueuable(signal,
1757 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1758 	if (error != B_OK)
1759 		return error;
1760 
1761 	InterruptsSpinLocker locker(team->signal_lock);
1762 
1763 	error = send_signal_to_team_locked(team, signal.Number(), signalToQueue,
1764 		flags);
1765 
1766 	locker.Unlock();
1767 
1768 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1769 		scheduler_reschedule_if_necessary();
1770 
1771 	return error;
1772 }
1773 
1774 
1775 /*!	Sends the given signal to the team with the given ID.
1776 
1777 	\param teamID The ID of the team the signal shall be sent to.
1778 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1779 		actual signal will be delivered. Only delivery checks will be performed.
1780 		The given object will be copied. The caller retains ownership.
1781 	\param flags A bitwise combination of any number of the following:
1782 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1783 			target thread the signal.
1784 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1785 			woken up, the scheduler will be invoked. If set that will not be
1786 			done explicitly, but rescheduling can still happen, e.g. when the
1787 			current thread's time slice runs out.
1788 	\return \c B_OK, when the signal was delivered successfully, another error
1789 		code otherwise.
1790 */
1791 status_t
1792 send_signal_to_team_id(team_id teamID, const Signal& signal, uint32 flags)
1793 {
1794 	// get the team
1795 	Team* team = Team::Get(teamID);
1796 	if (team == NULL)
1797 		return B_BAD_TEAM_ID;
1798 	BReference<Team> teamReference(team, true);
1799 
1800 	return send_signal_to_team(team, signal, flags);
1801 }
1802 
1803 
1804 /*!	Sends the given signal to the given process group.
1805 
1806 	The caller must hold the process group's lock. Interrupts must be enabled.
1807 
1808 	\param group The process group the signal shall be sent to.
1809 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1810 		actual signal will be delivered. Only delivery checks will be performed.
1811 		The given object will be copied. The caller retains ownership.
1812 	\param flags A bitwise combination of any number of the following:
1813 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1814 			target process group the signal.
1815 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher-priority thread has been
1816 			woken up, the scheduler will be invoked. If set, that will not be
1817 			done explicitly, but rescheduling can still happen, e.g. when the
1818 			current thread's time slice runs out.
1819 	\return \c B_OK when the signal was delivered successfully, another error
1820 		code otherwise.
1821 */
1822 status_t
1823 send_signal_to_process_group_locked(ProcessGroup* group, const Signal& signal,
1824 	uint32 flags)
1825 {
1826 	T(SendSignal(-group->id, signal.Number(), flags));
1827 
1828 	bool firstTeam = true;
1829 
1830 	for (Team* team = group->teams; team != NULL; team = team->group_next) {
1831 		status_t error = send_signal_to_team(team, signal,
1832 			flags | B_DO_NOT_RESCHEDULE);
1833 		// If sending to the first team in the group failed, let the whole call
1834 		// fail.
1835 		if (firstTeam) {
1836 			if (error != B_OK)
1837 				return error;
1838 			firstTeam = false;
1839 		}
1840 	}
1841 
1842 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1843 		scheduler_reschedule_if_necessary();
1844 
1845 	return B_OK;
1846 }
1847 
1848 
1849 /*!	Sends the given signal to the process group specified by the given ID.
1850 
1851 	The caller must not hold any process group, team, or thread lock. Interrupts
1852 	must be enabled.
1853 
1854 	\param groupID The ID of the process group the signal shall be sent to.
1855 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1856 		actual signal will be delivered. Only delivery checks will be performed.
1857 		The given object will be copied. The caller retains ownership.
1858 	\param flags A bitwise combination of any number of the following:
1859 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1860 			target process group the signal.
1861 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher-priority thread has been
1862 			woken up, the scheduler will be invoked. If set, that will not be
1863 			done explicitly, but rescheduling can still happen, e.g. when the
1864 			current thread's time slice runs out.
1865 	\return \c B_OK when the signal was delivered successfully, another error
1866 		code otherwise.
1867 */
1868 status_t
1869 send_signal_to_process_group(pid_t groupID, const Signal& signal, uint32 flags)
1870 {
1871 	ProcessGroup* group = ProcessGroup::Get(groupID);
1872 	if (group == NULL)
1873 		return B_BAD_TEAM_ID;
1874 	BReference<ProcessGroup> groupReference(group);
1875 
1876 	T(SendSignal(-group->id, signal.Number(), flags));
1877 
1878 	AutoLocker<ProcessGroup> groupLocker(group);
1879 
1880 	status_t error = send_signal_to_process_group_locked(group, signal,
1881 		flags | B_DO_NOT_RESCHEDULE);
1882 	if (error != B_OK)
1883 		return error;
1884 
1885 	groupLocker.Unlock();
1886 
1887 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1888 		scheduler_reschedule_if_necessary();
1889 
1890 	return B_OK;
1891 }
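
/*	Usage sketch (illustrative; SIGHUP and the flag are assumptions): the
	kernel-side equivalent of kill(-pgid, SIGHUP) boils down to:

		Signal signal(SIGHUP, SI_USER, B_OK, team_get_current_team_id());
		status_t error = send_signal_to_process_group(groupID, signal,
			B_CHECK_PERMISSION);
			// acquires its own ProcessGroup reference and lock -- the caller
			// must not hold any process group, team, or thread lock
*/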
1892 
1893 
1894 static status_t
1895 send_signal_internal(pid_t id, uint signalNumber, union sigval userValue,
1896 	uint32 flags)
1897 {
1898 	if (signalNumber > MAX_SIGNAL_NUMBER)
1899 		return B_BAD_VALUE;
1900 
1901 	Thread* thread = thread_get_current_thread();
1902 
1903 	Signal signal(signalNumber,
1904 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
1905 		B_OK, thread->team->id);
1906 		// Note: SI_USER/SI_QUEUE is not correct if called from within the
1907 		// kernel (or a driver), but we don't have any info here.
1908 	signal.SetUserValue(userValue);
1909 
1910 	// If id is > 0, send the signal to the respective thread.
1911 	if (id > 0)
1912 		return send_signal_to_thread_id(id, signal, flags);
1913 
1914 	// If id == 0, send the signal to the current thread.
1915 	if (id == 0)
1916 		return send_signal_to_thread(thread, signal, flags);
1917 
1918 	// If id == -1, send the signal to all teams the calling team has permission
1919 	// to send signals to.
1920 	if (id == -1) {
1921 		// TODO: Implement correctly!
1922 		// currently only send to the current team
1923 		return send_signal_to_team_id(thread->team->id, signal, flags);
1924 	}
1925 
1926 	// Send a signal to the specified process group (the absolute value of the
1927 	// id).
1928 	return send_signal_to_process_group(-id, signal, flags);
1929 }
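
/*	For orientation only (this restates the dispatch above; threadID, groupID,
	value, and flags are placeholder names): the kill()-style id values map to
	targets as follows:

		send_signal_internal(threadID, SIGUSR1, value, flags);
			// id > 0: the thread with that ID
		send_signal_internal(0, SIGUSR1, value, flags);
			// id == 0: the current thread
		send_signal_internal(-1, SIGUSR1, value, flags);
			// id == -1: all teams we may signal (currently the current team)
		send_signal_internal(-groupID, SIGUSR1, value, flags);
			// id < -1: the process group with ID -id
*/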
1930 
1931 
1932 int
1933 send_signal_etc(pid_t id, uint signalNumber, uint32 flags)
1934 {
1935 	// a dummy user value
1936 	union sigval userValue;
1937 	userValue.sival_ptr = NULL;
1938 
1939 	return send_signal_internal(id, signalNumber, userValue, flags);
1940 }
1941 
1942 
1943 int
1944 send_signal(pid_t threadID, uint signal)
1945 {
1946 	// The BeBook states that this function wouldn't be exported
1947 	// for drivers, but, of course, it's wrong.
1948 	return send_signal_etc(threadID, signal, 0);
1949 }
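
/*	Usage sketch for these driver-visible wrappers (illustrative; the thread ID
	and signal choices are assumptions):

		send_signal(someThreadID, SIGUSR1);
			// plain BeOS-style call; may reschedule immediately
		send_signal_etc(someThreadID, SIGUSR1, B_DO_NOT_RESCHEDULE);
			// defers the reschedule, useful where invoking the scheduler
			// right away is undesirable
*/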
1950 
1951 
1952 static int
1953 sigprocmask_internal(int how, const sigset_t* set, sigset_t* oldSet)
1954 {
1955 	Thread* thread = thread_get_current_thread();
1956 
1957 	InterruptsSpinLocker _(thread->team->signal_lock);
1958 
1959 	sigset_t oldMask = thread->sig_block_mask;
1960 
1961 	if (set != NULL) {
1962 		T(SigProcMask(how, *set));
1963 
1964 		switch (how) {
1965 			case SIG_BLOCK:
1966 				thread->sig_block_mask |= *set & BLOCKABLE_SIGNALS;
1967 				break;
1968 			case SIG_UNBLOCK:
1969 				thread->sig_block_mask &= ~*set;
1970 				break;
1971 			case SIG_SETMASK:
1972 				thread->sig_block_mask = *set & BLOCKABLE_SIGNALS;
1973 				break;
1974 			default:
1975 				return B_BAD_VALUE;
1976 		}
1977 
1978 		update_current_thread_signals_flag();
1979 	}
1980 
1981 	if (oldSet != NULL)
1982 		*oldSet = oldMask;
1983 
1984 	return B_OK;
1985 }
1986 
1987 
1988 int
1989 sigprocmask(int how, const sigset_t* set, sigset_t* oldSet)
1990 {
1991 	RETURN_AND_SET_ERRNO(sigprocmask_internal(how, set, oldSet));
1992 }
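
/*	Userland pattern that ends up here (illustrative sketch, not from this
	file): temporarily blocking SIGINT around a critical section.

		sigset_t blocked, oldMask;
		sigemptyset(&blocked);
		sigaddset(&blocked, SIGINT);
		sigprocmask(SIG_BLOCK, &blocked, &oldMask);
		// ... critical section: an incoming SIGINT stays pending ...
		sigprocmask(SIG_SETMASK, &oldMask, NULL);

	Attempts to block KILL_SIGNALS and the other non-blockable signals are
	silently filtered out via BLOCKABLE_SIGNALS above.
*/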
1993 
1994 
1995 /*!	\brief Like sigaction(), but returning the error instead of setting errno.
1996 */
1997 static status_t
1998 sigaction_internal(int signal, const struct sigaction* act,
1999 	struct sigaction* oldAction)
2000 {
2001 	if (signal < 1 || signal > MAX_SIGNAL_NUMBER
2002 		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
2003 		return B_BAD_VALUE;
2004 
2005 	// get and lock the team
2006 	Team* team = thread_get_current_thread()->team;
2007 	TeamLocker teamLocker(team);
2008 
2009 	struct sigaction& teamHandler = team->SignalActionFor(signal);
2010 	if (oldAction) {
2011 		// save previous sigaction structure
2012 		*oldAction = teamHandler;
2013 	}
2014 
2015 	if (act) {
2016 		T(SigAction(signal, act));
2017 
2018 		// set new sigaction structure
2019 		teamHandler = *act;
2020 		teamHandler.sa_mask &= BLOCKABLE_SIGNALS;
2021 	}
2022 
2023 	// Remove pending instances of the signal if it is now explicitly ignored,
2024 	// or if it is reset to a default action that ignores it.
2025 	if ((act && act->sa_handler == SIG_IGN)
2026 		|| (act && act->sa_handler == SIG_DFL
2027 			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0)) {
2028 		InterruptsSpinLocker locker(team->signal_lock);
2029 
2030 		team->RemovePendingSignal(signal);
2031 
2032 		for (Thread* thread = team->thread_list; thread != NULL;
2033 				thread = thread->team_next) {
2034 			thread->RemovePendingSignal(signal);
2035 		}
2036 	}
2037 
2038 	return B_OK;
2039 }
2040 
2041 
2042 int
2043 sigaction(int signal, const struct sigaction* act, struct sigaction* oldAction)
2044 {
2045 	RETURN_AND_SET_ERRNO(sigaction_internal(signal, act, oldAction));
2046 }
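
/*	Userland pattern that ends up here (illustrative sketch, not from this
	file): installing a handler for SIGTERM.

		static volatile sig_atomic_t sTerminated = 0;
		static void handler(int) { sTerminated = 1; }
			// only async-signal-safe work belongs in a handler

		struct sigaction action;
		memset(&action, 0, sizeof(action));
		action.sa_handler = handler;
		sigemptyset(&action.sa_mask);
		sigaction(SIGTERM, &action, NULL);

	Note that sa_mask is intersected with BLOCKABLE_SIGNALS, and that installing
	SIG_IGN (or SIG_DFL for a default-ignored signal) discards already pending
	instances, as implemented above.
*/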
2047 
2048 
2049 /*!	Wait for one of the specified signals, and return the information for the
2050 	received signal in \a info.
2051 	The \c flags and \c timeout combination must either define an infinite
2052 	timeout (no timeout flags set), an absolute timeout (\c B_ABSOLUTE_TIMEOUT
2053 	set), or a relative timeout \code <= 0 \endcode (\c B_RELATIVE_TIMEOUT set).
2054 */
2055 static status_t
2056 sigwait_internal(const sigset_t* set, siginfo_t* info, uint32 flags,
2057 	bigtime_t timeout)
2058 {
2059 	// restrict mask to blockable signals
2060 	sigset_t requestedSignals = *set & BLOCKABLE_SIGNALS;
2061 
2062 	// always make the wait interruptible
2063 	flags |= B_CAN_INTERRUPT;
2064 
2065 	// check whether we are allowed to wait at all
2066 	bool canWait = (flags & B_RELATIVE_TIMEOUT) == 0 || timeout > 0;
2067 
2068 	Thread* thread = thread_get_current_thread();
2069 
2070 	InterruptsSpinLocker locker(thread->team->signal_lock);
2071 
2072 	bool timedOut = false;
2073 	status_t error = B_OK;
2074 
2075 	while (!timedOut) {
2076 		sigset_t pendingSignals = thread->AllPendingSignals();
2077 
2078 		// If a kill signal is pending, just bail out.
2079 		if ((pendingSignals & KILL_SIGNALS) != 0)
2080 			return B_INTERRUPTED;
2081 
2082 		if ((pendingSignals & requestedSignals) != 0) {
2083 			// get signal with the highest priority
2084 			Signal stackSignal;
2085 			Signal* signal = dequeue_thread_or_team_signal(thread,
2086 				requestedSignals, stackSignal);
2087 			ASSERT(signal != NULL);
2088 
2089 			SignalHandledCaller signalHandledCaller(signal);
2090 			locker.Unlock();
2091 
2092 			info->si_signo = signal->Number();
2093 			info->si_code = signal->SignalCode();
2094 			info->si_errno = signal->ErrorCode();
2095 			info->si_pid = signal->SendingProcess();
2096 			info->si_uid = signal->SendingUser();
2097 			info->si_addr = signal->Address();
2098 			info->si_status = signal->Status();
2099 			info->si_band = signal->PollBand();
2100 			info->si_value = signal->UserValue();
2101 
2102 			return B_OK;
2103 		}
2104 
2105 		if (!canWait)
2106 			return B_WOULD_BLOCK;
2107 
2108 		sigset_t blockedSignals = thread->sig_block_mask;
2109 		if ((pendingSignals & ~blockedSignals) != 0) {
2110 			// Non-blocked signals are pending -- return to let them be handled.
2111 			return B_INTERRUPTED;
2112 		}
2113 
2114 		// No signals yet. Set the signal block mask to not include the
2115 		// requested signals and wait until we're interrupted.
2116 		thread->sig_block_mask = blockedSignals & ~requestedSignals;
2117 
2118 		while (!has_signals_pending(thread)) {
2119 			thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SIGNAL,
2120 				NULL);
2121 
2122 			locker.Unlock();
2123 
2124 			if ((flags & B_ABSOLUTE_TIMEOUT) != 0) {
2125 				error = thread_block_with_timeout(flags, timeout);
2126 				if (error == B_WOULD_BLOCK || error == B_TIMED_OUT) {
2127 					error = B_WOULD_BLOCK;
2128 						// POSIX requires EAGAIN (B_WOULD_BLOCK) on timeout
2129 					timedOut = true;
2130 
2131 					locker.Lock();
2132 					break;
2133 				}
2134 			} else
2135 				thread_block();
2136 
2137 			locker.Lock();
2138 		}
2139 
2140 		// restore the original block mask
2141 		thread->sig_block_mask = blockedSignals;
2142 
2143 		update_current_thread_signals_flag();
2144 	}
2145 
2146 	// we get here only when timed out
2147 	return error;
2148 }
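
/*	Userland pattern served by this function (illustrative sketch; the dedicated
	signal-handling thread is an assumption): waiting synchronously for SIGUSR1
	with sigwait() after blocking it.

		sigset_t set;
		sigemptyset(&set);
		sigaddset(&set, SIGUSR1);
		pthread_sigmask(SIG_BLOCK, &set, NULL);
			// block it first so it stays pending instead of being delivered

		int receivedSignal;
		sigwait(&set, &receivedSignal);
			// returns once SIGUSR1 is pending; no handler is invoked
*/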
2149 
2150 
2151 /*!	Replace the current signal block mask and wait for any event to happen.
2152 	Before returning, the original signal block mask is restored.
2153 */
2154 static status_t
2155 sigsuspend_internal(const sigset_t* _mask)
2156 {
2157 	sigset_t mask = *_mask & BLOCKABLE_SIGNALS;
2158 
2159 	T(SigSuspend(mask));
2160 
2161 	Thread* thread = thread_get_current_thread();
2162 
2163 	InterruptsSpinLocker locker(thread->team->signal_lock);
2164 
2165 	// Set the new block mask and block until interrupted. We might be here
2166 	// after a syscall restart, in which case sigsuspend_original_unblocked_mask
2167 	// will still be set.
2168 	sigset_t oldMask = thread->sigsuspend_original_unblocked_mask != 0
2169 		? ~thread->sigsuspend_original_unblocked_mask : thread->sig_block_mask;
2170 	thread->sig_block_mask = mask & BLOCKABLE_SIGNALS;
2171 
2172 	update_current_thread_signals_flag();
2173 
2174 	while (!has_signals_pending(thread)) {
2175 		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
2176 			THREAD_BLOCK_TYPE_SIGNAL, NULL);
2177 
2178 		locker.Unlock();
2179 		thread_block();
2180 		locker.Lock();
2181 	}
2182 
2183 	// Set sigsuspend_original_unblocked_mask (guaranteed to be non-0 due to
2184 	// BLOCKABLE_SIGNALS). This will indicate to handle_signals() that it is
2185 	// called after a _user_sigsuspend(). It will reset the field after invoking
2186 	// a signal handler, or restart the syscall, if there wasn't anything to
2187 	// handle anymore (e.g. because another thread was faster).
2188 	thread->sigsuspend_original_unblocked_mask = ~oldMask;
2189 
2190 	T(SigSuspendDone());
2191 
2192 	// we're not supposed to actually succeed
2193 	return B_INTERRUPTED;
2194 }
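
/*	Userland pattern served by this function (illustrative sketch; sChildExited
	stands for a flag set by a SIGCHLD handler): the classic race-free
	"block, test, then wait" idiom around sigsuspend().

		sigset_t blocked, original;
		sigemptyset(&blocked);
		sigaddset(&blocked, SIGCHLD);
		sigprocmask(SIG_BLOCK, &blocked, &original);

		while (!sChildExited)
			sigsuspend(&original);
				// atomically unblocks SIGCHLD and waits; always returns -1
				// with errno set to EINTR (B_INTERRUPTED here)

		sigprocmask(SIG_SETMASK, &original, NULL);
*/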
2195 
2196 
2197 static status_t
2198 sigpending_internal(sigset_t* set)
2199 {
2200 	Thread* thread = thread_get_current_thread();
2201 
2202 	if (set == NULL)
2203 		return B_BAD_VALUE;
2204 
2205 	InterruptsSpinLocker locker(thread->team->signal_lock);
2206 
2207 	*set = thread->AllPendingSignals() & thread->sig_block_mask;
2208 
2209 	return B_OK;
2210 }
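
/*	Userland pattern served by this function (illustrative sketch): checking
	whether a blocked signal has arrived without unblocking it.

		sigset_t pending;
		sigpending(&pending);
		if (sigismember(&pending, SIGUSR1)) {
			// SIGUSR1 was sent while blocked and is still waiting
		}
*/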
2211 
2212 
2213 // #pragma mark - syscalls
2214 
2215 
2216 /*!	Sends a signal to a thread, process, or process group.
2217 	\param id Specifies the ID of the target:
2218 		- \code id > 0 \endcode: If \a toThread is \c true, the target is the
2219 			thread with ID \a id, otherwise the team with the ID \a id.
2220 		- \code id == 0 \endcode: If \a toThread is \c true, the target is the
2221 			current thread, otherwise the current team.
2222 		- \code id == -1 \endcode: The targets are all teams the current team has
2223 			permission to send signals to. Currently not implemented correctly.
2224 		- \code id < -1 \endcode: The target is the process group with ID
2225 			\c -id.
2226 	\param signalNumber The signal number. \c 0 to just perform checks, but not
2227 		actually send any signal.
2228 	\param userUserValue A user value to be associated with the signal. Might be
2229 		ignored unless signal queuing is forced. Can be \c NULL.
2230 	\param flags A bitwise or of any number of the following:
2231 		- \c SIGNAL_FLAG_QUEUING_REQUIRED: Signal queuing is required. Fail
2232 			instead of falling back to unqueued signals, when queuing isn't
2233 			possible.
2234 		- \c SIGNAL_FLAG_SEND_TO_THREAD: Interpret the given ID as a
2235 			\c thread_id rather than a \c team_id. Ignored when the \a id is
2236 			\code < 0 \endcode -- then the target is a process group.
2237 	\return \c B_OK on success, another error code otherwise.
2238 */
2239 status_t
2240 _user_send_signal(int32 id, uint32 signalNumber,
2241 	const union sigval* userUserValue, uint32 flags)
2242 {
2243 	// restrict flags to the allowed ones and add B_CHECK_PERMISSION
2244 	flags &= SIGNAL_FLAG_QUEUING_REQUIRED | SIGNAL_FLAG_SEND_TO_THREAD;
2245 	flags |= B_CHECK_PERMISSION;
2246 
2247 	// Copy the user value from userland. If not given, use a dummy value.
2248 	union sigval userValue;
2249 	if (userUserValue != NULL) {
2250 		if (!IS_USER_ADDRESS(userUserValue)
2251 			|| user_memcpy(&userValue, userUserValue, sizeof(userValue))
2252 				!= B_OK) {
2253 			return B_BAD_ADDRESS;
2254 		}
2255 	} else
2256 		userValue.sival_ptr = NULL;
2257 
2258 	// If to be sent to a thread, delegate to send_signal_internal(). Also do
2259 	// that when id < 0, since the semantics are the same in that case.
2260 	if ((flags & SIGNAL_FLAG_SEND_TO_THREAD) != 0 || id < 0)
2261 		return send_signal_internal(id, signalNumber, userValue, flags);
2262 
2263 	// kill() semantics for id >= 0
2264 	if (signalNumber > MAX_SIGNAL_NUMBER)
2265 		return B_BAD_VALUE;
2266 
2267 	Thread* thread = thread_get_current_thread();
2268 
2269 	Signal signal(signalNumber,
2270 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
2271 		B_OK, thread->team->id);
2272 	signal.SetUserValue(userValue);
2273 
2274 	// send to current team for id == 0, otherwise to the respective team
2275 	return send_signal_to_team_id(id == 0 ? team_get_current_team_id() : id,
2276 		signal, flags);
2277 }
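
/*	For orientation (illustrative sketch; targetPid and the payload are
	assumptions): POSIX sigqueue() is the userland interface for queued delivery
	with a user value, which is expected to reach this syscall with
	SIGNAL_FLAG_QUEUING_REQUIRED set.

		union sigval value;
		value.sival_int = 42;
		sigqueue(targetPid, SIGUSR2, value);
			// fails instead of falling back to an unqueued signal when
			// queuing isn't possible
*/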
2278 
2279 
2280 status_t
2281 _user_set_signal_mask(int how, const sigset_t *userSet, sigset_t *userOldSet)
2282 {
2283 	sigset_t set, oldSet;
2284 	status_t status;
2285 
2286 	if ((userSet != NULL && (!IS_USER_ADDRESS(userSet)
2287 			|| user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK))
2288 		|| (userOldSet != NULL && (!IS_USER_ADDRESS(userOldSet)
2289 			|| user_memcpy(&oldSet, userOldSet, sizeof(sigset_t)) < B_OK)))
2290 		return B_BAD_ADDRESS;
2291 
2292 	status = sigprocmask_internal(how, userSet ? &set : NULL,
2293 		userOldSet ? &oldSet : NULL);
2294 
2295 	// copy old set if asked for
2296 	if (status >= B_OK && userOldSet != NULL
2297 		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
2298 		return B_BAD_ADDRESS;
2299 
2300 	return status;
2301 }
2302 
2303 
2304 status_t
2305 _user_sigaction(int signal, const struct sigaction *userAction,
2306 	struct sigaction *userOldAction)
2307 {
2308 	struct sigaction act, oact;
2309 	status_t status;
2310 
2311 	if ((userAction != NULL && (!IS_USER_ADDRESS(userAction)
2312 			|| user_memcpy(&act, userAction, sizeof(struct sigaction)) < B_OK))
2313 		|| (userOldAction != NULL && (!IS_USER_ADDRESS(userOldAction)
2314 			|| user_memcpy(&oact, userOldAction, sizeof(struct sigaction))
2315 				< B_OK)))
2316 		return B_BAD_ADDRESS;
2317 
2318 	status = sigaction_internal(signal, userAction ? &act : NULL,
2319 		userOldAction ? &oact : NULL);
2320 
2321 	// only copy the old action if a pointer has been given
2322 	if (status >= B_OK && userOldAction != NULL
2323 		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
2324 		return B_BAD_ADDRESS;
2325 
2326 	return status;
2327 }
2328 
2329 
2330 status_t
2331 _user_sigwait(const sigset_t *userSet, siginfo_t *userInfo, uint32 flags,
2332 	bigtime_t timeout)
2333 {
2334 	// copy userSet to stack
2335 	sigset_t set;
2336 	if (userSet == NULL || !IS_USER_ADDRESS(userSet)
2337 		|| user_memcpy(&set, userSet, sizeof(sigset_t)) != B_OK) {
2338 		return B_BAD_ADDRESS;
2339 	}
2340 
2341 	// userInfo is optional, but must be a user address when given
2342 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
2343 		return B_BAD_ADDRESS;
2344 
2345 	syscall_restart_handle_timeout_pre(flags, timeout);
2346 
2347 	flags |= B_CAN_INTERRUPT;
2348 
2349 	siginfo_t info;
2350 	status_t status = sigwait_internal(&set, &info, flags, timeout);
2351 	if (status == B_OK) {
2352 		// copy the info back to userland, if userInfo is non-NULL
2353 		if (userInfo != NULL)
2354 			status = user_memcpy(userInfo, &info, sizeof(info));
2355 	} else if (status == B_INTERRUPTED) {
2356 		// make sure we'll be restarted
2357 		Thread* thread = thread_get_current_thread();
2358 		atomic_or(&thread->flags, THREAD_FLAGS_ALWAYS_RESTART_SYSCALL);
2359 	}
2360 
2361 	return syscall_restart_handle_timeout_post(status, timeout);
2362 }
2363 
2364 
2365 status_t
2366 _user_sigsuspend(const sigset_t *userMask)
2367 {
2368 	sigset_t mask;
2369 
2370 	if (userMask == NULL)
2371 		return B_BAD_VALUE;
2372 	if (!IS_USER_ADDRESS(userMask)
2373 		|| user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK) {
2374 		return B_BAD_ADDRESS;
2375 	}
2376 
2377 	return sigsuspend_internal(&mask);
2378 }
2379 
2380 
2381 status_t
2382 _user_sigpending(sigset_t *userSet)
2383 {
2384 	sigset_t set;
2385 	status_t status;
2386 
2387 	if (userSet == NULL)
2388 		return B_BAD_VALUE;
2389 	if (!IS_USER_ADDRESS(userSet))
2390 		return B_BAD_ADDRESS;
2391 
2392 	status = sigpending_internal(&set);
2393 	if (status == B_OK
2394 		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
2395 		return B_BAD_ADDRESS;
2396 
2397 	return status;
2398 }
2399 
2400 
2401 status_t
2402 _user_set_signal_stack(const stack_t* newUserStack, stack_t* oldUserStack)
2403 {
2404 	Thread *thread = thread_get_current_thread();
2405 	struct stack_t newStack, oldStack;
2406 	bool onStack = false;
2407 
2408 	if ((newUserStack != NULL && (!IS_USER_ADDRESS(newUserStack)
2409 			|| user_memcpy(&newStack, newUserStack, sizeof(stack_t)) < B_OK))
2410 		|| (oldUserStack != NULL && (!IS_USER_ADDRESS(oldUserStack)
2411 			|| user_memcpy(&oldStack, oldUserStack, sizeof(stack_t)) < B_OK)))
2412 		return B_BAD_ADDRESS;
2413 
2414 	if (thread->signal_stack_enabled) {
2415 		// determine whether or not the user thread is currently
2416 		// on the active signal stack
2417 		onStack = arch_on_signal_stack(thread);
2418 	}
2419 
2420 	if (oldUserStack != NULL) {
2421 		oldStack.ss_sp = (void *)thread->signal_stack_base;
2422 		oldStack.ss_size = thread->signal_stack_size;
2423 		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
2424 			| (onStack ? SS_ONSTACK : 0);
2425 	}
2426 
2427 	if (newUserStack != NULL) {
2428 		// no flags other than SS_DISABLE are allowed
2429 		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
2430 			return B_BAD_VALUE;
2431 
2432 		if ((newStack.ss_flags & SS_DISABLE) == 0) {
2433 			// check if the size is valid
2434 			if (newStack.ss_size < MINSIGSTKSZ)
2435 				return B_NO_MEMORY;
2436 			if (onStack)
2437 				return B_NOT_ALLOWED;
2438 			if (!IS_USER_ADDRESS(newStack.ss_sp))
2439 				return B_BAD_VALUE;
2440 
2441 			thread->signal_stack_base = (addr_t)newStack.ss_sp;
2442 			thread->signal_stack_size = newStack.ss_size;
2443 			thread->signal_stack_enabled = true;
2444 		} else
2445 			thread->signal_stack_enabled = false;
2446 	}
2447 
2448 	// only copy the old stack info if a pointer has been given
2449 	if (oldUserStack != NULL
2450 		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
2451 		return B_BAD_ADDRESS;
2452 
2453 	return B_OK;
2454 }
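
/*	Userland pattern served by this syscall (illustrative sketch; segvHandler is
	only named for illustration): installing an alternate signal stack so that a
	handler can still run after a stack overflow.

		static char sAltStack[SIGSTKSZ];

		stack_t stack;
		stack.ss_sp = sAltStack;
		stack.ss_size = sizeof(sAltStack);
		stack.ss_flags = 0;
		sigaltstack(&stack, NULL);

		struct sigaction action;
		memset(&action, 0, sizeof(action));
		action.sa_handler = segvHandler;
		action.sa_flags = SA_ONSTACK;
			// deliver the signal on the alternate stack
		sigemptyset(&action.sa_mask);
		sigaction(SIGSEGV, &action, NULL);
*/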
2455 
2456 
2457 /*!	Restores the environment of a function that was interrupted by a signal
2458 	handler call.
2459 	This syscall is invoked when a signal handler function returns. It
2460 	deconstructs the signal handler frame and restores the stack and register
2461 	state of the function that was interrupted by a signal. The syscall is
2462 	therefore somewhat unusual, since it does not return to the calling
2463 	function, but somewhere else. In case the signal interrupted a syscall,
2464 	it will appear as if the syscall just returned. That is also the reason why
2465 	this syscall returns an int64: it needs to return the value of the
2466 	interrupted syscall, which is potentially 64 bits wide.
2467 
2468 	\param userSignalFrameData The signal frame data created for the signal
2469 		handler. Some data (e.g. registers) may have been modified by the
2470 		signal handler.
2471 	\return In case the signal interrupted a syscall, the return value of that
2472 		syscall. Otherwise (i.e. in case of a (hardware) interrupt/exception)
2473 		the value might need to be tailored such that after a return to userland
2474 		the restored environment is identical to the interrupted one (unless
2475 		explicitly modified). E.g. for x86 to achieve that, the return value
2476 		must contain the eax|edx values of the interrupted environment.
2477 */
2478 int64
2479 _user_restore_signal_frame(struct signal_frame_data* userSignalFrameData)
2480 {
2481 	syscall_64_bit_return_value();
2482 
2483 	Thread *thread = thread_get_current_thread();
2484 
2485 	// copy the signal frame data from userland
2486 	signal_frame_data signalFrameData;
2487 	if (userSignalFrameData == NULL || !IS_USER_ADDRESS(userSignalFrameData)
2488 		|| user_memcpy(&signalFrameData, userSignalFrameData,
2489 			sizeof(signalFrameData)) != B_OK) {
2490 		// We failed to copy the signal frame data from userland. This is a
2491 		// serious problem. Kill the thread.
2492 		dprintf("_user_restore_signal_frame(): thread %" B_PRId32 ": Failed to "
2493 			"copy signal frame data (%p) from userland. Killing thread...\n",
2494 			thread->id, userSignalFrameData);
2495 		kill_thread(thread->id);
2496 		return B_BAD_ADDRESS;
2497 	}
2498 
2499 	// restore the signal block mask
2500 	InterruptsSpinLocker locker(thread->team->signal_lock);
2501 
2502 	thread->sig_block_mask
2503 		= signalFrameData.context.uc_sigmask & BLOCKABLE_SIGNALS;
2504 	update_current_thread_signals_flag();
2505 
2506 	locker.Unlock();
2507 
2508 	// restore the syscall restart related thread flags and the syscall restart
2509 	// parameters
2510 	atomic_and(&thread->flags,
2511 		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
2512 	atomic_or(&thread->flags, signalFrameData.thread_flags
2513 		& (THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
2514 
2515 	memcpy(thread->syscall_restart.parameters,
2516 		signalFrameData.syscall_restart_parameters,
2517 		sizeof(thread->syscall_restart.parameters));
2518 
2519 	// restore the previously stored Thread::user_signal_context
2520 	thread->user_signal_context = signalFrameData.context.uc_link;
2521 	if (thread->user_signal_context != NULL
2522 		&& !IS_USER_ADDRESS(thread->user_signal_context)) {
2523 		thread->user_signal_context = NULL;
2524 	}
2525 
2526 	// let the architecture specific code restore the registers
2527 	return arch_restore_signal_frame(&signalFrameData);
2528 }
2529