xref: /haiku/src/system/kernel/signal.cpp (revision fc7456e9b1ec38c941134ed6d01c438cf289381e)
1 /*
2  * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
3  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
4  * Copyright 2011-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
5  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
6  * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
7  *
8  * Distributed under the terms of the MIT License.
9  */
10 
11 
12 /*! POSIX signals handling routines */
13 
14 
15 #include <ksignal.h>
16 
17 #include <errno.h>
18 #include <stddef.h>
19 #include <string.h>
20 
21 #include <OS.h>
22 #include <KernelExport.h>
23 
24 #include <cpu.h>
25 #include <core_dump.h>
26 #include <debug.h>
27 #include <kernel.h>
28 #include <kscheduler.h>
29 #include <sem.h>
30 #include <syscall_restart.h>
31 #include <syscall_utils.h>
32 #include <team.h>
33 #include <thread.h>
34 #include <tracing.h>
35 #include <user_debugger.h>
36 #include <user_thread.h>
37 #include <util/AutoLock.h>
38 #include <util/ThreadAutoLock.h>
39 
40 
41 //#define TRACE_SIGNAL
42 #ifdef TRACE_SIGNAL
43 #	define TRACE(x) dprintf x
44 #else
45 #	define TRACE(x) ;
46 #endif
47 
48 
49 #define BLOCKABLE_SIGNALS	\
50 	(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)	\
51 	| SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD)	\
52 	| SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD)	\
53 	| SIGNAL_TO_MASK(SIGNAL_CANCEL_THREAD)))
54 #define STOP_SIGNALS \
55 	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
56 	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
57 #define CONTINUE_SIGNALS \
58 	(SIGNAL_TO_MASK(SIGCONT) | SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD) \
59 	| SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD))
60 #define DEFAULT_IGNORE_SIGNALS \
61 	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
62 	| SIGNAL_TO_MASK(SIGCONT) \
63 	| SIGNAL_RANGE_TO_MASK(SIGNAL_REALTIME_MIN, SIGNAL_REALTIME_MAX))
64 #define NON_DEFERRABLE_SIGNALS	\
65 	(KILL_SIGNALS				\
66 	| SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD) \
67 	| SIGNAL_TO_MASK(SIGILL)	\
68 	| SIGNAL_TO_MASK(SIGFPE)	\
69 	| SIGNAL_TO_MASK(SIGSEGV))
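
// Editor's note: an illustrative sketch, not part of the original source. The
// masks above are plain sigset_t bit masks; assuming the usual <ksignal.h>
// definition SIGNAL_TO_MASK(signal) == ((sigset_t)1 << ((signal) - 1)), a
// membership test is a single bit check, e.g.:
//
//	static inline bool
//	is_blockable_signal(uint32 number)
//	{
//		return (SIGNAL_TO_MASK(number) & BLOCKABLE_SIGNALS) != 0;
//	}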
70 
71 
72 static const struct {
73 	const char*	name;
74 	int32		priority;
75 } kSignalInfos[__MAX_SIGNO + 1] = {
76 	{"NONE",			-1},
77 	{"HUP",				0},
78 	{"INT",				0},
79 	{"QUIT",			0},
80 	{"ILL",				0},
81 	{"CHLD",			0},
82 	{"ABRT",			0},
83 	{"PIPE",			0},
84 	{"FPE",				0},
85 	{"KILL",			100},
86 	{"STOP",			0},
87 	{"SEGV",			0},
88 	{"CONT",			0},
89 	{"TSTP",			0},
90 	{"ALRM",			0},
91 	{"TERM",			0},
92 	{"TTIN",			0},
93 	{"TTOU",			0},
94 	{"USR1",			0},
95 	{"USR2",			0},
96 	{"WINCH",			0},
97 	{"KILLTHR",			100},
98 	{"TRAP",			0},
99 	{"POLL",			0},
100 	{"PROF",			0},
101 	{"SYS",				0},
102 	{"URG",				0},
103 	{"VTALRM",			0},
104 	{"XCPU",			0},
105 	{"XFSZ",			0},
106 	{"SIGBUS",			0},
107 	{"SIGRESERVED1",	0},
108 	{"SIGRESERVED2",	0},
109 	{"SIGRT1",			8},
110 	{"SIGRT2",			7},
111 	{"SIGRT3",			6},
112 	{"SIGRT4",			5},
113 	{"SIGRT5",			4},
114 	{"SIGRT6",			3},
115 	{"SIGRT7",			2},
116 	{"SIGRT8",			1},
117 	{"invalid 41",		0},
118 	{"invalid 42",		0},
119 	{"invalid 43",		0},
120 	{"invalid 44",		0},
121 	{"invalid 45",		0},
122 	{"invalid 46",		0},
123 	{"invalid 47",		0},
124 	{"invalid 48",		0},
125 	{"invalid 49",		0},
126 	{"invalid 50",		0},
127 	{"invalid 51",		0},
128 	{"invalid 52",		0},
129 	{"invalid 53",		0},
130 	{"invalid 54",		0},
131 	{"invalid 55",		0},
132 	{"invalid 56",		0},
133 	{"invalid 57",		0},
134 	{"invalid 58",		0},
135 	{"invalid 59",		0},
136 	{"invalid 60",		0},
137 	{"invalid 61",		0},
138 	{"invalid 62",		0},
139 	{"CANCEL_THREAD",	0},
140 	{"CONTINUE_THREAD",	0}	// priority must be <= that of SIGSTOP
141 };
142 
143 
144 static inline const char*
145 signal_name(uint32 number)
146 {
147 	return number <= __MAX_SIGNO ? kSignalInfos[number].name : "invalid";
148 }
149 
150 
151 // #pragma mark - SignalHandledCaller
152 
153 
154 struct SignalHandledCaller {
155 	SignalHandledCaller(Signal* signal)
156 		:
157 		fSignal(signal)
158 	{
159 	}
160 
161 	~SignalHandledCaller()
162 	{
163 		Done();
164 	}
165 
166 	void Done()
167 	{
168 		if (fSignal != NULL) {
169 			fSignal->Handled();
170 			fSignal = NULL;
171 		}
172 	}
173 
174 private:
175 	Signal*	fSignal;
176 };
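
// Editor's note: an illustrative usage sketch, not part of the original
// source, mirroring how handle_signals() below employs this RAII helper:
//
//	SignalHandledCaller signalHandledCaller(signal);
//		// releases the signal reference via Signal::Handled() when the
//		// caller object goes out of scope
//	// ...
//	signalHandledCaller.Done();
//		// release explicitly before calling a function that doesn't return,
//		// e.g. thread_exit()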
177 
178 
179 // #pragma mark - QueuedSignalsCounter
180 
181 
182 /*!	Creates a counter with the given limit.
183 	The limit defines the maximum the counter may reach. Since the
184 	BReferenceable's reference count is used, it is assumed that the owning
185 	team holds a reference and the reference count is one greater than the
186 	counter value.
187 	\param limit The maximum allowed value the counter may have. When
188 		\code < 0 \endcode, the value is not limited.
189 */
190 QueuedSignalsCounter::QueuedSignalsCounter(int32 limit)
191 	:
192 	fLimit(limit)
193 {
194 }
195 
196 
197 /*!	Increments the counter, if the limit allows that.
198 	\return \c true, if incrementing the counter succeeded, \c false otherwise.
199 */
200 bool
201 QueuedSignalsCounter::Increment()
202 {
203 	// no limit => no problem
204 	if (fLimit < 0) {
205 		AcquireReference();
206 		return true;
207 	}
208 
209 	// Increment the reference count manually, so we can check atomically. We
210 	// compare the old value against fLimit, since our (primary) owner holds a
211 	// reference that we don't want to count.
212 	if (atomic_add(&fReferenceCount, 1) > fLimit) {
213 		ReleaseReference();
214 		return false;
215 	}
216 
217 	return true;
218 }
219 
220 
221 // #pragma mark - Signal
222 
223 
224 Signal::Signal()
225 	:
226 	fCounter(NULL),
227 	fPending(false)
228 {
229 }
230 
231 
232 Signal::Signal(const Signal& other)
233 	:
234 	fCounter(NULL),
235 	fNumber(other.fNumber),
236 	fSignalCode(other.fSignalCode),
237 	fErrorCode(other.fErrorCode),
238 	fSendingProcess(other.fSendingProcess),
239 	fSendingUser(other.fSendingUser),
240 	fStatus(other.fStatus),
241 	fPollBand(other.fPollBand),
242 	fAddress(other.fAddress),
243 	fUserValue(other.fUserValue),
244 	fPending(false)
245 {
246 }
247 
248 
249 Signal::Signal(uint32 number, int32 signalCode, int32 errorCode,
250 	pid_t sendingProcess)
251 	:
252 	fCounter(NULL),
253 	fNumber(number),
254 	fSignalCode(signalCode),
255 	fErrorCode(errorCode),
256 	fSendingProcess(sendingProcess),
257 	fSendingUser(getuid()),
258 	fStatus(0),
259 	fPollBand(0),
260 	fAddress(NULL),
261 	fPending(false)
262 {
263 	fUserValue.sival_ptr = NULL;
264 }
265 
266 
267 Signal::~Signal()
268 {
269 	if (fCounter != NULL)
270 		fCounter->ReleaseReference();
271 }
272 
273 
274 /*!	Creates a queuable clone of the given signal.
275 	Also enforces the current team's signal queuing limit.
276 
277 	\param signal The signal to clone.
278 	\param queuingRequired If \c true, the function will return an error code
279 		when creating the clone fails for any reason. Otherwise, the function
280 		will set \a _signalToQueue to \c NULL, but still return \c B_OK.
281 	\param _signalToQueue Return parameter. Set to the clone of the signal.
282 	\return When \c queuingRequired is \c false, always \c B_OK. Otherwise
283 		\c B_OK when creating the signal clone succeeds, or another error
284 		code when it fails.
285 */
286 /*static*/ status_t
287 Signal::CreateQueuable(const Signal& signal, bool queuingRequired,
288 	Signal*& _signalToQueue)
289 {
290 	_signalToQueue = NULL;
291 
292 	// If interrupts are disabled, we can't allocate a signal.
293 	if (!are_interrupts_enabled())
294 		return queuingRequired ? B_BAD_VALUE : B_OK;
295 
296 	// increment the queued signals counter
297 	QueuedSignalsCounter* counter
298 		= thread_get_current_thread()->team->QueuedSignalsCounter();
299 	if (!counter->Increment())
300 		return queuingRequired ? EAGAIN : B_OK;
301 
302 	// allocate the signal
303 	Signal* signalToQueue = new(std::nothrow) Signal(signal);
304 	if (signalToQueue == NULL) {
305 		counter->Decrement();
306 		return queuingRequired ? B_NO_MEMORY : B_OK;
307 	}
308 
309 	signalToQueue->fCounter = counter;
310 
311 	_signalToQueue = signalToQueue;
312 	return B_OK;
313 }
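
// Editor's note: an illustrative usage sketch, not part of the original
// source, following the pattern of send_signal_to_thread() below: clone the
// caller's signal for queuing and hand the clone (and its reference) to the
// locked delivery function.
//
//	Signal* signalToQueue = NULL;
//	status_t error = Signal::CreateQueuable(signal,
//		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
//	if (error != B_OK)
//		return error;
//
//	// ... acquire team->signal_lock ...
//	error = send_signal_to_thread_locked(thread, signal.Number(),
//		signalToQueue, flags);
//		// takes over the signalToQueue reference, even on error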
314 
315 void
316 Signal::SetTo(uint32 number)
317 {
318 	Team* team = thread_get_current_thread()->team;
319 
320 	fNumber = number;
321 	fSignalCode = SI_USER;
322 	fErrorCode = 0;
323 	fSendingProcess = team->id;
324 	fSendingUser = team->effective_uid;
325 	fStatus = 0;
326 	fPollBand = 0;
327 	fAddress = NULL;
328 	fUserValue.sival_ptr = NULL;
329 }
330 
331 
332 int32
333 Signal::Priority() const
334 {
335 	return kSignalInfos[fNumber].priority;
336 }
337 
338 
339 void
340 Signal::Handled()
341 {
342 	ReleaseReference();
343 }
344 
345 
346 void
347 Signal::LastReferenceReleased()
348 {
349 	if (are_interrupts_enabled())
350 		delete this;
351 	else
352 		deferred_delete(this);
353 }
354 
355 
356 // #pragma mark - PendingSignals
357 
358 
359 PendingSignals::PendingSignals()
360 	:
361 	fQueuedSignalsMask(0),
362 	fUnqueuedSignalsMask(0)
363 {
364 }
365 
366 
367 PendingSignals::~PendingSignals()
368 {
369 	Clear();
370 }
371 
372 
373 /*!	Of the signals in \a nonBlocked, returns the priority of the one with the
374 	highest priority.
375 	\param nonBlocked The mask with the non-blocked signals.
376 	\return The priority of the highest priority non-blocked signal, or, if all
377 		signals are blocked, \c -1.
378 */
379 int32
380 PendingSignals::HighestSignalPriority(sigset_t nonBlocked) const
381 {
382 	Signal* queuedSignal;
383 	int32 unqueuedSignal;
384 	return _GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal);
385 }
386 
387 
388 void
389 PendingSignals::Clear()
390 {
391 	// release references of all queued signals
392 	while (Signal* signal = fQueuedSignals.RemoveHead())
393 		signal->Handled();
394 
395 	fQueuedSignalsMask = 0;
396 	fUnqueuedSignalsMask = 0;
397 }
398 
399 
400 /*!	Adds a signal.
401 	Takes over the reference to the signal from the caller.
402 */
403 void
404 PendingSignals::AddSignal(Signal* signal)
405 {
406 	// queue according to priority
407 	int32 priority = signal->Priority();
408 	Signal* otherSignal = NULL;
409 	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
410 			(otherSignal = it.Next()) != NULL;) {
411 		if (priority > otherSignal->Priority())
412 			break;
413 	}
414 
415 	fQueuedSignals.InsertBefore(otherSignal, signal);
416 	signal->SetPending(true);
417 
418 	fQueuedSignalsMask |= SIGNAL_TO_MASK(signal->Number());
419 }
420 
421 
422 void
423 PendingSignals::RemoveSignal(Signal* signal)
424 {
425 	signal->SetPending(false);
426 	fQueuedSignals.Remove(signal);
427 	_UpdateQueuedSignalMask();
428 }
429 
430 
431 void
432 PendingSignals::RemoveSignals(sigset_t mask)
433 {
434 	// remove from queued signals
435 	if ((fQueuedSignalsMask & mask) != 0) {
436 		for (SignalList::Iterator it = fQueuedSignals.GetIterator();
437 				Signal* signal = it.Next();) {
438 			// remove signal, if in mask
439 			if ((SIGNAL_TO_MASK(signal->Number()) & mask) != 0) {
440 				it.Remove();
441 				signal->SetPending(false);
442 				signal->Handled();
443 			}
444 		}
445 
446 		fQueuedSignalsMask &= ~mask;
447 	}
448 
449 	// remove from unqueued signals
450 	fUnqueuedSignalsMask &= ~mask;
451 }
452 
453 
454 /*!	Removes and returns a signal in \a nonBlocked that has the highest priority.
455 	The caller gets a reference to the returned signal, if any.
456 	\param nonBlocked The mask of non-blocked signals.
457 	\param buffer If the signal is not queued this buffer is returned. In this
458 		case the method acquires a reference to \a buffer, so that the caller
459 		gets a reference also in this case.
460 	\return The removed signal or \c NULL, if all signals are blocked.
461 */
462 Signal*
463 PendingSignals::DequeueSignal(sigset_t nonBlocked, Signal& buffer)
464 {
465 	// find the signal with the highest priority
466 	Signal* queuedSignal;
467 	int32 unqueuedSignal;
468 	if (_GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal) < 0)
469 		return NULL;
470 
471 	// if it is a queued signal, dequeue it
472 	if (queuedSignal != NULL) {
473 		fQueuedSignals.Remove(queuedSignal);
474 		queuedSignal->SetPending(false);
475 		_UpdateQueuedSignalMask();
476 		return queuedSignal;
477 	}
478 
479 	// it is unqueued -- remove from mask
480 	fUnqueuedSignalsMask &= ~SIGNAL_TO_MASK(unqueuedSignal);
481 
482 	// init buffer
483 	buffer.SetTo(unqueuedSignal);
484 	buffer.AcquireReference();
485 	return &buffer;
486 }
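
// Editor's note: an illustrative usage sketch, not part of the original
// source, mirroring dequeue_thread_or_team_signal()/handle_signals() below;
// pendingSignals stands for any PendingSignals instance:
//
//	Signal buffer;
//		// stack buffer used when the dequeued signal was not queued
//	Signal* signal = pendingSignals.DequeueSignal(nonBlocked, buffer);
//	if (signal != NULL) {
//		// ... act on the signal ...
//		signal->Handled();
//			// release the reference DequeueSignal() gave us
//	}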
487 
488 
489 /*!	Of the signals in \a nonBlocked, returns the priority of the one with the
490 	highest priority.
491 	\param nonBlocked The mask with the non-blocked signals.
492 	\param _queuedSignal If the found signal is a queued signal, the variable
493 		will be set to that signal, otherwise to \c NULL.
494 	\param _unqueuedSignal If the found signal is an unqueued signal, the
495 		variable is set to that signal's number, otherwise to \c -1.
496 	\return The priority of the highest priority non-blocked signal, or, if all
497 		signals are blocked, \c -1.
498 */
499 int32
500 PendingSignals::_GetHighestPrioritySignal(sigset_t nonBlocked,
501 	Signal*& _queuedSignal, int32& _unqueuedSignal) const
502 {
503 	// check queued signals
504 	Signal* queuedSignal = NULL;
505 	int32 queuedPriority = -1;
506 
507 	if ((fQueuedSignalsMask & nonBlocked) != 0) {
508 		for (SignalList::ConstIterator it = fQueuedSignals.GetIterator();
509 				Signal* signal = it.Next();) {
510 			if ((SIGNAL_TO_MASK(signal->Number()) & nonBlocked) != 0) {
511 				queuedPriority = signal->Priority();
512 				queuedSignal = signal;
513 				break;
514 			}
515 		}
516 	}
517 
518 	// check unqueued signals
519 	int32 unqueuedSignal = -1;
520 	int32 unqueuedPriority = -1;
521 
522 	sigset_t unqueuedSignals = fUnqueuedSignalsMask & nonBlocked;
523 	if (unqueuedSignals != 0) {
524 		int32 signal = 1;
525 		while (unqueuedSignals != 0) {
526 			sigset_t mask = SIGNAL_TO_MASK(signal);
527 			if ((unqueuedSignals & mask) != 0) {
528 				int32 priority = kSignalInfos[signal].priority;
529 				if (priority > unqueuedPriority) {
530 					unqueuedSignal = signal;
531 					unqueuedPriority = priority;
532 				}
533 				unqueuedSignals &= ~mask;
534 			}
535 
536 			signal++;
537 		}
538 	}
539 
540 	// Return found queued or unqueued signal, whichever has the higher
541 	// priority.
542 	if (queuedPriority >= unqueuedPriority) {
543 		_queuedSignal = queuedSignal;
544 		_unqueuedSignal = -1;
545 		return queuedPriority;
546 	}
547 
548 	_queuedSignal = NULL;
549 	_unqueuedSignal = unqueuedSignal;
550 	return unqueuedPriority;
551 }
552 
553 
554 void
555 PendingSignals::_UpdateQueuedSignalMask()
556 {
557 	sigset_t mask = 0;
558 	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
559 			Signal* signal = it.Next();) {
560 		mask |= SIGNAL_TO_MASK(signal->Number());
561 	}
562 
563 	fQueuedSignalsMask = mask;
564 }
565 
566 
567 // #pragma mark - signal tracing
568 
569 
570 #if SIGNAL_TRACING
571 
572 namespace SignalTracing {
573 
574 
575 class HandleSignal : public AbstractTraceEntry {
576 	public:
577 		HandleSignal(uint32 signal)
578 			:
579 			fSignal(signal)
580 		{
581 			Initialized();
582 		}
583 
584 		virtual void AddDump(TraceOutput& out)
585 		{
586 			out.Print("signal handle:  %" B_PRIu32 " (%s)" , fSignal,
587 				signal_name(fSignal));
588 		}
589 
590 	private:
591 		uint32		fSignal;
592 };
593 
594 
595 class ExecuteSignalHandler : public AbstractTraceEntry {
596 	public:
597 		ExecuteSignalHandler(uint32 signal, struct sigaction* handler)
598 			:
599 			fSignal(signal),
600 			fHandler((void*)handler->sa_handler)
601 		{
602 			Initialized();
603 		}
604 
605 		virtual void AddDump(TraceOutput& out)
606 		{
607 			out.Print("signal exec handler: signal: %" B_PRIu32 " (%s), "
608 				"handler: %p", fSignal, signal_name(fSignal), fHandler);
609 		}
610 
611 	private:
612 		uint32	fSignal;
613 		void*	fHandler;
614 };
615 
616 
617 class SendSignal : public AbstractTraceEntry {
618 	public:
619 		SendSignal(pid_t target, uint32 signal, uint32 flags)
620 			:
621 			fTarget(target),
622 			fSignal(signal),
623 			fFlags(flags)
624 		{
625 			Initialized();
626 		}
627 
628 		virtual void AddDump(TraceOutput& out)
629 		{
630 			out.Print("signal send: target: %" B_PRId32 ", signal: %" B_PRIu32
631 				" (%s), flags: %#" B_PRIx32, fTarget, fSignal,
632 				signal_name(fSignal), fFlags);
633 		}
634 
635 	private:
636 		pid_t	fTarget;
637 		uint32	fSignal;
638 		uint32	fFlags;
639 };
640 
641 
642 class SigAction : public AbstractTraceEntry {
643 	public:
644 		SigAction(uint32 signal, const struct sigaction* act)
645 			:
646 			fSignal(signal),
647 			fAction(*act)
648 		{
649 			Initialized();
650 		}
651 
652 		virtual void AddDump(TraceOutput& out)
653 		{
654 			out.Print("signal action: signal: %" B_PRIu32 " (%s), "
655 				"action: {handler: %p, flags: %#x, mask: %#" B_PRIx64 "}",
656 				fSignal, signal_name(fSignal), fAction.sa_handler,
657 				fAction.sa_flags, (uint64)fAction.sa_mask);
658 		}
659 
660 	private:
661 		uint32				fSignal;
662 		struct sigaction	fAction;
663 };
664 
665 
666 class SigProcMask : public AbstractTraceEntry {
667 	public:
668 		SigProcMask(int how, sigset_t mask)
669 			:
670 			fHow(how),
671 			fMask(mask),
672 			fOldMask(thread_get_current_thread()->sig_block_mask)
673 		{
674 			Initialized();
675 		}
676 
677 		virtual void AddDump(TraceOutput& out)
678 		{
679 			const char* how = "invalid";
680 			switch (fHow) {
681 				case SIG_BLOCK:
682 					how = "block";
683 					break;
684 				case SIG_UNBLOCK:
685 					how = "unblock";
686 					break;
687 				case SIG_SETMASK:
688 					how = "set";
689 					break;
690 			}
691 
692 			out.Print("signal proc mask: %s 0x%llx, old mask: 0x%llx", how,
693 				(long long)fMask, (long long)fOldMask);
694 		}
695 
696 	private:
697 		int			fHow;
698 		sigset_t	fMask;
699 		sigset_t	fOldMask;
700 };
701 
702 
703 class SigSuspend : public AbstractTraceEntry {
704 	public:
705 		SigSuspend(sigset_t mask)
706 			:
707 			fMask(mask),
708 			fOldMask(thread_get_current_thread()->sig_block_mask)
709 		{
710 			Initialized();
711 		}
712 
713 		virtual void AddDump(TraceOutput& out)
714 		{
715 			out.Print("signal suspend: %#llx, old mask: %#llx",
716 				(long long)fMask, (long long)fOldMask);
717 		}
718 
719 	private:
720 		sigset_t	fMask;
721 		sigset_t	fOldMask;
722 };
723 
724 
725 class SigSuspendDone : public AbstractTraceEntry {
726 	public:
727 		SigSuspendDone()
728 			:
729 			fSignals(thread_get_current_thread()->ThreadPendingSignals())
730 		{
731 			Initialized();
732 		}
733 
734 		virtual void AddDump(TraceOutput& out)
735 		{
736 			out.Print("signal suspend done: %#" B_PRIx32, fSignals);
737 		}
738 
739 	private:
740 		uint32		fSignals;
741 };
742 
743 }	// namespace SignalTracing
744 
745 #	define T(x)	new(std::nothrow) SignalTracing::x
746 
747 #else
748 #	define T(x)
749 #endif	// SIGNAL_TRACING
750 
751 
752 // #pragma mark -
753 
754 
755 /*!	Updates the given thread's Thread::flags field according to what signals are
756 	pending.
757 	The caller must hold \c team->signal_lock.
758 */
759 static void
760 update_thread_signals_flag(Thread* thread)
761 {
762 	sigset_t mask = ~thread->sig_block_mask;
763 	if ((thread->AllPendingSignals() & mask) != 0)
764 		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
765 	else
766 		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
767 }
768 
769 
770 /*!	Updates the current thread's Thread::flags field according to what signals
771 	are pending.
772 	The caller must hold \c team->signal_lock.
773 */
774 static void
775 update_current_thread_signals_flag()
776 {
777 	update_thread_signals_flag(thread_get_current_thread());
778 }
779 
780 
781 /*!	Updates all of the given team's threads' Thread::flags fields according to
782 	what signals are pending.
783 	The caller must hold \c signal_lock.
784 */
785 static void
786 update_team_threads_signal_flag(Team* team)
787 {
788 	for (Thread* thread = team->thread_list; thread != NULL;
789 			thread = thread->team_next) {
790 		update_thread_signals_flag(thread);
791 	}
792 }
793 
794 
795 /*!	Notifies the user debugger about a signal to be handled.
796 
797 	The caller must not hold any locks.
798 
799 	\param thread The current thread.
800 	\param signal The signal to be handled.
801 	\param handler The installed signal handler for the signal.
802 	\param deadly Indicates whether the signal is deadly.
803 	\return \c true, if the signal shall be handled, \c false, if it shall be
804 		ignored.
805 */
806 static bool
807 notify_debugger(Thread* thread, Signal* signal, struct sigaction& handler,
808 	bool deadly)
809 {
810 	uint64 signalMask = SIGNAL_TO_MASK(signal->Number());
811 
812 	// first check the ignore signal masks the debugger specified for the thread
813 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
814 
815 	if ((thread->debug_info.ignore_signals_once & signalMask) != 0) {
816 		thread->debug_info.ignore_signals_once &= ~signalMask;
817 		return true;
818 	}
819 
820 	if ((thread->debug_info.ignore_signals & signalMask) != 0)
821 		return true;
822 
823 	threadDebugInfoLocker.Unlock();
824 
825 	siginfo_t info;
826 	info.si_signo = signal->Number();
827 	info.si_code = signal->SignalCode();
828 	info.si_errno = signal->ErrorCode();
829 	info.si_pid = signal->SendingProcess();
830 	info.si_uid = signal->SendingUser();
831 	info.si_addr = signal->Address();
832 	info.si_status = signal->Status();
833 	info.si_band = signal->PollBand();
834 	info.si_value = signal->UserValue();
835 
836 	// deliver the event
837 	return user_debug_handle_signal(signal->Number(), &handler, &info, deadly);
838 }
839 
840 
841 /*!	Removes and returns a signal with the highest priority in \a nonBlocked that
842 	is pending in the given thread or its team.
843 	After dequeuing the signal, the Thread::flags fields of the affected threads
844 	are updated.
845 	The caller gets a reference to the returned signal, if any.
846 	The caller must hold \c team->signal_lock.
847 	\param thread The thread.
848 	\param nonBlocked The mask of non-blocked signals.
849 	\param buffer If the signal is not queued this buffer is returned. In this
850 		case the method acquires a reference to \a buffer, so that the caller
851 		gets a reference also in this case.
852 	\return The removed signal or \c NULL, if all signals are blocked.
853 */
854 static Signal*
855 dequeue_thread_or_team_signal(Thread* thread, sigset_t nonBlocked,
856 	Signal& buffer)
857 {
858 	Team* team = thread->team;
859 	Signal* signal;
860 	if (team->HighestPendingSignalPriority(nonBlocked)
861 			> thread->HighestPendingSignalPriority(nonBlocked)) {
862 		signal = team->DequeuePendingSignal(nonBlocked, buffer);
863 		update_team_threads_signal_flag(team);
864 	} else {
865 		signal = thread->DequeuePendingSignal(nonBlocked, buffer);
866 		update_thread_signals_flag(thread);
867 	}
868 
869 	return signal;
870 }
871 
872 
873 static status_t
874 setup_signal_frame(Thread* thread, struct sigaction* action, Signal* signal,
875 	sigset_t signalMask)
876 {
877 	// prepare the data we need to copy onto the user stack
878 	signal_frame_data frameData;
879 
880 	// signal info
881 	frameData.info.si_signo = signal->Number();
882 	frameData.info.si_code = signal->SignalCode();
883 	frameData.info.si_errno = signal->ErrorCode();
884 	frameData.info.si_pid = signal->SendingProcess();
885 	frameData.info.si_uid = signal->SendingUser();
886 	frameData.info.si_addr = signal->Address();
887 	frameData.info.si_status = signal->Status();
888 	frameData.info.si_band = signal->PollBand();
889 	frameData.info.si_value = signal->UserValue();
890 
891 	// context
892 	frameData.context.uc_link = thread->user_signal_context;
893 	frameData.context.uc_sigmask = signalMask;
894 	// uc_stack and uc_mcontext are filled in by the architecture specific code.
895 
896 	// user data
897 	frameData.user_data = action->sa_userdata;
898 
899 	// handler function
900 	frameData.siginfo_handler = (action->sa_flags & SA_SIGINFO) != 0;
901 	frameData.handler = frameData.siginfo_handler
902 		? (void*)action->sa_sigaction : (void*)action->sa_handler;
903 
904 	// thread flags -- save and clear the thread's syscall restart related
905 	// flags
906 	frameData.thread_flags = atomic_and(&thread->flags,
907 		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
908 
909 	// syscall restart related fields
910 	memcpy(frameData.syscall_restart_parameters,
911 		thread->syscall_restart.parameters,
912 		sizeof(frameData.syscall_restart_parameters));
913 
914 	// commpage address
915 	frameData.commpage_address = thread->team->commpage_address;
916 
917 	// syscall_restart_return_value is filled in by the architecture specific
918 	// code.
919 
920 	return arch_setup_signal_frame(thread, action, &frameData);
921 }
922 
923 
924 /*! Actually handles pending signals -- i.e. the thread will exit, a custom
925 	signal handler is prepared, or whatever the signal demands.
926 	The function will not return, when a deadly signal is encountered. The
927 	function will suspend the thread indefinitely, when a stop signal is
928 	encountered.
929 	Interrupts must be enabled.
930 	\param thread The current thread.
931 */
932 void
933 handle_signals(Thread* thread)
934 {
935 	Team* team = thread->team;
936 
937 	TeamLocker teamLocker(team);
938 	InterruptsSpinLocker locker(thread->team->signal_lock);
939 
940 	// If userland requested to defer signals, we check now whether this is
941 	// possible.
942 	sigset_t nonBlockedMask = ~thread->sig_block_mask;
943 	sigset_t signalMask = thread->AllPendingSignals() & nonBlockedMask;
944 
945 	arch_cpu_enable_user_access();
946 	if (thread->user_thread->defer_signals > 0
947 		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0
948 		&& thread->sigsuspend_original_unblocked_mask == 0) {
949 		thread->user_thread->pending_signals = signalMask;
950 		arch_cpu_disable_user_access();
951 		return;
952 	}
953 
954 	thread->user_thread->pending_signals = 0;
955 	arch_cpu_disable_user_access();
956 
957 	// determine syscall restart behavior
958 	uint32 restartFlags = atomic_and(&thread->flags,
959 		~THREAD_FLAGS_DONT_RESTART_SYSCALL);
960 	bool alwaysRestart
961 		= (restartFlags & THREAD_FLAGS_ALWAYS_RESTART_SYSCALL) != 0;
962 	bool restart = alwaysRestart
963 		|| (restartFlags & THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;
964 
965 	// Loop until we've handled all signals.
966 	bool initialIteration = true;
967 	while (true) {
968 		if (initialIteration) {
969 			initialIteration = false;
970 		} else {
971 			teamLocker.Lock();
972 			locker.Lock();
973 
974 			signalMask = thread->AllPendingSignals() & nonBlockedMask;
975 		}
976 
977 		// Unless SIGKILL[THR] are pending, check whether the thread shall stop for
978 		// a core dump or for debugging.
979 		if ((signalMask & KILL_SIGNALS) == 0) {
980 			if ((atomic_get(&thread->flags) & THREAD_FLAGS_TRAP_FOR_CORE_DUMP)
981 					!= 0) {
982 				locker.Unlock();
983 				teamLocker.Unlock();
984 
985 				core_dump_trap_thread();
986 				continue;
987 			}
988 
989 			if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP)
990 					!= 0) {
991 				locker.Unlock();
992 				teamLocker.Unlock();
993 
994 				user_debug_stop_thread();
995 				continue;
996 			}
997 		}
998 
999 		// We're done, if there aren't any pending signals anymore.
1000 		if ((signalMask & nonBlockedMask) == 0)
1001 			break;
1002 
1003 		// get pending non-blocked thread or team signal with the highest
1004 		// priority
1005 		Signal stackSignal;
1006 		Signal* signal = dequeue_thread_or_team_signal(thread, nonBlockedMask,
1007 			stackSignal);
1008 		ASSERT(signal != NULL);
1009 		SignalHandledCaller signalHandledCaller(signal);
1010 
1011 		locker.Unlock();
1012 
1013 		// get the action for the signal
1014 		struct sigaction handler;
1015 		if (signal->Number() <= MAX_SIGNAL_NUMBER) {
1016 			handler = team->SignalActionFor(signal->Number());
1017 		} else {
1018 			handler.sa_handler = SIG_DFL;
1019 			handler.sa_flags = 0;
1020 		}
1021 
1022 		if ((handler.sa_flags & SA_ONESHOT) != 0
1023 			&& handler.sa_handler != SIG_IGN && handler.sa_handler != SIG_DFL) {
1024 			team->SignalActionFor(signal->Number()).sa_handler = SIG_DFL;
1025 		}
1026 
1027 		T(HandleSignal(signal->Number()));
1028 
1029 		teamLocker.Unlock();
1030 
1031 		// debug the signal, if a debugger is installed and the signal debugging
1032 		// flag is set
1033 		bool debugSignal = (~atomic_get(&team->debug_info.flags)
1034 				& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS))
1035 			== 0;
1036 
1037 		// handle the signal
1038 		TRACE(("Thread %" B_PRId32 " received signal %s\n", thread->id,
1039 			kSignalInfos[signal->Number()].name));
1040 
1041 		if (handler.sa_handler == SIG_IGN) {
1042 			// signal is to be ignored
1043 			// TODO: apply zombie cleaning on SIGCHLD
1044 
1045 			// notify the debugger
1046 			if (debugSignal)
1047 				notify_debugger(thread, signal, handler, false);
1048 			continue;
1049 		} else if (handler.sa_handler == SIG_DFL) {
1050 			// default signal behaviour
1051 
1052 			// realtime signals are ignored by default
1053 			if (signal->Number() >= SIGNAL_REALTIME_MIN
1054 				&& signal->Number() <= SIGNAL_REALTIME_MAX) {
1055 				// notify the debugger
1056 				if (debugSignal)
1057 					notify_debugger(thread, signal, handler, false);
1058 				continue;
1059 			}
1060 
1061 			bool killTeam = false;
1062 			switch (signal->Number()) {
1063 				case SIGCHLD:
1064 				case SIGWINCH:
1065 				case SIGURG:
1066 					// notify the debugger
1067 					if (debugSignal)
1068 						notify_debugger(thread, signal, handler, false);
1069 					continue;
1070 
1071 				case SIGNAL_DEBUG_THREAD:
1072 					// ignore -- used together with B_THREAD_DEBUG_STOP, which
1073 					// is handled above
1074 					continue;
1075 
1076 				case SIGNAL_CANCEL_THREAD:
1077 					// set up the signal handler
1078 					handler.sa_handler = thread->cancel_function;
1079 					handler.sa_flags = 0;
1080 					handler.sa_mask = 0;
1081 					handler.sa_userdata = NULL;
1082 
1083 					restart = false;
1084 						// we always want to interrupt
1085 					break;
1086 
1087 				case SIGNAL_CONTINUE_THREAD:
1088 					// prevent syscall restart, but otherwise ignore
1089 					restart = false;
1090 					atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1091 					continue;
1092 
1093 				case SIGCONT:
1094 					// notify the debugger
1095 					if (debugSignal
1096 						&& !notify_debugger(thread, signal, handler, false))
1097 						continue;
1098 
1099 					// notify threads waiting for team state changes
1100 					if (thread == team->main_thread) {
1101 						team->LockTeamAndParent(false);
1102 
1103 						team_set_job_control_state(team,
1104 							JOB_CONTROL_STATE_CONTINUED, signal);
1105 
1106 						team->UnlockTeamAndParent();
1107 
1108 						// The standard states that the system *may* send a
1109 						// SIGCHLD when a child is continued. I haven't found
1110 						// a good reason why we would want to, though.
1111 					}
1112 					continue;
1113 
1114 				case SIGSTOP:
1115 				case SIGTSTP:
1116 				case SIGTTIN:
1117 				case SIGTTOU:
1118 				{
1119 					// notify the debugger
1120 					if (debugSignal
1121 						&& !notify_debugger(thread, signal, handler, false))
1122 						continue;
1123 
1124 					// The terminal-sent stop signals are allowed to stop the
1125 					// process only if it doesn't belong to an orphaned process
1126 					// group. Otherwise the signal must be discarded.
1127 					team->LockProcessGroup();
1128 					AutoLocker<ProcessGroup> groupLocker(team->group, true);
1129 					if (signal->Number() != SIGSTOP
1130 						&& team->group->IsOrphaned()) {
1131 						continue;
1132 					}
1133 
1134 					// notify threads waiting for team state changes
1135 					if (thread == team->main_thread) {
1136 						team->LockTeamAndParent(false);
1137 
1138 						team_set_job_control_state(team,
1139 							JOB_CONTROL_STATE_STOPPED, signal);
1140 
1141 						// send a SIGCHLD to the parent (unless it has
1142 						// SA_NOCLDSTOP set)
1143 						Team* parentTeam = team->parent;
1144 
1145 						struct sigaction& parentHandler
1146 							= parentTeam->SignalActionFor(SIGCHLD);
1147 						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
1148 							Signal childSignal(SIGCHLD, CLD_STOPPED, B_OK,
1149 								team->id);
1150 							childSignal.SetStatus(signal->Number());
1151 							childSignal.SetSendingUser(signal->SendingUser());
1152 							send_signal_to_team(parentTeam, childSignal, 0);
1153 						}
1154 
1155 						team->UnlockTeamAndParent();
1156 					}
1157 
1158 					groupLocker.Unlock();
1159 
1160 					// Suspend the thread, unless there's already a signal to
1161 					// continue or kill pending.
1162 					locker.Lock();
1163 					bool resume = (thread->AllPendingSignals()
1164 								& (CONTINUE_SIGNALS | KILL_SIGNALS)) != 0;
1165 					locker.Unlock();
1166 
1167 					if (!resume)
1168 						thread_suspend();
1169 
1170 					continue;
1171 				}
1172 
1173 				case SIGSEGV:
1174 				case SIGBUS:
1175 				case SIGFPE:
1176 				case SIGILL:
1177 				case SIGTRAP:
1178 				case SIGABRT:
1179 				case SIGKILL:
1180 				case SIGQUIT:
1181 				case SIGPOLL:
1182 				case SIGPROF:
1183 				case SIGSYS:
1184 				case SIGVTALRM:
1185 				case SIGXCPU:
1186 				case SIGXFSZ:
1187 				default:
1188 					TRACE(("Shutting down team %" B_PRId32 " due to signal %"
1189 						B_PRIu32 " received in thread %" B_PRIu32 " \n",
1190 						team->id, signal->Number(), thread->id));
1191 
1192 					// This signal kills the team regardless of which thread
1193 					// received it.
1194 					killTeam = true;
1195 
1196 					// fall through
1197 				case SIGKILLTHR:
1198 					// notify the debugger
1199 					if (debugSignal && signal->Number() != SIGKILL
1200 						&& signal->Number() != SIGKILLTHR
1201 						&& !notify_debugger(thread, signal, handler, true)) {
1202 						continue;
1203 					}
1204 
1205 					if (killTeam || thread == team->main_thread) {
1206 						// The signal is terminal for the team or the thread is
1207 						// the main thread. In either case the team is going
1208 						// down. Set its exit status, if that didn't happen yet.
1209 						teamLocker.Lock();
1210 
1211 						if (!team->exit.initialized) {
1212 							team->exit.reason = CLD_KILLED;
1213 							team->exit.signal = signal->Number();
1214 							team->exit.signaling_user = signal->SendingUser();
1215 							team->exit.status = 0;
1216 							team->exit.initialized = true;
1217 						}
1218 
1219 						teamLocker.Unlock();
1220 
1221 						// If this is not the main thread, send the main thread a
1222 						// SIGKILLTHR so that the team terminates.
1223 						if (thread != team->main_thread) {
1224 							Signal childSignal(SIGKILLTHR, SI_USER, B_OK,
1225 								team->id);
1226 							send_signal_to_thread_id(team->id, childSignal, 0);
1227 						}
1228 					}
1229 
1230 					// explicitly get rid of the signal reference, since
1231 					// thread_exit() won't return
1232 					signalHandledCaller.Done();
1233 
1234 					thread_exit();
1235 						// won't return
1236 			}
1237 		}
1238 
1239 		// User defined signal handler
1240 
1241 		// notify the debugger
1242 		if (debugSignal && !notify_debugger(thread, signal, handler, false))
1243 			continue;
1244 
1245 		if (!restart
1246 				|| (!alwaysRestart && (handler.sa_flags & SA_RESTART) == 0)) {
1247 			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1248 		}
1249 
1250 		T(ExecuteSignalHandler(signal->Number(), &handler));
1251 
1252 		TRACE(("### Setting up custom signal handler frame...\n"));
1253 
1254 		// save the old block mask -- we may need to adjust it for the handler
1255 		locker.Lock();
1256 
1257 		sigset_t oldBlockMask = thread->sigsuspend_original_unblocked_mask != 0
1258 			? ~thread->sigsuspend_original_unblocked_mask
1259 			: thread->sig_block_mask;
1260 
1261 		// Update the block mask while the signal handler is running -- it
1262 		// will be automatically restored when the signal frame is left.
1263 		thread->sig_block_mask |= handler.sa_mask & BLOCKABLE_SIGNALS;
1264 
1265 		if ((handler.sa_flags & SA_NOMASK) == 0) {
1266 			thread->sig_block_mask
1267 				|= SIGNAL_TO_MASK(signal->Number()) & BLOCKABLE_SIGNALS;
1268 		}
1269 
1270 		update_current_thread_signals_flag();
1271 
1272 		locker.Unlock();
1273 
1274 		setup_signal_frame(thread, &handler, signal, oldBlockMask);
1275 
1276 		// Reset sigsuspend_original_unblocked_mask. It would have been set by
1277 		// sigsuspend_internal(). In that case, above we set oldBlockMask
1278 		// accordingly so that after the handler returns the thread's signal
1279 		// mask is reset.
1280 		thread->sigsuspend_original_unblocked_mask = 0;
1281 
1282 		return;
1283 	}
1284 
1285 	// We have not handled any signal (or have only ignored ones).
1286 
1287 	// If sigsuspend_original_unblocked_mask is non-zero, we came from a
1288 	// sigsuspend_internal(). Not having handled any signal, we should restart
1289 	// the syscall.
1290 	if (thread->sigsuspend_original_unblocked_mask != 0) {
1291 		restart = true;
1292 		atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
1293 	} else if (!restart) {
1294 		// clear syscall restart thread flag, if we're not supposed to restart
1295 		// the syscall
1296 		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1297 	}
1298 }
1299 
1300 
1301 /*!	Checks whether the given signal is blocked for the given team (i.e. all of
1302 	its threads).
1303 	The caller must hold the team's lock and \c signal_lock.
1304 */
1305 bool
1306 is_team_signal_blocked(Team* team, int signal)
1307 {
1308 	sigset_t mask = SIGNAL_TO_MASK(signal);
1309 
1310 	for (Thread* thread = team->thread_list; thread != NULL;
1311 			thread = thread->team_next) {
1312 		if ((thread->sig_block_mask & mask) == 0)
1313 			return false;
1314 	}
1315 
1316 	return true;
1317 }
1318 
1319 
1320 /*!	Gets (guesses) the current thread's currently used stack from the given
1321 	stack pointer.
1322 	Fills in \a stack with either the signal stack or the thread's user stack.
1323 	\param address A stack pointer address to be used to determine the used
1324 		stack.
1325 	\param stack Filled in by the function.
1326 */
1327 void
1328 signal_get_user_stack(addr_t address, stack_t* stack)
1329 {
1330 	// If a signal stack is enabled for the stack and the address is within it,
1331 	// return the signal stack. In all other cases return the thread's user
1332 	// stack, even if the address doesn't lie within it.
1333 	Thread* thread = thread_get_current_thread();
1334 	if (thread->signal_stack_enabled && address >= thread->signal_stack_base
1335 		&& address < thread->signal_stack_base + thread->signal_stack_size) {
1336 		stack->ss_sp = (void*)thread->signal_stack_base;
1337 		stack->ss_size = thread->signal_stack_size;
1338 	} else {
1339 		stack->ss_sp = (void*)thread->user_stack_base;
1340 		stack->ss_size = thread->user_stack_size;
1341 	}
1342 
1343 	stack->ss_flags = 0;
1344 }
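
// Editor's note: an illustrative sketch, not part of the original source.
// Architecture specific signal frame code could use the function above to pick
// the stack on which to build the frame; userStackPointer is a hypothetical
// variable holding the interrupted user stack pointer.
//
//	stack_t stack;
//	signal_get_user_stack(userStackPointer, &stack);
//	addr_t stackTop = (addr_t)stack.ss_sp + stack.ss_size;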
1345 
1346 
1347 /*!	Checks whether any non-blocked signal is pending for the current thread.
1348 	The caller must hold \c team->signal_lock.
1349 	\param thread The current thread.
1350 */
1351 static bool
1352 has_signals_pending(Thread* thread)
1353 {
1354 	return (thread->AllPendingSignals() & ~thread->sig_block_mask) != 0;
1355 }
1356 
1357 
1358 /*!	Checks whether the current user has permission to send a signal to the given
1359 	target team.
1360 
1361 	\param team The target team.
1362 */
1363 static bool
1364 has_permission_to_signal(Team* team)
1365 {
1366 	// get the current user
1367 	uid_t currentUser = thread_get_current_thread()->team->effective_uid;
1368 
1369 	// root is omnipotent -- in the other cases the current user must match the
1370 	// target team's
1371 	return currentUser == 0 || currentUser == team->effective_uid;
1372 }
1373 
1374 
1375 /*!	Delivers a signal to the \a thread, but doesn't handle the signal -- it just
1376 	makes sure the thread gets the signal, i.e. unblocks it if needed.
1377 
1378 	The caller must hold \c team->signal_lock.
1379 
1380 	\param thread The thread the signal shall be delivered to.
1381 	\param signalNumber The number of the signal to be delivered. If \c 0, no
1382 		actual signal will be delivered. Only delivery checks will be performed.
1383 	\param signal If non-NULL the signal to be queued (has number
1384 		\a signalNumber in this case). The caller transfers an object reference
1385 		to this function. If \c NULL an unqueued signal will be delivered to the
1386 		thread.
1387 	\param flags A bitwise combination of any number of the following:
1388 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1389 			target thread the signal.
1390 	\return \c B_OK, when the signal was delivered successfully, another error
1391 		code otherwise.
1392 */
1393 status_t
1394 send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
1395 	Signal* signal, uint32 flags)
1396 {
1397 	ASSERT(signal == NULL || signalNumber == signal->Number());
1398 
1399 	T(SendSignal(thread->id, signalNumber, flags));
1400 
1401 	// The caller transferred a reference to the signal to us.
1402 	BReference<Signal> signalReference(signal, true);
1403 
1404 	if ((flags & B_CHECK_PERMISSION) != 0) {
1405 		if (!has_permission_to_signal(thread->team))
1406 			return EPERM;
1407 	}
1408 
1409 	if (signalNumber == 0)
1410 		return B_OK;
1411 
1412 	if (thread->team == team_get_kernel_team()) {
1413 		// Signals to kernel threads will only wake them up
1414 		thread_continue(thread);
1415 		return B_OK;
1416 	}
1417 
1418 	if (signal != NULL)
1419 		thread->AddPendingSignal(signal);
1420 	else
1421 		thread->AddPendingSignal(signalNumber);
1422 
1423 	// the thread has the signal reference, now
1424 	signalReference.Detach();
1425 
1426 	switch (signalNumber) {
1427 		case SIGKILL:
1428 		{
1429 			// If sent to a thread other than the team's main thread, also send
1430 			// a SIGKILLTHR to the main thread to kill the team.
1431 			Thread* mainThread = thread->team->main_thread;
1432 			if (mainThread != NULL && mainThread != thread) {
1433 				mainThread->AddPendingSignal(SIGKILLTHR);
1434 
1435 				// wake up main thread
1436 				mainThread->going_to_suspend = false;
1437 
1438 				SpinLocker locker(mainThread->scheduler_lock);
1439 				if (mainThread->state == B_THREAD_SUSPENDED)
1440 					scheduler_enqueue_in_run_queue(mainThread);
1441 				else
1442 					thread_interrupt(mainThread, true);
1443 				locker.Unlock();
1444 
1445 				update_thread_signals_flag(mainThread);
1446 			}
1447 
1448 			// supposed to fall through
1449 		}
1450 		case SIGKILLTHR:
1451 		{
1452 			// Wake up suspended threads and interrupt waiting ones
1453 			thread->going_to_suspend = false;
1454 
1455 			SpinLocker locker(thread->scheduler_lock);
1456 			if (thread->state == B_THREAD_SUSPENDED)
1457 				scheduler_enqueue_in_run_queue(thread);
1458 			else
1459 				thread_interrupt(thread, true);
1460 
1461 			break;
1462 		}
1463 		case SIGNAL_DEBUG_THREAD:
1464 		{
1465 			// Wake up thread if it was suspended, otherwise interrupt it.
1466 			thread->going_to_suspend = false;
1467 
1468 			SpinLocker locker(thread->scheduler_lock);
1469 			if (thread->state == B_THREAD_SUSPENDED)
1470 				scheduler_enqueue_in_run_queue(thread);
1471 			else
1472 				thread_interrupt(thread, false);
1473 
1474 			break;
1475 		}
1476 		case SIGNAL_CONTINUE_THREAD:
1477 		{
1478 			// wake up thread, and interrupt its current syscall
1479 			thread->going_to_suspend = false;
1480 
1481 			SpinLocker locker(thread->scheduler_lock);
1482 			if (thread->state == B_THREAD_SUSPENDED)
1483 				scheduler_enqueue_in_run_queue(thread);
1484 
1485 			atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
1486 			break;
1487 		}
1488 		case SIGCONT:
1489 		{
1490 			// Wake up thread if it was suspended, otherwise interrupt it if
1491 			// the signal isn't blocked.
1492 			thread->going_to_suspend = false;
1493 
1494 			SpinLocker locker(thread->scheduler_lock);
1495 			if (thread->state == B_THREAD_SUSPENDED)
1496 				scheduler_enqueue_in_run_queue(thread);
1497 			else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask) != 0)
1498 				thread_interrupt(thread, false);
1499 
1500 			// remove any pending stop signals
1501 			thread->RemovePendingSignals(STOP_SIGNALS);
1502 			break;
1503 		}
1504 		default:
1505 			// If the signal is not masked, interrupt the thread if it is
1506 			// currently waiting (interruptibly).
1507 			if ((thread->AllPendingSignals()
1508 						& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD)))
1509 					!= 0) {
1510 				// Interrupt thread if it was waiting
1511 				SpinLocker locker(thread->scheduler_lock);
1512 				thread_interrupt(thread, false);
1513 			}
1514 			break;
1515 	}
1516 
1517 	update_thread_signals_flag(thread);
1518 
1519 	return B_OK;
1520 }
1521 
1522 
1523 /*!	Sends the given signal to the given thread.
1524 
1525 	\param thread The thread the signal shall be sent to.
1526 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1527 		actual signal will be delivered. Only delivery checks will be performed.
1528 		The given object will be copied. The caller retains ownership.
1529 	\param flags A bitwise combination of any number of the following:
1530 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1531 			target thread the signal.
1532 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1533 			woken up, the scheduler will be invoked. If set that will not be
1534 			done explicitly, but rescheduling can still happen, e.g. when the
1535 			current thread's time slice runs out.
1536 	\return \c B_OK, when the signal was delivered successfully, another error
1537 		code otherwise.
1538 */
1539 status_t
1540 send_signal_to_thread(Thread* thread, const Signal& signal, uint32 flags)
1541 {
1542 	// Clone the signal -- the clone will be queued. If something fails and the
1543 	// caller doesn't require queuing, we will add an unqueued signal.
1544 	Signal* signalToQueue = NULL;
1545 	status_t error = Signal::CreateQueuable(signal,
1546 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1547 	if (error != B_OK)
1548 		return error;
1549 
1550 	InterruptsReadSpinLocker teamLocker(thread->team_lock);
1551 	SpinLocker locker(thread->team->signal_lock);
1552 
1553 	error = send_signal_to_thread_locked(thread, signal.Number(), signalToQueue,
1554 		flags);
1555 	if (error != B_OK)
1556 		return error;
1557 
1558 	locker.Unlock();
1559 	teamLocker.Unlock();
1560 
1561 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1562 		scheduler_reschedule_if_necessary();
1563 
1564 	return B_OK;
1565 }
1566 
1567 
1568 /*!	Sends the given signal to the thread with the given ID.
1569 
1570 	\param threadID The ID of the thread the signal shall be sent to.
1571 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1572 		actual signal will be delivered. Only delivery checks will be performed.
1573 		The given object will be copied. The caller retains ownership.
1574 	\param flags A bitwise combination of any number of the following:
1575 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1576 			target thread the signal.
1577 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1578 			woken up, the scheduler will be invoked. If set that will not be
1579 			done explicitly, but rescheduling can still happen, e.g. when the
1580 			current thread's time slice runs out.
1581 	\return \c B_OK, when the signal was delivered successfully, another error
1582 		code otherwise.
1583 */
1584 status_t
1585 send_signal_to_thread_id(thread_id threadID, const Signal& signal, uint32 flags)
1586 {
1587 	Thread* thread = Thread::Get(threadID);
1588 	if (thread == NULL)
1589 		return B_BAD_THREAD_ID;
1590 	BReference<Thread> threadReference(thread, true);
1591 
1592 	return send_signal_to_thread(thread, signal, flags);
1593 }
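
// Editor's note: an illustrative usage sketch, not part of the original
// source, modeled on send_signal_internal() below; threadID is a hypothetical
// variable naming the target thread.
//
//	Signal signal(SIGUSR1, SI_QUEUE, B_OK,
//		thread_get_current_thread()->team->id);
//	union sigval value;
//	value.sival_int = 42;
//	signal.SetUserValue(value);
//	status_t error = send_signal_to_thread_id(threadID, signal,
//		SIGNAL_FLAG_QUEUING_REQUIRED);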
1594 
1595 
1596 /*!	Sends the given signal to the given team.
1597 
1598 	The caller must hold \c signal_lock.
1599 
1600 	\param team The team the signal shall be sent to.
1601 	\param signalNumber The number of the signal to be delivered. If \c 0, no
1602 		actual signal will be delivered. Only delivery checks will be performed.
1603 	\param signal If non-NULL the signal to be queued (has number
1604 		\a signalNumber in this case). The caller transfers an object reference
1605 		to this function. If \c NULL an unqueued signal will be delivered to the
1606 		thread.
1607 	\param flags A bitwise combination of any number of the following:
1608 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1609 			target thread the signal.
1610 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1611 			woken up, the scheduler will be invoked. If set that will not be
1612 			done explicitly, but rescheduling can still happen, e.g. when the
1613 			current thread's time slice runs out.
1614 	\return \c B_OK, when the signal was delivered successfully, another error
1615 		code otherwise.
1616 */
1617 status_t
1618 send_signal_to_team_locked(Team* team, uint32 signalNumber, Signal* signal,
1619 	uint32 flags)
1620 {
1621 	ASSERT(signal == NULL || signalNumber == signal->Number());
1622 
1623 	T(SendSignal(team->id, signalNumber, flags));
1624 
1625 	// The caller transferred a reference to the signal to us.
1626 	BReference<Signal> signalReference(signal, true);
1627 
1628 	if ((flags & B_CHECK_PERMISSION) != 0) {
1629 		if (!has_permission_to_signal(team))
1630 			return EPERM;
1631 	}
1632 
1633 	if (signalNumber == 0)
1634 		return B_OK;
1635 
1636 	if (team == team_get_kernel_team()) {
1637 		// signals to the kernel team are not allowed
1638 		return EPERM;
1639 	}
1640 
1641 	if (signal != NULL)
1642 		team->AddPendingSignal(signal);
1643 	else
1644 		team->AddPendingSignal(signalNumber);
1645 
1646 	// the team has the signal reference, now
1647 	signalReference.Detach();
1648 
1649 	switch (signalNumber) {
1650 		case SIGKILL:
1651 		case SIGKILLTHR:
1652 		{
1653 			// Also add a SIGKILLTHR to the main thread's signals and wake it
1654 			// up/interrupt it, so we get this over with as soon as possible
1655 			// (only the main thread shuts down the team).
1656 			Thread* mainThread = team->main_thread;
1657 			if (mainThread != NULL) {
1658 				mainThread->AddPendingSignal(signalNumber);
1659 
1660 				// wake up main thread
1661 				mainThread->going_to_suspend = false;
1662 
1663 				SpinLocker _(mainThread->scheduler_lock);
1664 				if (mainThread->state == B_THREAD_SUSPENDED)
1665 					scheduler_enqueue_in_run_queue(mainThread);
1666 				else
1667 					thread_interrupt(mainThread, true);
1668 			}
1669 			break;
1670 		}
1671 
1672 		case SIGCONT:
1673 			// Wake up any suspended threads, interrupt the others if they
1674 			// don't block the signal.
1675 			for (Thread* thread = team->thread_list; thread != NULL;
1676 					thread = thread->team_next) {
1677 				thread->going_to_suspend = false;
1678 
1679 				SpinLocker _(thread->scheduler_lock);
1680 				if (thread->state == B_THREAD_SUSPENDED) {
1681 					scheduler_enqueue_in_run_queue(thread);
1682 				} else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask)
1683 						!= 0) {
1684 					thread_interrupt(thread, false);
1685 				}
1686 
1687 				// remove any pending stop signals
1688 				thread->RemovePendingSignals(STOP_SIGNALS);
1689 			}
1690 
1691 			// remove any pending team stop signals
1692 			team->RemovePendingSignals(STOP_SIGNALS);
1693 			break;
1694 
1695 		case SIGSTOP:
1696 		case SIGTSTP:
1697 		case SIGTTIN:
1698 		case SIGTTOU:
1699 			// send the stop signal to all threads
1700 			// TODO: Is that correct or should we only target the main thread?
1701 			for (Thread* thread = team->thread_list; thread != NULL;
1702 					thread = thread->team_next) {
1703 				thread->AddPendingSignal(signalNumber);
1704 			}
1705 
1706 			// remove the stop signal from the team again
1707 			if (signal != NULL) {
1708 				team->RemovePendingSignal(signal);
1709 				signalReference.SetTo(signal, true);
1710 			} else
1711 				team->RemovePendingSignal(signalNumber);
1712 
1713 			// fall through to interrupt threads
1714 		default:
1715 			// Interrupt all interruptibly waiting threads if the signal is
1716 			// not masked.
1717 			for (Thread* thread = team->thread_list; thread != NULL;
1718 					thread = thread->team_next) {
1719 				sigset_t nonBlocked = ~thread->sig_block_mask
1720 					| SIGNAL_TO_MASK(SIGCHLD);
1721 				if ((thread->AllPendingSignals() & nonBlocked) != 0) {
1722 					SpinLocker _(thread->scheduler_lock);
1723 					thread_interrupt(thread, false);
1724 				}
1725 			}
1726 			break;
1727 	}
1728 
1729 	update_team_threads_signal_flag(team);
1730 
1731 	return B_OK;
1732 }
1733 
1734 
1735 /*!	Sends the given signal to the given team.
1736 
1737 	\param team The team the signal shall be sent to.
1738 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1739 		actual signal will be delivered. Only delivery checks will be performed.
1740 		The given object will be copied. The caller retains ownership.
1741 	\param flags A bitwise combination of any number of the following:
1742 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1743 			target thread the signal.
1744 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1745 			woken up, the scheduler will be invoked. If set that will not be
1746 			done explicitly, but rescheduling can still happen, e.g. when the
1747 			current thread's time slice runs out.
1748 	\return \c B_OK, when the signal was delivered successfully, another error
1749 		code otherwise.
1750 */
1751 status_t
1752 send_signal_to_team(Team* team, const Signal& signal, uint32 flags)
1753 {
1754 	// Clone the signal -- the clone will be queued. If something fails and the
1755 	// caller doesn't require queuing, we will add an unqueued signal.
1756 	Signal* signalToQueue = NULL;
1757 	status_t error = Signal::CreateQueuable(signal,
1758 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1759 	if (error != B_OK)
1760 		return error;
1761 
1762 	InterruptsSpinLocker locker(team->signal_lock);
1763 
1764 	error = send_signal_to_team_locked(team, signal.Number(), signalToQueue,
1765 			flags);
1766 
1767 	locker.Unlock();
1768 
1769 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1770 		scheduler_reschedule_if_necessary();
1771 
1772 	return error;
1773 }
1774 
1775 
1776 /*!	Sends the given signal to the team with the given ID.
1777 
1778 	\param teamID The ID of the team the signal shall be sent to.
1779 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1780 		actual signal will be delivered. Only delivery checks will be performed.
1781 		The given object will be copied. The caller retains ownership.
1782 	\param flags A bitwise combination of any number of the following:
1783 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1784 			target thread the signal.
1785 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1786 			woken up, the scheduler will be invoked. If set that will not be
1787 			done explicitly, but rescheduling can still happen, e.g. when the
1788 			current thread's time slice runs out.
1789 	\return \c B_OK, when the signal was delivered successfully, another error
1790 		code otherwise.
1791 */
1792 status_t
1793 send_signal_to_team_id(team_id teamID, const Signal& signal, uint32 flags)
1794 {
1795 	// get the team
1796 	Team* team = Team::Get(teamID);
1797 	if (team == NULL)
1798 		return B_BAD_TEAM_ID;
1799 	BReference<Team> teamReference(team, true);
1800 
1801 	return send_signal_to_team(team, signal, flags);
1802 }
1803 
1804 
1805 /*!	Sends the given signal to the given process group.
1806 
1807 	The caller must hold the process group's lock. Interrupts must be enabled.
1808 
1809 	\param group The process group the signal shall be sent to.
1810 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1811 		actual signal will be delivered. Only delivery checks will be performed.
1812 		The given object will be copied. The caller retains ownership.
1813 	\param flags A bitwise combination of any number of the following:
1814 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1815 			target thread the signal.
1816 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher priority thread has
1817 			been woken up, the scheduler will be invoked. If set, that will
1818 			not be done explicitly, but rescheduling can still happen, e.g.
1819 			when the current thread's time slice runs out.
1820 	\return \c B_OK when the signal was delivered successfully, another error
1821 		code otherwise.
1822 */
1823 status_t
1824 send_signal_to_process_group_locked(ProcessGroup* group, const Signal& signal,
1825 	uint32 flags)
1826 {
1827 	T(SendSignal(-group->id, signal.Number(), flags));
1828 
1829 	bool firstTeam = true;
1830 
1831 	for (Team* team = group->teams; team != NULL; team = team->group_next) {
1832 		status_t error = send_signal_to_team(team, signal,
1833 			flags | B_DO_NOT_RESCHEDULE);
1834 		// If sending to the first team in the group failed, let the whole call
1835 		// fail.
1836 		if (firstTeam) {
1837 			if (error != B_OK)
1838 				return error;
1839 			firstTeam = false;
1840 		}
1841 	}
1842 
1843 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1844 		scheduler_reschedule_if_necessary();
1845 
1846 	return B_OK;
1847 }
1848 
1849 
1850 /*!	Sends the given signal to the process group specified by the given ID.
1851 
1852 	The caller must not hold any process group, team, or thread lock. Interrupts
1853 	must be enabled.
1854 
1855 	\param groupID The ID of the process group the signal shall be sent to.
1856 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1857 		actual signal will be delivered. Only delivery checks will be performed.
1858 		The given object will be copied. The caller retains ownership.
1859 	\param flags A bitwise combination of any number of the following:
1860 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1861 			target thread the signal.
1862 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher priority thread has
1863 			been woken up, the scheduler will be invoked. If set, that will
1864 			not be done explicitly, but rescheduling can still happen, e.g.
1865 			when the current thread's time slice runs out.
1866 	\return \c B_OK when the signal was delivered successfully, another error
1867 		code otherwise.
1868 */
1869 status_t
1870 send_signal_to_process_group(pid_t groupID, const Signal& signal, uint32 flags)
1871 {
1872 	ProcessGroup* group = ProcessGroup::Get(groupID);
1873 	if (group == NULL)
1874 		return B_BAD_TEAM_ID;
1875 	BReference<ProcessGroup> groupReference(group);
1876 
1877 	T(SendSignal(-group->id, signal.Number(), flags));
1878 
1879 	AutoLocker<ProcessGroup> groupLocker(group);
1880 
1881 	status_t error = send_signal_to_process_group_locked(group, signal,
1882 		flags | B_DO_NOT_RESCHEDULE);
1883 	if (error != B_OK)
1884 		return error;
1885 
1886 	groupLocker.Unlock();
1887 
1888 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1889 		scheduler_reschedule_if_necessary();
1890 
1891 	return B_OK;
1892 }
1893 
1894 
1895 static status_t
1896 send_signal_internal(pid_t id, uint signalNumber, union sigval userValue,
1897 	uint32 flags)
1898 {
1899 	if (signalNumber > MAX_SIGNAL_NUMBER)
1900 		return B_BAD_VALUE;
1901 
1902 	Thread* thread = thread_get_current_thread();
1903 
1904 	Signal signal(signalNumber,
1905 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
1906 		B_OK, thread->team->id);
1907 		// Note: SI_USER/SI_QUEUE is not correct, if called from within the
1908 		// kernel (or a driver), but we don't have any info here.
1909 	signal.SetUserValue(userValue);
1910 
1911 	// If id is > 0, send the signal to the respective thread.
1912 	if (id > 0)
1913 		return send_signal_to_thread_id(id, signal, flags);
1914 
1915 	// If id == 0, send the signal to the current thread.
1916 	if (id == 0)
1917 		return send_signal_to_thread(thread, signal, flags);
1918 
1919 	// If id == -1, send the signal to all teams the calling team has permission
1920 	// to send signals to.
1921 	if (id == -1) {
1922 		// TODO: Implement correctly!
1923 		// Currently the signal is only sent to the current team.
1924 		return send_signal_to_team_id(thread->team->id, signal, flags);
1925 	}
1926 
1927 	// Send a signal to the specified process group (the absolute value of the
1928 	// id).
1929 	return send_signal_to_process_group(-id, signal, flags);
1930 }
1931 
1932 
1933 int
1934 send_signal_etc(pid_t id, uint signalNumber, uint32 flags)
1935 {
1936 	// a dummy user value
1937 	union sigval userValue;
1938 	userValue.sival_ptr = NULL;
1939 
1940 	return send_signal_internal(id, signalNumber, userValue, flags);
1941 }
1942 
1943 
1944 int
1945 send_signal(pid_t threadID, uint signal)
1946 {
1947 	// The BeBook states that this function isn't exported for drivers,
1948 	// but that is, of course, wrong.
1949 	return send_signal_etc(threadID, signal, 0);
1950 }
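
/*!	Illustrative sketch only (not part of this interface): typical driver-side
	use of the exported send_signal()/send_signal_etc(). The thread ID is a
	placeholder.
	\code
	// Interrupt a thread that sleeps interruptibly by sending it SIGCONT,
	// without explicitly triggering a reschedule from this context.
	thread_id sleeper = 123;
	send_signal_etc(sleeper, SIGCONT, B_DO_NOT_RESCHEDULE);
	\endcode
*/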
1951 
1952 
1953 static int
1954 sigprocmask_internal(int how, const sigset_t* set, sigset_t* oldSet)
1955 {
1956 	Thread* thread = thread_get_current_thread();
1957 
1958 	InterruptsSpinLocker _(thread->team->signal_lock);
1959 
1960 	sigset_t oldMask = thread->sig_block_mask;
1961 
1962 	if (set != NULL) {
1963 		T(SigProcMask(how, *set));
1964 
1965 		switch (how) {
1966 			case SIG_BLOCK:
1967 				thread->sig_block_mask |= *set & BLOCKABLE_SIGNALS;
1968 				break;
1969 			case SIG_UNBLOCK:
1970 				thread->sig_block_mask &= ~*set;
1971 				break;
1972 			case SIG_SETMASK:
1973 				thread->sig_block_mask = *set & BLOCKABLE_SIGNALS;
1974 				break;
1975 			default:
1976 				return B_BAD_VALUE;
1977 		}
1978 
1979 		update_current_thread_signals_flag();
1980 	}
1981 
1982 	if (oldSet != NULL)
1983 		*oldSet = oldMask;
1984 
1985 	return B_OK;
1986 }
1987 
1988 
1989 int
1990 sigprocmask(int how, const sigset_t* set, sigset_t* oldSet)
1991 {
1992 	RETURN_AND_SET_ERRNO(sigprocmask_internal(how, set, oldSet));
1993 }
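
/*!	Illustrative sketch only (not part of this interface): the classic POSIX
	usage pattern served by sigprocmask(). Note that signals outside
	BLOCKABLE_SIGNALS (e.g. SIGKILL) are silently left unblocked.
	\code
	sigset_t blocked, previous;
	sigemptyset(&blocked);
	sigaddset(&blocked, SIGINT);

	// block SIGINT around a critical section, then restore the old mask
	sigprocmask(SIG_BLOCK, &blocked, &previous);
	// ... critical section ...
	sigprocmask(SIG_SETMASK, &previous, NULL);
	\endcode
*/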
1994 
1995 
1996 /*!	\brief Like sigaction(), but returning the error instead of setting errno.
1997 */
1998 static status_t
1999 sigaction_internal(int signal, const struct sigaction* act,
2000 	struct sigaction* oldAction)
2001 {
2002 	if (signal < 1 || signal > MAX_SIGNAL_NUMBER
2003 		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
2004 		return B_BAD_VALUE;
2005 
2006 	// get and lock the team
2007 	Team* team = thread_get_current_thread()->team;
2008 	TeamLocker teamLocker(team);
2009 
2010 	struct sigaction& teamHandler = team->SignalActionFor(signal);
2011 	if (oldAction) {
2012 		// save previous sigaction structure
2013 		*oldAction = teamHandler;
2014 	}
2015 
2016 	if (act) {
2017 		T(SigAction(signal, act));
2018 
2019 		// set new sigaction structure
2020 		teamHandler = *act;
2021 		teamHandler.sa_mask &= BLOCKABLE_SIGNALS;
2022 	}
2023 
2024 	// Remove pending instances of the signal if it is now ignored, either
2025 	// explicitly (SIG_IGN) or via SIG_DFL with a default action of "ignore".
2026 	if ((act && act->sa_handler == SIG_IGN)
2027 		|| (act && act->sa_handler == SIG_DFL
2028 			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0)) {
2029 		InterruptsSpinLocker locker(team->signal_lock);
2030 
2031 		team->RemovePendingSignal(signal);
2032 
2033 		for (Thread* thread = team->thread_list; thread != NULL;
2034 				thread = thread->team_next) {
2035 			thread->RemovePendingSignal(signal);
2036 		}
2037 	}
2038 
2039 	return B_OK;
2040 }
2041 
2042 
2043 int
2044 sigaction(int signal, const struct sigaction* act, struct sigaction* oldAction)
2045 {
2046 	RETURN_AND_SET_ERRNO(sigaction_internal(signal, act, oldAction));
2047 }
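
/*!	Illustrative sketch only (not part of this interface): installing a
	SA_SIGINFO handler through the POSIX interface backed by
	sigaction_internal().
	\code
	static void
	handle_usr1(int signal, siginfo_t* info, void* context)
	{
		// info->si_pid and info->si_value were filled in by the sender
	}

	struct sigaction action;
	memset(&action, 0, sizeof(action));
	action.sa_sigaction = handle_usr1;
	action.sa_flags = SA_SIGINFO;
	sigemptyset(&action.sa_mask);
	sigaction(SIGUSR1, &action, NULL);
	\endcode
*/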
2048 
2049 
2050 /*!	Wait for the specified signals, and return the information for the retrieved
2051 	signal in \a info.
2052 	The \c flags and \c timeout combination must either define an infinite
2053 	timeout (no timeout flags set), an absolute timeout (\c B_ABSOLUTE_TIMEOUT
2054 	set), or a relative timeout \code >= 0 \endcode (\c B_RELATIVE_TIMEOUT set).
2055 */
2056 static status_t
2057 sigwait_internal(const sigset_t* set, siginfo_t* info, uint32 flags,
2058 	bigtime_t timeout)
2059 {
2060 	// restrict mask to blockable signals
2061 	sigset_t requestedSignals = *set & BLOCKABLE_SIGNALS;
2062 
2063 	// make always interruptable
2064 	// always make the wait interruptible
2065 
2066 	// check whether we are allowed to wait at all
2067 	bool canWait = (flags & B_RELATIVE_TIMEOUT) == 0 || timeout > 0;
2068 
2069 	Thread* thread = thread_get_current_thread();
2070 
2071 	InterruptsSpinLocker locker(thread->team->signal_lock);
2072 
2073 	bool timedOut = false;
2074 	status_t error = B_OK;
2075 
2076 	while (!timedOut) {
2077 		sigset_t pendingSignals = thread->AllPendingSignals();
2078 
2079 		// If a kill signal is pending, just bail out.
2080 		if ((pendingSignals & KILL_SIGNALS) != 0)
2081 			return B_INTERRUPTED;
2082 
2083 		if ((pendingSignals & requestedSignals) != 0) {
2084 			// get signal with the highest priority
2085 			Signal stackSignal;
2086 			Signal* signal = dequeue_thread_or_team_signal(thread,
2087 				requestedSignals, stackSignal);
2088 			ASSERT(signal != NULL);
2089 
2090 			SignalHandledCaller signalHandledCaller(signal);
2091 			locker.Unlock();
2092 
2093 			info->si_signo = signal->Number();
2094 			info->si_code = signal->SignalCode();
2095 			info->si_errno = signal->ErrorCode();
2096 			info->si_pid = signal->SendingProcess();
2097 			info->si_uid = signal->SendingUser();
2098 			info->si_addr = signal->Address();
2099 			info->si_status = signal->Status();
2100 			info->si_band = signal->PollBand();
2101 			info->si_value = signal->UserValue();
2102 
2103 			return B_OK;
2104 		}
2105 
2106 		if (!canWait)
2107 			return B_WOULD_BLOCK;
2108 
2109 		sigset_t blockedSignals = thread->sig_block_mask;
2110 		if ((pendingSignals & ~blockedSignals) != 0) {
2111 			// Non-blocked signals are pending -- return to let them be handled.
2112 			return B_INTERRUPTED;
2113 		}
2114 
2115 		// No signals yet. Set the signal block mask to not include the
2116 		// requested signals and wait until we're interrupted.
2117 		thread->sig_block_mask = blockedSignals & ~requestedSignals;
2118 
2119 		while (!has_signals_pending(thread)) {
2120 			thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SIGNAL,
2121 				NULL);
2122 
2123 			locker.Unlock();
2124 
2125 			if ((flags & B_ABSOLUTE_TIMEOUT) != 0) {
2126 				error = thread_block_with_timeout(flags, timeout);
2127 				if (error == B_WOULD_BLOCK || error == B_TIMED_OUT) {
2128 					error = B_WOULD_BLOCK;
2129 						// POSIX requires EAGAIN (B_WOULD_BLOCK) on timeout
2130 					timedOut = true;
2131 
2132 					locker.Lock();
2133 					break;
2134 				}
2135 			} else
2136 				thread_block();
2137 
2138 			locker.Lock();
2139 		}
2140 
2141 		// restore the original block mask
2142 		thread->sig_block_mask = blockedSignals;
2143 
2144 		update_current_thread_signals_flag();
2145 	}
2146 
2147 	// we get here only when timed out
2148 	return error;
2149 }
2150 
2151 
2152 /*!	Replace the current signal block mask and wait for any event to happen.
2153 	Before returning, the original signal block mask is restored.
2154 */
2155 static status_t
2156 sigsuspend_internal(const sigset_t* _mask)
2157 {
2158 	sigset_t mask = *_mask & BLOCKABLE_SIGNALS;
2159 
2160 	T(SigSuspend(mask));
2161 
2162 	Thread* thread = thread_get_current_thread();
2163 
2164 	InterruptsSpinLocker locker(thread->team->signal_lock);
2165 
2166 	// Set the new block mask and block until interrupted. We might be here
2167 	// after a syscall restart, in which case sigsuspend_original_unblocked_mask
2168 	// will still be set.
2169 	sigset_t oldMask = thread->sigsuspend_original_unblocked_mask != 0
2170 		? ~thread->sigsuspend_original_unblocked_mask : thread->sig_block_mask;
2171 	thread->sig_block_mask = mask & BLOCKABLE_SIGNALS;
2172 
2173 	update_current_thread_signals_flag();
2174 
2175 	while (!has_signals_pending(thread)) {
2176 		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
2177 			THREAD_BLOCK_TYPE_SIGNAL, NULL);
2178 
2179 		locker.Unlock();
2180 		thread_block();
2181 		locker.Lock();
2182 	}
2183 
2184 	// Set sigsuspend_original_unblocked_mask (guaranteed to be non-0 due to
2185 	// BLOCKABLE_SIGNALS). This will indicate to handle_signals() that it is
2186 	// called after a _user_sigsuspend(). It will reset the field after invoking
2187 	// a signal handler, or restart the syscall, if there wasn't anything to
2188 	// handle anymore (e.g. because another thread was faster).
2189 	thread->sigsuspend_original_unblocked_mask = ~oldMask;
2190 
2191 	T(SigSuspendDone());
2192 
2193 	// we're not supposed to actually succeed
2194 	return B_INTERRUPTED;
2195 }
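
/*!	Illustrative sketch only (not part of this interface): the race-free wait
	pattern sigsuspend() exists for, shown with the userland POSIX calls that
	end up in sigsuspend_internal(). \c gotUsr1 is a hypothetical flag set by
	the SIGUSR1 handler.
	\code
	volatile sig_atomic_t gotUsr1 = 0;

	sigset_t blockUsr1, oldMask;
	sigemptyset(&blockUsr1);
	sigaddset(&blockUsr1, SIGUSR1);

	// block SIGUSR1 so it cannot arrive between the check and the wait
	sigprocmask(SIG_BLOCK, &blockUsr1, &oldMask);
	while (!gotUsr1) {
		// atomically unblock SIGUSR1 and wait; returns -1 with errno EINTR
		sigsuspend(&oldMask);
	}
	sigprocmask(SIG_SETMASK, &oldMask, NULL);
	\endcode
*/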
2196 
2197 
2198 static status_t
2199 sigpending_internal(sigset_t* set)
2200 {
2201 	Thread* thread = thread_get_current_thread();
2202 
2203 	if (set == NULL)
2204 		return B_BAD_VALUE;
2205 
2206 	InterruptsSpinLocker locker(thread->team->signal_lock);
2207 
2208 	*set = thread->AllPendingSignals() & thread->sig_block_mask;
2209 
2210 	return B_OK;
2211 }
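
/*!	Illustrative sketch only (not part of this interface): querying blocked,
	pending signals through the POSIX call backed by sigpending_internal().
	\code
	sigset_t pending;
	if (sigpending(&pending) == 0 && sigismember(&pending, SIGCHLD)) {
		// a SIGCHLD arrived while blocked and is still pending
	}
	\endcode
*/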
2212 
2213 
2214 // #pragma mark - syscalls
2215 
2216 
2217 /*!	Sends a signal to a thread, process, or process group.
2218 	\param id Specifies the ID of the target:
2219 		- \code id > 0 \endcode: If \c SIGNAL_FLAG_SEND_TO_THREAD is set in
2220 			\a flags, the target is the thread with ID \a id, otherwise the
2221 			team with the ID \a id.
2222 		- \code id == 0 \endcode: If \c SIGNAL_FLAG_SEND_TO_THREAD is set, the
2223 			target is the current thread, otherwise the current team.
2224 		- \code id == -1 \endcode: The targets are all teams the calling team
2225 			has permission to send signals to. Currently not implemented correctly.
2226 		- \code id < -1 \endcode: The target is the process group with ID \c -id.
2227 	\param signalNumber The signal number. \c 0 to just perform checks, but not
2228 		actually send any signal.
2229 	\param userUserValue A user value to be associated with the signal. Might be
2230 		ignored unless signal queuing is forced. Can be \c NULL.
2231 	\param flags A bitwise or of any number of the following:
2232 		- \c SIGNAL_FLAG_QUEUING_REQUIRED: Signal queuing is required. Fail
2233 			instead of falling back to unqueued signals, when queuing isn't
2234 			possible.
2235 		- \c SIGNAL_FLAG_SEND_TO_THREAD: Interpret the given ID as a
2236 			\c thread_id rather than a \c team_id. Ignored when the \a id is
2237 			\code < 0 \endcode -- then the target is a process group.
2238 	\return \c B_OK on success, another error code otherwise.
2239 */
2240 status_t
2241 _user_send_signal(int32 id, uint32 signalNumber,
2242 	const union sigval* userUserValue, uint32 flags)
2243 {
2244 	// restrict flags to the allowed ones and add B_CHECK_PERMISSION
2245 	flags &= SIGNAL_FLAG_QUEUING_REQUIRED | SIGNAL_FLAG_SEND_TO_THREAD;
2246 	flags |= B_CHECK_PERMISSION;
2247 
2248 	// Copy the user value from userland. If not given, use a dummy value.
2249 	union sigval userValue;
2250 	if (userUserValue != NULL) {
2251 		if (!IS_USER_ADDRESS(userUserValue)
2252 			|| user_memcpy(&userValue, userUserValue, sizeof(userValue))
2253 				!= B_OK) {
2254 			return B_BAD_ADDRESS;
2255 		}
2256 	} else
2257 		userValue.sival_ptr = NULL;
2258 
2259 	// If to be sent to a thread, delegate to send_signal_internal(). Also do
2260 	// that when id < 0, since in that case the semantics are the same.
2261 	if ((flags & SIGNAL_FLAG_SEND_TO_THREAD) != 0 || id < 0)
2262 		return send_signal_internal(id, signalNumber, userValue, flags);
2263 
2264 	// kill() semantics for id >= 0
2265 	if (signalNumber > MAX_SIGNAL_NUMBER)
2266 		return B_BAD_VALUE;
2267 
2268 	Thread* thread = thread_get_current_thread();
2269 
2270 	Signal signal(signalNumber,
2271 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
2272 		B_OK, thread->team->id);
2273 	signal.SetUserValue(userValue);
2274 
2275 	// send to current team for id == 0, otherwise to the respective team
2276 	return send_signal_to_team_id(id == 0 ? team_get_current_team_id() : id,
2277 		signal, flags);
2278 }
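
/*!	Illustrative sketch only (not part of this interface): the kind of userland
	calls this syscall typically backs. kill() passes no user value, while
	sigqueue() supplies one and requires queuing. The process ID is a
	placeholder.
	\code
	pid_t pid = 123;

	// classic kill() semantics: signal the whole team (process)
	kill(pid, SIGTERM);

	// sigqueue() semantics: queue SIGUSR1 with an attached value
	union sigval value;
	value.sival_int = 42;
	sigqueue(pid, SIGUSR1, value);
	\endcode
*/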
2279 
2280 
2281 status_t
2282 _user_set_signal_mask(int how, const sigset_t *userSet, sigset_t *userOldSet)
2283 {
2284 	sigset_t set, oldSet;
2285 	status_t status;
2286 
2287 	if ((userSet != NULL && (!IS_USER_ADDRESS(userSet)
2288 			|| user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK))
2289 		|| (userOldSet != NULL && (!IS_USER_ADDRESS(userOldSet)
2290 			|| user_memcpy(&oldSet, userOldSet, sizeof(sigset_t)) < B_OK)))
2291 		return B_BAD_ADDRESS;
2292 
2293 	status = sigprocmask_internal(how, userSet ? &set : NULL,
2294 		userOldSet ? &oldSet : NULL);
2295 
2296 	// copy old set if asked for
2297 	if (status >= B_OK && userOldSet != NULL
2298 		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
2299 		return B_BAD_ADDRESS;
2300 
2301 	return status;
2302 }
2303 
2304 
2305 status_t
2306 _user_sigaction(int signal, const struct sigaction *userAction,
2307 	struct sigaction *userOldAction)
2308 {
2309 	struct sigaction act, oact;
2310 	status_t status;
2311 
2312 	if ((userAction != NULL && (!IS_USER_ADDRESS(userAction)
2313 			|| user_memcpy(&act, userAction, sizeof(struct sigaction)) < B_OK))
2314 		|| (userOldAction != NULL && (!IS_USER_ADDRESS(userOldAction)
2315 			|| user_memcpy(&oact, userOldAction, sizeof(struct sigaction))
2316 				< B_OK)))
2317 		return B_BAD_ADDRESS;
2318 
2319 	status = sigaction_internal(signal, userAction ? &act : NULL,
2320 		userOldAction ? &oact : NULL);
2321 
2322 	// only copy the old action if a pointer has been given
2323 	if (status >= B_OK && userOldAction != NULL
2324 		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
2325 		return B_BAD_ADDRESS;
2326 
2327 	return status;
2328 }
2329 
2330 
2331 status_t
2332 _user_sigwait(const sigset_t *userSet, siginfo_t *userInfo, uint32 flags,
2333 	bigtime_t timeout)
2334 {
2335 	// copy userSet to stack
2336 	sigset_t set;
2337 	if (userSet == NULL || !IS_USER_ADDRESS(userSet)
2338 		|| user_memcpy(&set, userSet, sizeof(sigset_t)) != B_OK) {
2339 		return B_BAD_ADDRESS;
2340 	}
2341 
2342 	// userInfo is optional, but must be a user address when given
2343 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
2344 		return B_BAD_ADDRESS;
2345 
2346 	syscall_restart_handle_timeout_pre(flags, timeout);
2347 
2348 	flags |= B_CAN_INTERRUPT;
2349 
2350 	siginfo_t info;
2351 	status_t status = sigwait_internal(&set, &info, flags, timeout);
2352 	if (status == B_OK) {
2353 		// copy the info back to userland, if userInfo is non-NULL
2354 		if (userInfo != NULL)
2355 			status = user_memcpy(userInfo, &info, sizeof(info));
2356 	} else if (status == B_INTERRUPTED) {
2357 		// make sure we'll be restarted
2358 		Thread* thread = thread_get_current_thread();
2359 		atomic_or(&thread->flags, THREAD_FLAGS_ALWAYS_RESTART_SYSCALL);
2360 	}
2361 
2362 	return syscall_restart_handle_timeout_post(status, timeout);
2363 }
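
/*!	Illustrative sketch only (not part of this interface): waiting for a signal
	with a relative timeout via the POSIX call this syscall typically backs; on
	timeout the wait fails with EAGAIN (B_WOULD_BLOCK above).
	\code
	sigset_t waitFor;
	sigemptyset(&waitFor);
	sigaddset(&waitFor, SIGUSR1);
	sigprocmask(SIG_BLOCK, &waitFor, NULL);
		// the waited-for signal should be blocked, or a handler may steal it

	siginfo_t info;
	struct timespec timeout = {5, 0};	// five seconds
	if (sigtimedwait(&waitFor, &info, &timeout) < 0 && errno == EAGAIN) {
		// timed out without receiving SIGUSR1
	}
	\endcode
*/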
2364 
2365 
2366 status_t
2367 _user_sigsuspend(const sigset_t *userMask)
2368 {
2369 	sigset_t mask;
2370 
2371 	if (userMask == NULL)
2372 		return B_BAD_VALUE;
2373 	if (!IS_USER_ADDRESS(userMask)
2374 		|| user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK) {
2375 		return B_BAD_ADDRESS;
2376 	}
2377 
2378 	return sigsuspend_internal(&mask);
2379 }
2380 
2381 
2382 status_t
2383 _user_sigpending(sigset_t *userSet)
2384 {
2385 	sigset_t set;
2386 	int status;
2387 
2388 	if (userSet == NULL)
2389 		return B_BAD_VALUE;
2390 	if (!IS_USER_ADDRESS(userSet))
2391 		return B_BAD_ADDRESS;
2392 
2393 	status = sigpending_internal(&set);
2394 	if (status == B_OK
2395 		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
2396 		return B_BAD_ADDRESS;
2397 
2398 	return status;
2399 }
2400 
2401 
2402 status_t
2403 _user_set_signal_stack(const stack_t* newUserStack, stack_t* oldUserStack)
2404 {
2405 	Thread *thread = thread_get_current_thread();
2406 	struct stack_t newStack, oldStack;
2407 	bool onStack = false;
2408 
2409 	if ((newUserStack != NULL && (!IS_USER_ADDRESS(newUserStack)
2410 			|| user_memcpy(&newStack, newUserStack, sizeof(stack_t)) < B_OK))
2411 		|| (oldUserStack != NULL && (!IS_USER_ADDRESS(oldUserStack)
2412 			|| user_memcpy(&oldStack, oldUserStack, sizeof(stack_t)) < B_OK)))
2413 		return B_BAD_ADDRESS;
2414 
2415 	if (thread->signal_stack_enabled) {
2416 		// determine whether or not the user thread is currently
2417 		// on the active signal stack
2418 		onStack = arch_on_signal_stack(thread);
2419 	}
2420 
2421 	if (oldUserStack != NULL) {
2422 		oldStack.ss_sp = (void *)thread->signal_stack_base;
2423 		oldStack.ss_size = thread->signal_stack_size;
2424 		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
2425 			| (onStack ? SS_ONSTACK : 0);
2426 	}
2427 
2428 	if (newUserStack != NULL) {
2429 		// no flags other than SS_DISABLE are allowed
2430 		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
2431 			return B_BAD_VALUE;
2432 
2433 		if ((newStack.ss_flags & SS_DISABLE) == 0) {
2434 			// check if the size is valid
2435 			if (newStack.ss_size < MINSIGSTKSZ)
2436 				return B_NO_MEMORY;
2437 			if (onStack)
2438 				return B_NOT_ALLOWED;
2439 			if (!IS_USER_ADDRESS(newStack.ss_sp))
2440 				return B_BAD_VALUE;
2441 
2442 			thread->signal_stack_base = (addr_t)newStack.ss_sp;
2443 			thread->signal_stack_size = newStack.ss_size;
2444 			thread->signal_stack_enabled = true;
2445 		} else
2446 			thread->signal_stack_enabled = false;
2447 	}
2448 
2449 	// only copy the old stack info if a pointer has been given
2450 	if (oldUserStack != NULL
2451 		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
2452 		return B_BAD_ADDRESS;
2453 
2454 	return B_OK;
2455 }
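
/*!	Illustrative sketch only (not part of this interface): setting up an
	alternate signal stack via the POSIX call this syscall backs, e.g. to be
	able to handle a SIGSEGV caused by a stack overflow.
	\code
	static char alternateStack[SIGSTKSZ];

	stack_t stack;
	stack.ss_sp = alternateStack;
	stack.ss_size = sizeof(alternateStack);
	stack.ss_flags = 0;
	sigaltstack(&stack, NULL);

	// handlers installed with SA_ONSTACK will now run on alternateStack
	\endcode
*/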
2456 
2457 
2458 /*!	Restores the environment of a function that was interrupted by a signal
2459 	handler call.
2460 	This syscall is invoked when a signal handler function returns. It
2461 	deconstructs the signal handler frame and restores the stack and register
2462 	state of the function that was interrupted by a signal. The syscall is
2463 	therefore somewhat unusual, since it does not return to the calling
2464 	function, but to someplace else. In case the signal interrupted a syscall,
2465 	it will appear as if the syscall just returned. That is also the reason, why
2466 	it will appear as if the syscall just returned. That is also the reason why
2467 	interrupted syscall returns, which is potentially 64 bits wide.
2468 
2469 	\param userSignalFrameData The signal frame data created for the signal
2470 		handler. Potentially some data (e.g. registers) have been modified by
2471 		the signal handler.
2472 	\return In case the signal interrupted a syscall, the return value of that
2473 		syscall. Otherwise (i.e. in case of a (hardware) interrupt/exception)
2474 		the value might need to be tailored such that after a return to userland
2475 		the interrupted environment is identical to the interrupted one (unless
2476 		the restored environment is identical to the interrupted one (unless
2477 		must contain the eax|edx values of the interrupted environment.
2478 */
2479 int64
2480 _user_restore_signal_frame(struct signal_frame_data* userSignalFrameData)
2481 {
2482 	syscall_64_bit_return_value();
2483 
2484 	Thread *thread = thread_get_current_thread();
2485 
2486 	// copy the signal frame data from userland
2487 	signal_frame_data signalFrameData;
2488 	if (userSignalFrameData == NULL || !IS_USER_ADDRESS(userSignalFrameData)
2489 		|| user_memcpy(&signalFrameData, userSignalFrameData,
2490 			sizeof(signalFrameData)) != B_OK) {
2491 		// We failed to copy the signal frame data from userland. This is a
2492 		// serious problem. Kill the thread.
2493 		dprintf("_user_restore_signal_frame(): thread %" B_PRId32 ": Failed to "
2494 			"copy signal frame data (%p) from userland. Killing thread...\n",
2495 			thread->id, userSignalFrameData);
2496 		kill_thread(thread->id);
2497 		return B_BAD_ADDRESS;
2498 	}
2499 
2500 	// restore the signal block mask
2501 	InterruptsSpinLocker locker(thread->team->signal_lock);
2502 
2503 	thread->sig_block_mask
2504 		= signalFrameData.context.uc_sigmask & BLOCKABLE_SIGNALS;
2505 	update_current_thread_signals_flag();
2506 
2507 	locker.Unlock();
2508 
2509 	// restore the syscall restart related thread flags and the syscall restart
2510 	// parameters
2511 	atomic_and(&thread->flags,
2512 		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
2513 	atomic_or(&thread->flags, signalFrameData.thread_flags
2514 		& (THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
2515 
2516 	memcpy(thread->syscall_restart.parameters,
2517 		signalFrameData.syscall_restart_parameters,
2518 		sizeof(thread->syscall_restart.parameters));
2519 
2520 	// restore the previously stored Thread::user_signal_context
2521 	thread->user_signal_context = signalFrameData.context.uc_link;
2522 	if (thread->user_signal_context != NULL
2523 		&& !IS_USER_ADDRESS(thread->user_signal_context)) {
2524 		thread->user_signal_context = NULL;
2525 	}
2526 
2527 	// let the architecture specific code restore the registers
2528 	return arch_restore_signal_frame(&signalFrameData);
2529 }
2530