xref: /haiku/src/system/kernel/signal.cpp (revision a629567a9001547736cfe892cdf992be16868fed)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
5  * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
6  *
7  * Distributed under the terms of the MIT License.
8  */
9 
10 
11 /*! POSIX signals handling routines */
12 
13 
14 #include <ksignal.h>
15 
16 #include <errno.h>
17 #include <stddef.h>
18 #include <string.h>
19 
20 #include <OS.h>
21 #include <KernelExport.h>
22 
23 #include <cpu.h>
24 #include <debug.h>
25 #include <kernel.h>
26 #include <kscheduler.h>
27 #include <sem.h>
28 #include <syscall_restart.h>
29 #include <syscall_utils.h>
30 #include <team.h>
31 #include <thread.h>
32 #include <tracing.h>
33 #include <user_debugger.h>
34 #include <user_thread.h>
35 #include <util/AutoLock.h>
36 
37 
38 //#define TRACE_SIGNAL
39 #ifdef TRACE_SIGNAL
40 #	define TRACE(x) dprintf x
41 #else
42 #	define TRACE(x) ;
43 #endif
44 
45 
46 #define BLOCKABLE_SIGNALS	\
47 	(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)	\
48 	| SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD)	\
49 	| SIGNAL_TO_MASK(SIGNAL_CANCEL_THREAD)))
50 #define STOP_SIGNALS \
51 	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
52 	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
53 #define CONTINUE_SIGNALS \
54 	(SIGNAL_TO_MASK(SIGCONT) | SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD))
55 #define DEFAULT_IGNORE_SIGNALS \
56 	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
57 	| SIGNAL_TO_MASK(SIGCONT) \
58 	| SIGNAL_RANGE_TO_MASK(SIGNAL_REALTIME_MIN, SIGNAL_REALTIME_MAX))
59 #define NON_DEFERRABLE_SIGNALS	\
60 	(KILL_SIGNALS				\
61 	| SIGNAL_TO_MASK(SIGILL)	\
62 	| SIGNAL_TO_MASK(SIGFPE)	\
63 	| SIGNAL_TO_MASK(SIGSEGV))
64 
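/*	A minimal usage sketch for the mask macros above (hypothetical context,
	mirroring the SIG_BLOCK case of sigprocmask_internal() further below):
	a user-supplied mask is always filtered through BLOCKABLE_SIGNALS, so that
	KILL_SIGNALS, SIGSTOP, and the internal control signals can never be
	blocked.

		sigset_t requested = SIGNAL_TO_MASK(SIGINT) | SIGNAL_TO_MASK(SIGKILL);
		thread->sig_block_mask |= requested & BLOCKABLE_SIGNALS;
			// SIGINT becomes blocked; SIGKILL is dropped from the mask
*/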
65 
66 static const struct {
67 	const char*	name;
68 	int32		priority;
69 } kSignalInfos[__MAX_SIGNO + 1] = {
70 	{"NONE",			-1},
71 	{"HUP",				0},
72 	{"INT",				0},
73 	{"QUIT",			0},
74 	{"ILL",				0},
75 	{"CHLD",			0},
76 	{"ABRT",			0},
77 	{"PIPE",			0},
78 	{"FPE",				0},
79 	{"KILL",			100},
80 	{"STOP",			0},
81 	{"SEGV",			0},
82 	{"CONT",			0},
83 	{"TSTP",			0},
84 	{"ALRM",			0},
85 	{"TERM",			0},
86 	{"TTIN",			0},
87 	{"TTOU",			0},
88 	{"USR1",			0},
89 	{"USR2",			0},
90 	{"WINCH",			0},
91 	{"KILLTHR",			100},
92 	{"TRAP",			0},
93 	{"POLL",			0},
94 	{"PROF",			0},
95 	{"SYS",				0},
96 	{"URG",				0},
97 	{"VTALRM",			0},
98 	{"XCPU",			0},
99 	{"XFSZ",			0},
100 	{"SIGBUS",			0},
101 	{"SIGRESERVED1",	0},
102 	{"SIGRESERVED2",	0},
103 	{"SIGRT1",			8},
104 	{"SIGRT2",			7},
105 	{"SIGRT3",			6},
106 	{"SIGRT4",			5},
107 	{"SIGRT5",			4},
108 	{"SIGRT6",			3},
109 	{"SIGRT7",			2},
110 	{"SIGRT8",			1},
111 	{"invalid 41",		0},
112 	{"invalid 42",		0},
113 	{"invalid 43",		0},
114 	{"invalid 44",		0},
115 	{"invalid 45",		0},
116 	{"invalid 46",		0},
117 	{"invalid 47",		0},
118 	{"invalid 48",		0},
119 	{"invalid 49",		0},
120 	{"invalid 50",		0},
121 	{"invalid 51",		0},
122 	{"invalid 52",		0},
123 	{"invalid 53",		0},
124 	{"invalid 54",		0},
125 	{"invalid 55",		0},
126 	{"invalid 56",		0},
127 	{"invalid 57",		0},
128 	{"invalid 58",		0},
129 	{"invalid 59",		0},
130 	{"invalid 60",		0},
131 	{"invalid 61",		0},
132 	{"invalid 62",		0},
133 	{"CANCEL_THREAD",	0},
134 	{"CONTINUE_THREAD",	0}	// priority must be <= that of SIGSTOP
135 };
136 
137 
138 static inline const char*
139 signal_name(uint32 number)
140 {
141 	return number <= __MAX_SIGNO ? kSignalInfos[number].name : "invalid";
142 }
143 
144 
145 // #pragma mark - SignalHandledCaller
146 
147 
148 struct SignalHandledCaller {
149 	SignalHandledCaller(Signal* signal)
150 		:
151 		fSignal(signal)
152 	{
153 	}
154 
155 	~SignalHandledCaller()
156 	{
157 		Done();
158 	}
159 
160 	void Done()
161 	{
162 		if (fSignal != NULL) {
163 			fSignal->Handled();
164 			fSignal = NULL;
165 		}
166 	}
167 
168 private:
169 	Signal*	fSignal;
170 };
171 
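/*	Usage sketch: handle_signals() below follows this pattern. The destructor
	guarantees that Signal::Handled() -- and with it the release of the
	dequeued signal's reference -- runs on every exit path, while Done()
	allows releasing the reference early before a call that does not return.

		SignalHandledCaller signalHandledCaller(signal);
		// ... act on the signal; early "continue" paths are covered ...
		signalHandledCaller.Done();
		thread_exit();
			// won't return
*/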
172 
173 // #pragma mark - QueuedSignalsCounter
174 
175 
176 /*!	Creates a counter with the given limit.
177 	The limit defines the maximum the counter may reach. Since the
178 	BReferenceable's reference count is used, it is assumed that the owning
179 	team holds a reference and the reference count is one greater than the
180 	counter value.
181 	\param limit The maximum allowed value the counter may have. When
182 		\code < 0 \endcode, the value is not limited.
183 */
184 QueuedSignalsCounter::QueuedSignalsCounter(int32 limit)
185 	:
186 	fLimit(limit)
187 {
188 }
189 
190 
191 /*!	Increments the counter, if the limit allows that.
192 	\return \c true, if incrementing the counter succeeded, \c false otherwise.
193 */
194 bool
195 QueuedSignalsCounter::Increment()
196 {
197 	// no limit => no problem
198 	if (fLimit < 0) {
199 		AcquireReference();
200 		return true;
201 	}
202 
203 	// Increment the reference count manually, so we can check atomically. We
204 	// compare the old value against fLimit, since our (primary) owner holds a
205 	// reference that we don't want to count.
206 	if (atomic_add(&fReferenceCount, 1) > fLimit) {
207 		ReleaseReference();
208 		return false;
209 	}
210 
211 	return true;
212 }
213 
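/*	Usage sketch (assuming the conventions described above): the owning team
	holds the primary reference, so the logical counter value is always
	fReferenceCount - 1. Queuing a signal increments the counter, subject to
	the limit, and handling/destroying the queued Signal releases that
	reference again -- see Signal::CreateQueuable() and Signal::~Signal()
	below.

		QueuedSignalsCounter* counter
			= thread_get_current_thread()->team->QueuedSignalsCounter();
		if (!counter->Increment())
			return EAGAIN;
				// per-team queuing limit reached
		// on success the queued Signal takes over the reference
*/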
214 
215 // #pragma mark - Signal
216 
217 
218 Signal::Signal()
219 	:
220 	fCounter(NULL),
221 	fPending(false)
222 {
223 }
224 
225 
226 Signal::Signal(const Signal& other)
227 	:
228 	fCounter(NULL),
229 	fNumber(other.fNumber),
230 	fSignalCode(other.fSignalCode),
231 	fErrorCode(other.fErrorCode),
232 	fSendingProcess(other.fSendingProcess),
233 	fSendingUser(other.fSendingUser),
234 	fStatus(other.fStatus),
235 	fPollBand(other.fPollBand),
236 	fAddress(other.fAddress),
237 	fUserValue(other.fUserValue),
238 	fPending(false)
239 {
240 }
241 
242 
243 Signal::Signal(uint32 number, int32 signalCode, int32 errorCode,
244 	pid_t sendingProcess)
245 	:
246 	fCounter(NULL),
247 	fNumber(number),
248 	fSignalCode(signalCode),
249 	fErrorCode(errorCode),
250 	fSendingProcess(sendingProcess),
251 	fSendingUser(getuid()),
252 	fStatus(0),
253 	fPollBand(0),
254 	fAddress(NULL),
255 	fPending(false)
256 {
257 	fUserValue.sival_ptr = NULL;
258 }
259 
260 
261 Signal::~Signal()
262 {
263 	if (fCounter != NULL)
264 		fCounter->ReleaseReference();
265 }
266 
267 
268 /*!	Creates a queuable clone of the given signal.
269 	Also enforces the current team's signal queuing limit.
270 
271 	\param signal The signal to clone.
272 	\param queuingRequired If \c true, the function will return an error code
273 		when creating the clone fails for any reason. Otherwise, the function
274 		will set \a _signalToQueue to \c NULL, but still return \c B_OK.
275 	\param _signalToQueue Return parameter. Set to the clone of the signal.
276 	\return When \c queuingRequired is \c false, always \c B_OK. Otherwise
277 		\c B_OK if creating the signal clone succeeds, or another error code
278 		if it fails.
279 */
280 /*static*/ status_t
281 Signal::CreateQueuable(const Signal& signal, bool queuingRequired,
282 	Signal*& _signalToQueue)
283 {
284 	_signalToQueue = NULL;
285 
286 	// If interrupts are disabled, we can't allocate a signal.
287 	if (!are_interrupts_enabled())
288 		return queuingRequired ? B_BAD_VALUE : B_OK;
289 
290 	// increment the queued signals counter
291 	QueuedSignalsCounter* counter
292 		= thread_get_current_thread()->team->QueuedSignalsCounter();
293 	if (!counter->Increment())
294 		return queuingRequired ? EAGAIN : B_OK;
295 
296 	// allocate the signal
297 	Signal* signalToQueue = new(std::nothrow) Signal(signal);
298 	if (signalToQueue == NULL) {
299 		counter->Decrement();
300 		return queuingRequired ? B_NO_MEMORY : B_OK;
301 	}
302 
303 	signalToQueue->fCounter = counter;
304 
305 	_signalToQueue = signalToQueue;
306 	return B_OK;
307 }
308 
309 void
310 Signal::SetTo(uint32 number)
311 {
312 	Team* team = thread_get_current_thread()->team;
313 
314 	fNumber = number;
315 	fSignalCode = SI_USER;
316 	fErrorCode = 0;
317 	fSendingProcess = team->id;
318 	fSendingUser = team->effective_uid;
319 	fStatus = 0;
320 	fPollBand = 0;
321 	fAddress = NULL;
322 	fUserValue.sival_ptr = NULL;
323 }
324 
325 
326 int32
327 Signal::Priority() const
328 {
329 	return kSignalInfos[fNumber].priority;
330 }
331 
332 
333 void
334 Signal::Handled()
335 {
336 	ReleaseReference();
337 }
338 
339 
340 void
341 Signal::LastReferenceReleased()
342 {
343 	if (are_interrupts_enabled())
344 		delete this;
345 	else
346 		deferred_delete(this);
347 }
348 
349 
350 // #pragma mark - PendingSignals
351 
352 
353 PendingSignals::PendingSignals()
354 	:
355 	fQueuedSignalsMask(0),
356 	fUnqueuedSignalsMask(0)
357 {
358 }
359 
360 
361 PendingSignals::~PendingSignals()
362 {
363 	Clear();
364 }
365 
366 
367 /*!	Returns the priority of the highest-priority pending signal contained
368 	in \a nonBlocked.
369 	\param nonBlocked The mask with the non-blocked signals.
370 	\return The priority of the highest priority non-blocked signal, or, if all
371 		signals are blocked, \c -1.
372 */
373 int32
374 PendingSignals::HighestSignalPriority(sigset_t nonBlocked) const
375 {
376 	Signal* queuedSignal;
377 	int32 unqueuedSignal;
378 	return _GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal);
379 }
380 
381 
382 void
383 PendingSignals::Clear()
384 {
385 	// release references of all queued signals
386 	while (Signal* signal = fQueuedSignals.RemoveHead())
387 		signal->Handled();
388 
389 	fQueuedSignalsMask = 0;
390 	fUnqueuedSignalsMask = 0;
391 }
392 
393 
394 /*!	Adds a signal.
395 	Takes over the reference to the signal from the caller.
396 */
397 void
398 PendingSignals::AddSignal(Signal* signal)
399 {
400 	// queue according to priority
401 	int32 priority = signal->Priority();
402 	Signal* otherSignal = NULL;
403 	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
404 			(otherSignal = it.Next()) != NULL;) {
405 		if (priority > otherSignal->Priority())
406 			break;
407 	}
408 
409 	fQueuedSignals.InsertBefore(otherSignal, signal);
410 	signal->SetPending(true);
411 
412 	fQueuedSignalsMask |= SIGNAL_TO_MASK(signal->Number());
413 }
414 
415 
416 void
417 PendingSignals::RemoveSignal(Signal* signal)
418 {
419 	signal->SetPending(false);
420 	fQueuedSignals.Remove(signal);
421 	_UpdateQueuedSignalMask();
422 }
423 
424 
425 void
426 PendingSignals::RemoveSignals(sigset_t mask)
427 {
428 	// remove from queued signals
429 	if ((fQueuedSignalsMask & mask) != 0) {
430 		for (SignalList::Iterator it = fQueuedSignals.GetIterator();
431 				Signal* signal = it.Next();) {
432 			// remove signal, if in mask
433 			if ((SIGNAL_TO_MASK(signal->Number()) & mask) != 0) {
434 				it.Remove();
435 				signal->SetPending(false);
436 				signal->Handled();
437 			}
438 		}
439 
440 		fQueuedSignalsMask &= ~mask;
441 	}
442 
443 	// remove from unqueued signals
444 	fUnqueuedSignalsMask &= ~mask;
445 }
446 
447 
448 /*!	Removes and returns a signal in \a nonBlocked that has the highest priority.
449 	The caller gets a reference to the returned signal, if any.
450 	\param nonBlocked The mask of non-blocked signals.
451 	\param buffer If the signal is not queued, this buffer is returned
452 		instead. The method then acquires a reference to \a buffer, so that
453 		the caller gets a reference in this case as well.
454 	\return The removed signal or \c NULL, if all signals are blocked.
455 */
456 Signal*
457 PendingSignals::DequeueSignal(sigset_t nonBlocked, Signal& buffer)
458 {
459 	// find the signal with the highest priority
460 	Signal* queuedSignal;
461 	int32 unqueuedSignal;
462 	if (_GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal) < 0)
463 		return NULL;
464 
465 	// if it is a queued signal, dequeue it
466 	if (queuedSignal != NULL) {
467 		fQueuedSignals.Remove(queuedSignal);
468 		queuedSignal->SetPending(false);
469 		_UpdateQueuedSignalMask();
470 		return queuedSignal;
471 	}
472 
473 	// it is unqueued -- remove from mask
474 	fUnqueuedSignalsMask &= ~SIGNAL_TO_MASK(unqueuedSignal);
475 
476 	// init buffer
477 	buffer.SetTo(unqueuedSignal);
478 	buffer.AcquireReference();
479 	return &buffer;
480 }
481 
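/*	Caller pattern (see handle_signals() below): the caller provides a
	stack-allocated Signal as \a buffer, so unqueued signals can be returned
	without an allocation. Since a reference is acquired on the buffer as
	well, the returned signal can be released uniformly in both cases.

		Signal stackSignal;
		Signal* signal = pendingSignals.DequeueSignal(nonBlockedMask,
			stackSignal);
			// "pendingSignals" and "nonBlockedMask" are placeholders
		if (signal != NULL) {
			// ... act on the signal ...
			signal->Handled();
				// releases the reference, whether queued or not
		}
*/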
482 
483 /*!	Returns the priority of the highest-priority pending signal contained
484 	in \a nonBlocked.
485 	\param nonBlocked The mask of non-blocked signals.
486 	\param _queuedSignal If the found signal is a queued signal, the variable
487 		will be set to that signal, otherwise to \c NULL.
488 	\param _unqueuedSignal If the found signal is an unqueued signal, the
489 		variable is set to that signal's number, otherwise to \c -1.
490 	\return The priority of the highest priority non-blocked signal, or, if all
491 		signals are blocked, \c -1.
492 */
493 int32
494 PendingSignals::_GetHighestPrioritySignal(sigset_t nonBlocked,
495 	Signal*& _queuedSignal, int32& _unqueuedSignal) const
496 {
497 	// check queued signals
498 	Signal* queuedSignal = NULL;
499 	int32 queuedPriority = -1;
500 
501 	if ((fQueuedSignalsMask & nonBlocked) != 0) {
502 		for (SignalList::ConstIterator it = fQueuedSignals.GetIterator();
503 				Signal* signal = it.Next();) {
504 			if ((SIGNAL_TO_MASK(signal->Number()) & nonBlocked) != 0) {
505 				queuedPriority = signal->Priority();
506 				queuedSignal = signal;
507 				break;
508 			}
509 		}
510 	}
511 
512 	// check unqueued signals
513 	int32 unqueuedSignal = -1;
514 	int32 unqueuedPriority = -1;
515 
516 	sigset_t unqueuedSignals = fUnqueuedSignalsMask & nonBlocked;
517 	if (unqueuedSignals != 0) {
518 		int32 signal = 1;
519 		while (unqueuedSignals != 0) {
520 			sigset_t mask = SIGNAL_TO_MASK(signal);
521 			if ((unqueuedSignals & mask) != 0) {
522 				int32 priority = kSignalInfos[signal].priority;
523 				if (priority > unqueuedPriority) {
524 					unqueuedSignal = signal;
525 					unqueuedPriority = priority;
526 				}
527 				unqueuedSignals &= ~mask;
528 			}
529 
530 			signal++;
531 		}
532 	}
533 
534 	// Return found queued or unqueued signal, whichever has the higher
535 	// priority.
536 	if (queuedPriority >= unqueuedPriority) {
537 		_queuedSignal = queuedSignal;
538 		_unqueuedSignal = -1;
539 		return queuedPriority;
540 	}
541 
542 	_queuedSignal = NULL;
543 	_unqueuedSignal = unqueuedSignal;
544 	return unqueuedPriority;
545 }
546 
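/*	A small worked example of the selection above, using the priorities from
	kSignalInfos: with a queued SIGUSR1 (priority 0), an unqueued SIGRT1
	(priority 8), and an unqueued SIGKILL (priority 100) all pending and
	non-blocked, the unqueued SIGKILL wins (100 > 8 > 0) and is reported via
	\a _unqueuedSignal. On equal priorities the queued signal is preferred,
	since the comparison reads "queuedPriority >= unqueuedPriority".
*/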
547 
548 void
549 PendingSignals::_UpdateQueuedSignalMask()
550 {
551 	sigset_t mask = 0;
552 	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
553 			Signal* signal = it.Next();) {
554 		mask |= SIGNAL_TO_MASK(signal->Number());
555 	}
556 
557 	fQueuedSignalsMask = mask;
558 }
559 
560 
561 // #pragma mark - signal tracing
562 
563 
564 #if SIGNAL_TRACING
565 
566 namespace SignalTracing {
567 
568 
569 class HandleSignal : public AbstractTraceEntry {
570 	public:
571 		HandleSignal(uint32 signal)
572 			:
573 			fSignal(signal)
574 		{
575 			Initialized();
576 		}
577 
578 		virtual void AddDump(TraceOutput& out)
579 		{
580 			out.Print("signal handle:  %" B_PRIu32 " (%s)" , fSignal,
581 				signal_name(fSignal));
582 		}
583 
584 	private:
585 		uint32		fSignal;
586 };
587 
588 
589 class ExecuteSignalHandler : public AbstractTraceEntry {
590 	public:
591 		ExecuteSignalHandler(uint32 signal, struct sigaction* handler)
592 			:
593 			fSignal(signal),
594 			fHandler((void*)handler->sa_handler)
595 		{
596 			Initialized();
597 		}
598 
599 		virtual void AddDump(TraceOutput& out)
600 		{
601 			out.Print("signal exec handler: signal: %" B_PRIu32 " (%s), "
602 				"handler: %p", fSignal, signal_name(fSignal), fHandler);
603 		}
604 
605 	private:
606 		uint32	fSignal;
607 		void*	fHandler;
608 };
609 
610 
611 class SendSignal : public AbstractTraceEntry {
612 	public:
613 		SendSignal(pid_t target, uint32 signal, uint32 flags)
614 			:
615 			fTarget(target),
616 			fSignal(signal),
617 			fFlags(flags)
618 		{
619 			Initialized();
620 		}
621 
622 		virtual void AddDump(TraceOutput& out)
623 		{
624 			out.Print("signal send: target: %" B_PRId32 ", signal: %" B_PRIu32
625 				" (%s), flags: %#" B_PRIx32, fTarget, fSignal,
626 				signal_name(fSignal), fFlags);
627 		}
628 
629 	private:
630 		pid_t	fTarget;
631 		uint32	fSignal;
632 		uint32	fFlags;
633 };
634 
635 
636 class SigAction : public AbstractTraceEntry {
637 	public:
638 		SigAction(uint32 signal, const struct sigaction* act)
639 			:
640 			fSignal(signal),
641 			fAction(*act)
642 		{
643 			Initialized();
644 		}
645 
646 		virtual void AddDump(TraceOutput& out)
647 		{
648 			out.Print("signal action: signal: %" B_PRIu32 " (%s), "
649 				"action: {handler: %p, flags: %#x, mask: %#" B_PRIx64 "}",
650 				fSignal, signal_name(fSignal), fAction.sa_handler,
651 				fAction.sa_flags, (uint64)fAction.sa_mask);
652 		}
653 
654 	private:
655 		uint32				fSignal;
656 		struct sigaction	fAction;
657 };
658 
659 
660 class SigProcMask : public AbstractTraceEntry {
661 	public:
662 		SigProcMask(int how, sigset_t mask)
663 			:
664 			fHow(how),
665 			fMask(mask),
666 			fOldMask(thread_get_current_thread()->sig_block_mask)
667 		{
668 			Initialized();
669 		}
670 
671 		virtual void AddDump(TraceOutput& out)
672 		{
673 			const char* how = "invalid";
674 			switch (fHow) {
675 				case SIG_BLOCK:
676 					how = "block";
677 					break;
678 				case SIG_UNBLOCK:
679 					how = "unblock";
680 					break;
681 				case SIG_SETMASK:
682 					how = "set";
683 					break;
684 			}
685 
686 			out.Print("signal proc mask: %s 0x%llx, old mask: 0x%llx", how,
687 				(long long)fMask, (long long)fOldMask);
688 		}
689 
690 	private:
691 		int			fHow;
692 		sigset_t	fMask;
693 		sigset_t	fOldMask;
694 };
695 
696 
697 class SigSuspend : public AbstractTraceEntry {
698 	public:
699 		SigSuspend(sigset_t mask)
700 			:
701 			fMask(mask),
702 			fOldMask(thread_get_current_thread()->sig_block_mask)
703 		{
704 			Initialized();
705 		}
706 
707 		virtual void AddDump(TraceOutput& out)
708 		{
709 			out.Print("signal suspend: %#llx, old mask: %#llx",
710 				(long long)fMask, (long long)fOldMask);
711 		}
712 
713 	private:
714 		sigset_t	fMask;
715 		sigset_t	fOldMask;
716 };
717 
718 
719 class SigSuspendDone : public AbstractTraceEntry {
720 	public:
721 		SigSuspendDone()
722 			:
723 			fSignals(thread_get_current_thread()->ThreadPendingSignals())
724 		{
725 			Initialized();
726 		}
727 
728 		virtual void AddDump(TraceOutput& out)
729 		{
730 			out.Print("signal suspend done: %#" B_PRIx32, fSignals);
731 		}
732 
733 	private:
734 		uint32		fSignals;
735 };
736 
737 }	// namespace SignalTracing
738 
739 #	define T(x)	new(std::nothrow) SignalTracing::x
740 
741 #else
742 #	define T(x)
743 #endif	// SIGNAL_TRACING
744 
745 
746 // #pragma mark -
747 
748 
749 /*!	Updates the given thread's Thread::flags field according to what signals are
750 	pending.
751 	The caller must hold \c team->signal_lock.
752 */
753 static void
754 update_thread_signals_flag(Thread* thread)
755 {
756 	sigset_t mask = ~thread->sig_block_mask;
757 	if ((thread->AllPendingSignals() & mask) != 0)
758 		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
759 	else
760 		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
761 }
762 
763 
764 /*!	Updates the current thread's Thread::flags field according to what signals
765 	are pending.
766 	The caller must hold \c team->signal_lock.
767 */
768 static void
769 update_current_thread_signals_flag()
770 {
771 	update_thread_signals_flag(thread_get_current_thread());
772 }
773 
774 
775 /*!	Updates all of the given team's threads' Thread::flags fields according to
776 	what signals are pending.
777 	The caller must hold \c signal_lock.
778 */
779 static void
780 update_team_threads_signal_flag(Team* team)
781 {
782 	for (Thread* thread = team->thread_list; thread != NULL;
783 			thread = thread->team_next) {
784 		update_thread_signals_flag(thread);
785 	}
786 }
787 
788 
789 /*!	Notifies the user debugger about a signal to be handled.
790 
791 	The caller must not hold any locks.
792 
793 	\param thread The current thread.
794 	\param signal The signal to be handled.
795 	\param handler The installed signal handler for the signal.
796 	\param deadly Indicates whether the signal is deadly.
797 	\return \c true, if the signal shall be handled, \c false, if it shall be
798 		ignored.
799 */
800 static bool
801 notify_debugger(Thread* thread, Signal* signal, struct sigaction& handler,
802 	bool deadly)
803 {
804 	uint64 signalMask = SIGNAL_TO_MASK(signal->Number());
805 
806 	// first check the ignore signal masks the debugger specified for the thread
807 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
808 
809 	if ((thread->debug_info.ignore_signals_once & signalMask) != 0) {
810 		thread->debug_info.ignore_signals_once &= ~signalMask;
811 		return true;
812 	}
813 
814 	if ((thread->debug_info.ignore_signals & signalMask) != 0)
815 		return true;
816 
817 	threadDebugInfoLocker.Unlock();
818 
819 	// deliver the event
820 	return user_debug_handle_signal(signal->Number(), &handler, deadly);
821 }
822 
823 
824 /*!	Removes and returns a signal with the highest priority in \a nonBlocked that
825 	is pending in the given thread or its team.
826 	After dequeuing the signal, the Thread::flags fields of the affected threads
827 	are updated.
828 	The caller gets a reference to the returned signal, if any.
829 	The caller must hold \c team->signal_lock.
830 	\param thread The thread.
831 	\param nonBlocked The mask of non-blocked signals.
832 	\param buffer If the signal is not queued, this buffer is returned
833 		instead. The function then acquires a reference to \a buffer, so that
834 		the caller gets a reference in this case as well.
835 	\return The removed signal or \c NULL, if all signals are blocked.
836 */
837 static Signal*
838 dequeue_thread_or_team_signal(Thread* thread, sigset_t nonBlocked,
839 	Signal& buffer)
840 {
841 	Team* team = thread->team;
842 	Signal* signal;
843 	if (team->HighestPendingSignalPriority(nonBlocked)
844 			> thread->HighestPendingSignalPriority(nonBlocked)) {
845 		signal = team->DequeuePendingSignal(nonBlocked, buffer);
846 		update_team_threads_signal_flag(team);
847 	} else {
848 		signal = thread->DequeuePendingSignal(nonBlocked, buffer);
849 		update_thread_signals_flag(thread);
850 	}
851 
852 	return signal;
853 }
854 
855 
856 static status_t
857 setup_signal_frame(Thread* thread, struct sigaction* action, Signal* signal,
858 	sigset_t signalMask)
859 {
860 	// prepare the data we need to copy onto the user stack
861 	signal_frame_data frameData;
862 
863 	// signal info
864 	frameData.info.si_signo = signal->Number();
865 	frameData.info.si_code = signal->SignalCode();
866 	frameData.info.si_errno = signal->ErrorCode();
867 	frameData.info.si_pid = signal->SendingProcess();
868 	frameData.info.si_uid = signal->SendingUser();
869 	frameData.info.si_addr = signal->Address();
870 	frameData.info.si_status = signal->Status();
871 	frameData.info.si_band = signal->PollBand();
872 	frameData.info.si_value = signal->UserValue();
873 
874 	// context
875 	frameData.context.uc_link = thread->user_signal_context;
876 	frameData.context.uc_sigmask = signalMask;
877 	// uc_stack and uc_mcontext are filled in by the architecture specific code.
878 
879 	// user data
880 	frameData.user_data = action->sa_userdata;
881 
882 	// handler function
883 	frameData.siginfo_handler = (action->sa_flags & SA_SIGINFO) != 0;
884 	frameData.handler = frameData.siginfo_handler
885 		? (void*)action->sa_sigaction : (void*)action->sa_handler;
886 
887 	// thread flags -- save and clear the thread's syscall restart related
888 	// flags
889 	frameData.thread_flags = atomic_and(&thread->flags,
890 		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
891 
892 	// syscall restart related fields
893 	memcpy(frameData.syscall_restart_parameters,
894 		thread->syscall_restart.parameters,
895 		sizeof(frameData.syscall_restart_parameters));
896 
897 	// commpage address
898 	frameData.commpage_address = thread->team->commpage_address;
899 
900 	// syscall_restart_return_value is filled in by the architecture specific
901 	// code.
902 
903 	return arch_setup_signal_frame(thread, action, &frameData);
904 }
905 
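/*	For reference, the si_* fields filled in above are what a userland
	SA_SIGINFO handler ultimately receives. A minimal userland sketch using
	the standard POSIX API (<signal.h>, not kernel code; the handler name is
	a placeholder):

		static void
		segv_handler(int number, siginfo_t* info, void* context)
		{
			// info->si_addr, info->si_code, info->si_pid, etc. correspond
			// to the values copied into signal_frame_data above
		}

		struct sigaction action;
		memset(&action, 0, sizeof(action));
		action.sa_sigaction = segv_handler;
		action.sa_flags = SA_SIGINFO;
		sigemptyset(&action.sa_mask);
		sigaction(SIGSEGV, &action, NULL);
*/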
906 
907 /*! Actually handles pending signals -- i.e. the thread will exit, a custom
908 	signal handler is prepared, or whatever the signal demands.
909 	The function will not return when a deadly signal is encountered, and it
910 	will suspend the thread indefinitely when a stop signal is
911 	encountered.
912 	Interrupts must be enabled.
913 	\param thread The current thread.
914 */
915 void
916 handle_signals(Thread* thread)
917 {
918 	Team* team = thread->team;
919 
920 	TeamLocker teamLocker(team);
921 	InterruptsSpinLocker locker(thread->team->signal_lock);
922 
923 	// If userland requested to defer signals, we check now whether this is
924 	// possible.
925 	sigset_t nonBlockedMask = ~thread->sig_block_mask;
926 	sigset_t signalMask = thread->AllPendingSignals() & nonBlockedMask;
927 
928 	if (thread->user_thread->defer_signals > 0
929 		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0
930 		&& thread->sigsuspend_original_unblocked_mask == 0) {
931 		thread->user_thread->pending_signals = signalMask;
932 		return;
933 	}
934 
935 	thread->user_thread->pending_signals = 0;
936 
937 	// determine syscall restart behavior
938 	uint32 restartFlags = atomic_and(&thread->flags,
939 		~THREAD_FLAGS_DONT_RESTART_SYSCALL);
940 	bool alwaysRestart
941 		= (restartFlags & THREAD_FLAGS_ALWAYS_RESTART_SYSCALL) != 0;
942 	bool restart = alwaysRestart
943 		|| (restartFlags & THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;
944 
945 	// Loop until we've handled all signals.
946 	bool initialIteration = true;
947 	while (true) {
948 		if (initialIteration) {
949 			initialIteration = false;
950 		} else {
951 			teamLocker.Lock();
952 			locker.Lock();
953 
954 			signalMask = thread->AllPendingSignals() & nonBlockedMask;
955 		}
956 
957 		// Unless SIGKILL[THR] are pending, check whether the thread shall stop
958 		// for debugging.
959 		if ((signalMask & KILL_SIGNALS) == 0
960 			&& (atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP)
961 				!= 0) {
962 			locker.Unlock();
963 			teamLocker.Unlock();
964 
965 			user_debug_stop_thread();
966 			continue;
967 		}
968 
969 		// We're done, if there aren't any pending signals anymore.
970 		if ((signalMask & nonBlockedMask) == 0)
971 			break;
972 
973 		// get pending non-blocked thread or team signal with the highest
974 		// priority
975 		Signal stackSignal;
976 		Signal* signal = dequeue_thread_or_team_signal(thread, nonBlockedMask,
977 			stackSignal);
978 		ASSERT(signal != NULL);
979 		SignalHandledCaller signalHandledCaller(signal);
980 
981 		locker.Unlock();
982 
983 		// get the action for the signal
984 		struct sigaction handler;
985 		if (signal->Number() <= MAX_SIGNAL_NUMBER) {
986 			handler = team->SignalActionFor(signal->Number());
987 		} else {
988 			handler.sa_handler = SIG_DFL;
989 			handler.sa_flags = 0;
990 		}
991 
992 		if ((handler.sa_flags & SA_ONESHOT) != 0
993 			&& handler.sa_handler != SIG_IGN && handler.sa_handler != SIG_DFL) {
994 			team->SignalActionFor(signal->Number()).sa_handler = SIG_DFL;
995 		}
996 
997 		T(HandleSignal(signal->Number()));
998 
999 		teamLocker.Unlock();
1000 
1001 		// debug the signal, if a debugger is installed and the signal debugging
1002 		// flag is set
1003 		bool debugSignal = (~atomic_get(&team->debug_info.flags)
1004 				& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS))
1005 			== 0;
1006 
1007 		// handle the signal
1008 		TRACE(("Thread %" B_PRId32 " received signal %s\n", thread->id,
1009 			kSignalInfos[signal->Number()].name));
1010 
1011 		if (handler.sa_handler == SIG_IGN) {
1012 			// signal is to be ignored
1013 			// TODO: apply zombie cleaning on SIGCHLD
1014 
1015 			// notify the debugger
1016 			if (debugSignal)
1017 				notify_debugger(thread, signal, handler, false);
1018 			continue;
1019 		} else if (handler.sa_handler == SIG_DFL) {
1020 			// default signal behaviour
1021 
1022 			// realtime signals are ignored by default
1023 			if (signal->Number() >= SIGNAL_REALTIME_MIN
1024 				&& signal->Number() <= SIGNAL_REALTIME_MAX) {
1025 				// notify the debugger
1026 				if (debugSignal)
1027 					notify_debugger(thread, signal, handler, false);
1028 				continue;
1029 			}
1030 
1031 			bool killTeam = false;
1032 			switch (signal->Number()) {
1033 				case SIGCHLD:
1034 				case SIGWINCH:
1035 				case SIGURG:
1036 					// notify the debugger
1037 					if (debugSignal)
1038 						notify_debugger(thread, signal, handler, false);
1039 					continue;
1040 
1041 				case SIGNAL_CANCEL_THREAD:
1042 					// set up the signal handler
1043 					handler.sa_handler = thread->cancel_function;
1044 					handler.sa_flags = 0;
1045 					handler.sa_mask = 0;
1046 					handler.sa_userdata = NULL;
1047 
1048 					restart = false;
1049 						// we always want to interrupt
1050 					break;
1051 
1052 				case SIGNAL_CONTINUE_THREAD:
1053 					// prevent syscall restart, but otherwise ignore
1054 					restart = false;
1055 					atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1056 					continue;
1057 
1058 				case SIGCONT:
1059 					// notify the debugger
1060 					if (debugSignal
1061 						&& !notify_debugger(thread, signal, handler, false))
1062 						continue;
1063 
1064 					// notify threads waiting for team state changes
1065 					if (thread == team->main_thread) {
1066 						team->LockTeamAndParent(false);
1067 
1068 						team_set_job_control_state(team,
1069 							JOB_CONTROL_STATE_CONTINUED, signal);
1070 
1071 						team->UnlockTeamAndParent();
1072 
1073 						// The standard states that the system *may* send a
1074 						// SIGCHLD when a child is continued. I haven't found
1075 						// a good reason why we would want to, though.
1076 					}
1077 					continue;
1078 
1079 				case SIGSTOP:
1080 				case SIGTSTP:
1081 				case SIGTTIN:
1082 				case SIGTTOU:
1083 				{
1084 					// notify the debugger
1085 					if (debugSignal
1086 						&& !notify_debugger(thread, signal, handler, false))
1087 						continue;
1088 
1089 					// The terminal-sent stop signals are allowed to stop the
1090 					// process only if it doesn't belong to an orphaned process
1091 					// group. Otherwise the signal must be discarded.
1092 					team->LockProcessGroup();
1093 					AutoLocker<ProcessGroup> groupLocker(team->group, true);
1094 					if (signal->Number() != SIGSTOP
1095 						&& team->group->IsOrphaned()) {
1096 						continue;
1097 					}
1098 
1099 					// notify threads waiting for team state changes
1100 					if (thread == team->main_thread) {
1101 						team->LockTeamAndParent(false);
1102 
1103 						team_set_job_control_state(team,
1104 							JOB_CONTROL_STATE_STOPPED, signal);
1105 
1106 						// send a SIGCHLD to the parent (unless it has
1107 						// SA_NOCLDSTOP set)
1108 						Team* parentTeam = team->parent;
1109 
1110 						struct sigaction& parentHandler
1111 							= parentTeam->SignalActionFor(SIGCHLD);
1112 						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
1113 							Signal childSignal(SIGCHLD, CLD_STOPPED, B_OK,
1114 								team->id);
1115 							childSignal.SetStatus(signal->Number());
1116 							childSignal.SetSendingUser(signal->SendingUser());
1117 							send_signal_to_team(parentTeam, childSignal, 0);
1118 						}
1119 
1120 						team->UnlockTeamAndParent();
1121 					}
1122 
1123 					groupLocker.Unlock();
1124 
1125 					// Suspend the thread, unless there's already a signal to
1126 					// continue or kill pending.
1127 					locker.Lock();
1128 					bool resume = (thread->AllPendingSignals()
1129 								& (CONTINUE_SIGNALS | KILL_SIGNALS)) != 0;
1130 					locker.Unlock();
1131 
1132 					if (!resume)
1133 						thread_suspend();
1134 
1135 					continue;
1136 				}
1137 
1138 				case SIGSEGV:
1139 				case SIGBUS:
1140 				case SIGFPE:
1141 				case SIGILL:
1142 				case SIGTRAP:
1143 				case SIGABRT:
1144 				case SIGKILL:
1145 				case SIGQUIT:
1146 				case SIGPOLL:
1147 				case SIGPROF:
1148 				case SIGSYS:
1149 				case SIGVTALRM:
1150 				case SIGXCPU:
1151 				case SIGXFSZ:
1152 				default:
1153 					TRACE(("Shutting down team %" B_PRId32 " due to signal %"
1154 						B_PRIu32 " received in thread %" B_PRId32 "\n",
1155 						team->id, signal->Number(), thread->id));
1156 
1157 					// This signal kills the team regardless which thread
1158 					// received it.
1159 					killTeam = true;
1160 
1161 					// fall through
1162 				case SIGKILLTHR:
1163 					// notify the debugger
1164 					if (debugSignal && signal->Number() != SIGKILL
1165 						&& signal->Number() != SIGKILLTHR
1166 						&& !notify_debugger(thread, signal, handler, true)) {
1167 						continue;
1168 					}
1169 
1170 					if (killTeam || thread == team->main_thread) {
1171 						// The signal is terminal for the team or the thread is
1172 						// the main thread. In either case the team is going
1173 						// down. Set its exit status, if that didn't happen yet.
1174 						teamLocker.Lock();
1175 
1176 						if (!team->exit.initialized) {
1177 							team->exit.reason = CLD_KILLED;
1178 							team->exit.signal = signal->Number();
1179 							team->exit.signaling_user = signal->SendingUser();
1180 							team->exit.status = 0;
1181 							team->exit.initialized = true;
1182 						}
1183 
1184 						teamLocker.Unlock();
1185 
1186 						// If this is not the main thread, send it a SIGKILLTHR
1187 						// so that the team terminates.
1188 						if (thread != team->main_thread) {
1189 							Signal childSignal(SIGKILLTHR, SI_USER, B_OK,
1190 								team->id);
1191 							send_signal_to_thread_id(team->id, childSignal, 0);
1192 						}
1193 					}
1194 
1195 					// explicitly get rid of the signal reference, since
1196 					// thread_exit() won't return
1197 					signalHandledCaller.Done();
1198 
1199 					thread_exit();
1200 						// won't return
1201 			}
1202 		}
1203 
1204 		// User defined signal handler
1205 
1206 		// notify the debugger
1207 		if (debugSignal && !notify_debugger(thread, signal, handler, false))
1208 			continue;
1209 
1210 		if (!restart
1211 				|| (!alwaysRestart && (handler.sa_flags & SA_RESTART) == 0)) {
1212 			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1213 		}
1214 
1215 		T(ExecuteSignalHandler(signal->Number(), &handler));
1216 
1217 		TRACE(("### Setting up custom signal handler frame...\n"));
1218 
1219 		// save the old block mask -- we may need to adjust it for the handler
1220 		locker.Lock();
1221 
1222 		sigset_t oldBlockMask = thread->sigsuspend_original_unblocked_mask != 0
1223 			? ~thread->sigsuspend_original_unblocked_mask
1224 			: thread->sig_block_mask;
1225 
1226 		// Update the block mask while the signal handler is running -- it
1227 		// will be automatically restored when the signal frame is left.
1228 		thread->sig_block_mask |= handler.sa_mask & BLOCKABLE_SIGNALS;
1229 
1230 		if ((handler.sa_flags & SA_NOMASK) == 0) {
1231 			thread->sig_block_mask
1232 				|= SIGNAL_TO_MASK(signal->Number()) & BLOCKABLE_SIGNALS;
1233 		}
1234 
1235 		update_current_thread_signals_flag();
1236 
1237 		locker.Unlock();
1238 
1239 		setup_signal_frame(thread, &handler, signal, oldBlockMask);
1240 
1241 		// Reset sigsuspend_original_unblocked_mask. It would have been set by
1242 		// sigsuspend_internal(). In that case, above we set oldBlockMask
1243 		// accordingly so that after the handler returns the thread's signal
1244 		// mask is reset.
1245 		thread->sigsuspend_original_unblocked_mask = 0;
1246 
1247 		return;
1248 	}
1249 
1250 	// We have not handled any signal (or have only ignored them).
1251 
1252 	// If sigsuspend_original_unblocked_mask is non-zero, we came from a
1253 	// sigsuspend_internal(). Not having handled any signal, we should restart
1254 	// the syscall.
1255 	if (thread->sigsuspend_original_unblocked_mask != 0) {
1256 		restart = true;
1257 		atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
1258 	} else if (!restart) {
1259 		// clear syscall restart thread flag, if we're not supposed to restart
1260 		// the syscall
1261 		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1262 	}
1263 }
1264 
1265 
1266 /*!	Checks whether the given signal is blocked for the given team (i.e. all of
1267 	its threads).
1268 	The caller must hold the team's lock and \c signal_lock.
1269 */
1270 bool
1271 is_team_signal_blocked(Team* team, int signal)
1272 {
1273 	sigset_t mask = SIGNAL_TO_MASK(signal);
1274 
1275 	for (Thread* thread = team->thread_list; thread != NULL;
1276 			thread = thread->team_next) {
1277 		if ((thread->sig_block_mask & mask) == 0)
1278 			return false;
1279 	}
1280 
1281 	return true;
1282 }
1283 
1284 
1285 /*!	Gets (guesses) the current thread's currently used stack from the given
1286 	stack pointer.
1287 	Fills in \a stack with either the signal stack or the thread's user stack.
1288 	\param address A stack pointer address to be used to determine the used
1289 		stack.
1290 	\param stack Filled in by the function.
1291 */
1292 void
1293 signal_get_user_stack(addr_t address, stack_t* stack)
1294 {
1295 	// If a signal stack is enabled for the thread and the address is within it,
1296 	// return the signal stack. In all other cases return the thread's user
1297 	// stack, even if the address doesn't lie within it.
1298 	Thread* thread = thread_get_current_thread();
1299 	if (thread->signal_stack_enabled && address >= thread->signal_stack_base
1300 		&& address < thread->signal_stack_base + thread->signal_stack_size) {
1301 		stack->ss_sp = (void*)thread->signal_stack_base;
1302 		stack->ss_size = thread->signal_stack_size;
1303 	} else {
1304 		stack->ss_sp = (void*)thread->user_stack_base;
1305 		stack->ss_size = thread->user_stack_size;
1306 	}
1307 
1308 	stack->ss_flags = 0;
1309 }
1310 
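/*	The signal stack checked above is the one a userland program installs via
	the standard sigaltstack() API and opts into per handler with SA_ONSTACK.
	A minimal userland sketch (POSIX, not kernel code; error handling
	omitted):

		stack_t altStack;
		altStack.ss_sp = malloc(SIGSTKSZ);
		altStack.ss_size = SIGSTKSZ;
		altStack.ss_flags = 0;
		sigaltstack(&altStack, NULL);
			// handlers installed with SA_ONSTACK then run on this stack,
			// and signal_get_user_stack() reports it while the stack
			// pointer lies within it
*/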
1311 
1312 /*!	Checks whether any non-blocked signal is pending for the current thread.
1313 	The caller must hold \c team->signal_lock.
1314 	\param thread The current thread.
1315 */
1316 static bool
1317 has_signals_pending(Thread* thread)
1318 {
1319 	return (thread->AllPendingSignals() & ~thread->sig_block_mask) != 0;
1320 }
1321 
1322 
1323 /*!	Checks whether the current user has permission to send a signal to the given
1324 	target team.
1325 
1326 	\param team The target team.
1327 */
1328 static bool
1329 has_permission_to_signal(Team* team)
1330 {
1331 	// get the current user
1332 	uid_t currentUser = thread_get_current_thread()->team->effective_uid;
1333 
1334 	// root is omnipotent -- in the other cases the current user must match the
1335 	// target team's
1336 	return currentUser == 0 || currentUser == team->effective_uid;
1337 }
1338 
1339 
1340 /*!	Delivers a signal to the \a thread, but doesn't handle the signal -- it just
1341 	makes sure the thread gets the signal, i.e. unblocks it if needed.
1342 
1343 	The caller must hold \c team->signal_lock.
1344 
1345 	\param thread The thread the signal shall be delivered to.
1346 	\param signalNumber The number of the signal to be delivered. If \c 0, no
1347 		actual signal will be delivered. Only delivery checks will be performed.
1348 	\param signal If non-NULL the signal to be queued (has number
1349 		\a signalNumber in this case). The caller transfers an object reference
1350 		to this function. If \c NULL an unqueued signal will be delivered to the
1351 		thread.
1352 	\param flags A bitwise combination of any number of the following:
1353 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1354 			target thread the signal.
1355 	\return \c B_OK, when the signal was delivered successfully, another error
1356 		code otherwise.
1357 */
1358 status_t
1359 send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
1360 	Signal* signal, uint32 flags)
1361 {
1362 	ASSERT(signal == NULL || signalNumber == signal->Number());
1363 
1364 	T(SendSignal(thread->id, signalNumber, flags));
1365 
1366 	// The caller transferred a reference to the signal to us.
1367 	BReference<Signal> signalReference(signal, true);
1368 
1369 	if ((flags & B_CHECK_PERMISSION) != 0) {
1370 		if (!has_permission_to_signal(thread->team))
1371 			return EPERM;
1372 	}
1373 
1374 	if (signalNumber == 0)
1375 		return B_OK;
1376 
1377 	if (thread->team == team_get_kernel_team()) {
1378 		// Signals to kernel threads will only wake them up
1379 		thread_continue(thread);
1380 		return B_OK;
1381 	}
1382 
1383 	if (signal != NULL)
1384 		thread->AddPendingSignal(signal);
1385 	else
1386 		thread->AddPendingSignal(signalNumber);
1387 
1388 	// the thread has the signal reference, now
1389 	signalReference.Detach();
1390 
1391 	switch (signalNumber) {
1392 		case SIGKILL:
1393 		{
1394 			// If sent to a thread other than the team's main thread, also send
1395 			// a SIGKILLTHR to the main thread to kill the team.
1396 			Thread* mainThread = thread->team->main_thread;
1397 			if (mainThread != NULL && mainThread != thread) {
1398 				mainThread->AddPendingSignal(SIGKILLTHR);
1399 
1400 				// wake up main thread
1401 				thread->going_to_suspend = false;
1402 
1403 				SpinLocker locker(mainThread->scheduler_lock);
1404 				if (mainThread->state == B_THREAD_SUSPENDED)
1405 					scheduler_enqueue_in_run_queue(mainThread);
1406 				else
1407 					thread_interrupt(mainThread, true);
1408 				locker.Unlock();
1409 
1410 				update_thread_signals_flag(mainThread);
1411 			}
1412 
1413 			// supposed to fall through
1414 		}
1415 		case SIGKILLTHR:
1416 		{
1417 			// Wake up suspended threads and interrupt waiting ones
1418 			thread->going_to_suspend = false;
1419 
1420 			SpinLocker locker(thread->scheduler_lock);
1421 			if (thread->state == B_THREAD_SUSPENDED)
1422 				scheduler_enqueue_in_run_queue(thread);
1423 			else
1424 				thread_interrupt(thread, true);
1425 
1426 			break;
1427 		}
1428 		case SIGNAL_CONTINUE_THREAD:
1429 		{
1430 			// wake up thread, and interrupt its current syscall
1431 			thread->going_to_suspend = false;
1432 
1433 			SpinLocker locker(thread->scheduler_lock);
1434 			if (thread->state == B_THREAD_SUSPENDED)
1435 				scheduler_enqueue_in_run_queue(thread);
1436 
1437 			atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
1438 			break;
1439 		}
1440 		case SIGCONT:
1441 		{
1442 			// Wake up the thread if it was suspended; otherwise interrupt it, if
1443 			// the signal isn't blocked.
1444 			thread->going_to_suspend = false;
1445 
1446 			SpinLocker locker(thread->scheduler_lock);
1447 			if (thread->state == B_THREAD_SUSPENDED)
1448 				scheduler_enqueue_in_run_queue(thread);
1449 			else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask) != 0)
1450 				thread_interrupt(thread, false);
1451 
1452 			// remove any pending stop signals
1453 			thread->RemovePendingSignals(STOP_SIGNALS);
1454 			break;
1455 		}
1456 		default:
1457 			// If the signal is not masked, interrupt the thread, if it is
1458 			// currently waiting (interruptibly).
1459 			if ((thread->AllPendingSignals()
1460 						& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD)))
1461 					!= 0) {
1462 				// Interrupt thread if it was waiting
1463 				SpinLocker locker(thread->scheduler_lock);
1464 				thread_interrupt(thread, false);
1465 			}
1466 			break;
1467 	}
1468 
1469 	update_thread_signals_flag(thread);
1470 
1471 	return B_OK;
1472 }
1473 
1474 
1475 /*!	Sends the given signal to the given thread.
1476 
1477 	\param thread The thread the signal shall be sent to.
1478 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1479 		actual signal will be delivered. Only delivery checks will be performed.
1480 		The given object will be copied. The caller retains ownership.
1481 	\param flags A bitwise combination of any number of the following:
1482 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1483 			target thread the signal.
1484 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1485 			woken up, the scheduler will be invoked. If set that will not be
1486 			done explicitly, but rescheduling can still happen, e.g. when the
1487 			current thread's time slice runs out.
1488 	\return \c B_OK, when the signal was delivered successfully, another error
1489 		code otherwise.
1490 */
1491 status_t
1492 send_signal_to_thread(Thread* thread, const Signal& signal, uint32 flags)
1493 {
1494 	// Clone the signal -- the clone will be queued. If something fails and the
1495 	// caller doesn't require queuing, we will add an unqueued signal.
1496 	Signal* signalToQueue = NULL;
1497 	status_t error = Signal::CreateQueuable(signal,
1498 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1499 	if (error != B_OK)
1500 		return error;
1501 
1502 	InterruptsReadSpinLocker teamLocker(thread->team_lock);
1503 	SpinLocker locker(thread->team->signal_lock);
1504 
1505 	error = send_signal_to_thread_locked(thread, signal.Number(), signalToQueue,
1506 		flags);
1507 	if (error != B_OK)
1508 		return error;
1509 
1510 	locker.Unlock();
1511 	teamLocker.Unlock();
1512 
1513 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1514 		scheduler_reschedule_if_necessary();
1515 
1516 	return B_OK;
1517 }
1518 
1519 
1520 /*!	Sends the given signal to the thread with the given ID.
1521 
1522 	\param threadID The ID of the thread the signal shall be sent to.
1523 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1524 		actual signal will be delivered. Only delivery checks will be performed.
1525 		The given object will be copied. The caller retains ownership.
1526 	\param flags A bitwise combination of any number of the following:
1527 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1528 			target thread the signal.
1529 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1530 			woken up, the scheduler will be invoked. If set that will not be
1531 			done explicitly, but rescheduling can still happen, e.g. when the
1532 			current thread's time slice runs out.
1533 	\return \c B_OK, when the signal was delivered successfully, another error
1534 		code otherwise.
1535 */
1536 status_t
1537 send_signal_to_thread_id(thread_id threadID, const Signal& signal, uint32 flags)
1538 {
1539 	Thread* thread = Thread::Get(threadID);
1540 	if (thread == NULL)
1541 		return B_BAD_THREAD_ID;
1542 	BReference<Thread> threadReference(thread, true);
1543 
1544 	return send_signal_to_thread(thread, signal, flags);
1545 }
1546 
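/*	A minimal in-kernel usage sketch, following the same pattern as
	handle_signals() above when it forwards a SIGKILLTHR to the main thread:
	the Signal is built on the stack and copied or queued by the send
	functions, so the caller retains ownership.

		Signal signal(SIGKILLTHR, SI_USER, B_OK,
			thread_get_current_thread()->team->id);
		send_signal_to_thread_id(someThreadID, signal, B_DO_NOT_RESCHEDULE);
			// "someThreadID" is a placeholder for the target thread's ID
*/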
1547 
1548 /*!	Sends the given signal to the given team.
1549 
1550 	The caller must hold \c signal_lock.
1551 
1552 	\param team The team the signal shall be sent to.
1553 	\param signalNumber The number of the signal to be delivered. If \c 0, no
1554 		actual signal will be delivered. Only delivery checks will be performed.
1555 	\param signal If non-NULL the signal to be queued (has number
1556 		\a signalNumber in this case). The caller transfers an object reference
1557 		to this function. If \c NULL an unqueued signal will be delivered to the
1558 		thread.
1559 	\param flags A bitwise combination of any number of the following:
1560 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1561 			target thread the signal.
1562 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1563 			woken up, the scheduler will be invoked. If set that will not be
1564 			done explicitly, but rescheduling can still happen, e.g. when the
1565 			current thread's time slice runs out.
1566 	\return \c B_OK, when the signal was delivered successfully, another error
1567 		code otherwise.
1568 */
1569 status_t
1570 send_signal_to_team_locked(Team* team, uint32 signalNumber, Signal* signal,
1571 	uint32 flags)
1572 {
1573 	ASSERT(signal == NULL || signalNumber == signal->Number());
1574 
1575 	T(SendSignal(team->id, signalNumber, flags));
1576 
1577 	// The caller transferred a reference to the signal to us.
1578 	BReference<Signal> signalReference(signal, true);
1579 
1580 	if ((flags & B_CHECK_PERMISSION) != 0) {
1581 		if (!has_permission_to_signal(team))
1582 			return EPERM;
1583 	}
1584 
1585 	if (signalNumber == 0)
1586 		return B_OK;
1587 
1588 	if (team == team_get_kernel_team()) {
1589 		// signals to the kernel team are not allowed
1590 		return EPERM;
1591 	}
1592 
1593 	if (signal != NULL)
1594 		team->AddPendingSignal(signal);
1595 	else
1596 		team->AddPendingSignal(signalNumber);
1597 
1598 	// the team has the signal reference, now
1599 	signalReference.Detach();
1600 
1601 	switch (signalNumber) {
1602 		case SIGKILL:
1603 		case SIGKILLTHR:
1604 		{
1605 			// Also add a SIGKILLTHR to the main thread's signals and wake it
1606 			// up/interrupt it, so we get this over with as soon as possible
1607 			// (only the main thread shuts down the team).
1608 			Thread* mainThread = team->main_thread;
1609 			if (mainThread != NULL) {
1610 				mainThread->AddPendingSignal(SIGKILLTHR);
1611 
1612 				// wake up main thread
1613 				mainThread->going_to_suspend = false;
1614 
1615 				SpinLocker _(mainThread->scheduler_lock);
1616 				if (mainThread->state == B_THREAD_SUSPENDED)
1617 					scheduler_enqueue_in_run_queue(mainThread);
1618 				else
1619 					thread_interrupt(mainThread, true);
1620 			}
1621 			break;
1622 		}
1623 
1624 		case SIGCONT:
1625 			// Wake up any suspended threads and interrupt the others, if they
1626 			// don't block the signal.
1627 			for (Thread* thread = team->thread_list; thread != NULL;
1628 					thread = thread->team_next) {
1629 				thread->going_to_suspend = false;
1630 
1631 				SpinLocker _(thread->scheduler_lock);
1632 				if (thread->state == B_THREAD_SUSPENDED) {
1633 					scheduler_enqueue_in_run_queue(thread);
1634 				} else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask)
1635 						!= 0) {
1636 					thread_interrupt(thread, false);
1637 				}
1638 
1639 				// remove any pending stop signals
1640 				thread->RemovePendingSignals(STOP_SIGNALS);
1641 			}
1642 
1643 			// remove any pending team stop signals
1644 			team->RemovePendingSignals(STOP_SIGNALS);
1645 			break;
1646 
1647 		case SIGSTOP:
1648 		case SIGTSTP:
1649 		case SIGTTIN:
1650 		case SIGTTOU:
1651 			// send the stop signal to all threads
1652 			// TODO: Is that correct or should we only target the main thread?
1653 			for (Thread* thread = team->thread_list; thread != NULL;
1654 					thread = thread->team_next) {
1655 				thread->AddPendingSignal(signalNumber);
1656 			}
1657 
1658 			// remove the stop signal from the team again
1659 			if (signal != NULL) {
1660 				team->RemovePendingSignal(signal);
1661 				signalReference.SetTo(signal, true);
1662 			} else
1663 				team->RemovePendingSignal(signalNumber);
1664 
1665 			// fall through to interrupt threads
1666 		default:
1667 			// Interrupt all interruptibly waiting threads, if the signal is
1668 			// not masked.
1669 			for (Thread* thread = team->thread_list; thread != NULL;
1670 					thread = thread->team_next) {
1671 				sigset_t nonBlocked = ~thread->sig_block_mask
1672 					| SIGNAL_TO_MASK(SIGCHLD);
1673 				if ((thread->AllPendingSignals() & nonBlocked) != 0) {
1674 					SpinLocker _(thread->scheduler_lock);
1675 					thread_interrupt(thread, false);
1676 				}
1677 			}
1678 			break;
1679 	}
1680 
1681 	update_team_threads_signal_flag(team);
1682 
1683 	return B_OK;
1684 }
1685 
1686 
1687 /*!	Sends the given signal to the given team.
1688 
1689 	\param team The team the signal shall be sent to.
1690 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1691 		actual signal will be delivered. Only delivery checks will be performed.
1692 		The given object will be copied. The caller retains ownership.
1693 	\param flags A bitwise combination of any number of the following:
1694 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1695 			target thread the signal.
1696 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1697 			woken up, the scheduler will be invoked. If set that will not be
1698 			done explicitly, but rescheduling can still happen, e.g. when the
1699 			current thread's time slice runs out.
1700 	\return \c B_OK, when the signal was delivered successfully, another error
1701 		code otherwise.
1702 */
1703 status_t
1704 send_signal_to_team(Team* team, const Signal& signal, uint32 flags)
1705 {
1706 	// Clone the signal -- the clone will be queued. If something fails and the
1707 	// caller doesn't require queuing, we will add an unqueued signal.
1708 	Signal* signalToQueue = NULL;
1709 	status_t error = Signal::CreateQueuable(signal,
1710 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1711 	if (error != B_OK)
1712 		return error;
1713 
1714 	InterruptsSpinLocker locker(team->signal_lock);
1715 
1716 	error = send_signal_to_team_locked(team, signal.Number(), signalToQueue,
1717 			flags);
1718 
1719 	locker.Unlock();
1720 
1721 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1722 		scheduler_reschedule_if_necessary();
1723 
1724 	return error;
1725 }
1726 
1727 
1728 /*!	Sends the given signal to the team with the given ID.
1729 
1730 	\param teamID The ID of the team the signal shall be sent to.
1731 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1732 		actual signal will be delivered. Only delivery checks will be performed.
1733 		The given object will be copied. The caller retains ownership.
1734 	\param flags A bitwise combination of any number of the following:
1735 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1736 			target thread the signal.
1737 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1738 			woken up, the scheduler will be invoked. If set that will not be
1739 			done explicitly, but rescheduling can still happen, e.g. when the
1740 			current thread's time slice runs out.
1741 	\return \c B_OK, when the signal was delivered successfully, another error
1742 		code otherwise.
1743 */
1744 status_t
1745 send_signal_to_team_id(team_id teamID, const Signal& signal, uint32 flags)
1746 {
1747 	// get the team
1748 	Team* team = Team::Get(teamID);
1749 	if (team == NULL)
1750 		return B_BAD_TEAM_ID;
1751 	BReference<Team> teamReference(team, true);
1752 
1753 	return send_signal_to_team(team, signal, flags);
1754 }
1755 
1756 
1757 /*!	Sends the given signal to the given process group.
1758 
1759 	The caller must hold the process group's lock. Interrupts must be enabled.
1760 
1761 	\param group The process group the signal shall be sent to.
1762 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1763 		actual signal will be delivered. Only delivery checks will be performed.
1764 		The given object will be copied. The caller retains ownership.
1765 	\param flags A bitwise combination of any number of the following:
1766 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1767 			target thread the signal.
1768 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1769 			woken up, the scheduler will be invoked. If set that will not be
1770 			done explicitly, but rescheduling can still happen, e.g. when the
1771 			current thread's time slice runs out.
1772 	\return \c B_OK, when the signal was delivered successfully, another error
1773 		code otherwise.
1774 */
1775 status_t
1776 send_signal_to_process_group_locked(ProcessGroup* group, const Signal& signal,
1777 	uint32 flags)
1778 {
1779 	T(SendSignal(-group->id, signal.Number(), flags));
1780 
1781 	bool firstTeam = true;
1782 
1783 	for (Team* team = group->teams; team != NULL; team = team->group_next) {
1784 		status_t error = send_signal_to_team(team, signal,
1785 			flags | B_DO_NOT_RESCHEDULE);
1786 		// If sending to the first team in the group failed, let the whole call
1787 		// fail.
1788 		if (firstTeam) {
1789 			if (error != B_OK)
1790 				return error;
1791 			firstTeam = false;
1792 		}
1793 	}
1794 
1795 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1796 		scheduler_reschedule_if_necessary();
1797 
1798 	return B_OK;
1799 }
1800 
1801 
1802 /*!	Sends the given signal to the process group specified by the given ID.
1803 
1804 	The caller must not hold any process group, team, or thread lock. Interrupts
1805 	must be enabled.
1806 
1807 	\param groupID The ID of the process group the signal shall be sent to.
1808 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1809 		actual signal will be delivered. Only delivery checks will be performed.
1810 		The given object will be copied. The caller retains ownership.
1811 	\param flags A bitwise combination of any number of the following:
		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
			target process group the signal.
		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
			woken up, the scheduler will be invoked. If set, that will not be
			done explicitly, but rescheduling can still happen, e.g. when the
			current thread's time slice runs out.
	\return \c B_OK when the signal was delivered successfully, another error
		code otherwise.
1820 */
1821 status_t
1822 send_signal_to_process_group(pid_t groupID, const Signal& signal, uint32 flags)
1823 {
1824 	ProcessGroup* group = ProcessGroup::Get(groupID);
1825 	if (group == NULL)
1826 		return B_BAD_TEAM_ID;
1827 	BReference<ProcessGroup> groupReference(group);
1828 
1829 	T(SendSignal(-group->id, signal.Number(), flags));
1830 
1831 	AutoLocker<ProcessGroup> groupLocker(group);
1832 
1833 	status_t error = send_signal_to_process_group_locked(group, signal,
1834 		flags | B_DO_NOT_RESCHEDULE);
1835 	if (error != B_OK)
1836 		return error;
1837 
1838 	groupLocker.Unlock();
1839 
1840 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1841 		scheduler_reschedule_if_necessary();
1842 
1843 	return B_OK;
1844 }
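

/*	Usage sketch (illustrative only, not code from this file): delivering
	SIGHUP to a whole process group known only by ID -- e.g. what a
	hypothetical caller reacting to a terminal hangup might do. "groupID" is a
	hypothetical ID; no locks may be held and interrupts must be enabled.

		Signal signal(SIGHUP, SI_USER, B_OK, team_get_current_team_id());
		status_t error = send_signal_to_process_group(groupID, signal,
			B_CHECK_PERMISSION);
*/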
1845 
1846 
1847 static status_t
1848 send_signal_internal(pid_t id, uint signalNumber, union sigval userValue,
1849 	uint32 flags)
1850 {
1851 	if (signalNumber > MAX_SIGNAL_NUMBER)
1852 		return B_BAD_VALUE;
1853 
1854 	Thread* thread = thread_get_current_thread();
1855 
1856 	Signal signal(signalNumber,
1857 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
1858 		B_OK, thread->team->id);
		// Note: SI_USER/SI_QUEUE is not correct if called from within the
1860 		// kernel (or a driver), but we don't have any info here.
1861 	signal.SetUserValue(userValue);
1862 
1863 	// If id is > 0, send the signal to the respective thread.
1864 	if (id > 0)
1865 		return send_signal_to_thread_id(id, signal, flags);
1866 
1867 	// If id == 0, send the signal to the current thread.
1868 	if (id == 0)
1869 		return send_signal_to_thread(thread, signal, flags);
1870 
1871 	// If id == -1, send the signal to all teams the calling team has permission
1872 	// to send signals to.
1873 	if (id == -1) {
1874 		// TODO: Implement correctly!
1875 		// currently only send to the current team
1876 		return send_signal_to_team_id(thread->team->id, signal, flags);
1877 	}
1878 
1879 	// Send a signal to the specified process group (the absolute value of the
1880 	// id).
1881 	return send_signal_to_process_group(-id, signal, flags);
1882 }
1883 
1884 
1885 int
1886 send_signal_etc(pid_t id, uint signalNumber, uint32 flags)
1887 {
1888 	// a dummy user value
1889 	union sigval userValue;
1890 	userValue.sival_ptr = NULL;
1891 
1892 	return send_signal_internal(id, signalNumber, userValue, flags);
1893 }
1894 
1895 
1896 int
1897 send_signal(pid_t threadID, uint signal)
1898 {
	// The BeBook states that this function isn't exported for drivers, but,
	// of course, that is wrong.
1901 	return send_signal_etc(threadID, signal, 0);
1902 }
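

/*	Usage sketch (illustrative only): how a driver or other kernel code might
	use the kernel-exported send_signal_etc() to notify a user thread without
	forcing an immediate reschedule. "targetThread" is a hypothetical thread ID
	the caller obtained earlier; interrupts must be enabled.

		int error = send_signal_etc(targetThread, SIGUSR1,
			B_DO_NOT_RESCHEDULE);
*/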
1903 
1904 
1905 static int
1906 sigprocmask_internal(int how, const sigset_t* set, sigset_t* oldSet)
1907 {
1908 	Thread* thread = thread_get_current_thread();
1909 
1910 	InterruptsSpinLocker _(thread->team->signal_lock);
1911 
1912 	sigset_t oldMask = thread->sig_block_mask;
1913 
1914 	if (set != NULL) {
1915 		T(SigProcMask(how, *set));
1916 
1917 		switch (how) {
1918 			case SIG_BLOCK:
1919 				thread->sig_block_mask |= *set & BLOCKABLE_SIGNALS;
1920 				break;
1921 			case SIG_UNBLOCK:
1922 				thread->sig_block_mask &= ~*set;
1923 				break;
1924 			case SIG_SETMASK:
1925 				thread->sig_block_mask = *set & BLOCKABLE_SIGNALS;
1926 				break;
1927 			default:
1928 				return B_BAD_VALUE;
1929 		}
1930 
1931 		update_current_thread_signals_flag();
1932 	}
1933 
1934 	if (oldSet != NULL)
1935 		*oldSet = oldMask;
1936 
1937 	return B_OK;
1938 }
1939 
1940 
1941 int
1942 sigprocmask(int how, const sigset_t* set, sigset_t* oldSet)
1943 {
1944 	RETURN_AND_SET_ERRNO(sigprocmask_internal(how, set, oldSet));
1945 }
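

/*	Usage sketch (illustrative only): the in-kernel sigprocmask() follows the
	POSIX semantics implemented above -- e.g. blocking SIGCHLD for the current
	thread and restoring the previous mask afterwards.

		sigset_t blocked = SIGNAL_TO_MASK(SIGCHLD);
		sigset_t oldMask;
		sigprocmask(SIG_BLOCK, &blocked, &oldMask);
		// ... region during which a SIGCHLD stays pending ...
		sigprocmask(SIG_SETMASK, &oldMask, NULL);
*/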
1946 
1947 
1948 /*!	\brief Like sigaction(), but returning the error instead of setting errno.
1949 */
1950 static status_t
1951 sigaction_internal(int signal, const struct sigaction* act,
1952 	struct sigaction* oldAction)
1953 {
1954 	if (signal < 1 || signal > MAX_SIGNAL_NUMBER
1955 		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
1956 		return B_BAD_VALUE;
1957 
1958 	// get and lock the team
1959 	Team* team = thread_get_current_thread()->team;
1960 	TeamLocker teamLocker(team);
1961 
1962 	struct sigaction& teamHandler = team->SignalActionFor(signal);
1963 	if (oldAction) {
1964 		// save previous sigaction structure
1965 		*oldAction = teamHandler;
1966 	}
1967 
1968 	if (act) {
1969 		T(SigAction(signal, act));
1970 
1971 		// set new sigaction structure
1972 		teamHandler = *act;
1973 		teamHandler.sa_mask &= BLOCKABLE_SIGNALS;
1974 	}
1975 
1976 	// Remove pending signal if it should now be ignored and remove pending
1977 	// signal for those signals whose default action is to ignore them.
1978 	if ((act && act->sa_handler == SIG_IGN)
1979 		|| (act && act->sa_handler == SIG_DFL
1980 			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0)) {
1981 		InterruptsSpinLocker locker(team->signal_lock);
1982 
1983 		team->RemovePendingSignal(signal);
1984 
1985 		for (Thread* thread = team->thread_list; thread != NULL;
1986 				thread = thread->team_next) {
1987 			thread->RemovePendingSignal(signal);
1988 		}
1989 	}
1990 
1991 	return B_OK;
1992 }
1993 
1994 
1995 int
1996 sigaction(int signal, const struct sigaction* act, struct sigaction* oldAction)
1997 {
1998 	RETURN_AND_SET_ERRNO(sigaction_internal(signal, act, oldAction));
1999 }
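

/*	Usage sketch (illustrative only, POSIX semantics as implemented above):
	install a handler for SIGUSR1 that additionally blocks SIGUSR2 while it
	runs and restarts interrupted syscalls. "handle_usr1" is a hypothetical
	handler of type void (*)(int).

		struct sigaction action;
		memset(&action, 0, sizeof(action));
		action.sa_handler = handle_usr1;
		action.sa_mask = SIGNAL_TO_MASK(SIGUSR2);
		action.sa_flags = SA_RESTART;
		sigaction(SIGUSR1, &action, NULL);
*/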
2000 
2001 
2002 /*!	Wait for the specified signals, and return the information for the retrieved
2003 	signal in \a info.
2004 	The \c flags and \c timeout combination must either define an infinite
2005 	timeout (no timeout flags set), an absolute timeout (\c B_ABSOLUTE_TIMEOUT
2006 	set), or a relative timeout \code <= 0 \endcode (\c B_RELATIVE_TIMEOUT set).
2007 */
2008 static status_t
2009 sigwait_internal(const sigset_t* set, siginfo_t* info, uint32 flags,
2010 	bigtime_t timeout)
2011 {
2012 	// restrict mask to blockable signals
2013 	sigset_t requestedSignals = *set & BLOCKABLE_SIGNALS;
2014 
	// always make the wait interruptible
2016 	flags |= B_CAN_INTERRUPT;
2017 
2018 	// check whether we are allowed to wait at all
2019 	bool canWait = (flags & B_RELATIVE_TIMEOUT) == 0 || timeout > 0;
2020 
2021 	Thread* thread = thread_get_current_thread();
2022 
2023 	InterruptsSpinLocker locker(thread->team->signal_lock);
2024 
2025 	bool timedOut = false;
2026 	status_t error = B_OK;
2027 
2028 	while (!timedOut) {
2029 		sigset_t pendingSignals = thread->AllPendingSignals();
2030 
2031 		// If a kill signal is pending, just bail out.
2032 		if ((pendingSignals & KILL_SIGNALS) != 0)
2033 			return B_INTERRUPTED;
2034 
2035 		if ((pendingSignals & requestedSignals) != 0) {
2036 			// get signal with the highest priority
2037 			Signal stackSignal;
2038 			Signal* signal = dequeue_thread_or_team_signal(thread,
2039 				requestedSignals, stackSignal);
2040 			ASSERT(signal != NULL);
2041 
2042 			SignalHandledCaller signalHandledCaller(signal);
2043 			locker.Unlock();
2044 
2045 			info->si_signo = signal->Number();
2046 			info->si_code = signal->SignalCode();
2047 			info->si_errno = signal->ErrorCode();
2048 			info->si_pid = signal->SendingProcess();
2049 			info->si_uid = signal->SendingUser();
2050 			info->si_addr = signal->Address();
2051 			info->si_status = signal->Status();
2052 			info->si_band = signal->PollBand();
2053 			info->si_value = signal->UserValue();
2054 
2055 			return B_OK;
2056 		}
2057 
2058 		if (!canWait)
2059 			return B_WOULD_BLOCK;
2060 
2061 		sigset_t blockedSignals = thread->sig_block_mask;
2062 		if ((pendingSignals & ~blockedSignals) != 0) {
2063 			// Non-blocked signals are pending -- return to let them be handled.
2064 			return B_INTERRUPTED;
2065 		}
2066 
		// No signals yet. Set the signal block mask to exclude the requested
		// signals and wait until we're interrupted.
2069 		thread->sig_block_mask = blockedSignals & ~requestedSignals;
2070 
2071 		while (!has_signals_pending(thread)) {
2072 			thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SIGNAL,
2073 				NULL);
2074 
2075 			locker.Unlock();
2076 
2077 			if ((flags & B_ABSOLUTE_TIMEOUT) != 0) {
2078 				error = thread_block_with_timeout(flags, timeout);
2079 				if (error == B_WOULD_BLOCK || error == B_TIMED_OUT) {
2080 					error = B_WOULD_BLOCK;
2081 						// POSIX requires EAGAIN (B_WOULD_BLOCK) on timeout
2082 					timedOut = true;
2083 
2084 					locker.Lock();
2085 					break;
2086 				}
2087 			} else
2088 				thread_block();
2089 
2090 			locker.Lock();
2091 		}
2092 
2093 		// restore the original block mask
2094 		thread->sig_block_mask = blockedSignals;
2095 
2096 		update_current_thread_signals_flag();
2097 	}
2098 
2099 	// we get here only when timed out
2100 	return error;
2101 }
2102 
2103 
/*!	Replace the current signal block mask and wait until a signal arrives.
	Before returning, the original signal block mask is restored.
2106 */
2107 static status_t
2108 sigsuspend_internal(const sigset_t* _mask)
2109 {
2110 	sigset_t mask = *_mask & BLOCKABLE_SIGNALS;
2111 
2112 	T(SigSuspend(mask));
2113 
2114 	Thread* thread = thread_get_current_thread();
2115 
2116 	InterruptsSpinLocker locker(thread->team->signal_lock);
2117 
2118 	// Set the new block mask and block until interrupted. We might be here
2119 	// after a syscall restart, in which case sigsuspend_original_unblocked_mask
2120 	// will still be set.
2121 	sigset_t oldMask = thread->sigsuspend_original_unblocked_mask != 0
2122 		? ~thread->sigsuspend_original_unblocked_mask : thread->sig_block_mask;
2123 	thread->sig_block_mask = mask & BLOCKABLE_SIGNALS;
2124 
2125 	update_current_thread_signals_flag();
2126 
2127 	while (!has_signals_pending(thread)) {
2128 		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
2129 			THREAD_BLOCK_TYPE_SIGNAL, NULL);
2130 
2131 		locker.Unlock();
2132 		thread_block();
2133 		locker.Lock();
2134 	}
2135 
2136 	// Set sigsuspend_original_unblocked_mask (guaranteed to be non-0 due to
2137 	// BLOCKABLE_SIGNALS). This will indicate to handle_signals() that it is
2138 	// called after a _user_sigsuspend(). It will reset the field after invoking
2139 	// a signal handler, or restart the syscall, if there wasn't anything to
2140 	// handle anymore (e.g. because another thread was faster).
2141 	thread->sigsuspend_original_unblocked_mask = ~oldMask;
2142 
2143 	T(SigSuspendDone());
2144 
2145 	// we're not supposed to actually succeed
2146 	return B_INTERRUPTED;
2147 }
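

/*	Usage sketch (illustrative only, userland view of the sigsuspend() this
	backs): the classic race-free wait -- block the signal, test the
	condition, then atomically unblock and wait. "gotSignal" is a hypothetical
	flag set by the signal handler.

		sigset_t blocked, oldMask;
		sigemptyset(&blocked);
		sigaddset(&blocked, SIGUSR1);
		sigprocmask(SIG_BLOCK, &blocked, &oldMask);

		while (!gotSignal)
			sigsuspend(&oldMask);
				// returns -1 with errno == EINTR after a handler ran

		sigprocmask(SIG_SETMASK, &oldMask, NULL);
*/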
2148 
2149 
2150 static status_t
2151 sigpending_internal(sigset_t* set)
2152 {
2153 	Thread* thread = thread_get_current_thread();
2154 
2155 	if (set == NULL)
2156 		return B_BAD_VALUE;
2157 
2158 	InterruptsSpinLocker locker(thread->team->signal_lock);
2159 
2160 	*set = thread->AllPendingSignals() & thread->sig_block_mask;
2161 
2162 	return B_OK;
2163 }
2164 
2165 
2166 // #pragma mark - syscalls
2167 
2168 
2169 /*!	Sends a signal to a thread, process, or process group.
2170 	\param id Specifies the ID of the target:
		- \code id > 0 \endcode: If \c SIGNAL_FLAG_SEND_TO_THREAD is set in
			\a flags, the target is the thread with ID \a id, otherwise the
			team with ID \a id.
		- \code id == 0 \endcode: If \c SIGNAL_FLAG_SEND_TO_THREAD is set in
			\a flags, the target is the current thread, otherwise the current
			team.
		- \code id == -1 \endcode: The targets are all teams the current team
			has permission to send signals to. Currently not implemented
			correctly.
		- \code id < -1 \endcode: The target is the process group with ID
			\c -id.
2179 	\param signalNumber The signal number. \c 0 to just perform checks, but not
2180 		actually send any signal.
2181 	\param userUserValue A user value to be associated with the signal. Might be
2182 		ignored unless signal queuing is forced. Can be \c NULL.
2183 	\param flags A bitwise or of any number of the following:
2184 		- \c SIGNAL_FLAG_QUEUING_REQUIRED: Signal queuing is required. Fail
2185 			instead of falling back to unqueued signals, when queuing isn't
2186 			possible.
		- \c SIGNAL_FLAG_SEND_TO_THREAD: Interpret the given ID as a
2188 			\c thread_id rather than a \c team_id. Ignored when the \a id is
2189 			\code < 0 \endcode -- then the target is a process group.
2190 	\return \c B_OK on success, another error code otherwise.
2191 */
2192 status_t
2193 _user_send_signal(int32 id, uint32 signalNumber,
2194 	const union sigval* userUserValue, uint32 flags)
2195 {
2196 	// restrict flags to the allowed ones and add B_CHECK_PERMISSION
2197 	flags &= SIGNAL_FLAG_QUEUING_REQUIRED | SIGNAL_FLAG_SEND_TO_THREAD;
2198 	flags |= B_CHECK_PERMISSION;
2199 
2200 	// Copy the user value from userland. If not given, use a dummy value.
2201 	union sigval userValue;
2202 	if (userUserValue != NULL) {
2203 		if (!IS_USER_ADDRESS(userUserValue)
2204 			|| user_memcpy(&userValue, userUserValue, sizeof(userValue))
2205 				!= B_OK) {
2206 			return B_BAD_ADDRESS;
2207 		}
2208 	} else
2209 		userValue.sival_ptr = NULL;
2210 
	// If the signal is to be sent to a thread, delegate to
	// send_signal_internal(). Also do that when id < 0, since in that case the
	// semantics are the same.
2213 	if ((flags & SIGNAL_FLAG_SEND_TO_THREAD) != 0 || id < 0)
2214 		return send_signal_internal(id, signalNumber, userValue, flags);
2215 
2216 	// kill() semantics for id >= 0
2217 	if (signalNumber > MAX_SIGNAL_NUMBER)
2218 		return B_BAD_VALUE;
2219 
2220 	Thread* thread = thread_get_current_thread();
2221 
2222 	Signal signal(signalNumber,
2223 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
2224 		B_OK, thread->team->id);
2225 	signal.SetUserValue(userValue);
2226 
2227 	// send to current team for id == 0, otherwise to the respective team
2228 	return send_signal_to_team_id(id == 0 ? team_get_current_team_id() : id,
2229 		signal, flags);
2230 }
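

/*	Illustrative mapping (a hedged sketch -- the exact libroot wrappers are not
	shown here): POSIX calls that plausibly end up in this syscall and the id
	conventions they would use. "pid", "pgid", "thread" and "value" are
	hypothetical.

		kill(pid, SIGTERM);
			// id > 0, team target (kill() semantics)
		kill(-pgid, SIGHUP);
			// id < -1, process group target
		pthread_kill(thread, SIGUSR1);
			// thread target, i.e. SIGNAL_FLAG_SEND_TO_THREAD behavior
		sigqueue(pid, SIGUSR2, value);
			// queued delivery with a user value,
			// i.e. SIGNAL_FLAG_QUEUING_REQUIRED behavior
*/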
2231 
2232 
2233 status_t
2234 _user_set_signal_mask(int how, const sigset_t *userSet, sigset_t *userOldSet)
2235 {
2236 	sigset_t set, oldSet;
2237 	status_t status;
2238 
2239 	if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
2240 		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
2241 				sizeof(sigset_t)) < B_OK))
2242 		return B_BAD_ADDRESS;
2243 
2244 	status = sigprocmask_internal(how, userSet ? &set : NULL,
2245 		userOldSet ? &oldSet : NULL);
2246 
2247 	// copy old set if asked for
2248 	if (status >= B_OK && userOldSet != NULL
2249 		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
2250 		return B_BAD_ADDRESS;
2251 
2252 	return status;
2253 }
2254 
2255 
2256 status_t
2257 _user_sigaction(int signal, const struct sigaction *userAction,
2258 	struct sigaction *userOldAction)
2259 {
2260 	struct sigaction act, oact;
2261 	status_t status;
2262 
2263 	if ((userAction != NULL && user_memcpy(&act, userAction,
2264 				sizeof(struct sigaction)) < B_OK)
2265 		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction,
2266 				sizeof(struct sigaction)) < B_OK))
2267 		return B_BAD_ADDRESS;
2268 
2269 	status = sigaction_internal(signal, userAction ? &act : NULL,
2270 		userOldAction ? &oact : NULL);
2271 
2272 	// only copy the old action if a pointer has been given
2273 	if (status >= B_OK && userOldAction != NULL
2274 		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
2275 		return B_BAD_ADDRESS;
2276 
2277 	return status;
2278 }
2279 
2280 
2281 status_t
2282 _user_sigwait(const sigset_t *userSet, siginfo_t *userInfo, uint32 flags,
2283 	bigtime_t timeout)
2284 {
2285 	// copy userSet to stack
2286 	sigset_t set;
2287 	if (userSet == NULL || !IS_USER_ADDRESS(userSet)
2288 		|| user_memcpy(&set, userSet, sizeof(sigset_t)) != B_OK) {
2289 		return B_BAD_ADDRESS;
2290 	}
2291 
2292 	// userInfo is optional, but must be a user address when given
2293 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
2294 		return B_BAD_ADDRESS;
2295 
2296 	syscall_restart_handle_timeout_pre(flags, timeout);
2297 
2298 	flags |= B_CAN_INTERRUPT;
2299 
2300 	siginfo_t info;
2301 	status_t status = sigwait_internal(&set, &info, flags, timeout);
2302 	if (status == B_OK) {
		// copy the info back to userland, if userInfo is non-NULL
2304 		if (userInfo != NULL)
2305 			status = user_memcpy(userInfo, &info, sizeof(info));
2306 	} else if (status == B_INTERRUPTED) {
2307 		// make sure we'll be restarted
2308 		Thread* thread = thread_get_current_thread();
2309 		atomic_or(&thread->flags, THREAD_FLAGS_ALWAYS_RESTART_SYSCALL);
2310 	}
2311 
2312 	return syscall_restart_handle_timeout_post(status, timeout);
2313 }
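

/*	Usage sketch (illustrative only, userland view of the sigwaitinfo()/
	sigtimedwait() family backed by this syscall): block SIGUSR1 so it is
	delivered synchronously through the wait instead of through a handler.

		sigset_t set;
		sigemptyset(&set);
		sigaddset(&set, SIGUSR1);
		sigprocmask(SIG_BLOCK, &set, NULL);

		siginfo_t info;
		if (sigwaitinfo(&set, &info) > 0) {
			// info.si_signo, info.si_pid, info.si_value etc. describe the
			// retrieved signal
		}
*/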
2314 
2315 
2316 status_t
2317 _user_sigsuspend(const sigset_t *userMask)
2318 {
2319 	sigset_t mask;
2320 
2321 	if (userMask == NULL)
2322 		return B_BAD_VALUE;
2323 	if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
2324 		return B_BAD_ADDRESS;
2325 
2326 	return sigsuspend_internal(&mask);
2327 }
2328 
2329 
2330 status_t
2331 _user_sigpending(sigset_t *userSet)
2332 {
2333 	sigset_t set;
	status_t status;
2335 
2336 	if (userSet == NULL)
2337 		return B_BAD_VALUE;
2338 	if (!IS_USER_ADDRESS(userSet))
2339 		return B_BAD_ADDRESS;
2340 
2341 	status = sigpending_internal(&set);
2342 	if (status == B_OK
2343 		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
2344 		return B_BAD_ADDRESS;
2345 
2346 	return status;
2347 }
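

/*	Usage sketch (illustrative only, userland view of the sigpending() backed
	by this syscall): check whether a currently blocked SIGINT arrived while it
	was masked.

		sigset_t pending;
		if (sigpending(&pending) == 0 && sigismember(&pending, SIGINT)) {
			// a SIGINT is pending but blocked; it will be delivered once
			// unblocked
		}
*/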
2348 
2349 
2350 status_t
2351 _user_set_signal_stack(const stack_t* newUserStack, stack_t* oldUserStack)
2352 {
2353 	Thread *thread = thread_get_current_thread();
2354 	struct stack_t newStack, oldStack;
2355 	bool onStack = false;
2356 
2357 	if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
2358 				sizeof(stack_t)) < B_OK)
2359 		|| (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
2360 				sizeof(stack_t)) < B_OK))
2361 		return B_BAD_ADDRESS;
2362 
2363 	if (thread->signal_stack_enabled) {
2364 		// determine whether or not the user thread is currently
2365 		// on the active signal stack
2366 		onStack = arch_on_signal_stack(thread);
2367 	}
2368 
2369 	if (oldUserStack != NULL) {
2370 		oldStack.ss_sp = (void *)thread->signal_stack_base;
2371 		oldStack.ss_size = thread->signal_stack_size;
2372 		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
2373 			| (onStack ? SS_ONSTACK : 0);
2374 	}
2375 
2376 	if (newUserStack != NULL) {
2377 		// no flags other than SS_DISABLE are allowed
2378 		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
2379 			return B_BAD_VALUE;
2380 
2381 		if ((newStack.ss_flags & SS_DISABLE) == 0) {
2382 			// check if the size is valid
2383 			if (newStack.ss_size < MINSIGSTKSZ)
2384 				return B_NO_MEMORY;
2385 			if (onStack)
2386 				return B_NOT_ALLOWED;
2387 			if (!IS_USER_ADDRESS(newStack.ss_sp))
2388 				return B_BAD_VALUE;
2389 
2390 			thread->signal_stack_base = (addr_t)newStack.ss_sp;
2391 			thread->signal_stack_size = newStack.ss_size;
2392 			thread->signal_stack_enabled = true;
2393 		} else
2394 			thread->signal_stack_enabled = false;
2395 	}
2396 
2397 	// only copy the old stack info if a pointer has been given
2398 	if (oldUserStack != NULL
2399 		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
2400 		return B_BAD_ADDRESS;
2401 
2402 	return B_OK;
2403 }
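

/*	Usage sketch (illustrative only, userland view of the sigaltstack() backed
	by this syscall): set up an alternate stack so a SIGSEGV handler can still
	run after the normal stack has overflowed. "handle_segv" is a hypothetical
	handler of type void (*)(int).

		static char alternateStack[SIGSTKSZ];

		stack_t stack;
		stack.ss_sp = alternateStack;
		stack.ss_size = sizeof(alternateStack);
		stack.ss_flags = 0;
		sigaltstack(&stack, NULL);

		struct sigaction action;
		memset(&action, 0, sizeof(action));
		action.sa_handler = handle_segv;
		action.sa_flags = SA_ONSTACK;
		sigaction(SIGSEGV, &action, NULL);
*/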
2404 
2405 
2406 /*!	Restores the environment of a function that was interrupted by a signal
2407 	handler call.
2408 	This syscall is invoked when a signal handler function returns. It
2409 	deconstructs the signal handler frame and restores the stack and register
2410 	state of the function that was interrupted by a signal. The syscall is
2411 	therefore somewhat unusual, since it does not return to the calling
2412 	function, but to someplace else. In case the signal interrupted a syscall,
	it will appear as if the syscall just returned. That is also the reason why
2414 	this syscall returns an int64, since it needs to return the value the
2415 	interrupted syscall returns, which is potentially 64 bits wide.
2416 
2417 	\param userSignalFrameData The signal frame data created for the signal
2418 		handler. Potentially some data (e.g. registers) have been modified by
2419 		the signal handler.
2420 	\return In case the signal interrupted a syscall, the return value of that
2421 		syscall. Otherwise (i.e. in case of a (hardware) interrupt/exception)
2422 		the value might need to be tailored such that after a return to userland
	the restored environment is identical to the interrupted one (unless
2424 		explicitly modified). E.g. for x86 to achieve that, the return value
2425 		must contain the eax|edx values of the interrupted environment.
2426 */
2427 int64
2428 _user_restore_signal_frame(struct signal_frame_data* userSignalFrameData)
2429 {
2430 	syscall_64_bit_return_value();
2431 
2432 	Thread *thread = thread_get_current_thread();
2433 
2434 	// copy the signal frame data from userland
2435 	signal_frame_data signalFrameData;
2436 	if (userSignalFrameData == NULL || !IS_USER_ADDRESS(userSignalFrameData)
2437 		|| user_memcpy(&signalFrameData, userSignalFrameData,
2438 			sizeof(signalFrameData)) != B_OK) {
2439 		// We failed to copy the signal frame data from userland. This is a
2440 		// serious problem. Kill the thread.
2441 		dprintf("_user_restore_signal_frame(): thread %" B_PRId32 ": Failed to "
2442 			"copy signal frame data (%p) from userland. Killing thread...\n",
2443 			thread->id, userSignalFrameData);
2444 		kill_thread(thread->id);
2445 		return B_BAD_ADDRESS;
2446 	}
2447 
2448 	// restore the signal block mask
2449 	InterruptsSpinLocker locker(thread->team->signal_lock);
2450 
2451 	thread->sig_block_mask
2452 		= signalFrameData.context.uc_sigmask & BLOCKABLE_SIGNALS;
2453 	update_current_thread_signals_flag();
2454 
2455 	locker.Unlock();
2456 
2457 	// restore the syscall restart related thread flags and the syscall restart
2458 	// parameters
2459 	atomic_and(&thread->flags,
2460 		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
2461 	atomic_or(&thread->flags, signalFrameData.thread_flags
2462 		& (THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
2463 
2464 	memcpy(thread->syscall_restart.parameters,
2465 		signalFrameData.syscall_restart_parameters,
2466 		sizeof(thread->syscall_restart.parameters));
2467 
2468 	// restore the previously stored Thread::user_signal_context
2469 	thread->user_signal_context = signalFrameData.context.uc_link;
2470 	if (thread->user_signal_context != NULL
2471 		&& !IS_USER_ADDRESS(thread->user_signal_context)) {
2472 		thread->user_signal_context = NULL;
2473 	}
2474 
2475 	// let the architecture specific code restore the registers
2476 	return arch_restore_signal_frame(&signalFrameData);
2477 }
2478