xref: /haiku/src/system/kernel/signal.cpp (revision 4a3268e14fff4dd5a456d824b48ce6503368e4c1)
1 /*
2  * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
5  *
6  * Distributed under the terms of the MIT License.
7  */
8 
9 
10 /*! POSIX signals handling routines */
11 
12 
13 #include <ksignal.h>
14 
15 #include <errno.h>
16 #include <stddef.h>
17 #include <string.h>
18 
19 #include <OS.h>
20 #include <KernelExport.h>
21 
22 #include <cpu.h>
23 #include <debug.h>
24 #include <kernel.h>
25 #include <kscheduler.h>
26 #include <sem.h>
27 #include <syscall_restart.h>
28 #include <syscall_utils.h>
29 #include <team.h>
30 #include <thread.h>
31 #include <tracing.h>
32 #include <user_debugger.h>
33 #include <user_thread.h>
34 #include <util/AutoLock.h>
35 
36 
37 //#define TRACE_SIGNAL
38 #ifdef TRACE_SIGNAL
39 #	define TRACE(x) dprintf x
40 #else
41 #	define TRACE(x) ;
42 #endif
43 
44 
// Signals that a thread may block via its signal mask. The kill signals,
// SIGSTOP, and the internal control signals can never be blocked.
#define BLOCKABLE_SIGNALS	\
	(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)	\
	| SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD)	\
	| SIGNAL_TO_MASK(SIGNAL_CANCEL_THREAD)))
// The job control signals that stop a team.
#define STOP_SIGNALS \
	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
// The signals that continue a stopped thread.
#define CONTINUE_SIGNALS \
	(SIGNAL_TO_MASK(SIGCONT) | SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD))
// Signals whose default action is to be ignored.
#define DEFAULT_IGNORE_SIGNALS \
	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
	| SIGNAL_TO_MASK(SIGCONT) \
	| SIGNAL_RANGE_TO_MASK(SIGNAL_REALTIME_MIN, SIGNAL_REALTIME_MAX))
// Signals delivered even when userland has requested signal deferral
// (cf. the defer_signals check in handle_signals()).
#define NON_DEFERRABLE_SIGNALS	\
	(KILL_SIGNALS				\
	| SIGNAL_TO_MASK(SIGILL)	\
	| SIGNAL_TO_MASK(SIGFPE)	\
	| SIGNAL_TO_MASK(SIGSEGV))
63 
64 
// Human-readable names and delivery priorities for all signal numbers
// (index == signal number; index 0 is unused). A signal with a higher
// priority is dequeued before one with a lower priority (cf.
// PendingSignals::_GetHighestPrioritySignal()). The kill signals rank
// above everything else; among the realtime signals the lower numbers
// rank higher.
static const struct {
	const char*	name;
	int32		priority;
} kSignalInfos[__MAX_SIGNO + 1] = {
	{"NONE",			-1},
	{"HUP",				0},
	{"INT",				0},
	{"QUIT",			0},
	{"ILL",				0},
	{"CHLD",			0},
	{"ABRT",			0},
	{"PIPE",			0},
	{"FPE",				0},
	{"KILL",			100},
	{"STOP",			0},
	{"SEGV",			0},
	{"CONT",			0},
	{"TSTP",			0},
	{"ALRM",			0},
	{"TERM",			0},
	{"TTIN",			0},
	{"TTOU",			0},
	{"USR1",			0},
	{"USR2",			0},
	{"WINCH",			0},
	{"KILLTHR",			100},
	{"TRAP",			0},
	{"POLL",			0},
	{"PROF",			0},
	{"SYS",				0},
	{"URG",				0},
	{"VTALRM",			0},
	{"XCPU",			0},
	{"XFSZ",			0},
	{"SIGBUS",			0},
	{"SIGRESERVED1",	0},
	{"SIGRESERVED2",	0},
	{"SIGRT1",			8},
	{"SIGRT2",			7},
	{"SIGRT3",			6},
	{"SIGRT4",			5},
	{"SIGRT5",			4},
	{"SIGRT6",			3},
	{"SIGRT7",			2},
	{"SIGRT8",			1},
	{"invalid 41",		0},
	{"invalid 42",		0},
	{"invalid 43",		0},
	{"invalid 44",		0},
	{"invalid 45",		0},
	{"invalid 46",		0},
	{"invalid 47",		0},
	{"invalid 48",		0},
	{"invalid 49",		0},
	{"invalid 50",		0},
	{"invalid 51",		0},
	{"invalid 52",		0},
	{"invalid 53",		0},
	{"invalid 54",		0},
	{"invalid 55",		0},
	{"invalid 56",		0},
	{"invalid 57",		0},
	{"invalid 58",		0},
	{"invalid 59",		0},
	{"invalid 60",		0},
	{"invalid 61",		0},
	{"invalid 62",		0},
	{"CANCEL_THREAD",	0},
	{"CONTINUE_THREAD",	0}	// priority must be <= that of SIGSTOP
};
135 
136 
137 static inline const char*
138 signal_name(uint32 number)
139 {
140 	return number <= __MAX_SIGNO ? kSignalInfos[number].name : "invalid";
141 }
142 
143 
144 // #pragma mark - SignalHandledCaller
145 
146 
147 struct SignalHandledCaller {
148 	SignalHandledCaller(Signal* signal)
149 		:
150 		fSignal(signal)
151 	{
152 	}
153 
154 	~SignalHandledCaller()
155 	{
156 		Done();
157 	}
158 
159 	void Done()
160 	{
161 		if (fSignal != NULL) {
162 			fSignal->Handled();
163 			fSignal = NULL;
164 		}
165 	}
166 
167 private:
168 	Signal*	fSignal;
169 };
170 
171 
172 // #pragma mark - QueuedSignalsCounter
173 
174 
/*!	Creates a counter with the given limit.
	The limit defines the maximum the counter may reach. Since the
	BReferenceable's reference count is used, it is assumed that the owning
	team holds a reference and the reference count is one greater than the
	counter value.
	\param limit The maximum allowed value the counter may have. When
		\code < 0 \endcode, the value is not limited.
*/
QueuedSignalsCounter::QueuedSignalsCounter(int32 limit)
	:
	fLimit(limit)
{
	// No explicit counter field: fReferenceCount (counter value + 1,
	// cf. class comment above) serves as the counter.
}
188 
189 
/*!	Increments the counter, if the limit allows that.
	\return \c true, if incrementing the counter succeeded, \c false otherwise.
*/
bool
QueuedSignalsCounter::Increment()
{
	// no limit => no problem
	if (fLimit < 0) {
		AcquireReference();
		return true;
	}

	// Increment the reference count manually, so we can check atomically.
	// The old value is compared against fLimit (not fLimit + 1), since the
	// (primary) owner holds one reference that must not count towards the
	// limit. On failure the reference acquired above is given back.
	if (atomic_add(&fReferenceCount, 1) > fLimit) {
		ReleaseReference();
		return false;
	}

	return true;
}
212 
213 
214 // #pragma mark - Signal
215 
216 
/*!	Creates an uninitialized signal.
	The payload fields are expected to be set before use (e.g. via SetTo()).
*/
Signal::Signal()
	:
	fCounter(NULL),
	fPending(false)
{
}
223 
224 
/*!	Copy constructor.
	Copies the signal's payload, but not its queuing state: the clone starts
	out not pending and without a queued-signals counter reference.
*/
Signal::Signal(const Signal& other)
	:
	fCounter(NULL),
	fNumber(other.fNumber),
	fSignalCode(other.fSignalCode),
	fErrorCode(other.fErrorCode),
	fSendingProcess(other.fSendingProcess),
	fSendingUser(other.fSendingUser),
	fStatus(other.fStatus),
	fPollBand(other.fPollBand),
	fAddress(other.fAddress),
	fUserValue(other.fUserValue),
	fPending(false)
{
}
240 
241 
/*!	Creates a signal with the given number, signal code, error code, and
	sending process ID.
	The sending user is initialized from getuid(); all remaining payload
	fields are cleared.
*/
Signal::Signal(uint32 number, int32 signalCode, int32 errorCode,
	pid_t sendingProcess)
	:
	fCounter(NULL),
	fNumber(number),
	fSignalCode(signalCode),
	fErrorCode(errorCode),
	fSendingProcess(sendingProcess),
	fSendingUser(getuid()),
	fStatus(0),
	fPollBand(0),
	fAddress(NULL),
	fPending(false)
{
	fUserValue.sival_ptr = NULL;
}
258 
259 
/*!	Destructor.
	Releases the reference to the queued-signals counter, if the signal was
	created via CreateQueuable() (otherwise \c fCounter is \c NULL).
*/
Signal::~Signal()
{
	if (fCounter != NULL)
		fCounter->ReleaseReference();
}
265 
266 
/*!	Creates a queuable clone of the given signal.
	Also enforces the current team's signal queuing limit.

	\param signal The signal to clone.
	\param queuingRequired If \c true, the function will return an error code
		when creating the clone fails for any reason. Otherwise, the function
		will set \a _signalToQueue to \c NULL, but still return \c B_OK.
	\param _signalToQueue Return parameter. Set to the clone of the signal.
	\return When \c queuingRequired is \c false, always \c B_OK. Otherwise
		\c B_OK, when creating the signal clone succeeds, another error code,
		when it fails.
*/
/*static*/ status_t
Signal::CreateQueuable(const Signal& signal, bool queuingRequired,
	Signal*& _signalToQueue)
{
	_signalToQueue = NULL;

	// If interrupts are disabled, we can't allocate a signal.
	if (!are_interrupts_enabled())
		return queuingRequired ? B_BAD_VALUE : B_OK;

	// increment the queued signals counter
	QueuedSignalsCounter* counter
		= thread_get_current_thread()->team->QueuedSignalsCounter();
	if (!counter->Increment())
		return queuingRequired ? EAGAIN : B_OK;

	// allocate the signal
	Signal* signalToQueue = new(std::nothrow) Signal(signal);
	if (signalToQueue == NULL) {
		counter->Decrement();
		return queuingRequired ? B_NO_MEMORY : B_OK;
	}

	// The signal takes over the counter reference acquired by Increment();
	// it is released again in ~Signal().
	signalToQueue->fCounter = counter;

	_signalToQueue = signalToQueue;
	return B_OK;
}
307 
/*!	(Re)initializes the signal to an unqueued \c SI_USER signal with the
	given number, sent by the current team.
	\param number The signal number.
*/
void
Signal::SetTo(uint32 number)
{
	Team* team = thread_get_current_thread()->team;

	fNumber = number;
	fSignalCode = SI_USER;
	fErrorCode = 0;
	fSendingProcess = team->id;
	fSendingUser = team->effective_uid;
		// assuming scheduler lock is being held
	fStatus = 0;
	fPollBand = 0;
	fAddress = NULL;
	fUserValue.sival_ptr = NULL;
}
324 
325 
/*!	Returns the delivery priority of this signal, as given by the
	kSignalInfos table.
*/
int32
Signal::Priority() const
{
	return kSignalInfos[fNumber].priority;
}
331 
332 
/*!	Marks the signal as handled by releasing a reference (the one the
	dequeuer/owner of the signal holds).
*/
void
Signal::Handled()
{
	ReleaseReference();
}
338 
339 
/*!	Invoked when the last reference to the signal has been released.
	Deletes the signal -- via deferred_delete() when interrupts are
	disabled, since an immediate delete is not an option then.
*/
void
Signal::LastReferenceReleased()
{
	if (are_interrupts_enabled())
		delete this;
	else
		deferred_delete(this);
}
348 
349 
350 // #pragma mark - PendingSignals
351 
352 
/*!	Creates an empty set of pending signals. */
PendingSignals::PendingSignals()
	:
	fQueuedSignalsMask(0),
	fUnqueuedSignalsMask(0)
{
}
359 
360 
/*!	Destructor. Releases the references to any still queued signals. */
PendingSignals::~PendingSignals()
{
	Clear();
}
365 
366 
/*!	Of the signals in \a nonBlocked returns the priority of that with the
	highest priority.
	\param nonBlocked The mask with the non-blocked signals.
	\return The priority of the highest priority non-blocked signal, or, if all
		signals are blocked, \c -1.
*/
int32
PendingSignals::HighestSignalPriority(sigset_t nonBlocked) const
{
	// the out parameters are not needed here -- only the priority is
	Signal* queuedSignal;
	int32 unqueuedSignal;
	return _GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal);
}
380 
381 
/*!	Removes all pending signals, releasing the references to the queued
	ones.
*/
void
PendingSignals::Clear()
{
	// release references of all queued signals
	while (Signal* signal = fQueuedSignals.RemoveHead())
		signal->Handled();

	fQueuedSignalsMask = 0;
	fUnqueuedSignalsMask = 0;
}
392 
393 
/*!	Adds a signal.
	Takes over the reference to the signal from the caller.
	The signal is inserted before the first queued signal with a strictly
	lower priority, keeping the queue sorted by descending priority with
	FIFO order among signals of equal priority.
*/
void
PendingSignals::AddSignal(Signal* signal)
{
	// queue according to priority
	int32 priority = signal->Priority();
	Signal* otherSignal = NULL;
	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
			(otherSignal = it.Next()) != NULL;) {
		if (priority > otherSignal->Priority())
			break;
	}

	// otherSignal == NULL appends at the tail
	fQueuedSignals.InsertBefore(otherSignal, signal);
	signal->SetPending(true);

	fQueuedSignalsMask |= SIGNAL_TO_MASK(signal->Number());
}
414 
415 
/*!	Removes the given signal from the queue and updates the queued-signals
	mask. Unlike RemoveSignals() the signal's reference is not released
	here; that is left to the caller.
*/
void
PendingSignals::RemoveSignal(Signal* signal)
{
	signal->SetPending(false);
	fQueuedSignals.Remove(signal);
	_UpdateQueuedSignalMask();
}
423 
424 
/*!	Removes all pending signals -- queued as well as unqueued ones -- that
	are contained in \a mask. The references to removed queued signals are
	released.
	\param mask The mask of signals to remove.
*/
void
PendingSignals::RemoveSignals(sigset_t mask)
{
	// remove from queued signals
	if ((fQueuedSignalsMask & mask) != 0) {
		for (SignalList::Iterator it = fQueuedSignals.GetIterator();
				Signal* signal = it.Next();) {
			// remove signal, if in mask
			if ((SIGNAL_TO_MASK(signal->Number()) & mask) != 0) {
				it.Remove();
				signal->SetPending(false);
				signal->Handled();
			}
		}

		fQueuedSignalsMask &= ~mask;
	}

	// remove from unqueued signals
	fUnqueuedSignalsMask &= ~mask;
}
446 
447 
/*!	Removes and returns a signal in \a nonBlocked that has the highest priority.
	The caller gets a reference to the returned signal, if any.
	\param nonBlocked The mask of non-blocked signals.
	\param buffer If the signal is not queued this buffer is returned. In this
		case the method acquires a reference to \a buffer, so that the caller
		gets a reference also in this case.
	\return The removed signal or \c NULL, if all signals are blocked.
*/
Signal*
PendingSignals::DequeueSignal(sigset_t nonBlocked, Signal& buffer)
{
	// find the signal with the highest priority
	Signal* queuedSignal;
	int32 unqueuedSignal;
	if (_GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal) < 0)
		return NULL;

	// if it is a queued signal, dequeue it
	if (queuedSignal != NULL) {
		// the reference formerly held by the queue is passed to the caller
		fQueuedSignals.Remove(queuedSignal);
		queuedSignal->SetPending(false);
		_UpdateQueuedSignalMask();
		return queuedSignal;
	}

	// it is unqueued -- remove from mask
	fUnqueuedSignalsMask &= ~SIGNAL_TO_MASK(unqueuedSignal);

	// init buffer
	buffer.SetTo(unqueuedSignal);
	buffer.AcquireReference();
	return &buffer;
}
481 
482 
/*!	Of the signals in \a nonBlocked returns the priority of that with the
	highest priority.
	\param nonBlocked The mask with the non-blocked signals.
	\param _queuedSignal If the found signal is a queued signal, the variable
		will be set to that signal, otherwise to \c NULL.
	\param _unqueuedSignal If the found signal is an unqueued signal, the
		variable is set to that signal's number, otherwise to \c -1.
	\return The priority of the highest priority non-blocked signal, or, if all
		signals are blocked, \c -1.
*/
int32
PendingSignals::_GetHighestPrioritySignal(sigset_t nonBlocked,
	Signal*& _queuedSignal, int32& _unqueuedSignal) const
{
	// Check queued signals. The queue is sorted by descending priority
	// (cf. AddSignal()), so the first non-blocked signal found is the
	// highest-priority queued candidate.
	Signal* queuedSignal = NULL;
	int32 queuedPriority = -1;

	if ((fQueuedSignalsMask & nonBlocked) != 0) {
		for (SignalList::ConstIterator it = fQueuedSignals.GetIterator();
				Signal* signal = it.Next();) {
			if ((SIGNAL_TO_MASK(signal->Number()) & nonBlocked) != 0) {
				queuedPriority = signal->Priority();
				queuedSignal = signal;
				break;
			}
		}
	}

	// check unqueued signals, scanning the mask bit by bit
	int32 unqueuedSignal = -1;
	int32 unqueuedPriority = -1;

	sigset_t unqueuedSignals = fUnqueuedSignalsMask & nonBlocked;
	if (unqueuedSignals != 0) {
		int32 signal = 1;
		while (unqueuedSignals != 0) {
			sigset_t mask = SIGNAL_TO_MASK(signal);
			if ((unqueuedSignals & mask) != 0) {
				int32 priority = kSignalInfos[signal].priority;
				if (priority > unqueuedPriority) {
					unqueuedSignal = signal;
					unqueuedPriority = priority;
				}
				unqueuedSignals &= ~mask;
			}

			signal++;
		}
	}

	// Return found queued or unqueued signal, whichever has the higher
	// priority. On a tie the queued signal wins.
	if (queuedPriority >= unqueuedPriority) {
		_queuedSignal = queuedSignal;
		_unqueuedSignal = -1;
		return queuedPriority;
	}

	_queuedSignal = NULL;
	_unqueuedSignal = unqueuedSignal;
	return unqueuedPriority;
}
546 
547 
/*!	Recomputes \c fQueuedSignalsMask from the signals currently in the
	queue.
*/
void
PendingSignals::_UpdateQueuedSignalMask()
{
	sigset_t mask = 0;
	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
			Signal* signal = it.Next();) {
		mask |= SIGNAL_TO_MASK(signal->Number());
	}

	fQueuedSignalsMask = mask;
}
559 
560 
561 // #pragma mark - signal tracing
562 
563 
#if SIGNAL_TRACING

namespace SignalTracing {


// Trace entry: a signal is being handled (cf. handle_signals()).
class HandleSignal : public AbstractTraceEntry {
	public:
		HandleSignal(uint32 signal)
			:
			fSignal(signal)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal handle:  %" B_PRIu32 " (%s)" , fSignal,
				signal_name(fSignal));
		}

	private:
		uint32		fSignal;
};


// Trace entry: a userland signal handler is about to be executed.
class ExecuteSignalHandler : public AbstractTraceEntry {
	public:
		ExecuteSignalHandler(uint32 signal, struct sigaction* handler)
			:
			fSignal(signal),
			fHandler((void*)handler->sa_handler)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal exec handler: signal: %" B_PRIu32 " (%s), "
				"handler: %p", fSignal, signal_name(fSignal), fHandler);
		}

	private:
		uint32	fSignal;
		void*	fHandler;
};


// Trace entry: a signal is sent to a target thread/team.
class SendSignal : public AbstractTraceEntry {
	public:
		SendSignal(pid_t target, uint32 signal, uint32 flags)
			:
			fTarget(target),
			fSignal(signal),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal send: target: %" B_PRId32 ", signal: %" B_PRIu32
				" (%s), flags: %#" B_PRIx32, fTarget, fSignal,
				signal_name(fSignal), fFlags);
		}

	private:
		pid_t	fTarget;
		uint32	fSignal;
		uint32	fFlags;
};


// Trace entry: a signal action is installed (sigaction()).
class SigAction : public AbstractTraceEntry {
	public:
		SigAction(uint32 signal, const struct sigaction* act)
			:
			fSignal(signal),
			fAction(*act)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal action: signal: %" B_PRIu32 " (%s), "
				"action: {handler: %p, flags: %#x, mask: %#" B_PRIx64 "}",
				fSignal, signal_name(fSignal), fAction.sa_handler,
				fAction.sa_flags, (uint64)fAction.sa_mask);
		}

	private:
		uint32				fSignal;
		struct sigaction	fAction;
};


// Trace entry: the signal block mask is changed (sigprocmask()). The old
// mask is captured from the current thread at construction time.
class SigProcMask : public AbstractTraceEntry {
	public:
		SigProcMask(int how, sigset_t mask)
			:
			fHow(how),
			fMask(mask),
			fOldMask(thread_get_current_thread()->sig_block_mask)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			const char* how = "invalid";
			switch (fHow) {
				case SIG_BLOCK:
					how = "block";
					break;
				case SIG_UNBLOCK:
					how = "unblock";
					break;
				case SIG_SETMASK:
					how = "set";
					break;
			}

			out.Print("signal proc mask: %s 0x%llx, old mask: 0x%llx", how,
				(long long)fMask, (long long)fOldMask);
		}

	private:
		int			fHow;
		sigset_t	fMask;
		sigset_t	fOldMask;
};


// Trace entry: a sigsuspend() begins.
class SigSuspend : public AbstractTraceEntry {
	public:
		SigSuspend(sigset_t mask)
			:
			fMask(mask),
			fOldMask(thread_get_current_thread()->sig_block_mask)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal suspend: %#llx, old mask: %#llx",
				(long long)fMask, (long long)fOldMask);
		}

	private:
		sigset_t	fMask;
		sigset_t	fOldMask;
};


// Trace entry: a sigsuspend() returns; records the then-pending signals.
class SigSuspendDone : public AbstractTraceEntry {
	public:
		SigSuspendDone()
			:
			fSignals(thread_get_current_thread()->ThreadPendingSignals())
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal suspend done: %#" B_PRIx32, fSignals);
		}

	private:
		uint32		fSignals;
};

}	// namespace SignalTracing

#	define T(x)	new(std::nothrow) SignalTracing::x

#else
#	define T(x)
#endif	// SIGNAL_TRACING
744 
745 
746 // #pragma mark -
747 
748 
/*!	Updates the given thread's Thread::flags field according to what signals are
	pending.
	THREAD_FLAGS_SIGNALS_PENDING is set iff at least one non-blocked signal
	is pending for the thread.
	The caller must hold the scheduler lock.
*/
static void
update_thread_signals_flag(Thread* thread)
{
	sigset_t mask = ~thread->sig_block_mask;
	if ((thread->AllPendingSignals() & mask) != 0)
		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
}
762 
763 
/*!	Updates the current thread's Thread::flags field according to what signals
	are pending.
	The caller must hold the scheduler lock.
*/
static void
update_current_thread_signals_flag()
{
	update_thread_signals_flag(thread_get_current_thread());
}
773 
774 
/*!	Updates all of the given team's threads' Thread::flags fields according to
	what signals are pending.
	The caller must hold the scheduler lock.
*/
static void
update_team_threads_signal_flag(Team* team)
{
	// walk the team's singly linked thread list
	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		update_thread_signals_flag(thread);
	}
}
787 
788 
/*!	Notifies the user debugger about a signal to be handled.

	The caller must not hold any locks.

	\param thread The current thread.
	\param signal The signal to be handled.
	\param handler The installed signal handler for the signal.
	\param deadly Indicates whether the signal is deadly.
	\return \c true, if the signal shall be handled, \c false, if it shall be
		ignored.
*/
static bool
notify_debugger(Thread* thread, Signal* signal, struct sigaction& handler,
	bool deadly)
{
	uint64 signalMask = SIGNAL_TO_MASK(signal->Number());

	// first check the ignore signal masks the debugger specified for the thread
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// the one-shot ignore mask is consumed on match
	if ((thread->debug_info.ignore_signals_once & signalMask) != 0) {
		thread->debug_info.ignore_signals_once &= ~signalMask;
		return true;
	}

	if ((thread->debug_info.ignore_signals & signalMask) != 0)
		return true;

	// drop the debug info lock before calling out to the debugger
	threadDebugInfoLocker.Unlock();

	// deliver the event
	return user_debug_handle_signal(signal->Number(), &handler, deadly);
}
822 
823 
/*!	Removes and returns a signal with the highest priority in \a nonBlocked that
	is pending in the given thread or its team.
	After dequeuing the signal the Thread::flags field of the affected threads
	are updated.
	The caller gets a reference to the returned signal, if any.
	The caller must hold the scheduler lock.
	\param thread The thread.
	\param nonBlocked The mask of non-blocked signals.
	\param buffer If the signal is not queued this buffer is returned. In this
		case the method acquires a reference to \a buffer, so that the caller
		gets a reference also in this case.
	\return The removed signal or \c NULL, if all signals are blocked.
*/
static Signal*
dequeue_thread_or_team_signal(Thread* thread, sigset_t nonBlocked,
	Signal& buffer)
{
	Team* team = thread->team;
	Signal* signal;
	// strict ">": on equal priorities the thread-local signal wins
	if (team->HighestPendingSignalPriority(nonBlocked)
			> thread->HighestPendingSignalPriority(nonBlocked)) {
		signal = team->DequeuePendingSignal(nonBlocked, buffer);
		update_team_threads_signal_flag(team);
	} else {
		signal = thread->DequeuePendingSignal(nonBlocked, buffer);
		update_thread_signals_flag(thread);
	}

	return signal;
}
854 
855 
/*!	Prepares the userland stack frame for invoking a signal handler.
	Fills in a signal_frame_data structure with the signal info, the context
	to restore, the handler to invoke, and the thread's syscall restart
	state, and lets the architecture specific code set up the actual frame.
	\param thread The thread for which to set up the frame.
	\param action The signal action, supplying handler, flags, and user data.
	\param signal The signal to be delivered.
	\param signalMask The signal mask to be in effect when the handler
		returns (stored in the context's \c uc_sigmask).
	\return \c B_OK, if successful, another error code otherwise.
*/
static status_t
setup_signal_frame(Thread* thread, struct sigaction* action, Signal* signal,
	sigset_t signalMask)
{
	// prepare the data, we need to copy onto the user stack
	signal_frame_data frameData;

	// signal info
	frameData.info.si_signo = signal->Number();
	frameData.info.si_code = signal->SignalCode();
	frameData.info.si_errno = signal->ErrorCode();
	frameData.info.si_pid = signal->SendingProcess();
	frameData.info.si_uid = signal->SendingUser();
	frameData.info.si_addr = signal->Address();
	frameData.info.si_status = signal->Status();
	frameData.info.si_band = signal->PollBand();
	frameData.info.si_value = signal->UserValue();

	// context
	frameData.context.uc_link = thread->user_signal_context;
	frameData.context.uc_sigmask = signalMask;
	// uc_stack and uc_mcontext are filled in by the architecture specific code.

	// user data
	frameData.user_data = action->sa_userdata;

	// handler function -- SA_SIGINFO selects the three-argument variant
	frameData.siginfo_handler = (action->sa_flags & SA_SIGINFO) != 0;
	frameData.handler = frameData.siginfo_handler
		? (void*)action->sa_sigaction : (void*)action->sa_handler;

	// thread flags -- save and clear the thread's syscall restart related
	// flags
	frameData.thread_flags = atomic_and(&thread->flags,
		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));

	// syscall restart related fields
	memcpy(frameData.syscall_restart_parameters,
		thread->syscall_restart.parameters,
		sizeof(frameData.syscall_restart_parameters));

	// commpage address
	frameData.commpage_address = thread->team->commpage_address;

	// syscall_restart_return_value is filled in by the architecture specific
	// code.

	return arch_setup_signal_frame(thread, action, &frameData);
}
905 
906 
907 /*! Actually handles pending signals -- i.e. the thread will exit, a custom
908 	signal handler is prepared, or whatever the signal demands.
909 	The function will not return, when a deadly signal is encountered. The
910 	function will suspend the thread indefinitely, when a stop signal is
911 	encountered.
912 	Interrupts must be enabled.
913 	\param thread The current thread.
914 */
915 void
916 handle_signals(Thread* thread)
917 {
918 	Team* team = thread->team;
919 
920 	TeamLocker teamLocker(team);
921 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
922 
923 	// If userland requested to defer signals, we check now, if this is
924 	// possible.
925 	sigset_t nonBlockedMask = ~thread->sig_block_mask;
926 	sigset_t signalMask = thread->AllPendingSignals() & nonBlockedMask;
927 
928 	if (thread->user_thread->defer_signals > 0
929 		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0
930 		&& thread->sigsuspend_original_unblocked_mask == 0) {
931 		thread->user_thread->pending_signals = signalMask;
932 		return;
933 	}
934 
935 	thread->user_thread->pending_signals = 0;
936 
937 	// determine syscall restart behavior
938 	uint32 restartFlags = atomic_and(&thread->flags,
939 		~THREAD_FLAGS_DONT_RESTART_SYSCALL);
940 	bool alwaysRestart
941 		= (restartFlags & THREAD_FLAGS_ALWAYS_RESTART_SYSCALL) != 0;
942 	bool restart = alwaysRestart
943 		|| (restartFlags & THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;
944 
945 	// Loop until we've handled all signals.
946 	bool initialIteration = true;
947 	while (true) {
948 		if (initialIteration) {
949 			initialIteration = false;
950 		} else {
951 			teamLocker.Lock();
952 			schedulerLocker.Lock();
953 
954 			signalMask = thread->AllPendingSignals() & nonBlockedMask;
955 		}
956 
957 		// Unless SIGKILL[THR] are pending, check, if the thread shall stop for
958 		// debugging.
959 		if ((signalMask & KILL_SIGNALS) == 0
960 			&& (atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP)
961 				!= 0) {
962 			schedulerLocker.Unlock();
963 			teamLocker.Unlock();
964 
965 			user_debug_stop_thread();
966 			continue;
967 		}
968 
969 		// We're done, if there aren't any pending signals anymore.
970 		if ((signalMask & nonBlockedMask) == 0)
971 			break;
972 
973 		// get pending non-blocked thread or team signal with the highest
974 		// priority
975 		Signal stackSignal;
976 		Signal* signal = dequeue_thread_or_team_signal(thread, nonBlockedMask,
977 			stackSignal);
978 		ASSERT(signal != NULL);
979 		SignalHandledCaller signalHandledCaller(signal);
980 
981 		schedulerLocker.Unlock();
982 
983 		// get the action for the signal
984 		struct sigaction handler;
985 		if (signal->Number() <= MAX_SIGNAL_NUMBER) {
986 			handler = team->SignalActionFor(signal->Number());
987 		} else {
988 			handler.sa_handler = SIG_DFL;
989 			handler.sa_flags = 0;
990 		}
991 
992 		if ((handler.sa_flags & SA_ONESHOT) != 0
993 			&& handler.sa_handler != SIG_IGN && handler.sa_handler != SIG_DFL) {
994 			team->SignalActionFor(signal->Number()).sa_handler = SIG_DFL;
995 		}
996 
997 		T(HandleSignal(signal->Number()));
998 
999 		teamLocker.Unlock();
1000 
1001 		// debug the signal, if a debugger is installed and the signal debugging
1002 		// flag is set
1003 		bool debugSignal = (~atomic_get(&team->debug_info.flags)
1004 				& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS))
1005 			== 0;
1006 
1007 		// handle the signal
1008 		TRACE(("Thread %" B_PRId32 " received signal %s\n", thread->id,
1009 			kSignalInfos[signal->Number()].name));
1010 
1011 		if (handler.sa_handler == SIG_IGN) {
1012 			// signal is to be ignored
1013 			// TODO: apply zombie cleaning on SIGCHLD
1014 
1015 			// notify the debugger
1016 			if (debugSignal)
1017 				notify_debugger(thread, signal, handler, false);
1018 			continue;
1019 		} else if (handler.sa_handler == SIG_DFL) {
1020 			// default signal behaviour
1021 
1022 			// realtime signals are ignored by default
1023 			if (signal->Number() >= SIGNAL_REALTIME_MIN
1024 				&& signal->Number() <= SIGNAL_REALTIME_MAX) {
1025 				// notify the debugger
1026 				if (debugSignal)
1027 					notify_debugger(thread, signal, handler, false);
1028 				continue;
1029 			}
1030 
1031 			bool killTeam = false;
1032 			switch (signal->Number()) {
1033 				case SIGCHLD:
1034 				case SIGWINCH:
1035 				case SIGURG:
1036 					// notify the debugger
1037 					if (debugSignal)
1038 						notify_debugger(thread, signal, handler, false);
1039 					continue;
1040 
1041 				case SIGNAL_CANCEL_THREAD:
1042 					// set up the signal handler
1043 					handler.sa_handler = thread->cancel_function;
1044 					handler.sa_flags = 0;
1045 					handler.sa_mask = 0;
1046 					handler.sa_userdata = NULL;
1047 
1048 					restart = false;
1049 						// we always want to interrupt
1050 					break;
1051 
1052 				case SIGNAL_CONTINUE_THREAD:
1053 					// prevent syscall restart, but otherwise ignore
1054 					restart = false;
1055 					atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1056 					continue;
1057 
1058 				case SIGCONT:
1059 					// notify the debugger
1060 					if (debugSignal
1061 						&& !notify_debugger(thread, signal, handler, false))
1062 						continue;
1063 
1064 					// notify threads waiting for team state changes
1065 					if (thread == team->main_thread) {
1066 						team->LockTeamAndParent(false);
1067 
1068 						team_set_job_control_state(team,
1069 							JOB_CONTROL_STATE_CONTINUED, signal, false);
1070 
1071 						team->UnlockTeamAndParent();
1072 
1073 						// The standard states that the system *may* send a
1074 						// SIGCHLD when a child is continued. I haven't found
1075 						// a good reason why we would want to, though.
1076 					}
1077 					continue;
1078 
1079 				case SIGSTOP:
1080 				case SIGTSTP:
1081 				case SIGTTIN:
1082 				case SIGTTOU:
1083 				{
1084 					// notify the debugger
1085 					if (debugSignal
1086 						&& !notify_debugger(thread, signal, handler, false))
1087 						continue;
1088 
1089 					// The terminal-sent stop signals are allowed to stop the
1090 					// process only, if it doesn't belong to an orphaned process
1091 					// group. Otherwise the signal must be discarded.
1092 					team->LockProcessGroup();
1093 					AutoLocker<ProcessGroup> groupLocker(team->group, true);
1094 					if (signal->Number() != SIGSTOP
1095 						&& team->group->IsOrphaned()) {
1096 						continue;
1097 					}
1098 
1099 					// notify threads waiting for team state changes
1100 					if (thread == team->main_thread) {
1101 						team->LockTeamAndParent(false);
1102 
1103 						team_set_job_control_state(team,
1104 							JOB_CONTROL_STATE_STOPPED, signal, false);
1105 
1106 						// send a SIGCHLD to the parent (if it does have
1107 						// SA_NOCLDSTOP defined)
1108 						Team* parentTeam = team->parent;
1109 
1110 						struct sigaction& parentHandler
1111 							= parentTeam->SignalActionFor(SIGCHLD);
1112 						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
1113 							Signal childSignal(SIGCHLD, CLD_STOPPED, B_OK,
1114 								team->id);
1115 							childSignal.SetStatus(signal->Number());
1116 							childSignal.SetSendingUser(signal->SendingUser());
1117 							send_signal_to_team(parentTeam, childSignal, 0);
1118 						}
1119 
1120 						team->UnlockTeamAndParent();
1121 					}
1122 
1123 					groupLocker.Unlock();
1124 
1125 					// Suspend the thread, unless there's already a signal to
1126 					// continue or kill pending.
1127 					InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1128 					if ((thread->AllPendingSignals()
1129 							& (CONTINUE_SIGNALS | KILL_SIGNALS)) == 0) {
1130 						thread->next_state = B_THREAD_SUSPENDED;
1131 						scheduler_reschedule();
1132 					}
1133 					schedulerLocker.Unlock();
1134 
1135 					continue;
1136 				}
1137 
1138 				case SIGSEGV:
1139 				case SIGBUS:
1140 				case SIGFPE:
1141 				case SIGILL:
1142 				case SIGTRAP:
1143 				case SIGABRT:
1144 				case SIGKILL:
1145 				case SIGQUIT:
1146 				case SIGPOLL:
1147 				case SIGPROF:
1148 				case SIGSYS:
1149 				case SIGVTALRM:
1150 				case SIGXCPU:
1151 				case SIGXFSZ:
1152 				default:
1153 					TRACE(("Shutting down team %" B_PRId32 " due to signal %"
1154 						B_PRIu32 " received in thread %" B_PRIu32 " \n",
1155 						team->id, signal->Number(), thread->id));
1156 
1157 					// This signal kills the team regardless which thread
1158 					// received it.
1159 					killTeam = true;
1160 
1161 					// fall through
1162 				case SIGKILLTHR:
1163 					// notify the debugger
1164 					if (debugSignal && signal->Number() != SIGKILL
1165 						&& signal->Number() != SIGKILLTHR
1166 						&& !notify_debugger(thread, signal, handler, true)) {
1167 						continue;
1168 					}
1169 
1170 					if (killTeam || thread == team->main_thread) {
1171 						// The signal is terminal for the team or the thread is
1172 						// the main thread. In either case the team is going
1173 						// down. Set its exit status, if that didn't happen yet.
1174 						teamLocker.Lock();
1175 
1176 						if (!team->exit.initialized) {
1177 							team->exit.reason = CLD_KILLED;
1178 							team->exit.signal = signal->Number();
1179 							team->exit.signaling_user = signal->SendingUser();
1180 							team->exit.status = 0;
1181 							team->exit.initialized = true;
1182 						}
1183 
1184 						teamLocker.Unlock();
1185 
1186 						// If this is not the main thread, send it a SIGKILLTHR
1187 						// so that the team terminates.
1188 						if (thread != team->main_thread) {
1189 							Signal childSignal(SIGKILLTHR, SI_USER, B_OK,
1190 								team->id);
1191 							send_signal_to_thread_id(team->id, childSignal, 0);
1192 						}
1193 					}
1194 
1195 					// explicitly get rid of the signal reference, since
1196 					// thread_exit() won't return
1197 					signalHandledCaller.Done();
1198 
1199 					thread_exit();
1200 						// won't return
1201 			}
1202 		}
1203 
1204 		// User defined signal handler
1205 
1206 		// notify the debugger
1207 		if (debugSignal && !notify_debugger(thread, signal, handler, false))
1208 			continue;
1209 
1210 		if (!restart
1211 				|| (!alwaysRestart && (handler.sa_flags & SA_RESTART) == 0)) {
1212 			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1213 		}
1214 
1215 		T(ExecuteSignalHandler(signal->Number(), &handler));
1216 
1217 		TRACE(("### Setting up custom signal handler frame...\n"));
1218 
1219 		// save the old block mask -- we may need to adjust it for the handler
1220 		schedulerLocker.Lock();
1221 
1222 		sigset_t oldBlockMask = thread->sigsuspend_original_unblocked_mask != 0
1223 			? ~thread->sigsuspend_original_unblocked_mask
1224 			: thread->sig_block_mask;
1225 
1226 		// Update the block mask while the signal handler is running -- it
1227 		// will be automatically restored when the signal frame is left.
1228 		thread->sig_block_mask |= handler.sa_mask & BLOCKABLE_SIGNALS;
1229 
1230 		if ((handler.sa_flags & SA_NOMASK) == 0) {
1231 			thread->sig_block_mask
1232 				|= SIGNAL_TO_MASK(signal->Number()) & BLOCKABLE_SIGNALS;
1233 		}
1234 
1235 		update_current_thread_signals_flag();
1236 
1237 		schedulerLocker.Unlock();
1238 
1239 		setup_signal_frame(thread, &handler, signal, oldBlockMask);
1240 
1241 		// Reset sigsuspend_original_unblocked_mask. It would have been set by
1242 		// sigsuspend_internal(). In that case, above we set oldBlockMask
1243 		// accordingly so that after the handler returns the thread's signal
1244 		// mask is reset.
1245 		thread->sigsuspend_original_unblocked_mask = 0;
1246 
1247 		return;
1248 	}
1249 
1250 	// We have not handled any signal (respectively only ignored ones).
1251 
1252 	// If sigsuspend_original_unblocked_mask is non-null, we came from a
1253 	// sigsuspend_internal(). Not having handled any signal, we should restart
1254 	// the syscall.
1255 	if (thread->sigsuspend_original_unblocked_mask != 0) {
1256 		restart = true;
1257 		atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
1258 	} else if (!restart) {
1259 		// clear syscall restart thread flag, if we're not supposed to restart
1260 		// the syscall
1261 		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1262 	}
1263 }
1264 
1265 
1266 /*!	Checks whether the given signal is blocked for the given team (i.e. all of
1267 	its threads).
1268 	The caller must hold the team's lock and the scheduler lock.
1269 */
1270 bool
1271 is_team_signal_blocked(Team* team, int signal)
1272 {
1273 	sigset_t mask = SIGNAL_TO_MASK(signal);
1274 
1275 	for (Thread* thread = team->thread_list; thread != NULL;
1276 			thread = thread->team_next) {
1277 		if ((thread->sig_block_mask & mask) == 0)
1278 			return false;
1279 	}
1280 
1281 	return true;
1282 }
1283 
1284 
1285 /*!	Gets (guesses) the current thread's currently used stack from the given
1286 	stack pointer.
1287 	Fills in \a stack with either the signal stack or the thread's user stack.
1288 	\param address A stack pointer address to be used to determine the used
1289 		stack.
1290 	\param stack Filled in by the function.
1291 */
1292 void
1293 signal_get_user_stack(addr_t address, stack_t* stack)
1294 {
1295 	// If a signal stack is enabled for the stack and the address is within it,
1296 	// return the signal stack. In all other cases return the thread's user
1297 	// stack, even if the address doesn't lie within it.
1298 	Thread* thread = thread_get_current_thread();
1299 	if (thread->signal_stack_enabled && address >= thread->signal_stack_base
1300 		&& address < thread->signal_stack_base + thread->signal_stack_size) {
1301 		stack->ss_sp = (void*)thread->signal_stack_base;
1302 		stack->ss_size = thread->signal_stack_size;
1303 	} else {
1304 		stack->ss_sp = (void*)thread->user_stack_base;
1305 		stack->ss_size = thread->user_stack_size;
1306 	}
1307 
1308 	stack->ss_flags = 0;
1309 }
1310 
1311 
1312 /*!	Checks whether any non-blocked signal is pending for the current thread.
1313 	The caller must hold the scheduler lock.
1314 	\param thread The current thread.
1315 */
1316 static bool
1317 has_signals_pending(Thread* thread)
1318 {
1319 	return (thread->AllPendingSignals() & ~thread->sig_block_mask) != 0;
1320 }
1321 
1322 
1323 /*!	Checks whether the current user has permission to send a signal to the given
1324 	target team.
1325 
1326 	The caller must hold the scheduler lock or \a team's lock.
1327 
1328 	\param team The target team.
1329 	\param schedulerLocked \c true, if the caller holds the scheduler lock,
1330 		\c false otherwise.
1331 */
1332 static bool
1333 has_permission_to_signal(Team* team, bool schedulerLocked)
1334 {
1335 	// get the current user
1336 	uid_t currentUser = schedulerLocked
1337 		? thread_get_current_thread()->team->effective_uid
1338 		: geteuid();
1339 
1340 	// root is omnipotent -- in the other cases the current user must match the
1341 	// target team's
1342 	return currentUser == 0 || currentUser == team->effective_uid;
1343 }
1344 
1345 
/*!	Delivers a signal to the \a thread, but doesn't handle the signal -- it just
	makes sure the thread gets the signal, i.e. unblocks it if needed.

	The caller must hold the scheduler lock.

	\param thread The thread the signal shall be delivered to.
	\param signalNumber The number of the signal to be delivered. If \c 0, no
		actual signal will be delivered. Only delivery checks will be performed.
	\param signal If non-NULL the signal to be queued (has number
		\a signalNumber in this case). The caller transfers an object reference
		to this function. If \c NULL an unqueued signal will be delivered to the
		thread.
	\param flags A bitwise combination of any number of the following:
		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
			target thread the signal.
	\return \c B_OK, when the signal was delivered successfully, another error
		code otherwise.
*/
status_t
send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
	Signal* signal, uint32 flags)
{
	ASSERT(signal == NULL || signalNumber == signal->Number());

	T(SendSignal(thread->id, signalNumber, flags));

	// The caller transferred a reference to the signal to us. The reference
	// object releases it again on any early-return path below.
	BReference<Signal> signalReference(signal, true);

	if ((flags & B_CHECK_PERMISSION) != 0) {
		if (!has_permission_to_signal(thread->team, true))
			return EPERM;
	}

	// signal number 0 means "delivery checks only" -- nothing more to do
	if (signalNumber == 0)
		return B_OK;

	if (thread->team == team_get_kernel_team()) {
		// Signals to kernel threads will only wake them up
		if (thread->state == B_THREAD_SUSPENDED)
			scheduler_enqueue_in_run_queue(thread);
		return B_OK;
	}

	// queue either the given signal object or an unqueued signal by number
	if (signal != NULL)
		thread->AddPendingSignal(signal);
	else
		thread->AddPendingSignal(signalNumber);

	// the thread has the signal reference, now
	signalReference.Detach();

	// Signal-specific wake-up/interruption policy:
	switch (signalNumber) {
		case SIGKILL:
		{
			// If sent to a thread other than the team's main thread, also send
			// a SIGKILLTHR to the main thread to kill the team.
			Thread* mainThread = thread->team->main_thread;
			if (mainThread != NULL && mainThread != thread) {
				mainThread->AddPendingSignal(SIGKILLTHR);

				// wake up main thread
				if (mainThread->state == B_THREAD_SUSPENDED)
					scheduler_enqueue_in_run_queue(mainThread);
				else
					thread_interrupt(mainThread, true);

				update_thread_signals_flag(mainThread);
			}

			// supposed to fall through
		}
		case SIGKILLTHR:
			// Wake up suspended threads and interrupt waiting ones
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);
			else
				thread_interrupt(thread, true);
			break;

		case SIGNAL_CONTINUE_THREAD:
			// wake up thread, and interrupt its current syscall
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);

			atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
			break;

		case SIGCONT:
			// Wake up thread if it was suspended, otherwise interrupt it, if
			// the signal isn't blocked.
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);
			else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask) != 0)
				thread_interrupt(thread, false);

			// remove any pending stop signals
			thread->RemovePendingSignals(STOP_SIGNALS);
			break;

		default:
			// If the signal is not masked, interrupt the thread, if it is
			// currently waiting (interruptibly). SIGCHLD is always included
			// here, i.e. it interrupts the thread even when blocked.
			if ((thread->AllPendingSignals()
						& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD)))
					!= 0) {
				// Interrupt thread if it was waiting
				thread_interrupt(thread, false);
			}
			break;
	}

	// make the thread notice its pending signals
	update_thread_signals_flag(thread);

	return B_OK;
}
1462 
1463 
1464 /*!	Sends the given signal to the given thread.
1465 
1466 	The caller must not hold the scheduler lock.
1467 
1468 	\param thread The thread the signal shall be sent to.
1469 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1470 		actual signal will be delivered. Only delivery checks will be performed.
1471 		The given object will be copied. The caller retains ownership.
1472 	\param flags A bitwise combination of any number of the following:
1473 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1474 			target thread the signal.
1475 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1476 			woken up, the scheduler will be invoked. If set that will not be
1477 			done explicitly, but rescheduling can still happen, e.g. when the
1478 			current thread's time slice runs out.
1479 	\return \c B_OK, when the signal was delivered successfully, another error
1480 		code otherwise.
1481 */
1482 status_t
1483 send_signal_to_thread(Thread* thread, const Signal& signal, uint32 flags)
1484 {
1485 	// Clone the signal -- the clone will be queued. If something fails and the
1486 	// caller doesn't require queuing, we will add an unqueued signal.
1487 	Signal* signalToQueue = NULL;
1488 	status_t error = Signal::CreateQueuable(signal,
1489 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1490 	if (error != B_OK)
1491 		return error;
1492 
1493 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1494 
1495 	error = send_signal_to_thread_locked(thread, signal.Number(), signalToQueue,
1496 		flags);
1497 	if (error != B_OK)
1498 		return error;
1499 
1500 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1501 		scheduler_reschedule_if_necessary_locked();
1502 
1503 	return B_OK;
1504 }
1505 
1506 
1507 /*!	Sends the given signal to the thread with the given ID.
1508 
1509 	The caller must not hold the scheduler lock.
1510 
1511 	\param threadID The ID of the thread the signal shall be sent to.
1512 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1513 		actual signal will be delivered. Only delivery checks will be performed.
1514 		The given object will be copied. The caller retains ownership.
1515 	\param flags A bitwise combination of any number of the following:
1516 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1517 			target thread the signal.
1518 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1519 			woken up, the scheduler will be invoked. If set that will not be
1520 			done explicitly, but rescheduling can still happen, e.g. when the
1521 			current thread's time slice runs out.
1522 	\return \c B_OK, when the signal was delivered successfully, another error
1523 		code otherwise.
1524 */
1525 status_t
1526 send_signal_to_thread_id(thread_id threadID, const Signal& signal, uint32 flags)
1527 {
1528 	Thread* thread = Thread::Get(threadID);
1529 	if (thread == NULL)
1530 		return B_BAD_THREAD_ID;
1531 	BReference<Thread> threadReference(thread, true);
1532 
1533 	return send_signal_to_thread(thread, signal, flags);
1534 }
1535 
1536 
/*!	Sends the given signal to the given team.

	The caller must hold the scheduler lock.

	\param team The team the signal shall be sent to.
	\param signalNumber The number of the signal to be delivered. If \c 0, no
		actual signal will be delivered. Only delivery checks will be performed.
	\param signal If non-NULL the signal to be queued (has number
		\a signalNumber in this case). The caller transfers an object reference
		to this function. If \c NULL an unqueued signal will be delivered to the
		thread.
	\param flags A bitwise combination of any number of the following:
		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
			target thread the signal.
		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
			woken up, the scheduler will be invoked. If set that will not be
			done explicitly, but rescheduling can still happen, e.g. when the
			current thread's time slice runs out.
	\return \c B_OK, when the signal was delivered successfully, another error
		code otherwise.
*/
status_t
send_signal_to_team_locked(Team* team, uint32 signalNumber, Signal* signal,
	uint32 flags)
{
	ASSERT(signal == NULL || signalNumber == signal->Number());

	T(SendSignal(team->id, signalNumber, flags));

	// The caller transferred a reference to the signal to us. The reference
	// object releases it on any early-return path below.
	BReference<Signal> signalReference(signal, true);

	if ((flags & B_CHECK_PERMISSION) != 0) {
		if (!has_permission_to_signal(team, true))
			return EPERM;
	}

	// signal number 0 means "delivery checks only" -- nothing more to do
	if (signalNumber == 0)
		return B_OK;

	if (team == team_get_kernel_team()) {
		// signals to the kernel team are not allowed
		return EPERM;
	}

	// queue either the given signal object or an unqueued signal by number
	if (signal != NULL)
		team->AddPendingSignal(signal);
	else
		team->AddPendingSignal(signalNumber);

	// the team has the signal reference, now
	signalReference.Detach();

	// Signal-specific wake-up/interruption policy:
	switch (signalNumber) {
		case SIGKILL:
		case SIGKILLTHR:
		{
			// Also add a SIGKILLTHR to the main thread's signals and wake it
			// up/interrupt it, so we get this over with as soon as possible
			// (only the main thread shuts down the team).
			Thread* mainThread = team->main_thread;
			if (mainThread != NULL) {
				mainThread->AddPendingSignal(SIGKILLTHR);

				// wake up main thread
				if (mainThread->state == B_THREAD_SUSPENDED)
					scheduler_enqueue_in_run_queue(mainThread);
				else
					thread_interrupt(mainThread, true);
			}
			break;
		}

		case SIGCONT:
			// Wake up any suspended threads, interrupt the others, if they
			// don't block the signal.
			for (Thread* thread = team->thread_list; thread != NULL;
					thread = thread->team_next) {
				if (thread->state == B_THREAD_SUSPENDED) {
					scheduler_enqueue_in_run_queue(thread);
				} else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask)
						!= 0) {
					thread_interrupt(thread, false);
				}

				// remove any pending stop signals
				thread->RemovePendingSignals(STOP_SIGNALS);
			}

			// remove any pending team stop signals
			team->RemovePendingSignals(STOP_SIGNALS);
			break;

		case SIGSTOP:
		case SIGTSTP:
		case SIGTTIN:
		case SIGTTOU:
			// send the stop signal to all threads
			// TODO: Is that correct or should we only target the main thread?
			for (Thread* thread = team->thread_list; thread != NULL;
					thread = thread->team_next) {
				thread->AddPendingSignal(signalNumber);
			}

			// remove the stop signal from the team again
			if (signal != NULL) {
				// reclaim ownership of the signal object, since the team no
				// longer holds it pending
				team->RemovePendingSignal(signal);
				signalReference.SetTo(signal, true);
			} else
				team->RemovePendingSignal(signalNumber);

			// fall through to interrupt threads
		default:
			// Interrupt all interruptibly waiting threads, if the signal is
			// not masked. SIGCHLD is always considered, i.e. it interrupts
			// even when blocked.
			for (Thread* thread = team->thread_list; thread != NULL;
					thread = thread->team_next) {
				sigset_t nonBlocked = ~thread->sig_block_mask
					| SIGNAL_TO_MASK(SIGCHLD);
				if ((thread->AllPendingSignals() & nonBlocked) != 0)
					thread_interrupt(thread, false);
			}
			break;
	}

	// make all of the team's threads notice their pending signals
	update_team_threads_signal_flag(team);

	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
		scheduler_reschedule_if_necessary_locked();

	return B_OK;
}
1669 
1670 
1671 /*!	Sends the given signal to the given team.
1672 
1673 	\param team The team the signal shall be sent to.
1674 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1675 		actual signal will be delivered. Only delivery checks will be performed.
1676 		The given object will be copied. The caller retains ownership.
1677 	\param flags A bitwise combination of any number of the following:
1678 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1679 			target thread the signal.
1680 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1681 			woken up, the scheduler will be invoked. If set that will not be
1682 			done explicitly, but rescheduling can still happen, e.g. when the
1683 			current thread's time slice runs out.
1684 	\return \c B_OK, when the signal was delivered successfully, another error
1685 		code otherwise.
1686 */
1687 status_t
1688 send_signal_to_team(Team* team, const Signal& signal, uint32 flags)
1689 {
1690 	// Clone the signal -- the clone will be queued. If something fails and the
1691 	// caller doesn't require queuing, we will add an unqueued signal.
1692 	Signal* signalToQueue = NULL;
1693 	status_t error = Signal::CreateQueuable(signal,
1694 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1695 	if (error != B_OK)
1696 		return error;
1697 
1698 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1699 
1700 	return send_signal_to_team_locked(team, signal.Number(), signalToQueue,
1701 		flags);
1702 }
1703 
1704 
1705 /*!	Sends the given signal to the team with the given ID.
1706 
1707 	\param teamID The ID of the team the signal shall be sent to.
1708 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1709 		actual signal will be delivered. Only delivery checks will be performed.
1710 		The given object will be copied. The caller retains ownership.
1711 	\param flags A bitwise combination of any number of the following:
1712 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1713 			target thread the signal.
1714 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1715 			woken up, the scheduler will be invoked. If set that will not be
1716 			done explicitly, but rescheduling can still happen, e.g. when the
1717 			current thread's time slice runs out.
1718 	\return \c B_OK, when the signal was delivered successfully, another error
1719 		code otherwise.
1720 */
1721 status_t
1722 send_signal_to_team_id(team_id teamID, const Signal& signal, uint32 flags)
1723 {
1724 	// get the team
1725 	Team* team = Team::Get(teamID);
1726 	if (team == NULL)
1727 		return B_BAD_TEAM_ID;
1728 	BReference<Team> teamReference(team, true);
1729 
1730 	return send_signal_to_team(team, signal, flags);
1731 }
1732 
1733 
1734 /*!	Sends the given signal to the given process group.
1735 
1736 	The caller must hold the process group's lock. Interrupts must be enabled.
1737 
1738 	\param group The the process group the signal shall be sent to.
1739 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1740 		actual signal will be delivered. Only delivery checks will be performed.
1741 		The given object will be copied. The caller retains ownership.
1742 	\param flags A bitwise combination of any number of the following:
1743 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1744 			target thread the signal.
1745 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1746 			woken up, the scheduler will be invoked. If set that will not be
1747 			done explicitly, but rescheduling can still happen, e.g. when the
1748 			current thread's time slice runs out.
1749 	\return \c B_OK, when the signal was delivered successfully, another error
1750 		code otherwise.
1751 */
1752 status_t
1753 send_signal_to_process_group_locked(ProcessGroup* group, const Signal& signal,
1754 	uint32 flags)
1755 {
1756 	T(SendSignal(-group->id, signal.Number(), flags));
1757 
1758 	bool firstTeam = true;
1759 
1760 	for (Team* team = group->teams; team != NULL; team = team->group_next) {
1761 		status_t error = send_signal_to_team(team, signal,
1762 			flags | B_DO_NOT_RESCHEDULE);
1763 		// If sending to the first team in the group failed, let the whole call
1764 		// fail.
1765 		if (firstTeam) {
1766 			if (error != B_OK)
1767 				return error;
1768 			firstTeam = false;
1769 		}
1770 	}
1771 
1772 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1773 		scheduler_reschedule_if_necessary();
1774 
1775 	return B_OK;
1776 }
1777 
1778 
1779 /*!	Sends the given signal to the process group specified by the given ID.
1780 
1781 	The caller must not hold any process group, team, or thread lock. Interrupts
1782 	must be enabled.
1783 
1784 	\param groupID The ID of the process group the signal shall be sent to.
1785 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1786 		actual signal will be delivered. Only delivery checks will be performed.
1787 		The given object will be copied. The caller retains ownership.
1788 	\param flags A bitwise combination of any number of the following:
1789 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1790 			target thread the signal.
1791 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1792 			woken up, the scheduler will be invoked. If set that will not be
1793 			done explicitly, but rescheduling can still happen, e.g. when the
1794 			current thread's time slice runs out.
1795 	\return \c B_OK, when the signal was delivered successfully, another error
1796 		code otherwise.
1797 */
1798 status_t
1799 send_signal_to_process_group(pid_t groupID, const Signal& signal, uint32 flags)
1800 {
1801 	ProcessGroup* group = ProcessGroup::Get(groupID);
1802 	if (group == NULL)
1803 		return B_BAD_TEAM_ID;
1804 	BReference<ProcessGroup> groupReference(group);
1805 
1806 	T(SendSignal(-group->id, signal.Number(), flags));
1807 
1808 	AutoLocker<ProcessGroup> groupLocker(group);
1809 
1810 	status_t error = send_signal_to_process_group_locked(group, signal,
1811 		flags | B_DO_NOT_RESCHEDULE);
1812 	if (error != B_OK)
1813 		return error;
1814 
1815 	groupLocker.Unlock();
1816 
1817 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1818 		scheduler_reschedule_if_necessary();
1819 
1820 	return B_OK;
1821 }
1822 
1823 
1824 static status_t
1825 send_signal_internal(pid_t id, uint signalNumber, union sigval userValue,
1826 	uint32 flags)
1827 {
1828 	if (signalNumber > MAX_SIGNAL_NUMBER)
1829 		return B_BAD_VALUE;
1830 
1831 	Thread* thread = thread_get_current_thread();
1832 
1833 	Signal signal(signalNumber,
1834 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
1835 		B_OK, thread->team->id);
1836 		// Note: SI_USER/SI_QUEUE is not correct, if called from within the
1837 		// kernel (or a driver), but we don't have any info here.
1838 	signal.SetUserValue(userValue);
1839 
1840 	// If id is > 0, send the signal to the respective thread.
1841 	if (id > 0)
1842 		return send_signal_to_thread_id(id, signal, flags);
1843 
1844 	// If id == 0, send the signal to the current thread.
1845 	if (id == 0)
1846 		return send_signal_to_thread(thread, signal, flags);
1847 
1848 	// If id == -1, send the signal to all teams the calling team has permission
1849 	// to send signals to.
1850 	if (id == -1) {
1851 		// TODO: Implement correctly!
1852 		// currently only send to the current team
1853 		return send_signal_to_team_id(thread->team->id, signal, flags);
1854 	}
1855 
1856 	// Send a signal to the specified process group (the absolute value of the
1857 	// id).
1858 	return send_signal_to_process_group(-id, signal, flags);
1859 }
1860 
1861 
1862 int
1863 send_signal_etc(pid_t id, uint signalNumber, uint32 flags)
1864 {
1865 	// a dummy user value
1866 	union sigval userValue;
1867 	userValue.sival_ptr = NULL;
1868 
1869 	return send_signal_internal(id, signalNumber, userValue, flags);
1870 }
1871 
1872 
/*!	BeOS compatible API: sends \a signal to the thread with ID \a threadID.
	Equivalent to send_signal_etc() with \c flags == 0.
*/
int
send_signal(pid_t threadID, uint signal)
{
	// The BeBook states that this function wouldn't be exported
	// for drivers, but, of course, it's wrong.
	return send_signal_etc(threadID, signal, 0);
}
1880 
1881 
1882 static int
1883 sigprocmask_internal(int how, const sigset_t* set, sigset_t* oldSet)
1884 {
1885 	Thread* thread = thread_get_current_thread();
1886 
1887 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1888 
1889 	sigset_t oldMask = thread->sig_block_mask;
1890 
1891 	if (set != NULL) {
1892 		T(SigProcMask(how, *set));
1893 
1894 		switch (how) {
1895 			case SIG_BLOCK:
1896 				thread->sig_block_mask |= *set & BLOCKABLE_SIGNALS;
1897 				break;
1898 			case SIG_UNBLOCK:
1899 				thread->sig_block_mask &= ~*set;
1900 				break;
1901 			case SIG_SETMASK:
1902 				thread->sig_block_mask = *set & BLOCKABLE_SIGNALS;
1903 				break;
1904 			default:
1905 				return B_BAD_VALUE;
1906 		}
1907 
1908 		update_current_thread_signals_flag();
1909 	}
1910 
1911 	if (oldSet != NULL)
1912 		*oldSet = oldMask;
1913 
1914 	return B_OK;
1915 }
1916 
1917 
/*!	POSIX sigprocmask(): delegates to sigprocmask_internal(), converting the
	returned status to the errno/-1 convention via RETURN_AND_SET_ERRNO.
*/
int
sigprocmask(int how, const sigset_t* set, sigset_t* oldSet)
{
	RETURN_AND_SET_ERRNO(sigprocmask_internal(how, set, oldSet));
}
1923 
1924 
1925 /*!	\brief Like sigaction(), but returning the error instead of setting errno.
1926 */
1927 static status_t
1928 sigaction_internal(int signal, const struct sigaction* act,
1929 	struct sigaction* oldAction)
1930 {
1931 	if (signal < 1 || signal > MAX_SIGNAL_NUMBER
1932 		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
1933 		return B_BAD_VALUE;
1934 
1935 	// get and lock the team
1936 	Team* team = thread_get_current_thread()->team;
1937 	TeamLocker teamLocker(team);
1938 
1939 	struct sigaction& teamHandler = team->SignalActionFor(signal);
1940 	if (oldAction) {
1941 		// save previous sigaction structure
1942 		*oldAction = teamHandler;
1943 	}
1944 
1945 	if (act) {
1946 		T(SigAction(signal, act));
1947 
1948 		// set new sigaction structure
1949 		teamHandler = *act;
1950 		teamHandler.sa_mask &= BLOCKABLE_SIGNALS;
1951 	}
1952 
1953 	// Remove pending signal if it should now be ignored and remove pending
1954 	// signal for those signals whose default action is to ignore them.
1955 	if ((act && act->sa_handler == SIG_IGN)
1956 		|| (act && act->sa_handler == SIG_DFL
1957 			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0)) {
1958 		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
1959 
1960 		team->RemovePendingSignal(signal);
1961 
1962 		for (Thread* thread = team->thread_list; thread != NULL;
1963 				thread = thread->team_next) {
1964 			thread->RemovePendingSignal(signal);
1965 		}
1966 	}
1967 
1968 	return B_OK;
1969 }
1970 
1971 
/*!	POSIX sigaction(): delegates to sigaction_internal(), converting the
	returned status to the errno/-1 convention via RETURN_AND_SET_ERRNO.
*/
int
sigaction(int signal, const struct sigaction* act, struct sigaction* oldAction)
{
	RETURN_AND_SET_ERRNO(sigaction_internal(signal, act, oldAction));
}
1977 
1978 
/*!	Wait for the specified signals, and return the information for the retrieved
	signal in \a info.
	The \c flags and \c timeout combination must either define an infinite
	timeout (no timeout flags set), an absolute timeout (\c B_ABSOLUTE_TIMEOUT
	set), or a relative timeout \code <= 0 \endcode (\c B_RELATIVE_TIMEOUT set).
*/
static status_t
sigwait_internal(const sigset_t* set, siginfo_t* info, uint32 flags,
	bigtime_t timeout)
{
	// restrict mask to blockable signals
	sigset_t requestedSignals = *set & BLOCKABLE_SIGNALS;

	// make always interruptable
	flags |= B_CAN_INTERRUPT;

	// check whether we are allowed to wait at all
	bool canWait = (flags & B_RELATIVE_TIMEOUT) == 0 || timeout > 0;

	Thread* thread = thread_get_current_thread();

	InterruptsSpinLocker schedulerLocker(gSchedulerLock);

	bool timedOut = false;
	status_t error = B_OK;

	// Loop until a requested signal arrives, a kill/non-blocked signal forces
	// us out, or the timeout expires.
	while (!timedOut) {
		sigset_t pendingSignals = thread->AllPendingSignals();

		// If a kill signal is pending, just bail out.
		if ((pendingSignals & KILL_SIGNALS) != 0)
			return B_INTERRUPTED;

		if ((pendingSignals & requestedSignals) != 0) {
			// get signal with the highest priority
			Signal stackSignal;
			Signal* signal = dequeue_thread_or_team_signal(thread,
				requestedSignals, stackSignal);
			ASSERT(signal != NULL);

			// notify the debugger (if any) after the lock has been dropped
			SignalHandledCaller signalHandledCaller(signal);
			schedulerLocker.Unlock();

			// fill in the caller's siginfo from the dequeued signal
			info->si_signo = signal->Number();
			info->si_code = signal->SignalCode();
			info->si_errno = signal->ErrorCode();
			info->si_pid = signal->SendingProcess();
			info->si_uid = signal->SendingUser();
			info->si_addr = signal->Address();
			info->si_status = signal->Status();
			info->si_band = signal->PollBand();
			info->si_value = signal->UserValue();

			return B_OK;
		}

		if (!canWait)
			return B_WOULD_BLOCK;

		sigset_t blockedSignals = thread->sig_block_mask;
		if ((pendingSignals & ~blockedSignals) != 0) {
			// Non-blocked signals are pending -- return to let them be handled.
			return B_INTERRUPTED;
		}

		// No signals yet. Set the signal block mask to not include the
		// requested mask and wait until we're interrupted.
		thread->sig_block_mask = blockedSignals & ~requestedSignals;

		while (!has_signals_pending(thread)) {
			thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SIGNAL,
				NULL);

			if ((flags & B_ABSOLUTE_TIMEOUT) != 0) {
				error = thread_block_with_timeout_locked(flags, timeout);
				if (error == B_WOULD_BLOCK || error == B_TIMED_OUT) {
					error = B_WOULD_BLOCK;
						// POSIX requires EAGAIN (B_WOULD_BLOCK) on timeout
					timedOut = true;
					break;
				}
			} else
				thread_block_locked(thread);
		}

		// restore the original block mask
		thread->sig_block_mask = blockedSignals;

		update_current_thread_signals_flag();
	}

	// we get here only when timed out
	return error;
}
2073 
2074 
/*!	Replace the current signal block mask and wait for any event to happen.
	Before returning, the original signal block mask is reinstantiated.
	\param _mask The temporary signal block mask; restricted to blockable
		signals.
	\return Always \c B_INTERRUPTED -- per POSIX, sigsuspend() never succeeds;
		any pending signal is delivered on the way back to userland.
*/
static status_t
sigsuspend_internal(const sigset_t* _mask)
{
	sigset_t mask = *_mask & BLOCKABLE_SIGNALS;

	T(SigSuspend(mask));

	Thread* thread = thread_get_current_thread();

	InterruptsSpinLocker schedulerLocker(gSchedulerLock);

	// Set the new block mask and block until interrupted. We might be here
	// after a syscall restart, in which case sigsuspend_original_unblocked_mask
	// will still be set.
	sigset_t oldMask = thread->sigsuspend_original_unblocked_mask != 0
		? ~thread->sigsuspend_original_unblocked_mask : thread->sig_block_mask;
	thread->sig_block_mask = mask & BLOCKABLE_SIGNALS;

	update_current_thread_signals_flag();

	// block until a non-blocked signal becomes pending
	while (!has_signals_pending(thread)) {
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SIGNAL, NULL);
		thread_block_locked(thread);
	}

	// Set sigsuspend_original_unblocked_mask (guaranteed to be non-0 due to
	// BLOCKABLE_SIGNALS). This will indicate to handle_signals() that it is
	// called after a _user_sigsuspend(). It will reset the field after invoking
	// a signal handler, or restart the syscall, if there wasn't anything to
	// handle anymore (e.g. because another thread was faster).
	thread->sigsuspend_original_unblocked_mask = ~oldMask;

	T(SigSuspendDone());

	// we're not supposed to actually succeed
	return B_INTERRUPTED;
}
2116 
2117 
2118 static status_t
2119 sigpending_internal(sigset_t* set)
2120 {
2121 	Thread* thread = thread_get_current_thread();
2122 
2123 	if (set == NULL)
2124 		return B_BAD_VALUE;
2125 
2126 	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
2127 
2128 	*set = thread->AllPendingSignals() & thread->sig_block_mask;
2129 
2130 	return B_OK;
2131 }
2132 
2133 
2134 // #pragma mark - syscalls
2135 
2136 
2137 /*!	Sends a signal to a thread, process, or process group.
2138 	\param id Specifies the ID of the target:
2139 		- \code id > 0 \endcode: If \a toThread is \c true, the target is the
2140 			thread with ID \a id, otherwise the team with the ID \a id.
2141 		- \code id == 0 \endcode: If toThread is \c true, the target is the
2142 			current thread, otherwise the current team.
2143 		- \code id == -1 \endcode: The target are all teams the current team has
2144 			permission to send signals to. Currently not implemented correctly.
2145 		- \code id < -1 \endcode: The target are is the process group with ID
2146 			\c -id.
2147 	\param signalNumber The signal number. \c 0 to just perform checks, but not
2148 		actually send any signal.
2149 	\param userUserValue A user value to be associated with the signal. Might be
2150 		ignored unless signal queuing is forced. Can be \c NULL.
2151 	\param flags A bitwise or of any number of the following:
2152 		- \c SIGNAL_FLAG_QUEUING_REQUIRED: Signal queuing is required. Fail
2153 			instead of falling back to unqueued signals, when queuing isn't
2154 			possible.
2155 		- \c SIGNAL_FLAG_SEND_TO_THREAD: Interpret the the given ID as a
2156 			\c thread_id rather than a \c team_id. Ignored when the \a id is
2157 			\code < 0 \endcode -- then the target is a process group.
2158 	\return \c B_OK on success, another error code otherwise.
2159 */
2160 status_t
2161 _user_send_signal(int32 id, uint32 signalNumber,
2162 	const union sigval* userUserValue, uint32 flags)
2163 {
2164 	// restrict flags to the allowed ones and add B_CHECK_PERMISSION
2165 	flags &= SIGNAL_FLAG_QUEUING_REQUIRED | SIGNAL_FLAG_SEND_TO_THREAD;
2166 	flags |= B_CHECK_PERMISSION;
2167 
2168 	// Copy the user value from userland. If not given, use a dummy value.
2169 	union sigval userValue;
2170 	if (userUserValue != NULL) {
2171 		if (!IS_USER_ADDRESS(userUserValue)
2172 			|| user_memcpy(&userValue, userUserValue, sizeof(userValue))
2173 				!= B_OK) {
2174 			return B_BAD_ADDRESS;
2175 		}
2176 	} else
2177 		userValue.sival_ptr = NULL;
2178 
2179 	// If to be sent to a thread, delegate to send_signal_internal(). Also do
2180 	// that when id < 0, since in this case the semantics is the same as well.
2181 	if ((flags & SIGNAL_FLAG_SEND_TO_THREAD) != 0 || id < 0)
2182 		return send_signal_internal(id, signalNumber, userValue, flags);
2183 
2184 	// kill() semantics for id >= 0
2185 	if (signalNumber > MAX_SIGNAL_NUMBER)
2186 		return B_BAD_VALUE;
2187 
2188 	Thread* thread = thread_get_current_thread();
2189 
2190 	Signal signal(signalNumber,
2191 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
2192 		B_OK, thread->team->id);
2193 	signal.SetUserValue(userValue);
2194 
2195 	// send to current team for id == 0, otherwise to the respective team
2196 	return send_signal_to_team_id(id == 0 ? team_get_current_team_id() : id,
2197 		signal, flags);
2198 }
2199 
2200 
2201 status_t
2202 _user_set_signal_mask(int how, const sigset_t *userSet, sigset_t *userOldSet)
2203 {
2204 	sigset_t set, oldSet;
2205 	status_t status;
2206 
2207 	if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
2208 		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
2209 				sizeof(sigset_t)) < B_OK))
2210 		return B_BAD_ADDRESS;
2211 
2212 	status = sigprocmask_internal(how, userSet ? &set : NULL,
2213 		userOldSet ? &oldSet : NULL);
2214 
2215 	// copy old set if asked for
2216 	if (status >= B_OK && userOldSet != NULL
2217 		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
2218 		return B_BAD_ADDRESS;
2219 
2220 	return status;
2221 }
2222 
2223 
2224 status_t
2225 _user_sigaction(int signal, const struct sigaction *userAction,
2226 	struct sigaction *userOldAction)
2227 {
2228 	struct sigaction act, oact;
2229 	status_t status;
2230 
2231 	if ((userAction != NULL && user_memcpy(&act, userAction,
2232 				sizeof(struct sigaction)) < B_OK)
2233 		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction,
2234 				sizeof(struct sigaction)) < B_OK))
2235 		return B_BAD_ADDRESS;
2236 
2237 	status = sigaction_internal(signal, userAction ? &act : NULL,
2238 		userOldAction ? &oact : NULL);
2239 
2240 	// only copy the old action if a pointer has been given
2241 	if (status >= B_OK && userOldAction != NULL
2242 		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
2243 		return B_BAD_ADDRESS;
2244 
2245 	return status;
2246 }
2247 
2248 
/*!	Implements the sigtimedwait()/sigwaitinfo() syscalls: waits for one of the
	signals in \a userSet and reports its details via \a userInfo.
	\param userSet Userland pointer to the set of signals to wait for.
	\param userInfo Optional userland pointer receiving the signal info.
	\param flags Timeout flags (\c B_ABSOLUTE_TIMEOUT / \c B_RELATIVE_TIMEOUT).
	\param timeout The timeout matching \a flags.
*/
status_t
_user_sigwait(const sigset_t *userSet, siginfo_t *userInfo, uint32 flags,
	bigtime_t timeout)
{
	// copy userSet to stack
	sigset_t set;
	if (userSet == NULL || !IS_USER_ADDRESS(userSet)
		|| user_memcpy(&set, userSet, sizeof(sigset_t)) != B_OK) {
		return B_BAD_ADDRESS;
	}

	// userInfo is optional, but must be a user address when given
	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	// normalizes relative timeouts for a potential syscall restart
	syscall_restart_handle_timeout_pre(flags, timeout);

	flags |= B_CAN_INTERRUPT;

	siginfo_t info;
	status_t status = sigwait_internal(&set, &info, flags, timeout);
	if (status == B_OK) {
		// copy the info back to userland, if userInfo is non-NULL
		if (userInfo != NULL)
			status = user_memcpy(userInfo, &info, sizeof(info));
	} else if (status == B_INTERRUPTED) {
		// make sure we'll be restarted
		Thread* thread = thread_get_current_thread();
		atomic_or(&thread->flags, THREAD_FLAGS_ALWAYS_RESTART_SYSCALL);
	}

	return syscall_restart_handle_timeout_post(status, timeout);
}
2282 
2283 
2284 status_t
2285 _user_sigsuspend(const sigset_t *userMask)
2286 {
2287 	sigset_t mask;
2288 
2289 	if (userMask == NULL)
2290 		return B_BAD_VALUE;
2291 	if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
2292 		return B_BAD_ADDRESS;
2293 
2294 	return sigsuspend_internal(&mask);
2295 }
2296 
2297 
2298 status_t
2299 _user_sigpending(sigset_t *userSet)
2300 {
2301 	sigset_t set;
2302 	int status;
2303 
2304 	if (userSet == NULL)
2305 		return B_BAD_VALUE;
2306 	if (!IS_USER_ADDRESS(userSet))
2307 		return B_BAD_ADDRESS;
2308 
2309 	status = sigpending_internal(&set);
2310 	if (status == B_OK
2311 		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
2312 		return B_BAD_ADDRESS;
2313 
2314 	return status;
2315 }
2316 
2317 
2318 status_t
2319 _user_set_signal_stack(const stack_t* newUserStack, stack_t* oldUserStack)
2320 {
2321 	Thread *thread = thread_get_current_thread();
2322 	struct stack_t newStack, oldStack;
2323 	bool onStack = false;
2324 
2325 	if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
2326 				sizeof(stack_t)) < B_OK)
2327 		|| (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
2328 				sizeof(stack_t)) < B_OK))
2329 		return B_BAD_ADDRESS;
2330 
2331 	if (thread->signal_stack_enabled) {
2332 		// determine whether or not the user thread is currently
2333 		// on the active signal stack
2334 		onStack = arch_on_signal_stack(thread);
2335 	}
2336 
2337 	if (oldUserStack != NULL) {
2338 		oldStack.ss_sp = (void *)thread->signal_stack_base;
2339 		oldStack.ss_size = thread->signal_stack_size;
2340 		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
2341 			| (onStack ? SS_ONSTACK : 0);
2342 	}
2343 
2344 	if (newUserStack != NULL) {
2345 		// no flags other than SS_DISABLE are allowed
2346 		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
2347 			return B_BAD_VALUE;
2348 
2349 		if ((newStack.ss_flags & SS_DISABLE) == 0) {
2350 			// check if the size is valid
2351 			if (newStack.ss_size < MINSIGSTKSZ)
2352 				return B_NO_MEMORY;
2353 			if (onStack)
2354 				return B_NOT_ALLOWED;
2355 			if (!IS_USER_ADDRESS(newStack.ss_sp))
2356 				return B_BAD_VALUE;
2357 
2358 			thread->signal_stack_base = (addr_t)newStack.ss_sp;
2359 			thread->signal_stack_size = newStack.ss_size;
2360 			thread->signal_stack_enabled = true;
2361 		} else
2362 			thread->signal_stack_enabled = false;
2363 	}
2364 
2365 	// only copy the old stack info if a pointer has been given
2366 	if (oldUserStack != NULL
2367 		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
2368 		return B_BAD_ADDRESS;
2369 
2370 	return B_OK;
2371 }
2372 
2373 
/*!	Restores the environment of a function that was interrupted by a signal
	handler call.
	This syscall is invoked when a signal handler function returns. It
	deconstructs the signal handler frame and restores the stack and register
	state of the function that was interrupted by a signal. The syscall is
	therefore somewhat unusual, since it does not return to the calling
	function, but to someplace else. In case the signal interrupted a syscall,
	it will appear as if the syscall just returned. That is also the reason, why
	this syscall returns an int64, since it needs to return the value the
	interrupted syscall returns, which is potentially 64 bits wide.

	\param userSignalFrameData The signal frame data created for the signal
		handler. Potentially some data (e.g. registers) have been modified by
		the signal handler.
	\return In case the signal interrupted a syscall, the return value of that
		syscall. Otherwise (i.e. in case of a (hardware) interrupt/exception)
		the value might need to be tailored such that after a return to userland
		the interrupted environment is identical to the interrupted one (unless
		explicitly modified). E.g. for x86 to achieve that, the return value
		must contain the eax|edx values of the interrupted environment.
*/
int64
_user_restore_signal_frame(struct signal_frame_data* userSignalFrameData)
{
	syscall_64_bit_return_value();

	Thread *thread = thread_get_current_thread();

	// copy the signal frame data from userland
	signal_frame_data signalFrameData;
	if (userSignalFrameData == NULL || !IS_USER_ADDRESS(userSignalFrameData)
		|| user_memcpy(&signalFrameData, userSignalFrameData,
			sizeof(signalFrameData)) != B_OK) {
		// We failed to copy the signal frame data from userland. This is a
		// serious problem. Kill the thread.
		dprintf("_user_restore_signal_frame(): thread %" B_PRId32 ": Failed to "
			"copy signal frame data (%p) from userland. Killing thread...\n",
			thread->id, userSignalFrameData);
		kill_thread(thread->id);
		return B_BAD_ADDRESS;
	}

	// restore the signal block mask (the handler may have changed uc_sigmask;
	// non-blockable signals are filtered out)
	InterruptsSpinLocker schedulerLocker(gSchedulerLock);

	thread->sig_block_mask
		= signalFrameData.context.uc_sigmask & BLOCKABLE_SIGNALS;
	update_current_thread_signals_flag();

	schedulerLocker.Unlock();

	// restore the syscall restart related thread flags and the syscall restart
	// parameters
	atomic_and(&thread->flags,
		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
	atomic_or(&thread->flags, signalFrameData.thread_flags
		& (THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));

	memcpy(thread->syscall_restart.parameters,
		signalFrameData.syscall_restart_parameters,
		sizeof(thread->syscall_restart.parameters));

	// restore the previously stored Thread::user_signal_context
	// (refuse a kernel-space link pointer -- it must point into userland)
	thread->user_signal_context = signalFrameData.context.uc_link;
	if (thread->user_signal_context != NULL
		&& !IS_USER_ADDRESS(thread->user_signal_context)) {
		thread->user_signal_context = NULL;
	}

	// let the architecture specific code restore the registers
	return arch_restore_signal_frame(&signalFrameData);
}
2446