xref: /haiku/src/system/kernel/signal.cpp (revision 02354704729d38c3b078c696adc1bbbd33cbcf72)
1 /*
2  * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
3  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
4  * Copyright 2011-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
5  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
6  * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
7  *
8  * Distributed under the terms of the MIT License.
9  */
10 
11 
12 /*! POSIX signal handling routines */
13 
14 
15 #include <ksignal.h>
16 
17 #include <errno.h>
18 #include <stddef.h>
19 #include <string.h>
20 
21 #include <OS.h>
22 #include <KernelExport.h>
23 
24 #include <cpu.h>
25 #include <core_dump.h>
26 #include <debug.h>
27 #include <kernel.h>
28 #include <kscheduler.h>
29 #include <sem.h>
30 #include <syscall_restart.h>
31 #include <syscall_utils.h>
32 #include <team.h>
33 #include <thread.h>
34 #include <tracing.h>
35 #include <user_debugger.h>
36 #include <user_thread.h>
37 #include <util/AutoLock.h>
38 #include <util/ThreadAutoLock.h>
39 
40 
41 //#define TRACE_SIGNAL
42 #ifdef TRACE_SIGNAL
43 #	define TRACE(x) dprintf x
44 #else
45 #	define TRACE(x) ;
46 #endif
47 
48 
49 #define BLOCKABLE_SIGNALS	\
50 	(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)	\
51 	| SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD)	\
52 	| SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD)	\
53 	| SIGNAL_TO_MASK(SIGNAL_CANCEL_THREAD)))
54 #define STOP_SIGNALS \
55 	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
56 	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
57 #define CONTINUE_SIGNALS \
58 	(SIGNAL_TO_MASK(SIGCONT) | SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD) \
59 	| SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD))
60 #define DEFAULT_IGNORE_SIGNALS \
61 	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
62 	| SIGNAL_TO_MASK(SIGCONT) \
63 	| SIGNAL_RANGE_TO_MASK(SIGNAL_REALTIME_MIN, SIGNAL_REALTIME_MAX))
64 #define NON_DEFERRABLE_SIGNALS	\
65 	(KILL_SIGNALS				\
66 	| SIGNAL_TO_MASK(SIGILL)	\
67 	| SIGNAL_TO_MASK(SIGFPE)	\
68 	| SIGNAL_TO_MASK(SIGSEGV))
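
// Illustrative only: these masks are combined with a thread's signal sets via
// plain bit operations, mirroring the checks in handle_signals() below. Here
// "pending" stands for a hypothetical sigset_t of pending signals:
//
//	bool onlyDeferrable = (pending & NON_DEFERRABLE_SIGNALS) == 0;
//	bool wantsToStop = (pending & STOP_SIGNALS) != 0;
//	bool mayBeBlocked = (pending & BLOCKABLE_SIGNALS) != 0;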
69 
70 
71 static const struct {
72 	const char*	name;
73 	int32		priority;
74 } kSignalInfos[__MAX_SIGNO + 1] = {
75 	{"NONE",			-1},
76 	{"HUP",				0},
77 	{"INT",				0},
78 	{"QUIT",			0},
79 	{"ILL",				0},
80 	{"CHLD",			0},
81 	{"ABRT",			0},
82 	{"PIPE",			0},
83 	{"FPE",				0},
84 	{"KILL",			100},
85 	{"STOP",			0},
86 	{"SEGV",			0},
87 	{"CONT",			0},
88 	{"TSTP",			0},
89 	{"ALRM",			0},
90 	{"TERM",			0},
91 	{"TTIN",			0},
92 	{"TTOU",			0},
93 	{"USR1",			0},
94 	{"USR2",			0},
95 	{"WINCH",			0},
96 	{"KILLTHR",			100},
97 	{"TRAP",			0},
98 	{"POLL",			0},
99 	{"PROF",			0},
100 	{"SYS",				0},
101 	{"URG",				0},
102 	{"VTALRM",			0},
103 	{"XCPU",			0},
104 	{"XFSZ",			0},
105 	{"SIGBUS",			0},
106 	{"SIGRESERVED1",	0},
107 	{"SIGRESERVED2",	0},
108 	{"SIGRT1",			8},
109 	{"SIGRT2",			7},
110 	{"SIGRT3",			6},
111 	{"SIGRT4",			5},
112 	{"SIGRT5",			4},
113 	{"SIGRT6",			3},
114 	{"SIGRT7",			2},
115 	{"SIGRT8",			1},
116 	{"invalid 41",		0},
117 	{"invalid 42",		0},
118 	{"invalid 43",		0},
119 	{"invalid 44",		0},
120 	{"invalid 45",		0},
121 	{"invalid 46",		0},
122 	{"invalid 47",		0},
123 	{"invalid 48",		0},
124 	{"invalid 49",		0},
125 	{"invalid 50",		0},
126 	{"invalid 51",		0},
127 	{"invalid 52",		0},
128 	{"invalid 53",		0},
129 	{"invalid 54",		0},
130 	{"invalid 55",		0},
131 	{"invalid 56",		0},
132 	{"invalid 57",		0},
133 	{"invalid 58",		0},
134 	{"invalid 59",		0},
135 	{"invalid 60",		0},
136 	{"invalid 61",		0},
137 	{"invalid 62",		0},
138 	{"CANCEL_THREAD",	0},
139 	{"CONTINUE_THREAD",	0}	// priority must be <= that of SIGSTOP
140 };
141 
142 
143 static inline const char*
144 signal_name(uint32 number)
145 {
146 	return number <= __MAX_SIGNO ? kSignalInfos[number].name : "invalid";
147 }
148 
149 
150 // #pragma mark - SignalHandledCaller
151 
152 
153 struct SignalHandledCaller {
154 	SignalHandledCaller(Signal* signal)
155 		:
156 		fSignal(signal)
157 	{
158 	}
159 
160 	~SignalHandledCaller()
161 	{
162 		Done();
163 	}
164 
165 	void Done()
166 	{
167 		if (fSignal != NULL) {
168 			fSignal->Handled();
169 			fSignal = NULL;
170 		}
171 	}
172 
173 private:
174 	Signal*	fSignal;
175 };
176 
177 
178 // #pragma mark - QueuedSignalsCounter
179 
180 
181 /*!	Creates a counter with the given limit.
182 	The limit defines the maximum the counter may reach. Since the
183 	BReferenceable's reference count is used, it is assumed that the owning
184 	team holds a reference and the reference count is one greater than the
185 	counter value.
186 	\param limit The maximum allowed value the counter may have. When
187 		\code < 0 \endcode, the value is not limited.
188 */
189 QueuedSignalsCounter::QueuedSignalsCounter(int32 limit)
190 	:
191 	fLimit(limit)
192 {
193 }
194 
195 
196 /*!	Increments the counter, if the limit allows that.
197 	\return \c true, if incrementing the counter succeeded, \c false otherwise.
198 */
199 bool
200 QueuedSignalsCounter::Increment()
201 {
202 	// no limit => no problem
203 	if (fLimit < 0) {
204 		AcquireReference();
205 		return true;
206 	}
207 
208 	// Increment the reference count manually, so we can check atomically. We
209 	// compare the old value against fLimit with '>', since our (primary)
210 	// owner holds one reference that we don't want to count.
211 	if (atomic_add(&fReferenceCount, 1) > fLimit) {
212 		ReleaseReference();
213 		return false;
214 	}
215 
216 	return true;
217 }
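

/*	Usage sketch (mirroring Signal::CreateQueuable() below; "signal" is a
	placeholder for the signal to clone): increment the owning team's counter
	before queuing a signal and decrement it again if anything else fails.
	\code
	QueuedSignalsCounter* counter
		= thread_get_current_thread()->team->QueuedSignalsCounter();
	if (counter->Increment()) {
		Signal* clone = new(std::nothrow) Signal(signal);
		if (clone == NULL)
			counter->Decrement();
	}
	\endcode
*/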
218 
219 
220 // #pragma mark - Signal
221 
222 
223 Signal::Signal()
224 	:
225 	fCounter(NULL),
226 	fPending(false)
227 {
228 }
229 
230 
231 Signal::Signal(const Signal& other)
232 	:
233 	fCounter(NULL),
234 	fNumber(other.fNumber),
235 	fSignalCode(other.fSignalCode),
236 	fErrorCode(other.fErrorCode),
237 	fSendingProcess(other.fSendingProcess),
238 	fSendingUser(other.fSendingUser),
239 	fStatus(other.fStatus),
240 	fPollBand(other.fPollBand),
241 	fAddress(other.fAddress),
242 	fUserValue(other.fUserValue),
243 	fPending(false)
244 {
245 }
246 
247 
248 Signal::Signal(uint32 number, int32 signalCode, int32 errorCode,
249 	pid_t sendingProcess)
250 	:
251 	fCounter(NULL),
252 	fNumber(number),
253 	fSignalCode(signalCode),
254 	fErrorCode(errorCode),
255 	fSendingProcess(sendingProcess),
256 	fSendingUser(getuid()),
257 	fStatus(0),
258 	fPollBand(0),
259 	fAddress(NULL),
260 	fPending(false)
261 {
262 	fUserValue.sival_ptr = NULL;
263 }
264 
265 
266 Signal::~Signal()
267 {
268 	if (fCounter != NULL)
269 		fCounter->ReleaseReference();
270 }
271 
272 
273 /*!	Creates a queuable clone of the given signal.
274 	Also enforces the current team's signal queuing limit.
275 
276 	\param signal The signal to clone.
277 	\param queuingRequired If \c true, the function will return an error code
278 		when creating the clone fails for any reason. Otherwise, the function
279 		will set \a _signalToQueue to \c NULL, but still return \c B_OK.
280 	\param _signalToQueue Return parameter. Set to the clone of the signal.
281 	\return When \c queuingRequired is \c false, always \c B_OK. Otherwise
282 		\c B_OK when creating the signal clone succeeds, or another error
283 		code when it fails.
284 */
285 /*static*/ status_t
286 Signal::CreateQueuable(const Signal& signal, bool queuingRequired,
287 	Signal*& _signalToQueue)
288 {
289 	_signalToQueue = NULL;
290 
291 	// If interrupts are disabled, we can't allocate a signal.
292 	if (!are_interrupts_enabled())
293 		return queuingRequired ? B_BAD_VALUE : B_OK;
294 
295 	// increment the queued signals counter
296 	QueuedSignalsCounter* counter
297 		= thread_get_current_thread()->team->QueuedSignalsCounter();
298 	if (!counter->Increment())
299 		return queuingRequired ? EAGAIN : B_OK;
300 
301 	// allocate the signal
302 	Signal* signalToQueue = new(std::nothrow) Signal(signal);
303 	if (signalToQueue == NULL) {
304 		counter->Decrement();
305 		return queuingRequired ? B_NO_MEMORY : B_OK;
306 	}
307 
308 	signalToQueue->fCounter = counter;
309 
310 	_signalToQueue = signalToQueue;
311 	return B_OK;
312 }
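

/*	Illustrative call pattern (compare send_signal_to_thread() below; "signal"
	and "flags" are placeholders): the returned clone, if any, is later passed
	to one of the *_locked() send functions, which take over the reference.
	\code
	Signal* signalToQueue = NULL;
	status_t error = Signal::CreateQueuable(signal,
		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
	// when queuing is not required, error is B_OK even if signalToQueue
	// is NULL
	\endcode
*/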
313 
314 void
315 Signal::SetTo(uint32 number)
316 {
317 	Team* team = thread_get_current_thread()->team;
318 
319 	fNumber = number;
320 	fSignalCode = SI_USER;
321 	fErrorCode = 0;
322 	fSendingProcess = team->id;
323 	fSendingUser = team->effective_uid;
324 	fStatus = 0;
325 	fPollBand = 0;
326 	fAddress = NULL;
327 	fUserValue.sival_ptr = NULL;
328 }
329 
330 
331 int32
332 Signal::Priority() const
333 {
334 	return kSignalInfos[fNumber].priority;
335 }
336 
337 
338 void
339 Signal::Handled()
340 {
341 	ReleaseReference();
342 }
343 
344 
345 void
346 Signal::LastReferenceReleased()
347 {
348 	if (are_interrupts_enabled())
349 		delete this;
350 	else
351 		deferred_delete(this);
352 }
353 
354 
355 // #pragma mark - PendingSignals
356 
357 
358 PendingSignals::PendingSignals()
359 	:
360 	fQueuedSignalsMask(0),
361 	fUnqueuedSignalsMask(0)
362 {
363 }
364 
365 
366 PendingSignals::~PendingSignals()
367 {
368 	Clear();
369 }
370 
371 
372 /*!	Returns the priority of the highest priority signal of those in
373 	\a nonBlocked.
374 	\param nonBlocked The mask with the non-blocked signals.
375 	\return The priority of the highest priority non-blocked signal, or, if all
376 		signals are blocked, \c -1.
377 */
378 int32
379 PendingSignals::HighestSignalPriority(sigset_t nonBlocked) const
380 {
381 	Signal* queuedSignal;
382 	int32 unqueuedSignal;
383 	return _GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal);
384 }
385 
386 
387 void
388 PendingSignals::Clear()
389 {
390 	// release references of all queued signals
391 	while (Signal* signal = fQueuedSignals.RemoveHead())
392 		signal->Handled();
393 
394 	fQueuedSignalsMask = 0;
395 	fUnqueuedSignalsMask = 0;
396 }
397 
398 
399 /*!	Adds a signal.
400 	Takes over the reference to the signal from the caller.
401 */
402 void
403 PendingSignals::AddSignal(Signal* signal)
404 {
405 	// queue according to priority
406 	int32 priority = signal->Priority();
407 	Signal* otherSignal = NULL;
408 	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
409 			(otherSignal = it.Next()) != NULL;) {
410 		if (priority > otherSignal->Priority())
411 			break;
412 	}
413 
414 	fQueuedSignals.InsertBefore(otherSignal, signal);
415 	signal->SetPending(true);
416 
417 	fQueuedSignalsMask |= SIGNAL_TO_MASK(signal->Number());
418 }
419 
420 
421 void
422 PendingSignals::RemoveSignal(Signal* signal)
423 {
424 	signal->SetPending(false);
425 	fQueuedSignals.Remove(signal);
426 	_UpdateQueuedSignalMask();
427 }
428 
429 
430 void
431 PendingSignals::RemoveSignals(sigset_t mask)
432 {
433 	// remove from queued signals
434 	if ((fQueuedSignalsMask & mask) != 0) {
435 		for (SignalList::Iterator it = fQueuedSignals.GetIterator();
436 				Signal* signal = it.Next();) {
437 			// remove signal, if in mask
438 			if ((SIGNAL_TO_MASK(signal->Number()) & mask) != 0) {
439 				it.Remove();
440 				signal->SetPending(false);
441 				signal->Handled();
442 			}
443 		}
444 
445 		fQueuedSignalsMask &= ~mask;
446 	}
447 
448 	// remove from unqueued signals
449 	fUnqueuedSignalsMask &= ~mask;
450 }
451 
452 
453 /*!	Removes and returns a signal in \a nonBlocked that has the highest priority.
454 	The caller gets a reference to the returned signal, if any.
455 	\param nonBlocked The mask of non-blocked signals.
456 	\param buffer If the signal is not queued, this buffer is initialized and
457 		returned instead. The method acquires a reference to \a buffer, so
458 		that the caller gets a reference in this case, too.
459 	\return The removed signal or \c NULL, if all signals are blocked.
460 */
461 Signal*
462 PendingSignals::DequeueSignal(sigset_t nonBlocked, Signal& buffer)
463 {
464 	// find the signal with the highest priority
465 	Signal* queuedSignal;
466 	int32 unqueuedSignal;
467 	if (_GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal) < 0)
468 		return NULL;
469 
470 	// if it is a queued signal, dequeue it
471 	if (queuedSignal != NULL) {
472 		fQueuedSignals.Remove(queuedSignal);
473 		queuedSignal->SetPending(false);
474 		_UpdateQueuedSignalMask();
475 		return queuedSignal;
476 	}
477 
478 	// it is unqueued -- remove from mask
479 	fUnqueuedSignalsMask &= ~SIGNAL_TO_MASK(unqueuedSignal);
480 
481 	// init buffer
482 	buffer.SetTo(unqueuedSignal);
483 	buffer.AcquireReference();
484 	return &buffer;
485 }
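

/*	Illustrative use of the buffer parameter (handle_signals() below uses the
	same pattern via dequeue_thread_or_team_signal()); "pending" and
	"nonBlockedMask" are placeholders:
	\code
	Signal stackSignal;
	Signal* signal = pending.DequeueSignal(nonBlockedMask, stackSignal);
	if (signal != NULL) {
		// ... act on the signal ...
		signal->Handled();
			// releases the reference we were given, whether the signal was
			// queued or the stack buffer was returned
	}
	\endcode
*/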
486 
487 
488 /*!	Returns the priority of the highest priority signal of those in
489 	\a nonBlocked.
490 	\param nonBlocked The mask of non-blocked signals.
491 	\param _queuedSignal If the found signal is a queued signal, the variable
492 		will be set to that signal, otherwise to \c NULL.
493 	\param _unqueuedSignal If the found signal is an unqueued signal, the
494 		variable is set to that signal's number, otherwise to \c -1.
495 	\return The priority of the highest priority non-blocked signal, or, if all
496 		signals are blocked, \c -1.
497 */
498 int32
499 PendingSignals::_GetHighestPrioritySignal(sigset_t nonBlocked,
500 	Signal*& _queuedSignal, int32& _unqueuedSignal) const
501 {
502 	// check queued signals
503 	Signal* queuedSignal = NULL;
504 	int32 queuedPriority = -1;
505 
506 	if ((fQueuedSignalsMask & nonBlocked) != 0) {
507 		for (SignalList::ConstIterator it = fQueuedSignals.GetIterator();
508 				Signal* signal = it.Next();) {
509 			if ((SIGNAL_TO_MASK(signal->Number()) & nonBlocked) != 0) {
510 				queuedPriority = signal->Priority();
511 				queuedSignal = signal;
512 				break;
513 			}
514 		}
515 	}
516 
517 	// check unqueued signals
518 	int32 unqueuedSignal = -1;
519 	int32 unqueuedPriority = -1;
520 
521 	sigset_t unqueuedSignals = fUnqueuedSignalsMask & nonBlocked;
522 	if (unqueuedSignals != 0) {
523 		int32 signal = 1;
524 		while (unqueuedSignals != 0) {
525 			sigset_t mask = SIGNAL_TO_MASK(signal);
526 			if ((unqueuedSignals & mask) != 0) {
527 				int32 priority = kSignalInfos[signal].priority;
528 				if (priority > unqueuedPriority) {
529 					unqueuedSignal = signal;
530 					unqueuedPriority = priority;
531 				}
532 				unqueuedSignals &= ~mask;
533 			}
534 
535 			signal++;
536 		}
537 	}
538 
539 	// Return found queued or unqueued signal, whichever has the higher
540 	// priority.
541 	if (queuedPriority >= unqueuedPriority) {
542 		_queuedSignal = queuedSignal;
543 		_unqueuedSignal = -1;
544 		return queuedPriority;
545 	}
546 
547 	_queuedSignal = NULL;
548 	_unqueuedSignal = unqueuedSignal;
549 	return unqueuedPriority;
550 }
551 
552 
553 void
554 PendingSignals::_UpdateQueuedSignalMask()
555 {
556 	sigset_t mask = 0;
557 	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
558 			Signal* signal = it.Next();) {
559 		mask |= SIGNAL_TO_MASK(signal->Number());
560 	}
561 
562 	fQueuedSignalsMask = mask;
563 }
564 
565 
566 // #pragma mark - signal tracing
567 
568 
569 #if SIGNAL_TRACING
570 
571 namespace SignalTracing {
572 
573 
574 class HandleSignal : public AbstractTraceEntry {
575 	public:
576 		HandleSignal(uint32 signal)
577 			:
578 			fSignal(signal)
579 		{
580 			Initialized();
581 		}
582 
583 		virtual void AddDump(TraceOutput& out)
584 		{
585 			out.Print("signal handle: %" B_PRIu32 " (%s)", fSignal,
586 				signal_name(fSignal));
587 		}
588 
589 	private:
590 		uint32		fSignal;
591 };
592 
593 
594 class ExecuteSignalHandler : public AbstractTraceEntry {
595 	public:
596 		ExecuteSignalHandler(uint32 signal, struct sigaction* handler)
597 			:
598 			fSignal(signal),
599 			fHandler((void*)handler->sa_handler)
600 		{
601 			Initialized();
602 		}
603 
604 		virtual void AddDump(TraceOutput& out)
605 		{
606 			out.Print("signal exec handler: signal: %" B_PRIu32 " (%s), "
607 				"handler: %p", fSignal, signal_name(fSignal), fHandler);
608 		}
609 
610 	private:
611 		uint32	fSignal;
612 		void*	fHandler;
613 };
614 
615 
616 class SendSignal : public AbstractTraceEntry {
617 	public:
618 		SendSignal(pid_t target, uint32 signal, uint32 flags)
619 			:
620 			fTarget(target),
621 			fSignal(signal),
622 			fFlags(flags)
623 		{
624 			Initialized();
625 		}
626 
627 		virtual void AddDump(TraceOutput& out)
628 		{
629 			out.Print("signal send: target: %" B_PRId32 ", signal: %" B_PRIu32
630 				" (%s), flags: %#" B_PRIx32, fTarget, fSignal,
631 				signal_name(fSignal), fFlags);
632 		}
633 
634 	private:
635 		pid_t	fTarget;
636 		uint32	fSignal;
637 		uint32	fFlags;
638 };
639 
640 
641 class SigAction : public AbstractTraceEntry {
642 	public:
643 		SigAction(uint32 signal, const struct sigaction* act)
644 			:
645 			fSignal(signal),
646 			fAction(*act)
647 		{
648 			Initialized();
649 		}
650 
651 		virtual void AddDump(TraceOutput& out)
652 		{
653 			out.Print("signal action: signal: %" B_PRIu32 " (%s), "
654 				"action: {handler: %p, flags: %#x, mask: %#" B_PRIx64 "}",
655 				fSignal, signal_name(fSignal), fAction.sa_handler,
656 				fAction.sa_flags, (uint64)fAction.sa_mask);
657 		}
658 
659 	private:
660 		uint32				fSignal;
661 		struct sigaction	fAction;
662 };
663 
664 
665 class SigProcMask : public AbstractTraceEntry {
666 	public:
667 		SigProcMask(int how, sigset_t mask)
668 			:
669 			fHow(how),
670 			fMask(mask),
671 			fOldMask(thread_get_current_thread()->sig_block_mask)
672 		{
673 			Initialized();
674 		}
675 
676 		virtual void AddDump(TraceOutput& out)
677 		{
678 			const char* how = "invalid";
679 			switch (fHow) {
680 				case SIG_BLOCK:
681 					how = "block";
682 					break;
683 				case SIG_UNBLOCK:
684 					how = "unblock";
685 					break;
686 				case SIG_SETMASK:
687 					how = "set";
688 					break;
689 			}
690 
691 			out.Print("signal proc mask: %s 0x%llx, old mask: 0x%llx", how,
692 				(long long)fMask, (long long)fOldMask);
693 		}
694 
695 	private:
696 		int			fHow;
697 		sigset_t	fMask;
698 		sigset_t	fOldMask;
699 };
700 
701 
702 class SigSuspend : public AbstractTraceEntry {
703 	public:
704 		SigSuspend(sigset_t mask)
705 			:
706 			fMask(mask),
707 			fOldMask(thread_get_current_thread()->sig_block_mask)
708 		{
709 			Initialized();
710 		}
711 
712 		virtual void AddDump(TraceOutput& out)
713 		{
714 			out.Print("signal suspend: %#llx, old mask: %#llx",
715 				(long long)fMask, (long long)fOldMask);
716 		}
717 
718 	private:
719 		sigset_t	fMask;
720 		sigset_t	fOldMask;
721 };
722 
723 
724 class SigSuspendDone : public AbstractTraceEntry {
725 	public:
726 		SigSuspendDone()
727 			:
728 			fSignals(thread_get_current_thread()->ThreadPendingSignals())
729 		{
730 			Initialized();
731 		}
732 
733 		virtual void AddDump(TraceOutput& out)
734 		{
735 			out.Print("signal suspend done: %#" B_PRIx32, fSignals);
736 		}
737 
738 	private:
739 		uint32		fSignals;
740 };
741 
742 }	// namespace SignalTracing
743 
744 #	define T(x)	new(std::nothrow) SignalTracing::x
745 
746 #else
747 #	define T(x)
748 #endif	// SIGNAL_TRACING
749 
750 
751 // #pragma mark -
752 
753 
754 /*!	Updates the given thread's Thread::flags field according to what signals are
755 	pending.
756 	The caller must hold \c team->signal_lock.
757 */
758 static void
759 update_thread_signals_flag(Thread* thread)
760 {
761 	sigset_t mask = ~thread->sig_block_mask;
762 	if ((thread->AllPendingSignals() & mask) != 0)
763 		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
764 	else
765 		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
766 }
767 
768 
769 /*!	Updates the current thread's Thread::flags field according to what signals
770 	are pending.
771 	The caller must hold \c team->signal_lock.
772 */
773 static void
774 update_current_thread_signals_flag()
775 {
776 	update_thread_signals_flag(thread_get_current_thread());
777 }
778 
779 
780 /*!	Updates all of the given team's threads' Thread::flags fields according to
781 	what signals are pending.
782 	The caller must hold \c signal_lock.
783 */
784 static void
785 update_team_threads_signal_flag(Team* team)
786 {
787 	for (Thread* thread = team->thread_list; thread != NULL;
788 			thread = thread->team_next) {
789 		update_thread_signals_flag(thread);
790 	}
791 }
792 
793 
794 /*!	Notifies the user debugger about a signal to be handled.
795 
796 	The caller must not hold any locks.
797 
798 	\param thread The current thread.
799 	\param signal The signal to be handled.
800 	\param handler The installed signal handler for the signal.
801 	\param deadly Indicates whether the signal is deadly.
802 	\return \c true, if the signal shall be handled, \c false, if it shall be
803 		ignored.
804 */
805 static bool
806 notify_debugger(Thread* thread, Signal* signal, struct sigaction& handler,
807 	bool deadly)
808 {
809 	uint64 signalMask = SIGNAL_TO_MASK(signal->Number());
810 
811 	// first check the ignore signal masks the debugger specified for the thread
812 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
813 
814 	if ((thread->debug_info.ignore_signals_once & signalMask) != 0) {
815 		thread->debug_info.ignore_signals_once &= ~signalMask;
816 		return true;
817 	}
818 
819 	if ((thread->debug_info.ignore_signals & signalMask) != 0)
820 		return true;
821 
822 	threadDebugInfoLocker.Unlock();
823 
824 	// deliver the event
825 	return user_debug_handle_signal(signal->Number(), &handler, deadly);
826 }
827 
828 
829 /*!	Removes and returns a signal with the highest priority in \a nonBlocked that
830 	is pending in the given thread or its team.
831 	After dequeuing the signal, the Thread::flags fields of the affected
832 	threads are updated.
833 	The caller gets a reference to the returned signal, if any.
834 	The caller must hold \c team->signal_lock.
835 	\param thread The thread.
836 	\param nonBlocked The mask of non-blocked signals.
837 	\param buffer If the signal is not queued this buffer is returned. In this
838 		case the method acquires a reference to \a buffer, so that the caller
839 		gets a reference also in this case.
840 	\return The removed signal or \c NULL, if all signals are blocked.
841 */
842 static Signal*
843 dequeue_thread_or_team_signal(Thread* thread, sigset_t nonBlocked,
844 	Signal& buffer)
845 {
846 	Team* team = thread->team;
847 	Signal* signal;
848 	if (team->HighestPendingSignalPriority(nonBlocked)
849 			> thread->HighestPendingSignalPriority(nonBlocked)) {
850 		signal = team->DequeuePendingSignal(nonBlocked, buffer);
851 		update_team_threads_signal_flag(team);
852 	} else {
853 		signal = thread->DequeuePendingSignal(nonBlocked, buffer);
854 		update_thread_signals_flag(thread);
855 	}
856 
857 	return signal;
858 }
859 
860 
861 static status_t
862 setup_signal_frame(Thread* thread, struct sigaction* action, Signal* signal,
863 	sigset_t signalMask)
864 {
865 	// prepare the data we need to copy onto the user stack
866 	signal_frame_data frameData;
867 
868 	// signal info
869 	frameData.info.si_signo = signal->Number();
870 	frameData.info.si_code = signal->SignalCode();
871 	frameData.info.si_errno = signal->ErrorCode();
872 	frameData.info.si_pid = signal->SendingProcess();
873 	frameData.info.si_uid = signal->SendingUser();
874 	frameData.info.si_addr = signal->Address();
875 	frameData.info.si_status = signal->Status();
876 	frameData.info.si_band = signal->PollBand();
877 	frameData.info.si_value = signal->UserValue();
878 
879 	// context
880 	frameData.context.uc_link = thread->user_signal_context;
881 	frameData.context.uc_sigmask = signalMask;
882 	// uc_stack and uc_mcontext are filled in by the architecture specific code.
883 
884 	// user data
885 	frameData.user_data = action->sa_userdata;
886 
887 	// handler function
888 	frameData.siginfo_handler = (action->sa_flags & SA_SIGINFO) != 0;
889 	frameData.handler = frameData.siginfo_handler
890 		? (void*)action->sa_sigaction : (void*)action->sa_handler;
891 
892 	// thread flags -- save and clear the thread's syscall restart related
893 	// flags
894 	frameData.thread_flags = atomic_and(&thread->flags,
895 		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
896 
897 	// syscall restart related fields
898 	memcpy(frameData.syscall_restart_parameters,
899 		thread->syscall_restart.parameters,
900 		sizeof(frameData.syscall_restart_parameters));
901 
902 	// commpage address
903 	frameData.commpage_address = thread->team->commpage_address;
904 
905 	// syscall_restart_return_value is filled in by the architecture specific
906 	// code.
907 
908 	return arch_setup_signal_frame(thread, action, &frameData);
909 }
910 
911 
912 /*! Actually handles pending signals -- i.e. the thread will exit, a custom
913 	signal handler is prepared, or whatever the signal demands.
914 	The function will not return when a deadly signal is encountered. The
915 	function will suspend the thread indefinitely when a stop signal is
916 	encountered.
917 	Interrupts must be enabled.
918 	\param thread The current thread.
919 */
920 void
921 handle_signals(Thread* thread)
922 {
923 	Team* team = thread->team;
924 
925 	TeamLocker teamLocker(team);
926 	InterruptsSpinLocker locker(thread->team->signal_lock);
927 
928 	// If userland requested to defer signals, we check now whether this is
929 	// possible.
930 	sigset_t nonBlockedMask = ~thread->sig_block_mask;
931 	sigset_t signalMask = thread->AllPendingSignals() & nonBlockedMask;
932 
933 	set_ac();
934 	if (thread->user_thread->defer_signals > 0
935 		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0
936 		&& thread->sigsuspend_original_unblocked_mask == 0) {
937 		thread->user_thread->pending_signals = signalMask;
938 		clear_ac();
939 		return;
940 	}
941 
942 	thread->user_thread->pending_signals = 0;
943 	clear_ac();
944 
945 	// determine syscall restart behavior
946 	uint32 restartFlags = atomic_and(&thread->flags,
947 		~THREAD_FLAGS_DONT_RESTART_SYSCALL);
948 	bool alwaysRestart
949 		= (restartFlags & THREAD_FLAGS_ALWAYS_RESTART_SYSCALL) != 0;
950 	bool restart = alwaysRestart
951 		|| (restartFlags & THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;
952 
953 	// Loop until we've handled all signals.
954 	bool initialIteration = true;
955 	while (true) {
956 		if (initialIteration) {
957 			initialIteration = false;
958 		} else {
959 			teamLocker.Lock();
960 			locker.Lock();
961 
962 			signalMask = thread->AllPendingSignals() & nonBlockedMask;
963 		}
964 
965 		// Unless SIGKILL[THR] are pending, check whether the thread shall stop
966 		// for a core dump or for debugging.
967 		if ((signalMask & KILL_SIGNALS) == 0) {
968 			if ((atomic_get(&thread->flags) & THREAD_FLAGS_TRAP_FOR_CORE_DUMP)
969 					!= 0) {
970 				locker.Unlock();
971 				teamLocker.Unlock();
972 
973 				core_dump_trap_thread();
974 				continue;
975 			}
976 
977 			if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP)
978 					!= 0) {
979 				locker.Unlock();
980 				teamLocker.Unlock();
981 
982 				user_debug_stop_thread();
983 				continue;
984 			}
985 		}
986 
987 		// We're done, if there aren't any pending signals anymore.
988 		if ((signalMask & nonBlockedMask) == 0)
989 			break;
990 
991 		// get pending non-blocked thread or team signal with the highest
992 		// priority
993 		Signal stackSignal;
994 		Signal* signal = dequeue_thread_or_team_signal(thread, nonBlockedMask,
995 			stackSignal);
996 		ASSERT(signal != NULL);
997 		SignalHandledCaller signalHandledCaller(signal);
998 
999 		locker.Unlock();
1000 
1001 		// get the action for the signal
1002 		struct sigaction handler;
1003 		if (signal->Number() <= MAX_SIGNAL_NUMBER) {
1004 			handler = team->SignalActionFor(signal->Number());
1005 		} else {
1006 			handler.sa_handler = SIG_DFL;
1007 			handler.sa_flags = 0;
1008 		}
1009 
1010 		if ((handler.sa_flags & SA_ONESHOT) != 0
1011 			&& handler.sa_handler != SIG_IGN && handler.sa_handler != SIG_DFL) {
1012 			team->SignalActionFor(signal->Number()).sa_handler = SIG_DFL;
1013 		}
1014 
1015 		T(HandleSignal(signal->Number()));
1016 
1017 		teamLocker.Unlock();
1018 
1019 		// debug the signal, if a debugger is installed and the signal debugging
1020 		// flag is set
1021 		bool debugSignal = (~atomic_get(&team->debug_info.flags)
1022 				& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS))
1023 			== 0;
1024 
1025 		// handle the signal
1026 		TRACE(("Thread %" B_PRId32 " received signal %s\n", thread->id,
1027 			kSignalInfos[signal->Number()].name));
1028 
1029 		if (handler.sa_handler == SIG_IGN) {
1030 			// signal is to be ignored
1031 			// TODO: apply zombie cleaning on SIGCHLD
1032 
1033 			// notify the debugger
1034 			if (debugSignal)
1035 				notify_debugger(thread, signal, handler, false);
1036 			continue;
1037 		} else if (handler.sa_handler == SIG_DFL) {
1038 			// default signal behaviour
1039 
1040 			// realtime signals are ignored by default
1041 			if (signal->Number() >= SIGNAL_REALTIME_MIN
1042 				&& signal->Number() <= SIGNAL_REALTIME_MAX) {
1043 				// notify the debugger
1044 				if (debugSignal)
1045 					notify_debugger(thread, signal, handler, false);
1046 				continue;
1047 			}
1048 
1049 			bool killTeam = false;
1050 			switch (signal->Number()) {
1051 				case SIGCHLD:
1052 				case SIGWINCH:
1053 				case SIGURG:
1054 					// notify the debugger
1055 					if (debugSignal)
1056 						notify_debugger(thread, signal, handler, false);
1057 					continue;
1058 
1059 				case SIGNAL_DEBUG_THREAD:
1060 					// ignore -- used together with B_THREAD_DEBUG_STOP, which
1061 					// is handled above
1062 					continue;
1063 
1064 				case SIGNAL_CANCEL_THREAD:
1065 					// set up the signal handler
1066 					handler.sa_handler = thread->cancel_function;
1067 					handler.sa_flags = 0;
1068 					handler.sa_mask = 0;
1069 					handler.sa_userdata = NULL;
1070 
1071 					restart = false;
1072 						// we always want to interrupt
1073 					break;
1074 
1075 				case SIGNAL_CONTINUE_THREAD:
1076 					// prevent syscall restart, but otherwise ignore
1077 					restart = false;
1078 					atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1079 					continue;
1080 
1081 				case SIGCONT:
1082 					// notify the debugger
1083 					if (debugSignal
1084 						&& !notify_debugger(thread, signal, handler, false))
1085 						continue;
1086 
1087 					// notify threads waiting for team state changes
1088 					if (thread == team->main_thread) {
1089 						team->LockTeamAndParent(false);
1090 
1091 						team_set_job_control_state(team,
1092 							JOB_CONTROL_STATE_CONTINUED, signal);
1093 
1094 						team->UnlockTeamAndParent();
1095 
1096 						// The standard states that the system *may* send a
1097 						// SIGCHLD when a child is continued. I haven't found
1098 						// a good reason why we would want to, though.
1099 					}
1100 					continue;
1101 
1102 				case SIGSTOP:
1103 				case SIGTSTP:
1104 				case SIGTTIN:
1105 				case SIGTTOU:
1106 				{
1107 					// notify the debugger
1108 					if (debugSignal
1109 						&& !notify_debugger(thread, signal, handler, false))
1110 						continue;
1111 
1112 					// The terminal-sent stop signals are allowed to stop the
1113 					// process only if it doesn't belong to an orphaned process
1114 					// group. Otherwise the signal must be discarded.
1115 					team->LockProcessGroup();
1116 					AutoLocker<ProcessGroup> groupLocker(team->group, true);
1117 					if (signal->Number() != SIGSTOP
1118 						&& team->group->IsOrphaned()) {
1119 						continue;
1120 					}
1121 
1122 					// notify threads waiting for team state changes
1123 					if (thread == team->main_thread) {
1124 						team->LockTeamAndParent(false);
1125 
1126 						team_set_job_control_state(team,
1127 							JOB_CONTROL_STATE_STOPPED, signal);
1128 
1129 						// send a SIGCHLD to the parent (unless it has
1130 						// SA_NOCLDSTOP set)
1131 						Team* parentTeam = team->parent;
1132 
1133 						struct sigaction& parentHandler
1134 							= parentTeam->SignalActionFor(SIGCHLD);
1135 						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
1136 							Signal childSignal(SIGCHLD, CLD_STOPPED, B_OK,
1137 								team->id);
1138 							childSignal.SetStatus(signal->Number());
1139 							childSignal.SetSendingUser(signal->SendingUser());
1140 							send_signal_to_team(parentTeam, childSignal, 0);
1141 						}
1142 
1143 						team->UnlockTeamAndParent();
1144 					}
1145 
1146 					groupLocker.Unlock();
1147 
1148 					// Suspend the thread, unless there's already a signal to
1149 					// continue or kill pending.
1150 					locker.Lock();
1151 					bool resume = (thread->AllPendingSignals()
1152 								& (CONTINUE_SIGNALS | KILL_SIGNALS)) != 0;
1153 					locker.Unlock();
1154 
1155 					if (!resume)
1156 						thread_suspend();
1157 
1158 					continue;
1159 				}
1160 
1161 				case SIGSEGV:
1162 				case SIGBUS:
1163 				case SIGFPE:
1164 				case SIGILL:
1165 				case SIGTRAP:
1166 				case SIGABRT:
1167 				case SIGKILL:
1168 				case SIGQUIT:
1169 				case SIGPOLL:
1170 				case SIGPROF:
1171 				case SIGSYS:
1172 				case SIGVTALRM:
1173 				case SIGXCPU:
1174 				case SIGXFSZ:
1175 				default:
1176 					TRACE(("Shutting down team %" B_PRId32 " due to signal %"
1177 						B_PRIu32 " received in thread %" B_PRId32 "\n",
1178 						team->id, signal->Number(), thread->id));
1179 
1180 					// This signal kills the team regardless of which thread
1181 					// received it.
1182 					killTeam = true;
1183 
1184 					// fall through
1185 				case SIGKILLTHR:
1186 					// notify the debugger
1187 					if (debugSignal && signal->Number() != SIGKILL
1188 						&& signal->Number() != SIGKILLTHR
1189 						&& !notify_debugger(thread, signal, handler, true)) {
1190 						continue;
1191 					}
1192 
1193 					if (killTeam || thread == team->main_thread) {
1194 						// The signal is terminal for the team or the thread is
1195 						// the main thread. In either case the team is going
1196 						// down. Set its exit status, if that didn't happen yet.
1197 						teamLocker.Lock();
1198 
1199 						if (!team->exit.initialized) {
1200 							team->exit.reason = CLD_KILLED;
1201 							team->exit.signal = signal->Number();
1202 							team->exit.signaling_user = signal->SendingUser();
1203 							team->exit.status = 0;
1204 							team->exit.initialized = true;
1205 						}
1206 
1207 						teamLocker.Unlock();
1208 
1209 						// If this is not the main thread, send a SIGKILLTHR to
1210 						// the main thread so that the team terminates.
1211 						if (thread != team->main_thread) {
1212 							Signal childSignal(SIGKILLTHR, SI_USER, B_OK,
1213 								team->id);
1214 							send_signal_to_thread_id(team->id, childSignal, 0);
1215 						}
1216 					}
1217 
1218 					// explicitly get rid of the signal reference, since
1219 					// thread_exit() won't return
1220 					signalHandledCaller.Done();
1221 
1222 					thread_exit();
1223 						// won't return
1224 			}
1225 		}
1226 
1227 		// User defined signal handler
1228 
1229 		// notify the debugger
1230 		if (debugSignal && !notify_debugger(thread, signal, handler, false))
1231 			continue;
1232 
1233 		if (!restart
1234 				|| (!alwaysRestart && (handler.sa_flags & SA_RESTART) == 0)) {
1235 			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1236 		}
1237 
1238 		T(ExecuteSignalHandler(signal->Number(), &handler));
1239 
1240 		TRACE(("### Setting up custom signal handler frame...\n"));
1241 
1242 		// save the old block mask -- we may need to adjust it for the handler
1243 		locker.Lock();
1244 
1245 		sigset_t oldBlockMask = thread->sigsuspend_original_unblocked_mask != 0
1246 			? ~thread->sigsuspend_original_unblocked_mask
1247 			: thread->sig_block_mask;
1248 
1249 		// Update the block mask while the signal handler is running -- it
1250 		// will be automatically restored when the signal frame is left.
1251 		thread->sig_block_mask |= handler.sa_mask & BLOCKABLE_SIGNALS;
1252 
1253 		if ((handler.sa_flags & SA_NOMASK) == 0) {
1254 			thread->sig_block_mask
1255 				|= SIGNAL_TO_MASK(signal->Number()) & BLOCKABLE_SIGNALS;
1256 		}
1257 
1258 		update_current_thread_signals_flag();
1259 
1260 		locker.Unlock();
1261 
1262 		setup_signal_frame(thread, &handler, signal, oldBlockMask);
1263 
1264 		// Reset sigsuspend_original_unblocked_mask. It would have been set by
1265 		// sigsuspend_internal(). In that case, above we set oldBlockMask
1266 		// accordingly so that after the handler returns the thread's signal
1267 		// mask is reset.
1268 		thread->sigsuspend_original_unblocked_mask = 0;
1269 
1270 		return;
1271 	}
1272 
1273 	// We have not handled any signal (or only ignored ones).
1274 
1275 	// If sigsuspend_original_unblocked_mask is non-zero, we came from a
1276 	// sigsuspend_internal(). Not having handled any signal, we should restart
1277 	// the syscall.
1278 	if (thread->sigsuspend_original_unblocked_mask != 0) {
1279 		restart = true;
1280 		atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
1281 	} else if (!restart) {
1282 		// clear syscall restart thread flag, if we're not supposed to restart
1283 		// the syscall
1284 		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1285 	}
1286 }
1287 
1288 
1289 /*!	Checks whether the given signal is blocked for the given team (i.e. all of
1290 	its threads).
1291 	The caller must hold the team's lock and \c signal_lock.
1292 */
1293 bool
1294 is_team_signal_blocked(Team* team, int signal)
1295 {
1296 	sigset_t mask = SIGNAL_TO_MASK(signal);
1297 
1298 	for (Thread* thread = team->thread_list; thread != NULL;
1299 			thread = thread->team_next) {
1300 		if ((thread->sig_block_mask & mask) == 0)
1301 			return false;
1302 	}
1303 
1304 	return true;
1305 }
1306 
1307 
1308 /*!	Gets (guesses) the current thread's currently used stack from the given
1309 	stack pointer.
1310 	Fills in \a stack with either the signal stack or the thread's user stack.
1311 	\param address A stack pointer address to be used to determine the used
1312 		stack.
1313 	\param stack Filled in by the function.
1314 */
1315 void
1316 signal_get_user_stack(addr_t address, stack_t* stack)
1317 {
1318 	// If a signal stack is enabled for the stack and the address is within it,
1319 	// return the signal stack. In all other cases return the thread's user
1320 	// stack, even if the address doesn't lie within it.
1321 	Thread* thread = thread_get_current_thread();
1322 	if (thread->signal_stack_enabled && address >= thread->signal_stack_base
1323 		&& address < thread->signal_stack_base + thread->signal_stack_size) {
1324 		stack->ss_sp = (void*)thread->signal_stack_base;
1325 		stack->ss_size = thread->signal_stack_size;
1326 	} else {
1327 		stack->ss_sp = (void*)thread->user_stack_base;
1328 		stack->ss_size = thread->user_stack_size;
1329 	}
1330 
1331 	stack->ss_flags = 0;
1332 }
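

/*	Illustrative call, as architecture specific signal frame code might issue
	it ("userStackPointer" is a placeholder):
	\code
	stack_t stack;
	signal_get_user_stack(userStackPointer, &stack);
	addr_t stackTop = (addr_t)stack.ss_sp + stack.ss_size;
	\endcode
*/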
1333 
1334 
1335 /*!	Checks whether any non-blocked signal is pending for the current thread.
1336 	The caller must hold \c team->signal_lock.
1337 	\param thread The current thread.
1338 */
1339 static bool
1340 has_signals_pending(Thread* thread)
1341 {
1342 	return (thread->AllPendingSignals() & ~thread->sig_block_mask) != 0;
1343 }
1344 
1345 
1346 /*!	Checks whether the current user has permission to send a signal to the given
1347 	target team.
1348 
1349 	\param team The target team.
1350 */
1351 static bool
1352 has_permission_to_signal(Team* team)
1353 {
1354 	// get the current user
1355 	uid_t currentUser = thread_get_current_thread()->team->effective_uid;
1356 
1357 	// root is omnipotent -- in the other cases the current user must match the
1358 	// target team's
1359 	return currentUser == 0 || currentUser == team->effective_uid;
1360 }
1361 
1362 
1363 /*!	Delivers a signal to the \a thread, but doesn't handle the signal -- it just
1364 	makes sure the thread gets the signal, i.e. unblocks it if needed.
1365 
1366 	The caller must hold \c team->signal_lock.
1367 
1368 	\param thread The thread the signal shall be delivered to.
1369 	\param signalNumber The number of the signal to be delivered. If \c 0, no
1370 		actual signal will be delivered. Only delivery checks will be performed.
1371 	\param signal If non-NULL the signal to be queued (has number
1372 		\a signalNumber in this case). The caller transfers an object reference
1373 		to this function. If \c NULL an unqueued signal will be delivered to the
1374 		thread.
1375 	\param flags A bitwise combination of any number of the following:
1376 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1377 			target thread the signal.
1378 	\return \c B_OK, when the signal was delivered successfully, another error
1379 		code otherwise.
1380 */
1381 status_t
1382 send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
1383 	Signal* signal, uint32 flags)
1384 {
1385 	ASSERT(signal == NULL || signalNumber == signal->Number());
1386 
1387 	T(SendSignal(thread->id, signalNumber, flags));
1388 
1389 	// The caller transferred a reference to the signal to us.
1390 	BReference<Signal> signalReference(signal, true);
1391 
1392 	if ((flags & B_CHECK_PERMISSION) != 0) {
1393 		if (!has_permission_to_signal(thread->team))
1394 			return EPERM;
1395 	}
1396 
1397 	if (signalNumber == 0)
1398 		return B_OK;
1399 
1400 	if (thread->team == team_get_kernel_team()) {
1401 		// Signals to kernel threads will only wake them up
1402 		thread_continue(thread);
1403 		return B_OK;
1404 	}
1405 
1406 	if (signal != NULL)
1407 		thread->AddPendingSignal(signal);
1408 	else
1409 		thread->AddPendingSignal(signalNumber);
1410 
1411 	// the thread has the signal reference, now
1412 	signalReference.Detach();
1413 
1414 	switch (signalNumber) {
1415 		case SIGKILL:
1416 		{
1417 			// If sent to a thread other than the team's main thread, also send
1418 			// a SIGKILLTHR to the main thread to kill the team.
1419 			Thread* mainThread = thread->team->main_thread;
1420 			if (mainThread != NULL && mainThread != thread) {
1421 				mainThread->AddPendingSignal(SIGKILLTHR);
1422 
1423 				// wake up main thread
1424 				mainThread->going_to_suspend = false;
1425 
1426 				SpinLocker locker(mainThread->scheduler_lock);
1427 				if (mainThread->state == B_THREAD_SUSPENDED)
1428 					scheduler_enqueue_in_run_queue(mainThread);
1429 				else
1430 					thread_interrupt(mainThread, true);
1431 				locker.Unlock();
1432 
1433 				update_thread_signals_flag(mainThread);
1434 			}
1435 
1436 			// supposed to fall through
1437 		}
1438 		case SIGKILLTHR:
1439 		{
1440 			// Wake up the thread if it was suspended, otherwise interrupt it.
1441 			thread->going_to_suspend = false;
1442 
1443 			SpinLocker locker(thread->scheduler_lock);
1444 			if (thread->state == B_THREAD_SUSPENDED)
1445 				scheduler_enqueue_in_run_queue(thread);
1446 			else
1447 				thread_interrupt(thread, true);
1448 
1449 			break;
1450 		}
1451 		case SIGNAL_DEBUG_THREAD:
1452 		{
1453 			// Wake up thread if it was suspended, otherwise interrupt it.
1454 			thread->going_to_suspend = false;
1455 
1456 			SpinLocker locker(thread->scheduler_lock);
1457 			if (thread->state == B_THREAD_SUSPENDED)
1458 				scheduler_enqueue_in_run_queue(thread);
1459 			else
1460 				thread_interrupt(thread, false);
1461 
1462 			break;
1463 		}
1464 		case SIGNAL_CONTINUE_THREAD:
1465 		{
1466 			// wake up thread, and interrupt its current syscall
1467 			thread->going_to_suspend = false;
1468 
1469 			SpinLocker locker(thread->scheduler_lock);
1470 			if (thread->state == B_THREAD_SUSPENDED)
1471 				scheduler_enqueue_in_run_queue(thread);
1472 
1473 			atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
1474 			break;
1475 		}
1476 		case SIGCONT:
1477 		{
1478 			// Wake up thread if it was suspended, otherwise interrupt it, if
1479 			// the signal isn't blocked.
1480 			thread->going_to_suspend = false;
1481 
1482 			SpinLocker locker(thread->scheduler_lock);
1483 			if (thread->state == B_THREAD_SUSPENDED)
1484 				scheduler_enqueue_in_run_queue(thread);
1485 			else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask) != 0)
1486 				thread_interrupt(thread, false);
1487 
1488 			// remove any pending stop signals
1489 			thread->RemovePendingSignals(STOP_SIGNALS);
1490 			break;
1491 		}
1492 		default:
1493 			// If the signal is not masked, interrupt the thread, if it is
1494 			// currently waiting (interruptibly).
1495 			if ((thread->AllPendingSignals()
1496 						& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD)))
1497 					!= 0) {
1498 				// Interrupt thread if it was waiting
1499 				SpinLocker locker(thread->scheduler_lock);
1500 				thread_interrupt(thread, false);
1501 			}
1502 			break;
1503 	}
1504 
1505 	update_thread_signals_flag(thread);
1506 
1507 	return B_OK;
1508 }
1509 
1510 
1511 /*!	Sends the given signal to the given thread.
1512 
1513 	\param thread The thread the signal shall be sent to.
1514 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1515 		actual signal will be delivered. Only delivery checks will be performed.
1516 		The given object will be copied. The caller retains ownership.
1517 	\param flags A bitwise combination of any number of the following:
1518 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1519 			target thread the signal.
1520 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1521 			woken up, the scheduler will be invoked. If set that will not be
1522 			done explicitly, but rescheduling can still happen, e.g. when the
1523 			current thread's time slice runs out.
1524 	\return \c B_OK, when the signal was delivered successfully, another error
1525 		code otherwise.
1526 */
1527 status_t
1528 send_signal_to_thread(Thread* thread, const Signal& signal, uint32 flags)
1529 {
1530 	// Clone the signal -- the clone will be queued. If something fails and the
1531 	// caller doesn't require queuing, we will add an unqueued signal.
1532 	Signal* signalToQueue = NULL;
1533 	status_t error = Signal::CreateQueuable(signal,
1534 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1535 	if (error != B_OK)
1536 		return error;
1537 
1538 	InterruptsReadSpinLocker teamLocker(thread->team_lock);
1539 	SpinLocker locker(thread->team->signal_lock);
1540 
1541 	error = send_signal_to_thread_locked(thread, signal.Number(), signalToQueue,
1542 		flags);
1543 	if (error != B_OK)
1544 		return error;
1545 
1546 	locker.Unlock();
1547 	teamLocker.Unlock();
1548 
1549 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1550 		scheduler_reschedule_if_necessary();
1551 
1552 	return B_OK;
1553 }
1554 
1555 
1556 /*!	Sends the given signal to the thread with the given ID.
1557 
1558 	\param threadID The ID of the thread the signal shall be sent to.
1559 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1560 		actual signal will be delivered. Only delivery checks will be performed.
1561 		The given object will be copied. The caller retains ownership.
1562 	\param flags A bitwise combination of any number of the following:
1563 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1564 			target thread the signal.
1565 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1566 			woken up, the scheduler will be invoked. If set that will not be
1567 			done explicitly, but rescheduling can still happen, e.g. when the
1568 			current thread's time slice runs out.
1569 	\return \c B_OK, when the signal was delivered successfully, another error
1570 		code otherwise.
1571 */
1572 status_t
1573 send_signal_to_thread_id(thread_id threadID, const Signal& signal, uint32 flags)
1574 {
1575 	Thread* thread = Thread::Get(threadID);
1576 	if (thread == NULL)
1577 		return B_BAD_THREAD_ID;
1578 	BReference<Thread> threadReference(thread, true);
1579 
1580 	return send_signal_to_thread(thread, signal, flags);
1581 }
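

/*	Usage sketch (compare the SIGKILLTHR delivery in handle_signals()): the
	Signal object is copied by the send functions, so a stack object suffices.
	"targetThread" is a placeholder thread ID.
	\code
	Signal signal(SIGKILLTHR, SI_USER, B_OK,
		thread_get_current_thread()->team->id);
	send_signal_to_thread_id(targetThread, signal, B_DO_NOT_RESCHEDULE);
	\endcode
*/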
1582 
1583 
1584 /*!	Sends the given signal to the given team.
1585 
1586 	The caller must hold \c signal_lock.
1587 
1588 	\param team The team the signal shall be sent to.
1589 	\param signalNumber The number of the signal to be delivered. If \c 0, no
1590 		actual signal will be delivered. Only delivery checks will be performed.
1591 	\param signal If non-NULL the signal to be queued (has number
1592 		\a signalNumber in this case). The caller transfers an object reference
1593 		to this function. If \c NULL an unqueued signal will be delivered to the
1594 		thread.
1595 	\param flags A bitwise combination of any number of the following:
1596 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1597 			target thread the signal.
1598 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1599 			woken up, the scheduler will be invoked. If set that will not be
1600 			done explicitly, but rescheduling can still happen, e.g. when the
1601 			current thread's time slice runs out.
1602 	\return \c B_OK, when the signal was delivered successfully, another error
1603 		code otherwise.
1604 */
1605 status_t
1606 send_signal_to_team_locked(Team* team, uint32 signalNumber, Signal* signal,
1607 	uint32 flags)
1608 {
1609 	ASSERT(signal == NULL || signalNumber == signal->Number());
1610 
1611 	T(SendSignal(team->id, signalNumber, flags));
1612 
1613 	// The caller transferred a reference to the signal to us.
1614 	BReference<Signal> signalReference(signal, true);
1615 
1616 	if ((flags & B_CHECK_PERMISSION) != 0) {
1617 		if (!has_permission_to_signal(team))
1618 			return EPERM;
1619 	}
1620 
1621 	if (signalNumber == 0)
1622 		return B_OK;
1623 
1624 	if (team == team_get_kernel_team()) {
1625 		// signals to the kernel team are not allowed
1626 		return EPERM;
1627 	}
1628 
1629 	if (signal != NULL)
1630 		team->AddPendingSignal(signal);
1631 	else
1632 		team->AddPendingSignal(signalNumber);
1633 
1634 	// the team has the signal reference, now
1635 	signalReference.Detach();
1636 
1637 	switch (signalNumber) {
1638 		case SIGKILL:
1639 		case SIGKILLTHR:
1640 		{
1641 			// Also add a SIGKILLTHR to the main thread's signals and wake it
1642 			// up/interrupt it, so we get this over with as soon as possible
1643 			// (only the main thread shuts down the team).
1644 			Thread* mainThread = team->main_thread;
1645 			if (mainThread != NULL) {
1646 				mainThread->AddPendingSignal(SIGKILLTHR);
1647 
1648 				// wake up main thread
1649 				mainThread->going_to_suspend = false;
1650 
1651 				SpinLocker _(mainThread->scheduler_lock);
1652 				if (mainThread->state == B_THREAD_SUSPENDED)
1653 					scheduler_enqueue_in_run_queue(mainThread);
1654 				else
1655 					thread_interrupt(mainThread, true);
1656 			}
1657 			break;
1658 		}
1659 
1660 		case SIGCONT:
1661 			// Wake up any suspended threads, interrupt the others, if they
1662 			// don't block the signal.
1663 			for (Thread* thread = team->thread_list; thread != NULL;
1664 					thread = thread->team_next) {
1665 				thread->going_to_suspend = false;
1666 
1667 				SpinLocker _(thread->scheduler_lock);
1668 				if (thread->state == B_THREAD_SUSPENDED) {
1669 					scheduler_enqueue_in_run_queue(thread);
1670 				} else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask)
1671 						!= 0) {
1672 					thread_interrupt(thread, false);
1673 				}
1674 
1675 				// remove any pending stop signals
1676 				thread->RemovePendingSignals(STOP_SIGNALS);
1677 			}
1678 
1679 			// remove any pending team stop signals
1680 			team->RemovePendingSignals(STOP_SIGNALS);
1681 			break;
1682 
1683 		case SIGSTOP:
1684 		case SIGTSTP:
1685 		case SIGTTIN:
1686 		case SIGTTOU:
1687 			// send the stop signal to all threads
1688 			// TODO: Is that correct or should we only target the main thread?
1689 			for (Thread* thread = team->thread_list; thread != NULL;
1690 					thread = thread->team_next) {
1691 				thread->AddPendingSignal(signalNumber);
1692 			}
1693 
1694 			// remove the stop signal from the team again
1695 			if (signal != NULL) {
1696 				team->RemovePendingSignal(signal);
1697 				signalReference.SetTo(signal, true);
1698 			} else
1699 				team->RemovePendingSignal(signalNumber);
1700 
1701 			// fall through to interrupt threads
1702 		default:
1703 			// Interrupt all interruptibly waiting threads, if the signal is
1704 			// not masked.
1705 			for (Thread* thread = team->thread_list; thread != NULL;
1706 					thread = thread->team_next) {
1707 				sigset_t nonBlocked = ~thread->sig_block_mask
1708 					| SIGNAL_TO_MASK(SIGCHLD);
1709 				if ((thread->AllPendingSignals() & nonBlocked) != 0) {
1710 					SpinLocker _(thread->scheduler_lock);
1711 					thread_interrupt(thread, false);
1712 				}
1713 			}
1714 			break;
1715 	}
1716 
1717 	update_team_threads_signal_flag(team);
1718 
1719 	return B_OK;
1720 }
1721 
1722 
1723 /*!	Sends the given signal to the given team.
1724 
1725 	\param team The team the signal shall be sent to.
1726 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1727 		actual signal will be delivered. Only delivery checks will be performed.
1728 		The given object will be copied. The caller retains ownership.
1729 	\param flags A bitwise combination of any number of the following:
1730 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1731 			target thread the signal.
1732 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1733 			woken up, the scheduler will be invoked. If set that will not be
1734 			done explicitly, but rescheduling can still happen, e.g. when the
1735 			current thread's time slice runs out.
1736 	\return \c B_OK, when the signal was delivered successfully, another error
1737 		code otherwise.
1738 */
1739 status_t
1740 send_signal_to_team(Team* team, const Signal& signal, uint32 flags)
1741 {
1742 	// Clone the signal -- the clone will be queued. If something fails and the
1743 	// caller doesn't require queuing, we will add an unqueued signal.
1744 	Signal* signalToQueue = NULL;
1745 	status_t error = Signal::CreateQueuable(signal,
1746 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1747 	if (error != B_OK)
1748 		return error;
1749 
1750 	InterruptsSpinLocker locker(team->signal_lock);
1751 
1752 	error = send_signal_to_team_locked(team, signal.Number(), signalToQueue,
1753 			flags);
1754 
1755 	locker.Unlock();
1756 
1757 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1758 		scheduler_reschedule_if_necessary();
1759 
1760 	return error;
1761 }
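

/*	Usage sketch (compare the SIGCHLD notification for stopped children in
	handle_signals()); "stoppedTeam" and "parentTeam" are placeholders:
	\code
	Signal childSignal(SIGCHLD, CLD_STOPPED, B_OK, stoppedTeam->id);
	childSignal.SetStatus(SIGSTOP);
	send_signal_to_team(parentTeam, childSignal, 0);
	\endcode
*/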
1762 
1763 
1764 /*!	Sends the given signal to the team with the given ID.
1765 
1766 	\param teamID The ID of the team the signal shall be sent to.
1767 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1768 		actual signal will be delivered. Only delivery checks will be performed.
1769 		The given object will be copied. The caller retains ownership.
1770 	\param flags A bitwise combination of any number of the following:
1771 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1772 			target thread the signal.
1773 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1774 			woken up, the scheduler will be invoked. If set that will not be
1775 			done explicitly, but rescheduling can still happen, e.g. when the
1776 			current thread's time slice runs out.
1777 	\return \c B_OK, when the signal was delivered successfully, another error
1778 		code otherwise.
1779 */
1780 status_t
1781 send_signal_to_team_id(team_id teamID, const Signal& signal, uint32 flags)
1782 {
1783 	// get the team
1784 	Team* team = Team::Get(teamID);
1785 	if (team == NULL)
1786 		return B_BAD_TEAM_ID;
1787 	BReference<Team> teamReference(team, true);
1788 
1789 	return send_signal_to_team(team, signal, flags);
1790 }
1791 
1792 
1793 /*!	Sends the given signal to the given process group.
1794 
1795 	The caller must hold the process group's lock. Interrupts must be enabled.
1796 
1797 	\param group The process group the signal shall be sent to.
1798 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1799 		actual signal will be delivered. Only delivery checks will be performed.
1800 		The given object will be copied. The caller retains ownership.
1801 	\param flags A bitwise combination of any number of the following:
1802 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1803 			target thread the signal.
1804 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1805 			woken up, the scheduler will be invoked. If set that will not be
1806 			done explicitly, but rescheduling can still happen, e.g. when the
1807 			current thread's time slice runs out.
1808 	\return \c B_OK, when the signal was delivered successfully, another error
1809 		code otherwise.
1810 */
1811 status_t
1812 send_signal_to_process_group_locked(ProcessGroup* group, const Signal& signal,
1813 	uint32 flags)
1814 {
1815 	T(SendSignal(-group->id, signal.Number(), flags));
1816 
1817 	bool firstTeam = true;
1818 
1819 	for (Team* team = group->teams; team != NULL; team = team->group_next) {
1820 		status_t error = send_signal_to_team(team, signal,
1821 			flags | B_DO_NOT_RESCHEDULE);
1822 		// If sending to the first team in the group failed, let the whole call
1823 		// fail.
1824 		if (firstTeam) {
1825 			if (error != B_OK)
1826 				return error;
1827 			firstTeam = false;
1828 		}
1829 	}
1830 
1831 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1832 		scheduler_reschedule_if_necessary();
1833 
1834 	return B_OK;
1835 }
1836 
1837 
1838 /*!	Sends the given signal to the process group specified by the given ID.
1839 
1840 	The caller must not hold any process group, team, or thread lock. Interrupts
1841 	must be enabled.
1842 
1843 	\param groupID The ID of the process group the signal shall be sent to.
1844 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1845 		actual signal will be delivered. Only delivery checks will be performed.
1846 		The given object will be copied. The caller retains ownership.
1847 	\param flags A bitwise combination of any number of the following:
1848 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1849 			signal to the teams in the target process group.
1850 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher-priority thread has been
1851 			woken up, the scheduler will be invoked. If set, that will not be
1852 			done explicitly, but rescheduling can still happen, e.g. when the
1853 			current thread's time slice runs out.
1854 	\return \c B_OK when the signal was delivered successfully, another error
1855 		code otherwise.
1856 */
1857 status_t
1858 send_signal_to_process_group(pid_t groupID, const Signal& signal, uint32 flags)
1859 {
1860 	ProcessGroup* group = ProcessGroup::Get(groupID);
1861 	if (group == NULL)
1862 		return B_BAD_TEAM_ID;
1863 	BReference<ProcessGroup> groupReference(group);
1864 
1865 	T(SendSignal(-group->id, signal.Number(), flags));
1866 
1867 	AutoLocker<ProcessGroup> groupLocker(group);
1868 
1869 	status_t error = send_signal_to_process_group_locked(group, signal,
1870 		flags | B_DO_NOT_RESCHEDULE);
1871 	if (error != B_OK)
1872 		return error;
1873 
1874 	groupLocker.Unlock();
1875 
1876 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1877 		scheduler_reschedule_if_necessary();
1878 
1879 	return B_OK;
1880 }
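
// Illustrative sketch, not part of the original source: sending SIGHUP to an
// entire process group by ID, roughly what a TTY layer might do when the
// controlling terminal is hung up. Per the documentation above, the caller
// must not hold process group, team, or thread locks and interrupts must be
// enabled. The function name is hypothetical; the block is disabled since it
// is an example only.
#if 0
static status_t
example_hangup_process_group(pid_t groupID)
{
	Signal signal(SIGHUP, SI_USER, B_OK, team_get_current_team_id());
	return send_signal_to_process_group(groupID, signal, 0);
}
#endif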
1881 
1882 
1883 static status_t
1884 send_signal_internal(pid_t id, uint signalNumber, union sigval userValue,
1885 	uint32 flags)
1886 {
1887 	if (signalNumber > MAX_SIGNAL_NUMBER)
1888 		return B_BAD_VALUE;
1889 
1890 	Thread* thread = thread_get_current_thread();
1891 
1892 	Signal signal(signalNumber,
1893 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
1894 		B_OK, thread->team->id);
1895 		// Note: SI_USER/SI_QUEUE is not correct if called from within the
1896 		// kernel (or a driver), but we don't have any info here.
1897 	signal.SetUserValue(userValue);
1898 
1899 	// If id is > 0, send the signal to the respective thread.
1900 	if (id > 0)
1901 		return send_signal_to_thread_id(id, signal, flags);
1902 
1903 	// If id == 0, send the signal to the current thread.
1904 	if (id == 0)
1905 		return send_signal_to_thread(thread, signal, flags);
1906 
1907 	// If id == -1, send the signal to all teams the calling team has permission
1908 	// to send signals to.
1909 	if (id == -1) {
1910 		// TODO: Implement correctly!
1911 		// currently only send to the current team
1912 		return send_signal_to_team_id(thread->team->id, signal, flags);
1913 	}
1914 
1915 	// Send a signal to the specified process group (the absolute value of the
1916 	// id).
1917 	return send_signal_to_process_group(-id, signal, flags);
1918 }
1919 
1920 
1921 int
1922 send_signal_etc(pid_t id, uint signalNumber, uint32 flags)
1923 {
1924 	// a dummy user value
1925 	union sigval userValue;
1926 	userValue.sival_ptr = NULL;
1927 
1928 	return send_signal_internal(id, signalNumber, userValue, flags);
1929 }
1930 
1931 
1932 int
1933 send_signal(pid_t threadID, uint signal)
1934 {
1935 	// The BeBook claims that this function isn't exported for
1936 	// drivers, but that claim is, of course, wrong.
1937 	return send_signal_etc(threadID, signal, 0);
1938 }
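
// Illustrative sketch, not part of the original source: a driver notifying a
// userland thread via the exported send_signal_etc(), deferring the reschedule
// to a more convenient point. The names are hypothetical; the block is
// disabled since it is an example only.
#if 0
static void
example_notify_waiter(thread_id waiter)
{
	send_signal_etc(waiter, SIGUSR1, B_DO_NOT_RESCHEDULE);
}
#endif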
1939 
1940 
1941 static int
1942 sigprocmask_internal(int how, const sigset_t* set, sigset_t* oldSet)
1943 {
1944 	Thread* thread = thread_get_current_thread();
1945 
1946 	InterruptsSpinLocker _(thread->team->signal_lock);
1947 
1948 	sigset_t oldMask = thread->sig_block_mask;
1949 
1950 	if (set != NULL) {
1951 		T(SigProcMask(how, *set));
1952 
1953 		switch (how) {
1954 			case SIG_BLOCK:
1955 				thread->sig_block_mask |= *set & BLOCKABLE_SIGNALS;
1956 				break;
1957 			case SIG_UNBLOCK:
1958 				thread->sig_block_mask &= ~*set;
1959 				break;
1960 			case SIG_SETMASK:
1961 				thread->sig_block_mask = *set & BLOCKABLE_SIGNALS;
1962 				break;
1963 			default:
1964 				return B_BAD_VALUE;
1965 		}
1966 
1967 		update_current_thread_signals_flag();
1968 	}
1969 
1970 	if (oldSet != NULL)
1971 		*oldSet = oldMask;
1972 
1973 	return B_OK;
1974 }
1975 
1976 
1977 int
1978 sigprocmask(int how, const sigset_t* set, sigset_t* oldSet)
1979 {
1980 	RETURN_AND_SET_ERRNO(sigprocmask_internal(how, set, oldSet));
1981 }
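
// Illustrative sketch, not part of the original source: temporarily blocking
// SIGUSR1 for the current thread around a critical section, using the
// in-kernel sigprocmask() above. The function name is hypothetical; the block
// is disabled since it is an example only.
#if 0
static void
example_block_sigusr1_temporarily(void)
{
	sigset_t blockSet = SIGNAL_TO_MASK(SIGUSR1);
	sigset_t oldSet;
	sigprocmask(SIG_BLOCK, &blockSet, &oldSet);

	// ... work that should not be interrupted by SIGUSR1 ...

	sigprocmask(SIG_SETMASK, &oldSet, NULL);
}
#endif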
1982 
1983 
1984 /*!	\brief Like sigaction(), but returning the error instead of setting errno.
1985 */
1986 static status_t
1987 sigaction_internal(int signal, const struct sigaction* act,
1988 	struct sigaction* oldAction)
1989 {
1990 	if (signal < 1 || signal > MAX_SIGNAL_NUMBER
1991 		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
1992 		return B_BAD_VALUE;
1993 
1994 	// get and lock the team
1995 	Team* team = thread_get_current_thread()->team;
1996 	TeamLocker teamLocker(team);
1997 
1998 	struct sigaction& teamHandler = team->SignalActionFor(signal);
1999 	if (oldAction) {
2000 		// save previous sigaction structure
2001 		*oldAction = teamHandler;
2002 	}
2003 
2004 	if (act) {
2005 		T(SigAction(signal, act));
2006 
2007 		// set new sigaction structure
2008 		teamHandler = *act;
2009 		teamHandler.sa_mask &= BLOCKABLE_SIGNALS;
2010 	}
2011 
2012 	// Remove any pending instances of the signal if it is now ignored, either
2013 	// explicitly (SIG_IGN) or because its default action is to ignore it.
2014 	if ((act && act->sa_handler == SIG_IGN)
2015 		|| (act && act->sa_handler == SIG_DFL
2016 			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0)) {
2017 		InterruptsSpinLocker locker(team->signal_lock);
2018 
2019 		team->RemovePendingSignal(signal);
2020 
2021 		for (Thread* thread = team->thread_list; thread != NULL;
2022 				thread = thread->team_next) {
2023 			thread->RemovePendingSignal(signal);
2024 		}
2025 	}
2026 
2027 	return B_OK;
2028 }
2029 
2030 
2031 int
2032 sigaction(int signal, const struct sigaction* act, struct sigaction* oldAction)
2033 {
2034 	RETURN_AND_SET_ERRNO(sigaction_internal(signal, act, oldAction));
2035 }
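
// Illustrative sketch, not part of the original source: setting the current
// team's action for SIGPIPE to SIG_IGN via the in-kernel sigaction() above,
// which also discards any pending SIGPIPE. The function name is hypothetical;
// the block is disabled since it is an example only.
#if 0
static void
example_ignore_sigpipe(void)
{
	struct sigaction action;
	memset(&action, 0, sizeof(action));
	action.sa_handler = SIG_IGN;
	sigaction(SIGPIPE, &action, NULL);
}
#endif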
2036 
2037 
2038 /*!	Wait for the specified signals, and return the information for the retrieved
2039 	signal in \a info.
2040 	The \c flags and \c timeout combination must either define an infinite
2041 	timeout (no timeout flags set), an absolute timeout (\c B_ABSOLUTE_TIMEOUT
2042 	set), or a relative timeout \code <= 0 \endcode (\c B_RELATIVE_TIMEOUT set).
2043 */
2044 static status_t
2045 sigwait_internal(const sigset_t* set, siginfo_t* info, uint32 flags,
2046 	bigtime_t timeout)
2047 {
2048 	// restrict mask to blockable signals
2049 	sigset_t requestedSignals = *set & BLOCKABLE_SIGNALS;
2050 
2051 	// always make the wait interruptible
2052 	flags |= B_CAN_INTERRUPT;
2053 
2054 	// check whether we are allowed to wait at all
2055 	bool canWait = (flags & B_RELATIVE_TIMEOUT) == 0 || timeout > 0;
2056 
2057 	Thread* thread = thread_get_current_thread();
2058 
2059 	InterruptsSpinLocker locker(thread->team->signal_lock);
2060 
2061 	bool timedOut = false;
2062 	status_t error = B_OK;
2063 
2064 	while (!timedOut) {
2065 		sigset_t pendingSignals = thread->AllPendingSignals();
2066 
2067 		// If a kill signal is pending, just bail out.
2068 		if ((pendingSignals & KILL_SIGNALS) != 0)
2069 			return B_INTERRUPTED;
2070 
2071 		if ((pendingSignals & requestedSignals) != 0) {
2072 			// get signal with the highest priority
2073 			Signal stackSignal;
2074 			Signal* signal = dequeue_thread_or_team_signal(thread,
2075 				requestedSignals, stackSignal);
2076 			ASSERT(signal != NULL);
2077 
2078 			SignalHandledCaller signalHandledCaller(signal);
2079 			locker.Unlock();
2080 
2081 			info->si_signo = signal->Number();
2082 			info->si_code = signal->SignalCode();
2083 			info->si_errno = signal->ErrorCode();
2084 			info->si_pid = signal->SendingProcess();
2085 			info->si_uid = signal->SendingUser();
2086 			info->si_addr = signal->Address();
2087 			info->si_status = signal->Status();
2088 			info->si_band = signal->PollBand();
2089 			info->si_value = signal->UserValue();
2090 
2091 			return B_OK;
2092 		}
2093 
2094 		if (!canWait)
2095 			return B_WOULD_BLOCK;
2096 
2097 		sigset_t blockedSignals = thread->sig_block_mask;
2098 		if ((pendingSignals & ~blockedSignals) != 0) {
2099 			// Non-blocked signals are pending -- return to let them be handled.
2100 			return B_INTERRUPTED;
2101 		}
2102 
2103 		// No signals yet. Set the signal block mask to exclude the requested
2104 		// signals and wait until we're interrupted.
2105 		thread->sig_block_mask = blockedSignals & ~requestedSignals;
2106 
2107 		while (!has_signals_pending(thread)) {
2108 			thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SIGNAL,
2109 				NULL);
2110 
2111 			locker.Unlock();
2112 
2113 			if ((flags & B_ABSOLUTE_TIMEOUT) != 0) {
2114 				error = thread_block_with_timeout(flags, timeout);
2115 				if (error == B_WOULD_BLOCK || error == B_TIMED_OUT) {
2116 					error = B_WOULD_BLOCK;
2117 						// POSIX requires EAGAIN (B_WOULD_BLOCK) on timeout
2118 					timedOut = true;
2119 
2120 					locker.Lock();
2121 					break;
2122 				}
2123 			} else
2124 				thread_block();
2125 
2126 			locker.Lock();
2127 		}
2128 
2129 		// restore the original block mask
2130 		thread->sig_block_mask = blockedSignals;
2131 
2132 		update_current_thread_signals_flag();
2133 	}
2134 
2135 	// we get here only when timed out
2136 	return error;
2137 }
2138 
2139 
2140 /*!	Replace the current signal block mask and wait until a signal arrives.
2141 	Before returning, the original signal block mask is restored.
2142 */
2143 static status_t
2144 sigsuspend_internal(const sigset_t* _mask)
2145 {
2146 	sigset_t mask = *_mask & BLOCKABLE_SIGNALS;
2147 
2148 	T(SigSuspend(mask));
2149 
2150 	Thread* thread = thread_get_current_thread();
2151 
2152 	InterruptsSpinLocker locker(thread->team->signal_lock);
2153 
2154 	// Set the new block mask and block until interrupted. We might be here
2155 	// after a syscall restart, in which case sigsuspend_original_unblocked_mask
2156 	// will still be set.
2157 	sigset_t oldMask = thread->sigsuspend_original_unblocked_mask != 0
2158 		? ~thread->sigsuspend_original_unblocked_mask : thread->sig_block_mask;
2159 	thread->sig_block_mask = mask & BLOCKABLE_SIGNALS;
2160 
2161 	update_current_thread_signals_flag();
2162 
2163 	while (!has_signals_pending(thread)) {
2164 		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
2165 			THREAD_BLOCK_TYPE_SIGNAL, NULL);
2166 
2167 		locker.Unlock();
2168 		thread_block();
2169 		locker.Lock();
2170 	}
2171 
2172 	// Set sigsuspend_original_unblocked_mask (guaranteed to be non-0 due to
2173 	// BLOCKABLE_SIGNALS). This will indicate to handle_signals() that it is
2174 	// called after a _user_sigsuspend(). It will reset the field after invoking
2175 	// a signal handler, or restart the syscall, if there wasn't anything to
2176 	// handle anymore (e.g. because another thread was faster).
2177 	thread->sigsuspend_original_unblocked_mask = ~oldMask;
2178 
2179 	T(SigSuspendDone());
2180 
2181 	// we're not supposed to actually succeed
2182 	return B_INTERRUPTED;
2183 }
2184 
2185 
2186 static status_t
2187 sigpending_internal(sigset_t* set)
2188 {
2189 	Thread* thread = thread_get_current_thread();
2190 
2191 	if (set == NULL)
2192 		return B_BAD_VALUE;
2193 
2194 	InterruptsSpinLocker locker(thread->team->signal_lock);
2195 
2196 	*set = thread->AllPendingSignals() & thread->sig_block_mask;
2197 
2198 	return B_OK;
2199 }
2200 
2201 
2202 // #pragma mark - syscalls
2203 
2204 
2205 /*!	Sends a signal to a thread, process, or process group.
2206 	\param id Specifies the ID of the target:
2207 		- \code id > 0 \endcode: If \c SIGNAL_FLAG_SEND_TO_THREAD is set, the
2208 			target is the thread with ID \a id, otherwise the team with that ID.
2209 		- \code id == 0 \endcode: If \c SIGNAL_FLAG_SEND_TO_THREAD is set, the
2210 			target is the current thread, otherwise the current team.
2211 		- \code id == -1 \endcode: The target is every team the current team has
2212 			permission to send signals to. Currently not implemented correctly.
2213 		- \code id < -1 \endcode: The target is the process group with ID
2214 			\c -id.
2215 	\param signalNumber The signal number. \c 0 to just perform checks, but not
2216 		actually send any signal.
2217 	\param userUserValue A user value to be associated with the signal. Might be
2218 		ignored unless signal queuing is forced. Can be \c NULL.
2219 	\param flags A bitwise or of any number of the following:
2220 		- \c SIGNAL_FLAG_QUEUING_REQUIRED: Signal queuing is required. Fail
2221 			instead of falling back to unqueued signals when queuing isn't
2222 			possible.
2223 		- \c SIGNAL_FLAG_SEND_TO_THREAD: Interpret the given ID as a
2224 			\c thread_id rather than a \c team_id. Ignored when the \a id is
2225 			\code < 0 \endcode -- then the target is a process group.
2226 	\return \c B_OK on success, another error code otherwise.
2227 */
2228 status_t
2229 _user_send_signal(int32 id, uint32 signalNumber,
2230 	const union sigval* userUserValue, uint32 flags)
2231 {
2232 	// restrict flags to the allowed ones and add B_CHECK_PERMISSION
2233 	flags &= SIGNAL_FLAG_QUEUING_REQUIRED | SIGNAL_FLAG_SEND_TO_THREAD;
2234 	flags |= B_CHECK_PERMISSION;
2235 
2236 	// Copy the user value from userland. If not given, use a dummy value.
2237 	union sigval userValue;
2238 	if (userUserValue != NULL) {
2239 		if (!IS_USER_ADDRESS(userUserValue)
2240 			|| user_memcpy(&userValue, userUserValue, sizeof(userValue))
2241 				!= B_OK) {
2242 			return B_BAD_ADDRESS;
2243 		}
2244 	} else
2245 		userValue.sival_ptr = NULL;
2246 
2247 	// If to be sent to a thread, delegate to send_signal_internal(). Also do
2248 	// that when id < 0, since in that case the semantics are the same.
2249 	if ((flags & SIGNAL_FLAG_SEND_TO_THREAD) != 0 || id < 0)
2250 		return send_signal_internal(id, signalNumber, userValue, flags);
2251 
2252 	// kill() semantics for id >= 0
2253 	if (signalNumber > MAX_SIGNAL_NUMBER)
2254 		return B_BAD_VALUE;
2255 
2256 	Thread* thread = thread_get_current_thread();
2257 
2258 	Signal signal(signalNumber,
2259 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
2260 		B_OK, thread->team->id);
2261 	signal.SetUserValue(userValue);
2262 
2263 	// send to current team for id == 0, otherwise to the respective team
2264 	return send_signal_to_team_id(id == 0 ? team_get_current_team_id() : id,
2265 		signal, flags);
2266 }
2267 
2268 
2269 status_t
2270 _user_set_signal_mask(int how, const sigset_t *userSet, sigset_t *userOldSet)
2271 {
2272 	sigset_t set, oldSet;
2273 	status_t status;
2274 
2275 	if ((userSet != NULL && (!IS_USER_ADDRESS(userSet)
2276 			|| user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK))
2277 		|| (userOldSet != NULL && (!IS_USER_ADDRESS(userOldSet)
2278 			|| user_memcpy(&oldSet, userOldSet, sizeof(sigset_t)) < B_OK)))
2279 		return B_BAD_ADDRESS;
2280 
2281 	status = sigprocmask_internal(how, userSet ? &set : NULL,
2282 		userOldSet ? &oldSet : NULL);
2283 
2284 	// copy old set if asked for
2285 	if (status >= B_OK && userOldSet != NULL
2286 		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
2287 		return B_BAD_ADDRESS;
2288 
2289 	return status;
2290 }
2291 
2292 
2293 status_t
2294 _user_sigaction(int signal, const struct sigaction *userAction,
2295 	struct sigaction *userOldAction)
2296 {
2297 	struct sigaction act, oact;
2298 	status_t status;
2299 
2300 	if ((userAction != NULL && (!IS_USER_ADDRESS(userAction)
2301 			|| user_memcpy(&act, userAction, sizeof(struct sigaction)) < B_OK))
2302 		|| (userOldAction != NULL && (!IS_USER_ADDRESS(userOldAction)
2303 			|| user_memcpy(&oact, userOldAction, sizeof(struct sigaction))
2304 				< B_OK)))
2305 		return B_BAD_ADDRESS;
2306 
2307 	status = sigaction_internal(signal, userAction ? &act : NULL,
2308 		userOldAction ? &oact : NULL);
2309 
2310 	// only copy the old action if a pointer has been given
2311 	if (status >= B_OK && userOldAction != NULL
2312 		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
2313 		return B_BAD_ADDRESS;
2314 
2315 	return status;
2316 }
2317 
2318 
2319 status_t
2320 _user_sigwait(const sigset_t *userSet, siginfo_t *userInfo, uint32 flags,
2321 	bigtime_t timeout)
2322 {
2323 	// copy userSet to stack
2324 	sigset_t set;
2325 	if (userSet == NULL || !IS_USER_ADDRESS(userSet)
2326 		|| user_memcpy(&set, userSet, sizeof(sigset_t)) != B_OK) {
2327 		return B_BAD_ADDRESS;
2328 	}
2329 
2330 	// userInfo is optional, but must be a user address when given
2331 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
2332 		return B_BAD_ADDRESS;
2333 
2334 	syscall_restart_handle_timeout_pre(flags, timeout);
2335 
2336 	flags |= B_CAN_INTERRUPT;
2337 
2338 	siginfo_t info;
2339 	status_t status = sigwait_internal(&set, &info, flags, timeout);
2340 	if (status == B_OK) {
2341 		// copy the info back to userland, if userInfo is non-NULL
2342 		if (userInfo != NULL)
2343 			status = user_memcpy(userInfo, &info, sizeof(info));
2344 	} else if (status == B_INTERRUPTED) {
2345 		// make sure we'll be restarted
2346 		Thread* thread = thread_get_current_thread();
2347 		atomic_or(&thread->flags, THREAD_FLAGS_ALWAYS_RESTART_SYSCALL);
2348 	}
2349 
2350 	return syscall_restart_handle_timeout_post(status, timeout);
2351 }
2352 
2353 
2354 status_t
2355 _user_sigsuspend(const sigset_t *userMask)
2356 {
2357 	sigset_t mask;
2358 
2359 	if (userMask == NULL)
2360 		return B_BAD_VALUE;
2361 	if (!IS_USER_ADDRESS(userMask)
2362 		|| user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK) {
2363 		return B_BAD_ADDRESS;
2364 	}
2365 
2366 	return sigsuspend_internal(&mask);
2367 }
2368 
2369 
2370 status_t
2371 _user_sigpending(sigset_t *userSet)
2372 {
2373 	sigset_t set;
2374 	int status;
2375 
2376 	if (userSet == NULL)
2377 		return B_BAD_VALUE;
2378 	if (!IS_USER_ADDRESS(userSet))
2379 		return B_BAD_ADDRESS;
2380 
2381 	status = sigpending_internal(&set);
2382 	if (status == B_OK
2383 		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
2384 		return B_BAD_ADDRESS;
2385 
2386 	return status;
2387 }
2388 
2389 
2390 status_t
2391 _user_set_signal_stack(const stack_t* newUserStack, stack_t* oldUserStack)
2392 {
2393 	Thread *thread = thread_get_current_thread();
2394 	struct stack_t newStack, oldStack;
2395 	bool onStack = false;
2396 
2397 	if ((newUserStack != NULL && (!IS_USER_ADDRESS(newUserStack)
2398 			|| user_memcpy(&newStack, newUserStack, sizeof(stack_t)) < B_OK))
2399 		|| (oldUserStack != NULL && (!IS_USER_ADDRESS(oldUserStack)
2400 			|| user_memcpy(&oldStack, oldUserStack, sizeof(stack_t)) < B_OK)))
2401 		return B_BAD_ADDRESS;
2402 
2403 	if (thread->signal_stack_enabled) {
2404 		// determine whether or not the user thread is currently
2405 		// on the active signal stack
2406 		onStack = arch_on_signal_stack(thread);
2407 	}
2408 
2409 	if (oldUserStack != NULL) {
2410 		oldStack.ss_sp = (void *)thread->signal_stack_base;
2411 		oldStack.ss_size = thread->signal_stack_size;
2412 		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
2413 			| (onStack ? SS_ONSTACK : 0);
2414 	}
2415 
2416 	if (newUserStack != NULL) {
2417 		// no flags other than SS_DISABLE are allowed
2418 		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
2419 			return B_BAD_VALUE;
2420 
2421 		if ((newStack.ss_flags & SS_DISABLE) == 0) {
2422 			// check if the size is valid
2423 			if (newStack.ss_size < MINSIGSTKSZ)
2424 				return B_NO_MEMORY;
2425 			if (onStack)
2426 				return B_NOT_ALLOWED;
2427 			if (!IS_USER_ADDRESS(newStack.ss_sp))
2428 				return B_BAD_VALUE;
2429 
2430 			thread->signal_stack_base = (addr_t)newStack.ss_sp;
2431 			thread->signal_stack_size = newStack.ss_size;
2432 			thread->signal_stack_enabled = true;
2433 		} else
2434 			thread->signal_stack_enabled = false;
2435 	}
2436 
2437 	// only copy the old stack info if a pointer has been given
2438 	if (oldUserStack != NULL
2439 		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
2440 		return B_BAD_ADDRESS;
2441 
2442 	return B_OK;
2443 }
2444 
2445 
2446 /*!	Restores the environment of a function that was interrupted by a signal
2447 	handler call.
2448 	This syscall is invoked when a signal handler function returns. It
2449 	deconstructs the signal handler frame and restores the stack and register
2450 	state of the function that was interrupted by a signal. The syscall is
2451 	therefore somewhat unusual, since it does not return to the calling
2452 	function, but to someplace else. In case the signal interrupted a syscall,
2453 	it will appear as if the syscall just returned. That is also why this
2454 	syscall returns an int64: it needs to return the value the interrupted
2455 	syscall returned, which is potentially 64 bits wide.
2456 
2457 	\param userSignalFrameData The signal frame data created for the signal
2458 		handler. Some data (e.g. registers) may have been modified by the
2459 		signal handler.
2460 	\return In case the signal interrupted a syscall, the return value of that
2461 		syscall. Otherwise (i.e. in case of a (hardware) interrupt/exception)
2462 		the value might need to be tailored such that after a return to userland
2463 		the restored environment is identical to the interrupted one (unless
2464 		explicitly modified). E.g. for x86 to achieve that, the return value
2465 		must contain the eax|edx values of the interrupted environment.
2466 */
2467 int64
2468 _user_restore_signal_frame(struct signal_frame_data* userSignalFrameData)
2469 {
2470 	syscall_64_bit_return_value();
2471 
2472 	Thread *thread = thread_get_current_thread();
2473 
2474 	// copy the signal frame data from userland
2475 	signal_frame_data signalFrameData;
2476 	if (userSignalFrameData == NULL || !IS_USER_ADDRESS(userSignalFrameData)
2477 		|| user_memcpy(&signalFrameData, userSignalFrameData,
2478 			sizeof(signalFrameData)) != B_OK) {
2479 		// We failed to copy the signal frame data from userland. This is a
2480 		// serious problem. Kill the thread.
2481 		dprintf("_user_restore_signal_frame(): thread %" B_PRId32 ": Failed to "
2482 			"copy signal frame data (%p) from userland. Killing thread...\n",
2483 			thread->id, userSignalFrameData);
2484 		kill_thread(thread->id);
2485 		return B_BAD_ADDRESS;
2486 	}
2487 
2488 	// restore the signal block mask
2489 	InterruptsSpinLocker locker(thread->team->signal_lock);
2490 
2491 	thread->sig_block_mask
2492 		= signalFrameData.context.uc_sigmask & BLOCKABLE_SIGNALS;
2493 	update_current_thread_signals_flag();
2494 
2495 	locker.Unlock();
2496 
2497 	// restore the syscall restart related thread flags and the syscall restart
2498 	// parameters
2499 	atomic_and(&thread->flags,
2500 		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
2501 	atomic_or(&thread->flags, signalFrameData.thread_flags
2502 		& (THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
2503 
2504 	memcpy(thread->syscall_restart.parameters,
2505 		signalFrameData.syscall_restart_parameters,
2506 		sizeof(thread->syscall_restart.parameters));
2507 
2508 	// restore the previously stored Thread::user_signal_context
2509 	thread->user_signal_context = signalFrameData.context.uc_link;
2510 	if (thread->user_signal_context != NULL
2511 		&& !IS_USER_ADDRESS(thread->user_signal_context)) {
2512 		thread->user_signal_context = NULL;
2513 	}
2514 
2515 	// let the architecture specific code restore the registers
2516 	return arch_restore_signal_frame(&signalFrameData);
2517 }
2518