xref: /haiku/src/system/kernel/signal.cpp (revision b31cb92f29fe89eaca84d173d0f70d38bf0c6a3d)
1 /*
2  * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
3  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
4  * Copyright 2011-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
5  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
6  * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
7  *
8  * Distributed under the terms of the MIT License.
9  */
10 
11 
12 /*! POSIX signals handling routines */
13 
14 
15 #include <ksignal.h>
16 
17 #include <errno.h>
18 #include <stddef.h>
19 #include <string.h>
20 
21 #include <OS.h>
22 #include <KernelExport.h>
23 
24 #include <cpu.h>
25 #include <core_dump.h>
26 #include <debug.h>
27 #include <kernel.h>
28 #include <kscheduler.h>
29 #include <sem.h>
30 #include <syscall_restart.h>
31 #include <syscall_utils.h>
32 #include <team.h>
33 #include <thread.h>
34 #include <tracing.h>
35 #include <user_debugger.h>
36 #include <user_thread.h>
37 #include <util/AutoLock.h>
38 
39 
40 //#define TRACE_SIGNAL
41 #ifdef TRACE_SIGNAL
42 #	define TRACE(x) dprintf x
43 #else
44 #	define TRACE(x) ;
45 #endif
46 
47 
48 #define BLOCKABLE_SIGNALS	\
49 	(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)	\
50 	| SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD)	\
51 	| SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD)	\
52 	| SIGNAL_TO_MASK(SIGNAL_CANCEL_THREAD)))
53 #define STOP_SIGNALS \
54 	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
55 	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
56 #define CONTINUE_SIGNALS \
57 	(SIGNAL_TO_MASK(SIGCONT) | SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD) \
58 	| SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD))
59 #define DEFAULT_IGNORE_SIGNALS \
60 	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
61 	| SIGNAL_TO_MASK(SIGCONT) \
62 	| SIGNAL_RANGE_TO_MASK(SIGNAL_REALTIME_MIN, SIGNAL_REALTIME_MAX))
63 #define NON_DEFERRABLE_SIGNALS	\
64 	(KILL_SIGNALS				\
65 	| SIGNAL_TO_MASK(SIGILL)	\
66 	| SIGNAL_TO_MASK(SIGFPE)	\
67 	| SIGNAL_TO_MASK(SIGSEGV))
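// The masks above are sigset_t bitmasks built with SIGNAL_TO_MASK() and
// SIGNAL_RANGE_TO_MASK() (defined outside this file; presumably mapping signal
// N to bit N - 1). Membership tests throughout this file thus look like
// "(signalMask & KILL_SIGNALS) == 0", i.e. neither SIGKILL nor SIGKILLTHR is
// pending.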
68 
69 
70 static const struct {
71 	const char*	name;
72 	int32		priority;
73 } kSignalInfos[__MAX_SIGNO + 1] = {
74 	{"NONE",			-1},
75 	{"HUP",				0},
76 	{"INT",				0},
77 	{"QUIT",			0},
78 	{"ILL",				0},
79 	{"CHLD",			0},
80 	{"ABRT",			0},
81 	{"PIPE",			0},
82 	{"FPE",				0},
83 	{"KILL",			100},
84 	{"STOP",			0},
85 	{"SEGV",			0},
86 	{"CONT",			0},
87 	{"TSTP",			0},
88 	{"ALRM",			0},
89 	{"TERM",			0},
90 	{"TTIN",			0},
91 	{"TTOU",			0},
92 	{"USR1",			0},
93 	{"USR2",			0},
94 	{"WINCH",			0},
95 	{"KILLTHR",			100},
96 	{"TRAP",			0},
97 	{"POLL",			0},
98 	{"PROF",			0},
99 	{"SYS",				0},
100 	{"URG",				0},
101 	{"VTALRM",			0},
102 	{"XCPU",			0},
103 	{"XFSZ",			0},
104 	{"SIGBUS",			0},
105 	{"SIGRESERVED1",	0},
106 	{"SIGRESERVED2",	0},
107 	{"SIGRT1",			8},
108 	{"SIGRT2",			7},
109 	{"SIGRT3",			6},
110 	{"SIGRT4",			5},
111 	{"SIGRT5",			4},
112 	{"SIGRT6",			3},
113 	{"SIGRT7",			2},
114 	{"SIGRT8",			1},
115 	{"invalid 41",		0},
116 	{"invalid 42",		0},
117 	{"invalid 43",		0},
118 	{"invalid 44",		0},
119 	{"invalid 45",		0},
120 	{"invalid 46",		0},
121 	{"invalid 47",		0},
122 	{"invalid 48",		0},
123 	{"invalid 49",		0},
124 	{"invalid 50",		0},
125 	{"invalid 51",		0},
126 	{"invalid 52",		0},
127 	{"invalid 53",		0},
128 	{"invalid 54",		0},
129 	{"invalid 55",		0},
130 	{"invalid 56",		0},
131 	{"invalid 57",		0},
132 	{"invalid 58",		0},
133 	{"invalid 59",		0},
134 	{"invalid 60",		0},
135 	{"invalid 61",		0},
136 	{"invalid 62",		0},
137 	{"CANCEL_THREAD",	0},
138 	{"CONTINUE_THREAD",	0}	// priority must be <= that of SIGSTOP
139 };
140 
141 
142 static inline const char*
143 signal_name(uint32 number)
144 {
145 	return number <= __MAX_SIGNO ? kSignalInfos[number].name : "invalid";
146 }
147 
148 
149 // #pragma mark - SignalHandledCaller
150 
151 
152 struct SignalHandledCaller {
153 	SignalHandledCaller(Signal* signal)
154 		:
155 		fSignal(signal)
156 	{
157 	}
158 
159 	~SignalHandledCaller()
160 	{
161 		Done();
162 	}
163 
164 	void Done()
165 	{
166 		if (fSignal != NULL) {
167 			fSignal->Handled();
168 			fSignal = NULL;
169 		}
170 	}
171 
172 private:
173 	Signal*	fSignal;
174 };
175 
176 
177 // #pragma mark - QueuedSignalsCounter
178 
179 
180 /*!	Creates a counter with the given limit.
181 	The limit defines the maximum value the counter may reach. Since the
182 	BReferenceable's reference count is used, it is assumed that the owning
183 	team holds a reference and the reference count is one greater than the
184 	counter value.
185 	\param limit The maximum allowed value the counter may have. When
186 		\code < 0 \endcode, the value is not limited.
187 */
188 QueuedSignalsCounter::QueuedSignalsCounter(int32 limit)
189 	:
190 	fLimit(limit)
191 {
192 }
193 
194 
195 /*!	Increments the counter if the limit allows it.
196 	\return \c true, if incrementing the counter succeeded, \c false otherwise.
197 */
198 bool
199 QueuedSignalsCounter::Increment()
200 {
201 	// no limit => no problem
202 	if (fLimit < 0) {
203 		AcquireReference();
204 		return true;
205 	}
206 
207 	// Increment the reference count manually, so we can check atomically. We
208 	// compare the old value against fLimit: it exceeds the number of queued
209 	// signals by one, due to the reference our (primary) owner holds.
210 	if (atomic_add(&fReferenceCount, 1) > fLimit) {
211 		ReleaseReference();
212 		return false;
213 	}
214 
215 	return true;
216 }
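// Illustration of the check above: with fLimit == 2 and two signals already
// queued, fReferenceCount is 3 (the owner's reference plus the two queued
// ones); atomic_add() returns the old value 3 > fLimit, so the new signal is
// refused and the extra reference dropped again.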
217 
218 
219 // #pragma mark - Signal
220 
221 
222 Signal::Signal()
223 	:
224 	fCounter(NULL),
225 	fPending(false)
226 {
227 }
228 
229 
230 Signal::Signal(const Signal& other)
231 	:
232 	fCounter(NULL),
233 	fNumber(other.fNumber),
234 	fSignalCode(other.fSignalCode),
235 	fErrorCode(other.fErrorCode),
236 	fSendingProcess(other.fSendingProcess),
237 	fSendingUser(other.fSendingUser),
238 	fStatus(other.fStatus),
239 	fPollBand(other.fPollBand),
240 	fAddress(other.fAddress),
241 	fUserValue(other.fUserValue),
242 	fPending(false)
243 {
244 }
245 
246 
247 Signal::Signal(uint32 number, int32 signalCode, int32 errorCode,
248 	pid_t sendingProcess)
249 	:
250 	fCounter(NULL),
251 	fNumber(number),
252 	fSignalCode(signalCode),
253 	fErrorCode(errorCode),
254 	fSendingProcess(sendingProcess),
255 	fSendingUser(getuid()),
256 	fStatus(0),
257 	fPollBand(0),
258 	fAddress(NULL),
259 	fPending(false)
260 {
261 	fUserValue.sival_ptr = NULL;
262 }
263 
264 
265 Signal::~Signal()
266 {
267 	if (fCounter != NULL)
268 		fCounter->ReleaseReference();
269 }
270 
271 
272 /*!	Creates a queuable clone of the given signal.
273 	Also enforces the current team's signal queuing limit.
274 
275 	\param signal The signal to clone.
276 	\param queuingRequired If \c true, the function will return an error code
277 		when creating the clone fails for any reason. Otherwise, the function
278 		will set \a _signalToQueue to \c NULL, but still return \c B_OK.
279 	\param _signalToQueue Return parameter. Set to the clone of the signal.
280 	\return When \c queuingRequired is \c false, always \c B_OK. Otherwise
281 		\c B_OK, when creating the signal clone succeeds, another error code,
282 		\c B_OK when creating the signal clone succeeds, or another error code
283 		when it fails.
284 /*static*/ status_t
285 Signal::CreateQueuable(const Signal& signal, bool queuingRequired,
286 	Signal*& _signalToQueue)
287 {
288 	_signalToQueue = NULL;
289 
290 	// If interrupts are disabled, we can't allocate a signal.
291 	if (!are_interrupts_enabled())
292 		return queuingRequired ? B_BAD_VALUE : B_OK;
293 
294 	// increment the queued signals counter
295 	QueuedSignalsCounter* counter
296 		= thread_get_current_thread()->team->QueuedSignalsCounter();
297 	if (!counter->Increment())
298 		return queuingRequired ? EAGAIN : B_OK;
299 
300 	// allocate the signal
301 	Signal* signalToQueue = new(std::nothrow) Signal(signal);
302 	if (signalToQueue == NULL) {
303 		counter->Decrement();
304 		return queuingRequired ? B_NO_MEMORY : B_OK;
305 	}
306 
307 	signalToQueue->fCounter = counter;
308 
309 	_signalToQueue = signalToQueue;
310 	return B_OK;
311 }
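// Typical call pattern for CreateQueuable() (cf. send_signal_to_thread()
// below); a NULL clone together with B_OK simply means an unqueued signal
// will be delivered:
//	Signal* signalToQueue = NULL;
//	status_t error = Signal::CreateQueuable(signal,
//		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);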
312 
313 void
314 Signal::SetTo(uint32 number)
315 {
316 	Team* team = thread_get_current_thread()->team;
317 
318 	fNumber = number;
319 	fSignalCode = SI_USER;
320 	fErrorCode = 0;
321 	fSendingProcess = team->id;
322 	fSendingUser = team->effective_uid;
323 	fStatus = 0;
324 	fPollBand = 0;
325 	fAddress = NULL;
326 	fUserValue.sival_ptr = NULL;
327 }
328 
329 
330 int32
331 Signal::Priority() const
332 {
333 	return kSignalInfos[fNumber].priority;
334 }
335 
336 
337 void
338 Signal::Handled()
339 {
340 	ReleaseReference();
341 }
342 
343 
344 void
345 Signal::LastReferenceReleased()
346 {
347 	if (are_interrupts_enabled())
348 		delete this;
349 	else
350 		deferred_delete(this);
351 }
352 
353 
354 // #pragma mark - PendingSignals
355 
356 
357 PendingSignals::PendingSignals()
358 	:
359 	fQueuedSignalsMask(0),
360 	fUnqueuedSignalsMask(0)
361 {
362 }
363 
364 
365 PendingSignals::~PendingSignals()
366 {
367 	Clear();
368 }
369 
370 
371 /*!	Returns the priority of the highest-priority signal contained in
372 	\a nonBlocked.
373 	\param nonBlocked The mask with the non-blocked signals.
374 	\return The priority of the highest priority non-blocked signal, or, if all
375 		signals are blocked, \c -1.
376 */
377 int32
378 PendingSignals::HighestSignalPriority(sigset_t nonBlocked) const
379 {
380 	Signal* queuedSignal;
381 	int32 unqueuedSignal;
382 	return _GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal);
383 }
384 
385 
386 void
387 PendingSignals::Clear()
388 {
389 	// release references of all queued signals
390 	while (Signal* signal = fQueuedSignals.RemoveHead())
391 		signal->Handled();
392 
393 	fQueuedSignalsMask = 0;
394 	fUnqueuedSignalsMask = 0;
395 }
396 
397 
398 /*!	Adds a signal.
399 	Takes over the reference to the signal from the caller.
400 */
401 void
402 PendingSignals::AddSignal(Signal* signal)
403 {
404 	// queue according to priority
405 	int32 priority = signal->Priority();
406 	Signal* otherSignal = NULL;
407 	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
408 			(otherSignal = it.Next()) != NULL;) {
409 		if (priority > otherSignal->Priority())
410 			break;
411 	}
412 
413 	fQueuedSignals.InsertBefore(otherSignal, signal);
414 	signal->SetPending(true);
415 
416 	fQueuedSignalsMask |= SIGNAL_TO_MASK(signal->Number());
417 }
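// Note: the loop above keeps fQueuedSignals sorted by descending priority
// (FIFO among signals of equal priority), which is what allows
// _GetHighestPrioritySignal() to simply pick the first queued signal matching
// the mask.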
418 
419 
420 void
421 PendingSignals::RemoveSignal(Signal* signal)
422 {
423 	signal->SetPending(false);
424 	fQueuedSignals.Remove(signal);
425 	_UpdateQueuedSignalMask();
426 }
427 
428 
429 void
430 PendingSignals::RemoveSignals(sigset_t mask)
431 {
432 	// remove from queued signals
433 	if ((fQueuedSignalsMask & mask) != 0) {
434 		for (SignalList::Iterator it = fQueuedSignals.GetIterator();
435 				Signal* signal = it.Next();) {
436 			// remove signal, if in mask
437 			if ((SIGNAL_TO_MASK(signal->Number()) & mask) != 0) {
438 				it.Remove();
439 				signal->SetPending(false);
440 				signal->Handled();
441 			}
442 		}
443 
444 		fQueuedSignalsMask &= ~mask;
445 	}
446 
447 	// remove from unqueued signals
448 	fUnqueuedSignalsMask &= ~mask;
449 }
450 
451 
452 /*!	Removes and returns a signal in \a nonBlocked that has the highest priority.
453 	The caller gets a reference to the returned signal, if any.
454 	\param nonBlocked The mask of non-blocked signals.
455 	\param buffer If the signal is not queued, this buffer is returned. In this
456 		case the method acquires a reference to \a buffer, so that the caller
457 		owns a reference in either case.
458 	\return The removed signal or \c NULL, if all signals are blocked.
459 */
460 Signal*
461 PendingSignals::DequeueSignal(sigset_t nonBlocked, Signal& buffer)
462 {
463 	// find the signal with the highest priority
464 	Signal* queuedSignal;
465 	int32 unqueuedSignal;
466 	if (_GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal) < 0)
467 		return NULL;
468 
469 	// if it is a queued signal, dequeue it
470 	if (queuedSignal != NULL) {
471 		fQueuedSignals.Remove(queuedSignal);
472 		queuedSignal->SetPending(false);
473 		_UpdateQueuedSignalMask();
474 		return queuedSignal;
475 	}
476 
477 	// it is unqueued -- remove from mask
478 	fUnqueuedSignalsMask &= ~SIGNAL_TO_MASK(unqueuedSignal);
479 
480 	// init buffer
481 	buffer.SetTo(unqueuedSignal);
482 	buffer.AcquireReference();
483 	return &buffer;
484 }
485 
486 
487 /*!	Returns the priority of the highest-priority signal contained in
488 	\a nonBlocked.
489 	\param nonBlocked The mask of non-blocked signals.
490 	\param _queuedSignal If the found signal is a queued signal, the variable
491 		will be set to that signal, otherwise to \c NULL.
492 	\param _unqueuedSignal If the found signal is an unqueued signal, the
493 		variable is set to that signal's number, otherwise to \c -1.
494 	\return The priority of the highest priority non-blocked signal, or, if all
495 		signals are blocked, \c -1.
496 */
497 int32
498 PendingSignals::_GetHighestPrioritySignal(sigset_t nonBlocked,
499 	Signal*& _queuedSignal, int32& _unqueuedSignal) const
500 {
501 	// check queued signals
502 	Signal* queuedSignal = NULL;
503 	int32 queuedPriority = -1;
504 
505 	if ((fQueuedSignalsMask & nonBlocked) != 0) {
506 		for (SignalList::ConstIterator it = fQueuedSignals.GetIterator();
507 				Signal* signal = it.Next();) {
508 			if ((SIGNAL_TO_MASK(signal->Number()) & nonBlocked) != 0) {
509 				queuedPriority = signal->Priority();
510 				queuedSignal = signal;
511 				break;
512 			}
513 		}
514 	}
515 
516 	// check unqueued signals
517 	int32 unqueuedSignal = -1;
518 	int32 unqueuedPriority = -1;
519 
520 	sigset_t unqueuedSignals = fUnqueuedSignalsMask & nonBlocked;
521 	if (unqueuedSignals != 0) {
522 		int32 signal = 1;
523 		while (unqueuedSignals != 0) {
524 			sigset_t mask = SIGNAL_TO_MASK(signal);
525 			if ((unqueuedSignals & mask) != 0) {
526 				int32 priority = kSignalInfos[signal].priority;
527 				if (priority > unqueuedPriority) {
528 					unqueuedSignal = signal;
529 					unqueuedPriority = priority;
530 				}
531 				unqueuedSignals &= ~mask;
532 			}
533 
534 			signal++;
535 		}
536 	}
537 
538 	// Return found queued or unqueued signal, whichever has the higher
539 	// priority.
540 	if (queuedPriority >= unqueuedPriority) {
541 		_queuedSignal = queuedSignal;
542 		_unqueuedSignal = -1;
543 		return queuedPriority;
544 	}
545 
546 	_queuedSignal = NULL;
547 	_unqueuedSignal = unqueuedSignal;
548 	return unqueuedPriority;
549 }
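// Note: on a priority tie the queued signal wins (">=" above), so a queued
// signal, which carries the full siginfo data, takes precedence over an
// unqueued one of equal priority.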
550 
551 
552 void
553 PendingSignals::_UpdateQueuedSignalMask()
554 {
555 	sigset_t mask = 0;
556 	for (SignalList::Iterator it = fQueuedSignals.GetIterator();
557 			Signal* signal = it.Next();) {
558 		mask |= SIGNAL_TO_MASK(signal->Number());
559 	}
560 
561 	fQueuedSignalsMask = mask;
562 }
563 
564 
565 // #pragma mark - signal tracing
566 
567 
568 #if SIGNAL_TRACING
569 
570 namespace SignalTracing {
571 
572 
573 class HandleSignal : public AbstractTraceEntry {
574 	public:
575 		HandleSignal(uint32 signal)
576 			:
577 			fSignal(signal)
578 		{
579 			Initialized();
580 		}
581 
582 		virtual void AddDump(TraceOutput& out)
583 		{
584 			out.Print("signal handle:  %" B_PRIu32 " (%s)", fSignal,
585 				signal_name(fSignal));
586 		}
587 
588 	private:
589 		uint32		fSignal;
590 };
591 
592 
593 class ExecuteSignalHandler : public AbstractTraceEntry {
594 	public:
595 		ExecuteSignalHandler(uint32 signal, struct sigaction* handler)
596 			:
597 			fSignal(signal),
598 			fHandler((void*)handler->sa_handler)
599 		{
600 			Initialized();
601 		}
602 
603 		virtual void AddDump(TraceOutput& out)
604 		{
605 			out.Print("signal exec handler: signal: %" B_PRIu32 " (%s), "
606 				"handler: %p", fSignal, signal_name(fSignal), fHandler);
607 		}
608 
609 	private:
610 		uint32	fSignal;
611 		void*	fHandler;
612 };
613 
614 
615 class SendSignal : public AbstractTraceEntry {
616 	public:
617 		SendSignal(pid_t target, uint32 signal, uint32 flags)
618 			:
619 			fTarget(target),
620 			fSignal(signal),
621 			fFlags(flags)
622 		{
623 			Initialized();
624 		}
625 
626 		virtual void AddDump(TraceOutput& out)
627 		{
628 			out.Print("signal send: target: %" B_PRId32 ", signal: %" B_PRIu32
629 				" (%s), flags: %#" B_PRIx32, fTarget, fSignal,
630 				signal_name(fSignal), fFlags);
631 		}
632 
633 	private:
634 		pid_t	fTarget;
635 		uint32	fSignal;
636 		uint32	fFlags;
637 };
638 
639 
640 class SigAction : public AbstractTraceEntry {
641 	public:
642 		SigAction(uint32 signal, const struct sigaction* act)
643 			:
644 			fSignal(signal),
645 			fAction(*act)
646 		{
647 			Initialized();
648 		}
649 
650 		virtual void AddDump(TraceOutput& out)
651 		{
652 			out.Print("signal action: signal: %" B_PRIu32 " (%s), "
653 				"action: {handler: %p, flags: %#x, mask: %#" B_PRIx64 "}",
654 				fSignal, signal_name(fSignal), fAction.sa_handler,
655 				fAction.sa_flags, (uint64)fAction.sa_mask);
656 		}
657 
658 	private:
659 		uint32				fSignal;
660 		struct sigaction	fAction;
661 };
662 
663 
664 class SigProcMask : public AbstractTraceEntry {
665 	public:
666 		SigProcMask(int how, sigset_t mask)
667 			:
668 			fHow(how),
669 			fMask(mask),
670 			fOldMask(thread_get_current_thread()->sig_block_mask)
671 		{
672 			Initialized();
673 		}
674 
675 		virtual void AddDump(TraceOutput& out)
676 		{
677 			const char* how = "invalid";
678 			switch (fHow) {
679 				case SIG_BLOCK:
680 					how = "block";
681 					break;
682 				case SIG_UNBLOCK:
683 					how = "unblock";
684 					break;
685 				case SIG_SETMASK:
686 					how = "set";
687 					break;
688 			}
689 
690 			out.Print("signal proc mask: %s 0x%llx, old mask: 0x%llx", how,
691 				(long long)fMask, (long long)fOldMask);
692 		}
693 
694 	private:
695 		int			fHow;
696 		sigset_t	fMask;
697 		sigset_t	fOldMask;
698 };
699 
700 
701 class SigSuspend : public AbstractTraceEntry {
702 	public:
703 		SigSuspend(sigset_t mask)
704 			:
705 			fMask(mask),
706 			fOldMask(thread_get_current_thread()->sig_block_mask)
707 		{
708 			Initialized();
709 		}
710 
711 		virtual void AddDump(TraceOutput& out)
712 		{
713 			out.Print("signal suspend: %#llx, old mask: %#llx",
714 				(long long)fMask, (long long)fOldMask);
715 		}
716 
717 	private:
718 		sigset_t	fMask;
719 		sigset_t	fOldMask;
720 };
721 
722 
723 class SigSuspendDone : public AbstractTraceEntry {
724 	public:
725 		SigSuspendDone()
726 			:
727 			fSignals(thread_get_current_thread()->ThreadPendingSignals())
728 		{
729 			Initialized();
730 		}
731 
732 		virtual void AddDump(TraceOutput& out)
733 		{
734 			out.Print("signal suspend done: %#" B_PRIx32, fSignals);
735 		}
736 
737 	private:
738 		uint32		fSignals;
739 };
740 
741 }	// namespace SignalTracing
742 
743 #	define T(x)	new(std::nothrow) SignalTracing::x
744 
745 #else
746 #	define T(x)
747 #endif	// SIGNAL_TRACING
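// SIGNAL_TRACING is presumably enabled via the kernel tracing configuration
// (cf. <tracing.h>); with it enabled the T() entries above end up in the
// kernel tracing buffer.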
748 
749 
750 // #pragma mark -
751 
752 
753 /*!	Updates the given thread's Thread::flags field according to what signals are
754 	pending.
755 	The caller must hold \c team->signal_lock.
756 */
757 static void
758 update_thread_signals_flag(Thread* thread)
759 {
760 	sigset_t mask = ~thread->sig_block_mask;
761 	if ((thread->AllPendingSignals() & mask) != 0)
762 		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
763 	else
764 		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
765 }
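// THREAD_FLAGS_SIGNALS_PENDING is only a cached summary of the pending/blocked
// masks; the syscall/interrupt return path (outside this file) presumably
// checks it and invokes handle_signals() when it is set.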
766 
767 
768 /*!	Updates the current thread's Thread::flags field according to what signals
769 	are pending.
770 	The caller must hold \c team->signal_lock.
771 */
772 static void
773 update_current_thread_signals_flag()
774 {
775 	update_thread_signals_flag(thread_get_current_thread());
776 }
777 
778 
779 /*!	Updates all of the given team's threads' Thread::flags fields according to
780 	what signals are pending.
781 	The caller must hold \c signal_lock.
782 */
783 static void
784 update_team_threads_signal_flag(Team* team)
785 {
786 	for (Thread* thread = team->thread_list; thread != NULL;
787 			thread = thread->team_next) {
788 		update_thread_signals_flag(thread);
789 	}
790 }
791 
792 
793 /*!	Notifies the user debugger about a signal to be handled.
794 
795 	The caller must not hold any locks.
796 
797 	\param thread The current thread.
798 	\param signal The signal to be handled.
799 	\param handler The installed signal handler for the signal.
800 	\param deadly Indicates whether the signal is deadly.
801 	\return \c true, if the signal shall be handled, \c false, if it shall be
802 		ignored.
803 */
804 static bool
805 notify_debugger(Thread* thread, Signal* signal, struct sigaction& handler,
806 	bool deadly)
807 {
808 	uint64 signalMask = SIGNAL_TO_MASK(signal->Number());
809 
810 	// first check the ignore signal masks the debugger specified for the thread
811 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
812 
813 	if ((thread->debug_info.ignore_signals_once & signalMask) != 0) {
814 		thread->debug_info.ignore_signals_once &= ~signalMask;
815 		return true;
816 	}
817 
818 	if ((thread->debug_info.ignore_signals & signalMask) != 0)
819 		return true;
820 
821 	threadDebugInfoLocker.Unlock();
822 
823 	// deliver the event
824 	return user_debug_handle_signal(signal->Number(), &handler, deadly);
825 }
826 
827 
828 /*!	Removes and returns a signal with the highest priority in \a nonBlocked that
829 	is pending in the given thread or its team.
830 	After dequeuing the signal, the Thread::flags fields of the affected threads
831 	are updated.
832 	The caller gets a reference to the returned signal, if any.
833 	The caller must hold \c team->signal_lock.
834 	\param thread The thread.
835 	\param nonBlocked The mask of non-blocked signals.
836 	\param buffer If the signal is not queued, this buffer is returned. In this
837 		case the function acquires a reference to \a buffer, so that the caller
838 		owns a reference in either case.
839 	\return The removed signal or \c NULL, if all signals are blocked.
840 */
841 static Signal*
842 dequeue_thread_or_team_signal(Thread* thread, sigset_t nonBlocked,
843 	Signal& buffer)
844 {
845 	Team* team = thread->team;
846 	Signal* signal;
847 	if (team->HighestPendingSignalPriority(nonBlocked)
848 			> thread->HighestPendingSignalPriority(nonBlocked)) {
849 		signal = team->DequeuePendingSignal(nonBlocked, buffer);
850 		update_team_threads_signal_flag(team);
851 	} else {
852 		signal = thread->DequeuePendingSignal(nonBlocked, buffer);
853 		update_thread_signals_flag(thread);
854 	}
855 
856 	return signal;
857 }
858 
859 
860 static status_t
861 setup_signal_frame(Thread* thread, struct sigaction* action, Signal* signal,
862 	sigset_t signalMask)
863 {
864 	// prepare the data we need to copy onto the user stack
865 	signal_frame_data frameData;
866 
867 	// signal info
868 	frameData.info.si_signo = signal->Number();
869 	frameData.info.si_code = signal->SignalCode();
870 	frameData.info.si_errno = signal->ErrorCode();
871 	frameData.info.si_pid = signal->SendingProcess();
872 	frameData.info.si_uid = signal->SendingUser();
873 	frameData.info.si_addr = signal->Address();
874 	frameData.info.si_status = signal->Status();
875 	frameData.info.si_band = signal->PollBand();
876 	frameData.info.si_value = signal->UserValue();
877 
878 	// context
879 	frameData.context.uc_link = thread->user_signal_context;
880 	frameData.context.uc_sigmask = signalMask;
881 	// uc_stack and uc_mcontext are filled in by the architecture specific code.
882 
883 	// user data
884 	frameData.user_data = action->sa_userdata;
885 
886 	// handler function
887 	frameData.siginfo_handler = (action->sa_flags & SA_SIGINFO) != 0;
888 	frameData.handler = frameData.siginfo_handler
889 		? (void*)action->sa_sigaction : (void*)action->sa_handler;
890 
891 	// thread flags -- save and clear the thread's syscall restart related
892 	// flags
893 	frameData.thread_flags = atomic_and(&thread->flags,
894 		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
895 
896 	// syscall restart related fields
897 	memcpy(frameData.syscall_restart_parameters,
898 		thread->syscall_restart.parameters,
899 		sizeof(frameData.syscall_restart_parameters));
900 
901 	// commpage address
902 	frameData.commpage_address = thread->team->commpage_address;
903 
904 	// syscall_restart_return_value is filled in by the architecture specific
905 	// code.
906 
907 	return arch_setup_signal_frame(thread, action, &frameData);
908 }
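// arch_setup_signal_frame() (architecture specific, not in this file) is
// expected to copy frameData onto the chosen user stack, fill in uc_stack,
// uc_mcontext and syscall_restart_return_value, and arrange for the thread to
// enter the handler when it returns to userland.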
909 
910 
911 /*! Actually handles pending signals -- i.e. the thread will exit, a custom
912 	signal handler is prepared, or whatever the signal demands.
913 	The function will not return when a deadly signal is encountered. The
914 	function will suspend the thread indefinitely when a stop signal is
915 	encountered.
916 	Interrupts must be enabled.
917 	\param thread The current thread.
918 */
919 void
920 handle_signals(Thread* thread)
921 {
922 	Team* team = thread->team;
923 
924 	TeamLocker teamLocker(team);
925 	InterruptsSpinLocker locker(thread->team->signal_lock);
926 
927 	// If userland requested to defer signals, we check now whether this is
928 	// possible.
929 	sigset_t nonBlockedMask = ~thread->sig_block_mask;
930 	sigset_t signalMask = thread->AllPendingSignals() & nonBlockedMask;
931 
932 	set_ac();
933 	if (thread->user_thread->defer_signals > 0
934 		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0
935 		&& thread->sigsuspend_original_unblocked_mask == 0) {
936 		thread->user_thread->pending_signals = signalMask;
937 		clear_ac();
938 		return;
939 	}
940 
941 	thread->user_thread->pending_signals = 0;
942 	clear_ac();
943 
944 	// determine syscall restart behavior
945 	uint32 restartFlags = atomic_and(&thread->flags,
946 		~THREAD_FLAGS_DONT_RESTART_SYSCALL);
947 	bool alwaysRestart
948 		= (restartFlags & THREAD_FLAGS_ALWAYS_RESTART_SYSCALL) != 0;
949 	bool restart = alwaysRestart
950 		|| (restartFlags & THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;
951 
952 	// Loop until we've handled all signals.
953 	bool initialIteration = true;
954 	while (true) {
955 		if (initialIteration) {
956 			initialIteration = false;
957 		} else {
958 			teamLocker.Lock();
959 			locker.Lock();
960 
961 			signalMask = thread->AllPendingSignals() & nonBlockedMask;
962 		}
963 
964 		// Unless SIGKILL[THR] are pending, check whether the thread shall stop for
965 		// a core dump or for debugging.
966 		if ((signalMask & KILL_SIGNALS) == 0) {
967 			if ((atomic_get(&thread->flags) & THREAD_FLAGS_TRAP_FOR_CORE_DUMP)
968 					!= 0) {
969 				locker.Unlock();
970 				teamLocker.Unlock();
971 
972 				core_dump_trap_thread();
973 				continue;
974 			}
975 
976 			if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP)
977 					!= 0) {
978 				locker.Unlock();
979 				teamLocker.Unlock();
980 
981 				user_debug_stop_thread();
982 				continue;
983 			}
984 		}
985 
986 		// We're done, if there aren't any pending signals anymore.
987 		if ((signalMask & nonBlockedMask) == 0)
988 			break;
989 
990 		// get pending non-blocked thread or team signal with the highest
991 		// priority
992 		Signal stackSignal;
993 		Signal* signal = dequeue_thread_or_team_signal(thread, nonBlockedMask,
994 			stackSignal);
995 		ASSERT(signal != NULL);
996 		SignalHandledCaller signalHandledCaller(signal);
997 
998 		locker.Unlock();
999 
1000 		// get the action for the signal
1001 		struct sigaction handler;
1002 		if (signal->Number() <= MAX_SIGNAL_NUMBER) {
1003 			handler = team->SignalActionFor(signal->Number());
1004 		} else {
1005 			handler.sa_handler = SIG_DFL;
1006 			handler.sa_flags = 0;
1007 		}
1008 
1009 		if ((handler.sa_flags & SA_ONESHOT) != 0
1010 			&& handler.sa_handler != SIG_IGN && handler.sa_handler != SIG_DFL) {
1011 			team->SignalActionFor(signal->Number()).sa_handler = SIG_DFL;
1012 		}
1013 
1014 		T(HandleSignal(signal->Number()));
1015 
1016 		teamLocker.Unlock();
1017 
1018 		// debug the signal, if a debugger is installed and the signal debugging
1019 		// flag is set
1020 		bool debugSignal = (~atomic_get(&team->debug_info.flags)
1021 				& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS))
1022 			== 0;
1023 
1024 		// handle the signal
1025 		TRACE(("Thread %" B_PRId32 " received signal %s\n", thread->id,
1026 			kSignalInfos[signal->Number()].name));
1027 
1028 		if (handler.sa_handler == SIG_IGN) {
1029 			// signal is to be ignored
1030 			// TODO: apply zombie cleaning on SIGCHLD
1031 
1032 			// notify the debugger
1033 			if (debugSignal)
1034 				notify_debugger(thread, signal, handler, false);
1035 			continue;
1036 		} else if (handler.sa_handler == SIG_DFL) {
1037 			// default signal behaviour
1038 
1039 			// realtime signals are ignored by default
1040 			if (signal->Number() >= SIGNAL_REALTIME_MIN
1041 				&& signal->Number() <= SIGNAL_REALTIME_MAX) {
1042 				// notify the debugger
1043 				if (debugSignal)
1044 					notify_debugger(thread, signal, handler, false);
1045 				continue;
1046 			}
1047 
1048 			bool killTeam = false;
1049 			switch (signal->Number()) {
1050 				case SIGCHLD:
1051 				case SIGWINCH:
1052 				case SIGURG:
1053 					// notify the debugger
1054 					if (debugSignal)
1055 						notify_debugger(thread, signal, handler, false);
1056 					continue;
1057 
1058 				case SIGNAL_DEBUG_THREAD:
1059 					// ignore -- used together with B_THREAD_DEBUG_STOP, which
1060 					// is handled above
1061 					continue;
1062 
1063 				case SIGNAL_CANCEL_THREAD:
1064 					// set up the signal handler
1065 					handler.sa_handler = thread->cancel_function;
1066 					handler.sa_flags = 0;
1067 					handler.sa_mask = 0;
1068 					handler.sa_userdata = NULL;
1069 
1070 					restart = false;
1071 						// we always want to interrupt
1072 					break;
1073 
1074 				case SIGNAL_CONTINUE_THREAD:
1075 					// prevent syscall restart, but otherwise ignore
1076 					restart = false;
1077 					atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1078 					continue;
1079 
1080 				case SIGCONT:
1081 					// notify the debugger
1082 					if (debugSignal
1083 						&& !notify_debugger(thread, signal, handler, false))
1084 						continue;
1085 
1086 					// notify threads waiting for team state changes
1087 					if (thread == team->main_thread) {
1088 						team->LockTeamAndParent(false);
1089 
1090 						team_set_job_control_state(team,
1091 							JOB_CONTROL_STATE_CONTINUED, signal);
1092 
1093 						team->UnlockTeamAndParent();
1094 
1095 						// The standard states that the system *may* send a
1096 						// SIGCHLD when a child is continued. I haven't found
1097 						// a good reason why we would want to, though.
1098 					}
1099 					continue;
1100 
1101 				case SIGSTOP:
1102 				case SIGTSTP:
1103 				case SIGTTIN:
1104 				case SIGTTOU:
1105 				{
1106 					// notify the debugger
1107 					if (debugSignal
1108 						&& !notify_debugger(thread, signal, handler, false))
1109 						continue;
1110 
1111 					// The terminal-sent stop signals are allowed to stop the
1112 					// process only if it doesn't belong to an orphaned process
1113 					// group. Otherwise the signal must be discarded.
1114 					team->LockProcessGroup();
1115 					AutoLocker<ProcessGroup> groupLocker(team->group, true);
1116 					if (signal->Number() != SIGSTOP
1117 						&& team->group->IsOrphaned()) {
1118 						continue;
1119 					}
1120 
1121 					// notify threads waiting for team state changes
1122 					if (thread == team->main_thread) {
1123 						team->LockTeamAndParent(false);
1124 
1125 						team_set_job_control_state(team,
1126 							JOB_CONTROL_STATE_STOPPED, signal);
1127 
1128 						// send a SIGCHLD to the parent (unless it has
1129 						// SA_NOCLDSTOP set)
1130 						Team* parentTeam = team->parent;
1131 
1132 						struct sigaction& parentHandler
1133 							= parentTeam->SignalActionFor(SIGCHLD);
1134 						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
1135 							Signal childSignal(SIGCHLD, CLD_STOPPED, B_OK,
1136 								team->id);
1137 							childSignal.SetStatus(signal->Number());
1138 							childSignal.SetSendingUser(signal->SendingUser());
1139 							send_signal_to_team(parentTeam, childSignal, 0);
1140 						}
1141 
1142 						team->UnlockTeamAndParent();
1143 					}
1144 
1145 					groupLocker.Unlock();
1146 
1147 					// Suspend the thread, unless there's already a signal to
1148 					// continue or kill pending.
1149 					locker.Lock();
1150 					bool resume = (thread->AllPendingSignals()
1151 								& (CONTINUE_SIGNALS | KILL_SIGNALS)) != 0;
1152 					locker.Unlock();
1153 
1154 					if (!resume)
1155 						thread_suspend();
1156 
1157 					continue;
1158 				}
1159 
1160 				case SIGSEGV:
1161 				case SIGBUS:
1162 				case SIGFPE:
1163 				case SIGILL:
1164 				case SIGTRAP:
1165 				case SIGABRT:
1166 				case SIGKILL:
1167 				case SIGQUIT:
1168 				case SIGPOLL:
1169 				case SIGPROF:
1170 				case SIGSYS:
1171 				case SIGVTALRM:
1172 				case SIGXCPU:
1173 				case SIGXFSZ:
1174 				default:
1175 					TRACE(("Shutting down team %" B_PRId32 " due to signal %"
1176 						B_PRIu32 " received in thread %" B_PRIu32 " \n",
1177 						team->id, signal->Number(), thread->id));
1178 
1179 					// This signal kills the team regardless of which thread
1180 					// received it.
1181 					killTeam = true;
1182 
1183 					// fall through
1184 				case SIGKILLTHR:
1185 					// notify the debugger
1186 					if (debugSignal && signal->Number() != SIGKILL
1187 						&& signal->Number() != SIGKILLTHR
1188 						&& !notify_debugger(thread, signal, handler, true)) {
1189 						continue;
1190 					}
1191 
1192 					if (killTeam || thread == team->main_thread) {
1193 						// The signal is terminal for the team or the thread is
1194 						// the main thread. In either case the team is going
1195 						// down. Set its exit status, if that hasn't happened yet.
1196 						teamLocker.Lock();
1197 
1198 						if (!team->exit.initialized) {
1199 							team->exit.reason = CLD_KILLED;
1200 							team->exit.signal = signal->Number();
1201 							team->exit.signaling_user = signal->SendingUser();
1202 							team->exit.status = 0;
1203 							team->exit.initialized = true;
1204 						}
1205 
1206 						teamLocker.Unlock();
1207 
1208 						// If this is not the main thread, send the main thread a
1209 						// SIGKILLTHR so that the team terminates.
1210 						if (thread != team->main_thread) {
1211 							Signal childSignal(SIGKILLTHR, SI_USER, B_OK,
1212 								team->id);
1213 							send_signal_to_thread_id(team->id, childSignal, 0);
1214 						}
1215 					}
1216 
1217 					// explicitly get rid of the signal reference, since
1218 					// thread_exit() won't return
1219 					signalHandledCaller.Done();
1220 
1221 					thread_exit();
1222 						// won't return
1223 			}
1224 		}
1225 
1226 		// User defined signal handler
1227 
1228 		// notify the debugger
1229 		if (debugSignal && !notify_debugger(thread, signal, handler, false))
1230 			continue;
1231 
1232 		if (!restart
1233 				|| (!alwaysRestart && (handler.sa_flags & SA_RESTART) == 0)) {
1234 			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1235 		}
1236 
1237 		T(ExecuteSignalHandler(signal->Number(), &handler));
1238 
1239 		TRACE(("### Setting up custom signal handler frame...\n"));
1240 
1241 		// save the old block mask -- we may need to adjust it for the handler
1242 		locker.Lock();
1243 
1244 		sigset_t oldBlockMask = thread->sigsuspend_original_unblocked_mask != 0
1245 			? ~thread->sigsuspend_original_unblocked_mask
1246 			: thread->sig_block_mask;
1247 
1248 		// Update the block mask while the signal handler is running -- it
1249 		// will be automatically restored when the signal frame is left.
1250 		thread->sig_block_mask |= handler.sa_mask & BLOCKABLE_SIGNALS;
1251 
1252 		if ((handler.sa_flags & SA_NOMASK) == 0) {
1253 			thread->sig_block_mask
1254 				|= SIGNAL_TO_MASK(signal->Number()) & BLOCKABLE_SIGNALS;
1255 		}
1256 
1257 		update_current_thread_signals_flag();
1258 
1259 		locker.Unlock();
1260 
1261 		setup_signal_frame(thread, &handler, signal, oldBlockMask);
1262 
1263 		// Reset sigsuspend_original_unblocked_mask. It would have been set by
1264 		// sigsuspend_internal(). In that case, above we set oldBlockMask
1265 		// accordingly so that after the handler returns the thread's signal
1266 		// mask is reset.
1267 		thread->sigsuspend_original_unblocked_mask = 0;
1268 
1269 		return;
1270 	}
1271 
1272 	// We have not handled any signal (or have only ignored them).
1273 
1274 	// If sigsuspend_original_unblocked_mask is non-zero, we came from a
1275 	// sigsuspend_internal(). Not having handled any signal, we should restart
1276 	// the syscall.
1277 	if (thread->sigsuspend_original_unblocked_mask != 0) {
1278 		restart = true;
1279 		atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
1280 	} else if (!restart) {
1281 		// clear syscall restart thread flag, if we're not supposed to restart
1282 		// the syscall
1283 		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1284 	}
1285 }
1286 
1287 
1288 /*!	Checks whether the given signal is blocked for the given team (i.e. all of
1289 	its threads).
1290 	The caller must hold the team's lock and \c signal_lock.
1291 */
1292 bool
1293 is_team_signal_blocked(Team* team, int signal)
1294 {
1295 	sigset_t mask = SIGNAL_TO_MASK(signal);
1296 
1297 	for (Thread* thread = team->thread_list; thread != NULL;
1298 			thread = thread->team_next) {
1299 		if ((thread->sig_block_mask & mask) == 0)
1300 			return false;
1301 	}
1302 
1303 	return true;
1304 }
1305 
1306 
1307 /*!	Gets (guesses) the current thread's currently used stack from the given
1308 	stack pointer.
1309 	Fills in \a stack with either the signal stack or the thread's user stack.
1310 	\param address A stack pointer address to be used to determine the used
1311 		stack.
1312 	\param stack Filled in by the function.
1313 */
1314 void
1315 signal_get_user_stack(addr_t address, stack_t* stack)
1316 {
1317 	// If a signal stack is enabled for the thread and the address is within it,
1318 	// return the signal stack. In all other cases return the thread's user
1319 	// stack, even if the address doesn't lie within it.
1320 	Thread* thread = thread_get_current_thread();
1321 	if (thread->signal_stack_enabled && address >= thread->signal_stack_base
1322 		&& address < thread->signal_stack_base + thread->signal_stack_size) {
1323 		stack->ss_sp = (void*)thread->signal_stack_base;
1324 		stack->ss_size = thread->signal_stack_size;
1325 	} else {
1326 		stack->ss_sp = (void*)thread->user_stack_base;
1327 		stack->ss_size = thread->user_stack_size;
1328 	}
1329 
1330 	stack->ss_flags = 0;
1331 }
1332 
1333 
1334 /*!	Checks whether any non-blocked signal is pending for the current thread.
1335 	The caller must hold \c team->signal_lock.
1336 	\param thread The current thread.
1337 */
1338 static bool
1339 has_signals_pending(Thread* thread)
1340 {
1341 	return (thread->AllPendingSignals() & ~thread->sig_block_mask) != 0;
1342 }
1343 
1344 
1345 /*!	Checks whether the current user has permission to send a signal to the given
1346 	target team.
1347 
1348 	\param team The target team.
1349 */
1350 static bool
1351 has_permission_to_signal(Team* team)
1352 {
1353 	// get the current user
1354 	uid_t currentUser = thread_get_current_thread()->team->effective_uid;
1355 
1356 	// root is omnipotent -- in the other cases the current user must match the
1357 	// target team's
1358 	return currentUser == 0 || currentUser == team->effective_uid;
1359 }
1360 
1361 
1362 /*!	Delivers a signal to the \a thread, but doesn't handle the signal -- it just
1363 	makes sure the thread gets the signal, i.e. unblocks it if needed.
1364 
1365 	The caller must hold \c team->signal_lock.
1366 
1367 	\param thread The thread the signal shall be delivered to.
1368 	\param signalNumber The number of the signal to be delivered. If \c 0, no
1369 		actual signal will be delivered. Only delivery checks will be performed.
1370 	\param signal If non-NULL the signal to be queued (has number
1371 		\a signalNumber in this case). The caller transfers an object reference
1372 		to this function. If \c NULL an unqueued signal will be delivered to the
1373 		thread.
1374 	\param flags A bitwise combination of any number of the following:
1375 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1376 			target thread the signal.
1377 	\return \c B_OK, when the signal was delivered successfully, another error
1378 		code otherwise.
1379 */
1380 status_t
1381 send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
1382 	Signal* signal, uint32 flags)
1383 {
1384 	ASSERT(signal == NULL || signalNumber == signal->Number());
1385 
1386 	T(SendSignal(thread->id, signalNumber, flags));
1387 
1388 	// The caller transferred a reference to the signal to us.
1389 	BReference<Signal> signalReference(signal, true);
1390 
1391 	if ((flags & B_CHECK_PERMISSION) != 0) {
1392 		if (!has_permission_to_signal(thread->team))
1393 			return EPERM;
1394 	}
1395 
1396 	if (signalNumber == 0)
1397 		return B_OK;
1398 
1399 	if (thread->team == team_get_kernel_team()) {
1400 		// Signals to kernel threads will only wake them up
1401 		thread_continue(thread);
1402 		return B_OK;
1403 	}
1404 
1405 	if (signal != NULL)
1406 		thread->AddPendingSignal(signal);
1407 	else
1408 		thread->AddPendingSignal(signalNumber);
1409 
1410 	// the thread has the signal reference, now
1411 	signalReference.Detach();
1412 
1413 	switch (signalNumber) {
1414 		case SIGKILL:
1415 		{
1416 			// If sent to a thread other than the team's main thread, also send
1417 			// a SIGKILLTHR to the main thread to kill the team.
1418 			Thread* mainThread = thread->team->main_thread;
1419 			if (mainThread != NULL && mainThread != thread) {
1420 				mainThread->AddPendingSignal(SIGKILLTHR);
1421 
1422 				// wake up main thread
1423 				mainThread->going_to_suspend = false;
1424 
1425 				SpinLocker locker(mainThread->scheduler_lock);
1426 				if (mainThread->state == B_THREAD_SUSPENDED)
1427 					scheduler_enqueue_in_run_queue(mainThread);
1428 				else
1429 					thread_interrupt(mainThread, true);
1430 				locker.Unlock();
1431 
1432 				update_thread_signals_flag(mainThread);
1433 			}
1434 
1435 			// supposed to fall through
1436 		}
1437 		case SIGKILLTHR:
1438 		{
1439 			// Wake up the thread if it was suspended, otherwise interrupt it
1440 			thread->going_to_suspend = false;
1441 
1442 			SpinLocker locker(thread->scheduler_lock);
1443 			if (thread->state == B_THREAD_SUSPENDED)
1444 				scheduler_enqueue_in_run_queue(thread);
1445 			else
1446 				thread_interrupt(thread, true);
1447 
1448 			break;
1449 		}
1450 		case SIGNAL_DEBUG_THREAD:
1451 		{
1452 			// Wake up thread if it was suspended, otherwise interrupt it.
1453 			thread->going_to_suspend = false;
1454 
1455 			SpinLocker locker(thread->scheduler_lock);
1456 			if (thread->state == B_THREAD_SUSPENDED)
1457 				scheduler_enqueue_in_run_queue(thread);
1458 			else
1459 				thread_interrupt(thread, false);
1460 
1461 			break;
1462 		}
1463 		case SIGNAL_CONTINUE_THREAD:
1464 		{
1465 			// wake up thread, and interrupt its current syscall
1466 			thread->going_to_suspend = false;
1467 
1468 			SpinLocker locker(thread->scheduler_lock);
1469 			if (thread->state == B_THREAD_SUSPENDED)
1470 				scheduler_enqueue_in_run_queue(thread);
1471 
1472 			atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
1473 			break;
1474 		}
1475 		case SIGCONT:
1476 		{
1477 			// Wake up thread if it was suspended, otherwise interrupt it, if
1478 			// the signal isn't blocked.
1479 			thread->going_to_suspend = false;
1480 
1481 			SpinLocker locker(thread->scheduler_lock);
1482 			if (thread->state == B_THREAD_SUSPENDED)
1483 				scheduler_enqueue_in_run_queue(thread);
1484 			else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask) != 0)
1485 				thread_interrupt(thread, false);
1486 
1487 			// remove any pending stop signals
1488 			thread->RemovePendingSignals(STOP_SIGNALS);
1489 			break;
1490 		}
1491 		default:
1492 			// If the signal is not masked, interrupt the thread if it is
1493 			// currently waiting (interruptibly).
1494 			if ((thread->AllPendingSignals()
1495 						& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD)))
1496 					!= 0) {
1497 				// Interrupt thread if it was waiting
1498 				SpinLocker locker(thread->scheduler_lock);
1499 				thread_interrupt(thread, false);
1500 			}
1501 			break;
1502 	}
1503 
1504 	update_thread_signals_flag(thread);
1505 
1506 	return B_OK;
1507 }
1508 
1509 
1510 /*!	Sends the given signal to the given thread.
1511 
1512 	\param thread The thread the signal shall be sent to.
1513 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1514 		actual signal will be delivered. Only delivery checks will be performed.
1515 		The given object will be copied. The caller retains ownership.
1516 	\param flags A bitwise combination of any number of the following:
1517 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1518 			target thread the signal.
1519 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1520 			woken up, the scheduler will be invoked. If set that will not be
1521 			done explicitly, but rescheduling can still happen, e.g. when the
1522 			current thread's time slice runs out.
1523 	\return \c B_OK, when the signal was delivered successfully, another error
1524 		code otherwise.
1525 */
1526 status_t
1527 send_signal_to_thread(Thread* thread, const Signal& signal, uint32 flags)
1528 {
1529 	// Clone the signal -- the clone will be queued. If something fails and the
1530 	// caller doesn't require queuing, we will add an unqueued signal.
1531 	Signal* signalToQueue = NULL;
1532 	status_t error = Signal::CreateQueuable(signal,
1533 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1534 	if (error != B_OK)
1535 		return error;
1536 
1537 	InterruptsReadSpinLocker teamLocker(thread->team_lock);
1538 	SpinLocker locker(thread->team->signal_lock);
1539 
1540 	error = send_signal_to_thread_locked(thread, signal.Number(), signalToQueue,
1541 		flags);
1542 	if (error != B_OK)
1543 		return error;
1544 
1545 	locker.Unlock();
1546 	teamLocker.Unlock();
1547 
1548 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1549 		scheduler_reschedule_if_necessary();
1550 
1551 	return B_OK;
1552 }
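// Illustrative use of send_signal_to_thread() ("thread" and "team" stand for
// whatever Thread*/team ID the caller holds):
//	Signal signal(SIGTERM, SI_USER, B_OK, team->id);
//	send_signal_to_thread(thread, signal, B_CHECK_PERMISSION);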
1553 
1554 
1555 /*!	Sends the given signal to the thread with the given ID.
1556 
1557 	\param threadID The ID of the thread the signal shall be sent to.
1558 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1559 		actual signal will be delivered. Only delivery checks will be performed.
1560 		The given object will be copied. The caller retains ownership.
1561 	\param flags A bitwise combination of any number of the following:
1562 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1563 			target thread the signal.
1564 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1565 			woken up, the scheduler will be invoked. If set that will not be
1566 			done explicitly, but rescheduling can still happen, e.g. when the
1567 			current thread's time slice runs out.
1568 	\return \c B_OK, when the signal was delivered successfully, another error
1569 		code otherwise.
1570 */
1571 status_t
1572 send_signal_to_thread_id(thread_id threadID, const Signal& signal, uint32 flags)
1573 {
1574 	Thread* thread = Thread::Get(threadID);
1575 	if (thread == NULL)
1576 		return B_BAD_THREAD_ID;
1577 	BReference<Thread> threadReference(thread, true);
1578 
1579 	return send_signal_to_thread(thread, signal, flags);
1580 }
1581 
1582 
1583 /*!	Sends the given signal to the given team.
1584 
1585 	The caller must hold \c signal_lock.
1586 
1587 	\param team The team the signal shall be sent to.
1588 	\param signalNumber The number of the signal to be delivered. If \c 0, no
1589 		actual signal will be delivered. Only delivery checks will be performed.
1590 	\param signal If non-NULL the signal to be queued (has number
1591 		\a signalNumber in this case). The caller transfers an object reference
1592 		to this function. If \c NULL an unqueued signal will be delivered to the
1593 		thread.
1594 	\param flags A bitwise combination of any number of the following:
1595 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1596 			target thread the signal.
1597 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1598 			woken up, the scheduler will be invoked. If set that will not be
1599 			done explicitly, but rescheduling can still happen, e.g. when the
1600 			current thread's time slice runs out.
1601 	\return \c B_OK, when the signal was delivered successfully, another error
1602 		code otherwise.
1603 */
1604 status_t
1605 send_signal_to_team_locked(Team* team, uint32 signalNumber, Signal* signal,
1606 	uint32 flags)
1607 {
1608 	ASSERT(signal == NULL || signalNumber == signal->Number());
1609 
1610 	T(SendSignal(team->id, signalNumber, flags));
1611 
1612 	// The caller transferred a reference to the signal to us.
1613 	BReference<Signal> signalReference(signal, true);
1614 
1615 	if ((flags & B_CHECK_PERMISSION) != 0) {
1616 		if (!has_permission_to_signal(team))
1617 			return EPERM;
1618 	}
1619 
1620 	if (signalNumber == 0)
1621 		return B_OK;
1622 
1623 	if (team == team_get_kernel_team()) {
1624 		// signals to the kernel team are not allowed
1625 		return EPERM;
1626 	}
1627 
1628 	if (signal != NULL)
1629 		team->AddPendingSignal(signal);
1630 	else
1631 		team->AddPendingSignal(signalNumber);
1632 
1633 	// the team has the signal reference, now
1634 	signalReference.Detach();
1635 
1636 	switch (signalNumber) {
1637 		case SIGKILL:
1638 		case SIGKILLTHR:
1639 		{
1640 			// Also add a SIGKILLTHR to the main thread's signals and wake it
1641 			// up/interrupt it, so we get this over with as soon as possible
1642 			// (only the main thread shuts down the team).
1643 			Thread* mainThread = team->main_thread;
1644 			if (mainThread != NULL) {
1645 				mainThread->AddPendingSignal(SIGKILLTHR);
1646 
1647 				// wake up main thread
1648 				mainThread->going_to_suspend = false;
1649 
1650 				SpinLocker _(mainThread->scheduler_lock);
1651 				if (mainThread->state == B_THREAD_SUSPENDED)
1652 					scheduler_enqueue_in_run_queue(mainThread);
1653 				else
1654 					thread_interrupt(mainThread, true);
1655 			}
1656 			break;
1657 		}
1658 
1659 		case SIGCONT:
1660 			// Wake up any suspended threads; interrupt the others if they
1661 			// don't block the signal.
1662 			for (Thread* thread = team->thread_list; thread != NULL;
1663 					thread = thread->team_next) {
1664 				thread->going_to_suspend = false;
1665 
1666 				SpinLocker _(thread->scheduler_lock);
1667 				if (thread->state == B_THREAD_SUSPENDED) {
1668 					scheduler_enqueue_in_run_queue(thread);
1669 				} else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask)
1670 						!= 0) {
1671 					thread_interrupt(thread, false);
1672 				}
1673 
1674 				// remove any pending stop signals
1675 				thread->RemovePendingSignals(STOP_SIGNALS);
1676 			}
1677 
1678 			// remove any pending team stop signals
1679 			team->RemovePendingSignals(STOP_SIGNALS);
1680 			break;
1681 
1682 		case SIGSTOP:
1683 		case SIGTSTP:
1684 		case SIGTTIN:
1685 		case SIGTTOU:
1686 			// send the stop signal to all threads
1687 			// TODO: Is that correct or should we only target the main thread?
1688 			for (Thread* thread = team->thread_list; thread != NULL;
1689 					thread = thread->team_next) {
1690 				thread->AddPendingSignal(signalNumber);
1691 			}
1692 
1693 			// remove the stop signal from the team again
1694 			if (signal != NULL) {
1695 				team->RemovePendingSignal(signal);
1696 				signalReference.SetTo(signal, true);
1697 			} else
1698 				team->RemovePendingSignal(signalNumber);
1699 
1700 			// fall through to interrupt threads
1701 		default:
1702 			// Interrupt all interruptibly waiting threads, if the signal is
1703 			// not masked.
1704 			for (Thread* thread = team->thread_list; thread != NULL;
1705 					thread = thread->team_next) {
1706 				sigset_t nonBlocked = ~thread->sig_block_mask
1707 					| SIGNAL_TO_MASK(SIGCHLD);
1708 				if ((thread->AllPendingSignals() & nonBlocked) != 0) {
1709 					SpinLocker _(thread->scheduler_lock);
1710 					thread_interrupt(thread, false);
1711 				}
1712 			}
1713 			break;
1714 	}
1715 
1716 	update_team_threads_signal_flag(team);
1717 
1718 	return B_OK;
1719 }
1720 
1721 
1722 /*!	Sends the given signal to the given team.
1723 
1724 	\param team The team the signal shall be sent to.
1725 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1726 		actual signal will be delivered. Only delivery checks will be performed.
1727 		The given object will be copied. The caller retains ownership.
1728 	\param flags A bitwise combination of any number of the following:
1729 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1730 			target thread the signal.
1731 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1732 			woken up, the scheduler will be invoked. If set that will not be
1733 			done explicitly, but rescheduling can still happen, e.g. when the
1734 			current thread's time slice runs out.
1735 	\return \c B_OK, when the signal was delivered successfully, another error
1736 		code otherwise.
1737 */
1738 status_t
1739 send_signal_to_team(Team* team, const Signal& signal, uint32 flags)
1740 {
1741 	// Clone the signal -- the clone will be queued. If something fails and the
1742 	// caller doesn't require queuing, we will add an unqueued signal.
1743 	Signal* signalToQueue = NULL;
1744 	status_t error = Signal::CreateQueuable(signal,
1745 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1746 	if (error != B_OK)
1747 		return error;
1748 
1749 	InterruptsSpinLocker locker(team->signal_lock);
1750 
1751 	error = send_signal_to_team_locked(team, signal.Number(), signalToQueue,
1752 		flags);
1753 
1754 	locker.Unlock();
1755 
1756 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1757 		scheduler_reschedule_if_necessary();
1758 
1759 	return error;
1760 }
1761 
1762 
1763 /*!	Sends the given signal to the team with the given ID.
1764 
1765 	\param teamID The ID of the team the signal shall be sent to.
1766 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1767 		actual signal will be delivered. Only delivery checks will be performed.
1768 		The given object will be copied. The caller retains ownership.
1769 	\param flags A bitwise combination of any number of the following:
1770 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1771 			target thread the signal.
1772 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1773 			woken up, the scheduler will be invoked. If set that will not be
1774 			done explicitly, but rescheduling can still happen, e.g. when the
1775 			current thread's time slice runs out.
1776 	\return \c B_OK, when the signal was delivered successfully, another error
1777 		code otherwise.
1778 */
1779 status_t
1780 send_signal_to_team_id(team_id teamID, const Signal& signal, uint32 flags)
1781 {
1782 	// get the team
1783 	Team* team = Team::Get(teamID);
1784 	if (team == NULL)
1785 		return B_BAD_TEAM_ID;
1786 	BReference<Team> teamReference(team, true);
1787 
1788 	return send_signal_to_team(team, signal, flags);
1789 }
1790 
1791 
1792 /*!	Sends the given signal to the given process group.
1793 
1794 	The caller must hold the process group's lock. Interrupts must be enabled.
1795 
1796 	\param group The process group the signal shall be sent to.
1797 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1798 		actual signal will be delivered. Only delivery checks will be performed.
1799 		The given object will be copied. The caller retains ownership.
1800 	\param flags A bitwise combination of any number of the following:
1801 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1802 			target thread the signal.
1803 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1804 			woken up, the scheduler will be invoked. If set that will not be
1805 			done explicitly, but rescheduling can still happen, e.g. when the
1806 			current thread's time slice runs out.
1807 	\return \c B_OK, when the signal was delivered successfully, another error
1808 		code otherwise.
1809 */
1810 status_t
1811 send_signal_to_process_group_locked(ProcessGroup* group, const Signal& signal,
1812 	uint32 flags)
1813 {
1814 	T(SendSignal(-group->id, signal.Number(), flags));
1815 
1816 	bool firstTeam = true;
1817 
1818 	for (Team* team = group->teams; team != NULL; team = team->group_next) {
1819 		status_t error = send_signal_to_team(team, signal,
1820 			flags | B_DO_NOT_RESCHEDULE);
1821 		// If sending to the first team in the group failed, let the whole call
1822 		// fail.
1823 		if (firstTeam) {
1824 			if (error != B_OK)
1825 				return error;
1826 			firstTeam = false;
1827 		}
1828 	}
1829 
1830 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1831 		scheduler_reschedule_if_necessary();
1832 
1833 	return B_OK;
1834 }
1835 
1836 
1837 /*!	Sends the given signal to the process group specified by the given ID.
1838 
1839 	The caller must not hold any process group, team, or thread lock. Interrupts
1840 	must be enabled.
1841 
1842 	\param groupID The ID of the process group the signal shall be sent to.
1843 	\param signal The signal to be delivered. If the signal's number is \c 0, no
1844 		actual signal will be delivered. Only delivery checks will be performed.
1845 		The given object will be copied. The caller retains ownership.
1846 	\param flags A bitwise combination of any number of the following:
1847 		- \c B_CHECK_PERMISSION: Check the caller's permission to send the
1848 			target teams the signal.
1849 		- \c B_DO_NOT_RESCHEDULE: If clear and a higher-priority thread has been
1850 			woken up, the scheduler will be invoked. If set, that will not be
1851 			done explicitly, but rescheduling can still happen, e.g. when the
1852 			current thread's time slice runs out.
1853 	\return \c B_OK if the signal was delivered successfully, another error
1854 		code otherwise.
1855 */
1856 status_t
1857 send_signal_to_process_group(pid_t groupID, const Signal& signal, uint32 flags)
1858 {
1859 	ProcessGroup* group = ProcessGroup::Get(groupID);
1860 	if (group == NULL)
1861 		return B_BAD_TEAM_ID;
1862 	BReference<ProcessGroup> groupReference(group);
1863 
1864 	T(SendSignal(-group->id, signal.Number(), flags));
1865 
1866 	AutoLocker<ProcessGroup> groupLocker(group);
1867 
1868 	status_t error = send_signal_to_process_group_locked(group, signal,
1869 		flags | B_DO_NOT_RESCHEDULE);
1870 	if (error != B_OK)
1871 		return error;
1872 
1873 	groupLocker.Unlock();
1874 
1875 	if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1876 		scheduler_reschedule_if_necessary();
1877 
1878 	return B_OK;
1879 }
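

/*	A sketch of sending \c SIGHUP to a process group from kernel code, with
	interrupts enabled and no process group, team, or thread lock held;
	\c groupID is a hypothetical \c pid_t identifying the group.
	\code
	Signal signal(SIGHUP, SI_USER, B_OK, team_get_current_team_id());
	status_t error = send_signal_to_process_group(groupID, signal, 0);
	\endcode
	The per-team deliveries are issued with \c B_DO_NOT_RESCHEDULE; the
	rescheduling check is performed once, after the group lock has been
	released.
*/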
1880 
1881 
1882 static status_t
1883 send_signal_internal(pid_t id, uint signalNumber, union sigval userValue,
1884 	uint32 flags)
1885 {
1886 	if (signalNumber > MAX_SIGNAL_NUMBER)
1887 		return B_BAD_VALUE;
1888 
1889 	Thread* thread = thread_get_current_thread();
1890 
1891 	Signal signal(signalNumber,
1892 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
1893 		B_OK, thread->team->id);
1894 		// Note: SI_USER/SI_QUEUE is not correct if called from within the
1895 		// kernel (or a driver), but we don't have any info here.
1896 	signal.SetUserValue(userValue);
1897 
1898 	// If id is > 0, send the signal to the respective thread.
1899 	if (id > 0)
1900 		return send_signal_to_thread_id(id, signal, flags);
1901 
1902 	// If id == 0, send the signal to the current thread.
1903 	if (id == 0)
1904 		return send_signal_to_thread(thread, signal, flags);
1905 
1906 	// If id == -1, send the signal to all teams the calling team has permission
1907 	// to send signals to.
1908 	if (id == -1) {
1909 		// TODO: Implement correctly!
1910 		// Currently the signal is only sent to the current team.
1911 		return send_signal_to_team_id(thread->team->id, signal, flags);
1912 	}
1913 
1914 	// Send a signal to the specified process group (the absolute value of the
1915 	// id).
1916 	return send_signal_to_process_group(-id, signal, flags);
1917 }
1918 
1919 
1920 int
1921 send_signal_etc(pid_t id, uint signalNumber, uint32 flags)
1922 {
1923 	// a dummy user value
1924 	union sigval userValue;
1925 	userValue.sival_ptr = NULL;
1926 
1927 	return send_signal_internal(id, signalNumber, userValue, flags);
1928 }
1929 
1930 
1931 int
1932 send_signal(pid_t threadID, uint signal)
1933 {
1934 	// The BeBook states that this function isn't exported for drivers,
1935 	// but that is not actually the case.
1936 	return send_signal_etc(threadID, signal, 0);
1937 }
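

/*	A sketch of the driver-facing convenience API above: sending \c SIGUSR1 to
	a thread by ID with \c B_DO_NOT_RESCHEDULE, e.g. from a context where an
	immediate reschedule is undesirable. \c someThread is a hypothetical
	\c thread_id.
	\code
	int error = send_signal_etc(someThread, SIGUSR1, B_DO_NOT_RESCHEDULE);
	if (error != B_OK)
		dprintf("send_signal_etc() failed: %s\n", strerror(error));
	\endcode
	send_signal() is simply send_signal_etc() with \c flags set to 0.
*/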
1938 
1939 
1940 static int
1941 sigprocmask_internal(int how, const sigset_t* set, sigset_t* oldSet)
1942 {
1943 	Thread* thread = thread_get_current_thread();
1944 
1945 	InterruptsSpinLocker _(thread->team->signal_lock);
1946 
1947 	sigset_t oldMask = thread->sig_block_mask;
1948 
1949 	if (set != NULL) {
1950 		T(SigProcMask(how, *set));
1951 
1952 		switch (how) {
1953 			case SIG_BLOCK:
1954 				thread->sig_block_mask |= *set & BLOCKABLE_SIGNALS;
1955 				break;
1956 			case SIG_UNBLOCK:
1957 				thread->sig_block_mask &= ~*set;
1958 				break;
1959 			case SIG_SETMASK:
1960 				thread->sig_block_mask = *set & BLOCKABLE_SIGNALS;
1961 				break;
1962 			default:
1963 				return B_BAD_VALUE;
1964 		}
1965 
1966 		update_current_thread_signals_flag();
1967 	}
1968 
1969 	if (oldSet != NULL)
1970 		*oldSet = oldMask;
1971 
1972 	return B_OK;
1973 }
1974 
1975 
1976 int
1977 sigprocmask(int how, const sigset_t* set, sigset_t* oldSet)
1978 {
1979 	RETURN_AND_SET_ERRNO(sigprocmask_internal(how, set, oldSet));
1980 }
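

/*	A sketch of using the in-kernel sigprocmask() above to defer \c SIGUSR1
	delivery for the current thread and restore the previous mask afterwards.
	\code
	sigset_t blockSet = SIGNAL_TO_MASK(SIGUSR1);
	sigset_t oldSet;
	sigprocmask(SIG_BLOCK, &blockSet, &oldSet);

	// ... section during which SIGUSR1 remains pending rather than handled ...

	sigprocmask(SIG_SETMASK, &oldSet, NULL);
	\endcode
	Signals outside \c BLOCKABLE_SIGNALS (e.g. \c SIGKILL, \c SIGSTOP) are
	filtered out by \c SIG_BLOCK and \c SIG_SETMASK, as implemented above.
*/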
1981 
1982 
1983 /*!	\brief Like sigaction(), but returning the error instead of setting errno.
1984 */
1985 static status_t
1986 sigaction_internal(int signal, const struct sigaction* act,
1987 	struct sigaction* oldAction)
1988 {
1989 	if (signal < 1 || signal > MAX_SIGNAL_NUMBER
1990 		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
1991 		return B_BAD_VALUE;
1992 
1993 	// get and lock the team
1994 	Team* team = thread_get_current_thread()->team;
1995 	TeamLocker teamLocker(team);
1996 
1997 	struct sigaction& teamHandler = team->SignalActionFor(signal);
1998 	if (oldAction) {
1999 		// save previous sigaction structure
2000 		*oldAction = teamHandler;
2001 	}
2002 
2003 	if (act) {
2004 		T(SigAction(signal, act));
2005 
2006 		// set new sigaction structure
2007 		teamHandler = *act;
2008 		teamHandler.sa_mask &= BLOCKABLE_SIGNALS;
2009 	}
2010 
2011 	// Remove the pending signal if it is now explicitly ignored (SIG_IGN), or
2012 	// if SIG_DFL was set and the signal's default action is to ignore it.
2013 	if ((act && act->sa_handler == SIG_IGN)
2014 		|| (act && act->sa_handler == SIG_DFL
2015 			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0)) {
2016 		InterruptsSpinLocker locker(team->signal_lock);
2017 
2018 		team->RemovePendingSignal(signal);
2019 
2020 		for (Thread* thread = team->thread_list; thread != NULL;
2021 				thread = thread->team_next) {
2022 			thread->RemovePendingSignal(signal);
2023 		}
2024 	}
2025 
2026 	return B_OK;
2027 }
2028 
2029 
2030 int
2031 sigaction(int signal, const struct sigaction* act, struct sigaction* oldAction)
2032 {
2033 	RETURN_AND_SET_ERRNO(sigaction_internal(signal, act, oldAction));
2034 }
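

/*	A sketch of using the kernel-level sigaction() above to ignore \c SIGPIPE
	for the current team.
	\code
	struct sigaction action;
	memset(&action, 0, sizeof(action));
	action.sa_handler = SIG_IGN;

	if (sigaction(SIGPIPE, &action, NULL) != 0)
		dprintf("sigaction(SIGPIPE) failed\n");
	\endcode
	As implemented above, switching a signal's handler to \c SIG_IGN also
	discards matching pending signals of the team and all of its threads.
*/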
2035 
2036 
2037 /*!	Wait for the specified signals, and return the information for the retrieved
2038 	signal in \a info.
2039 	The \c flags and \c timeout combination must either define an infinite
2040 	timeout (no timeout flags set), an absolute timeout (\c B_ABSOLUTE_TIMEOUT
2041 	set), or a relative timeout \code <= 0 \endcode (\c B_RELATIVE_TIMEOUT set).
2042 */
2043 static status_t
2044 sigwait_internal(const sigset_t* set, siginfo_t* info, uint32 flags,
2045 	bigtime_t timeout)
2046 {
2047 	// restrict mask to blockable signals
2048 	sigset_t requestedSignals = *set & BLOCKABLE_SIGNALS;
2049 
2050 	// always make the wait interruptible
2051 	flags |= B_CAN_INTERRUPT;
2052 
2053 	// check whether we are allowed to wait at all
2054 	bool canWait = (flags & B_RELATIVE_TIMEOUT) == 0 || timeout > 0;
2055 
2056 	Thread* thread = thread_get_current_thread();
2057 
2058 	InterruptsSpinLocker locker(thread->team->signal_lock);
2059 
2060 	bool timedOut = false;
2061 	status_t error = B_OK;
2062 
2063 	while (!timedOut) {
2064 		sigset_t pendingSignals = thread->AllPendingSignals();
2065 
2066 		// If a kill signal is pending, just bail out.
2067 		if ((pendingSignals & KILL_SIGNALS) != 0)
2068 			return B_INTERRUPTED;
2069 
2070 		if ((pendingSignals & requestedSignals) != 0) {
2071 			// get signal with the highest priority
2072 			Signal stackSignal;
2073 			Signal* signal = dequeue_thread_or_team_signal(thread,
2074 				requestedSignals, stackSignal);
2075 			ASSERT(signal != NULL);
2076 
2077 			SignalHandledCaller signalHandledCaller(signal);
2078 			locker.Unlock();
2079 
2080 			info->si_signo = signal->Number();
2081 			info->si_code = signal->SignalCode();
2082 			info->si_errno = signal->ErrorCode();
2083 			info->si_pid = signal->SendingProcess();
2084 			info->si_uid = signal->SendingUser();
2085 			info->si_addr = signal->Address();
2086 			info->si_status = signal->Status();
2087 			info->si_band = signal->PollBand();
2088 			info->si_value = signal->UserValue();
2089 
2090 			return B_OK;
2091 		}
2092 
2093 		if (!canWait)
2094 			return B_WOULD_BLOCK;
2095 
2096 		sigset_t blockedSignals = thread->sig_block_mask;
2097 		if ((pendingSignals & ~blockedSignals) != 0) {
2098 			// Non-blocked signals are pending -- return to let them be handled.
2099 			return B_INTERRUPTED;
2100 		}
2101 
2102 		// No signals yet. Set the signal block mask to not include the
2103 		// requested signals and wait until we're interrupted.
2104 		thread->sig_block_mask = blockedSignals & ~requestedSignals;
2105 
2106 		while (!has_signals_pending(thread)) {
2107 			thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SIGNAL,
2108 				NULL);
2109 
2110 			locker.Unlock();
2111 
2112 			if ((flags & B_ABSOLUTE_TIMEOUT) != 0) {
2113 				error = thread_block_with_timeout(flags, timeout);
2114 				if (error == B_WOULD_BLOCK || error == B_TIMED_OUT) {
2115 					error = B_WOULD_BLOCK;
2116 						// POSIX requires EAGAIN (B_WOULD_BLOCK) on timeout
2117 					timedOut = true;
2118 
2119 					locker.Lock();
2120 					break;
2121 				}
2122 			} else
2123 				thread_block();
2124 
2125 			locker.Lock();
2126 		}
2127 
2128 		// restore the original block mask
2129 		thread->sig_block_mask = blockedSignals;
2130 
2131 		update_current_thread_signals_flag();
2132 	}
2133 
2134 	// we get here only when timed out
2135 	return error;
2136 }
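

/*	A sketch of the three flag/timeout combinations sigwait_internal() accepts,
	using a hypothetical \c waitSet and \c info buffer:
	\code
	// infinite timeout
	sigwait_internal(&waitSet, &info, 0, 0);

	// absolute timeout ("wait until this system_time()")
	sigwait_internal(&waitSet, &info, B_ABSOLUTE_TIMEOUT,
		system_time() + 1000000);

	// poll only: relative timeout <= 0
	sigwait_internal(&waitSet, &info, B_RELATIVE_TIMEOUT, 0);
	\endcode
	In the last case canWait is \c false, so \c B_WOULD_BLOCK is returned when
	none of the requested signals is pending.
*/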
2137 
2138 
2139 /*!	Replace the current signal block mask and wait until a signal arrives.
2140 	Before returning, the original signal block mask is restored.
2141 */
2142 static status_t
2143 sigsuspend_internal(const sigset_t* _mask)
2144 {
2145 	sigset_t mask = *_mask & BLOCKABLE_SIGNALS;
2146 
2147 	T(SigSuspend(mask));
2148 
2149 	Thread* thread = thread_get_current_thread();
2150 
2151 	InterruptsSpinLocker locker(thread->team->signal_lock);
2152 
2153 	// Set the new block mask and block until interrupted. We might be here
2154 	// after a syscall restart, in which case sigsuspend_original_unblocked_mask
2155 	// will still be set.
2156 	sigset_t oldMask = thread->sigsuspend_original_unblocked_mask != 0
2157 		? ~thread->sigsuspend_original_unblocked_mask : thread->sig_block_mask;
2158 	thread->sig_block_mask = mask & BLOCKABLE_SIGNALS;
2159 
2160 	update_current_thread_signals_flag();
2161 
2162 	while (!has_signals_pending(thread)) {
2163 		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
2164 			THREAD_BLOCK_TYPE_SIGNAL, NULL);
2165 
2166 		locker.Unlock();
2167 		thread_block();
2168 		locker.Lock();
2169 	}
2170 
2171 	// Set sigsuspend_original_unblocked_mask (guaranteed to be non-0 due to
2172 	// BLOCKABLE_SIGNALS). This will indicate to handle_signals() that it is
2173 	// called after a _user_sigsuspend(). It will reset the field after invoking
2174 	// a signal handler, or restart the syscall, if there wasn't anything to
2175 	// handle anymore (e.g. because another thread was faster).
2176 	thread->sigsuspend_original_unblocked_mask = ~oldMask;
2177 
2178 	T(SigSuspendDone());
2179 
2180 	// we're not supposed to actually succeed
2181 	return B_INTERRUPTED;
2182 }
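

/*	The userland pattern this backs, shown as a hedged POSIX-level sketch
	(standard calls, nothing specific to this file): block a signal, test a
	condition, then atomically unblock the signal and wait.
	\code
	sigset_t blockSet, suspendSet;
	sigemptyset(&blockSet);
	sigaddset(&blockSet, SIGUSR1);
	sigprocmask(SIG_BLOCK, &blockSet, &suspendSet);
	sigdelset(&suspendSet, SIGUSR1);

	while (!conditionMet)	// conditionMet: hypothetical flag set by a handler
		sigsuspend(&suspendSet);
	\endcode
	The sigsuspend_original_unblocked_mask bookkeeping above is what allows the
	syscall to be restarted correctly when there was nothing to handle after
	all.
*/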
2183 
2184 
2185 static status_t
2186 sigpending_internal(sigset_t* set)
2187 {
2188 	Thread* thread = thread_get_current_thread();
2189 
2190 	if (set == NULL)
2191 		return B_BAD_VALUE;
2192 
2193 	InterruptsSpinLocker locker(thread->team->signal_lock);
2194 
2195 	*set = thread->AllPendingSignals() & thread->sig_block_mask;
2196 
2197 	return B_OK;
2198 }
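

/*	Only signals that are both pending and currently blocked are reported,
	matching POSIX sigpending() semantics. A minimal sketch:
	\code
	sigset_t pending;
	if (sigpending_internal(&pending) == B_OK
		&& (pending & SIGNAL_TO_MASK(SIGUSR1)) != 0) {
		// a SIGUSR1 arrived while blocked and is still awaiting delivery
	}
	\endcode
*/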
2199 
2200 
2201 // #pragma mark - syscalls
2202 
2203 
2204 /*!	Sends a signal to a thread, process, or process group.
2205 	\param id Specifies the ID of the target:
2206 		- \code id > 0 \endcode: If \a toThread is \c true, the target is the
2207 		- \code id > 0 \endcode: If \c SIGNAL_FLAG_SEND_TO_THREAD is set, the
2208 			target is the thread with ID \a id, otherwise the team with ID \a id.
2209 		- \code id == 0 \endcode: If \c SIGNAL_FLAG_SEND_TO_THREAD is set, the
2210 			target is the current thread, otherwise the current team.
2211 		- \code id == -1 \endcode: The targets are all teams the current team has
2212 			permission to send signals to. Currently not implemented correctly.
2213 		- \code id < -1 \endcode: The target is the process group with ID
2214 			\c -id.
2215 		actually send any signal.
2216 	\param userUserValue A user value to be associated with the signal. Might be
2217 		ignored unless signal queuing is forced. Can be \c NULL.
2218 	\param flags A bitwise combination of any number of the following:
2219 		- \c SIGNAL_FLAG_QUEUING_REQUIRED: Signal queuing is required. Fail
2220 			instead of falling back to unqueued signals, when queuing isn't
2221 			possible.
2222 		- \c SIGNAL_FLAG_SEND_TO_THREAD: Interpret the given ID as a
2223 			\c thread_id rather than a \c team_id. Ignored when the \a id is
2224 			\code < 0 \endcode -- then the target is a process group.
2225 	\return \c B_OK on success, another error code otherwise.
2226 */
2227 status_t
2228 _user_send_signal(int32 id, uint32 signalNumber,
2229 	const union sigval* userUserValue, uint32 flags)
2230 {
2231 	// restrict flags to the allowed ones and add B_CHECK_PERMISSION
2232 	flags &= SIGNAL_FLAG_QUEUING_REQUIRED | SIGNAL_FLAG_SEND_TO_THREAD;
2233 	flags |= B_CHECK_PERMISSION;
2234 
2235 	// Copy the user value from userland. If not given, use a dummy value.
2236 	union sigval userValue;
2237 	if (userUserValue != NULL) {
2238 		if (!IS_USER_ADDRESS(userUserValue)
2239 			|| user_memcpy(&userValue, userUserValue, sizeof(userValue))
2240 				!= B_OK) {
2241 			return B_BAD_ADDRESS;
2242 		}
2243 	} else
2244 		userValue.sival_ptr = NULL;
2245 
2246 	// If to be sent to a thread, delegate to send_signal_internal(). Also do
2247 	// that when id < 0, since the semantics are the same in that case.
2248 	if ((flags & SIGNAL_FLAG_SEND_TO_THREAD) != 0 || id < 0)
2249 		return send_signal_internal(id, signalNumber, userValue, flags);
2250 
2251 	// kill() semantics for id >= 0
2252 	if (signalNumber > MAX_SIGNAL_NUMBER)
2253 		return B_BAD_VALUE;
2254 
2255 	Thread* thread = thread_get_current_thread();
2256 
2257 	Signal signal(signalNumber,
2258 		(flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
2259 		B_OK, thread->team->id);
2260 	signal.SetUserValue(userValue);
2261 
2262 	// send to current team for id == 0, otherwise to the respective team
2263 	return send_signal_to_team_id(id == 0 ? team_get_current_team_id() : id,
2264 		signal, flags);
2265 }
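

/*	A sketch of the id regimes documented above, written as direct calls with
	hypothetical IDs (\c pid, \c tid, \c pgid) and no user value:
	\code
	_user_send_signal(pid, SIGTERM, NULL, 0);
		// pid > 0: kill() semantics, signal the team with ID pid
	_user_send_signal(tid, SIGUSR1, NULL, SIGNAL_FLAG_SEND_TO_THREAD);
		// pthread_kill()-style: signal the thread with ID tid
	_user_send_signal(0, SIGTERM, NULL, 0);
		// signal the current team
	_user_send_signal(-pgid, SIGHUP, NULL, 0);
		// pgid > 1: signal the process group with ID pgid
	\endcode
	In all cases \c B_CHECK_PERMISSION is added before delivery.
*/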
2266 
2267 
2268 status_t
2269 _user_set_signal_mask(int how, const sigset_t *userSet, sigset_t *userOldSet)
2270 {
2271 	sigset_t set, oldSet;
2272 	status_t status;
2273 
2274 	if ((userSet != NULL && (!IS_USER_ADDRESS(userSet)
2275 			|| user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK))
2276 		|| (userOldSet != NULL && (!IS_USER_ADDRESS(userOldSet)
2277 			|| user_memcpy(&oldSet, userOldSet, sizeof(sigset_t)) < B_OK)))
2278 		return B_BAD_ADDRESS;
2279 
2280 	status = sigprocmask_internal(how, userSet ? &set : NULL,
2281 		userOldSet ? &oldSet : NULL);
2282 
2283 	// copy old set if asked for
2284 	if (status >= B_OK && userOldSet != NULL
2285 		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
2286 		return B_BAD_ADDRESS;
2287 
2288 	return status;
2289 }
2290 
2291 
2292 status_t
2293 _user_sigaction(int signal, const struct sigaction *userAction,
2294 	struct sigaction *userOldAction)
2295 {
2296 	struct sigaction act, oact;
2297 	status_t status;
2298 
2299 	if ((userAction != NULL && (!IS_USER_ADDRESS(userAction)
2300 			|| user_memcpy(&act, userAction, sizeof(struct sigaction)) < B_OK))
2301 		|| (userOldAction != NULL && (!IS_USER_ADDRESS(userOldAction)
2302 			|| user_memcpy(&oact, userOldAction, sizeof(struct sigaction))
2303 				< B_OK)))
2304 		return B_BAD_ADDRESS;
2305 
2306 	status = sigaction_internal(signal, userAction ? &act : NULL,
2307 		userOldAction ? &oact : NULL);
2308 
2309 	// only copy the old action if a pointer has been given
2310 	if (status >= B_OK && userOldAction != NULL
2311 		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
2312 		return B_BAD_ADDRESS;
2313 
2314 	return status;
2315 }
2316 
2317 
2318 status_t
2319 _user_sigwait(const sigset_t *userSet, siginfo_t *userInfo, uint32 flags,
2320 	bigtime_t timeout)
2321 {
2322 	// copy userSet to stack
2323 	sigset_t set;
2324 	if (userSet == NULL || !IS_USER_ADDRESS(userSet)
2325 		|| user_memcpy(&set, userSet, sizeof(sigset_t)) != B_OK) {
2326 		return B_BAD_ADDRESS;
2327 	}
2328 
2329 	// userInfo is optional, but must be a user address when given
2330 	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
2331 		return B_BAD_ADDRESS;
2332 
2333 	syscall_restart_handle_timeout_pre(flags, timeout);
2334 
2335 	flags |= B_CAN_INTERRUPT;
2336 
2337 	siginfo_t info;
2338 	status_t status = sigwait_internal(&set, &info, flags, timeout);
2339 	if (status == B_OK) {
2340 		// copy the info back to userland, if userInfo is non-NULL
2341 		if (userInfo != NULL)
2342 			status = user_memcpy(userInfo, &info, sizeof(info));
2343 	} else if (status == B_INTERRUPTED) {
2344 		// make sure we'll be restarted
2345 		Thread* thread = thread_get_current_thread();
2346 		atomic_or(&thread->flags, THREAD_FLAGS_ALWAYS_RESTART_SYSCALL);
2347 	}
2348 
2349 	return syscall_restart_handle_timeout_post(status, timeout);
2350 }
2351 
2352 
2353 status_t
2354 _user_sigsuspend(const sigset_t *userMask)
2355 {
2356 	sigset_t mask;
2357 
2358 	if (userMask == NULL)
2359 		return B_BAD_VALUE;
2360 	if (!IS_USER_ADDRESS(userMask)
2361 		|| user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK) {
2362 		return B_BAD_ADDRESS;
2363 	}
2364 
2365 	return sigsuspend_internal(&mask);
2366 }
2367 
2368 
2369 status_t
2370 _user_sigpending(sigset_t *userSet)
2371 {
2372 	sigset_t set;
2373 	status_t status;
2374 
2375 	if (userSet == NULL)
2376 		return B_BAD_VALUE;
2377 	if (!IS_USER_ADDRESS(userSet))
2378 		return B_BAD_ADDRESS;
2379 
2380 	status = sigpending_internal(&set);
2381 	if (status == B_OK
2382 		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
2383 		return B_BAD_ADDRESS;
2384 
2385 	return status;
2386 }
2387 
2388 
2389 status_t
2390 _user_set_signal_stack(const stack_t* newUserStack, stack_t* oldUserStack)
2391 {
2392 	Thread *thread = thread_get_current_thread();
2393 	struct stack_t newStack, oldStack;
2394 	bool onStack = false;
2395 
2396 	if ((newUserStack != NULL && (!IS_USER_ADDRESS(newUserStack)
2397 			|| user_memcpy(&newStack, newUserStack, sizeof(stack_t)) < B_OK))
2398 		|| (oldUserStack != NULL && (!IS_USER_ADDRESS(oldUserStack)
2399 			|| user_memcpy(&oldStack, oldUserStack, sizeof(stack_t)) < B_OK)))
2400 		return B_BAD_ADDRESS;
2401 
2402 	if (thread->signal_stack_enabled) {
2403 		// determine whether or not the user thread is currently
2404 		// on the active signal stack
2405 		onStack = arch_on_signal_stack(thread);
2406 	}
2407 
2408 	if (oldUserStack != NULL) {
2409 		oldStack.ss_sp = (void *)thread->signal_stack_base;
2410 		oldStack.ss_size = thread->signal_stack_size;
2411 		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
2412 			| (onStack ? SS_ONSTACK : 0);
2413 	}
2414 
2415 	if (newUserStack != NULL) {
2416 		// no flags other than SS_DISABLE are allowed
2417 		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
2418 			return B_BAD_VALUE;
2419 
2420 		if ((newStack.ss_flags & SS_DISABLE) == 0) {
2421 			// check if the size is valid
2422 			if (newStack.ss_size < MINSIGSTKSZ)
2423 				return B_NO_MEMORY;
2424 			if (onStack)
2425 				return B_NOT_ALLOWED;
2426 			if (!IS_USER_ADDRESS(newStack.ss_sp))
2427 				return B_BAD_VALUE;
2428 
2429 			thread->signal_stack_base = (addr_t)newStack.ss_sp;
2430 			thread->signal_stack_size = newStack.ss_size;
2431 			thread->signal_stack_enabled = true;
2432 		} else
2433 			thread->signal_stack_enabled = false;
2434 	}
2435 
2436 	// only copy the old stack info if a pointer has been given
2437 	if (oldUserStack != NULL
2438 		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
2439 		return B_BAD_ADDRESS;
2440 
2441 	return B_OK;
2442 }
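

/*	This is the kernel backend for sigaltstack(). A hedged userland-level
	sketch of installing an alternate stack so that, e.g., a \c SIGSEGV handler
	can still run after a stack overflow (\c segvHandler is a hypothetical
	handler function):
	\code
	stack_t stack;
	stack.ss_sp = malloc(SIGSTKSZ);
	stack.ss_size = SIGSTKSZ;
	stack.ss_flags = 0;
	sigaltstack(&stack, NULL);

	struct sigaction action;
	memset(&action, 0, sizeof(action));
	action.sa_handler = segvHandler;
	action.sa_flags = SA_ONSTACK;
	sigaction(SIGSEGV, &action, NULL);
	\endcode
	The checks above reject sizes below \c MINSIGSTKSZ and refuse to replace a
	stack that is currently in use.
*/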
2443 
2444 
2445 /*!	Restores the environment of a function that was interrupted by a signal
2446 	handler call.
2447 	This syscall is invoked when a signal handler function returns. It
2448 	deconstructs the signal handler frame and restores the stack and register
2449 	state of the function that was interrupted by a signal. The syscall is
2450 	therefore somewhat unusual, since it does not return to the calling
2451 	function, but to someplace else. In case the signal interrupted a syscall,
2452 	it will appear as if the syscall just returned. That is also the reason why
2453 	this syscall returns an int64, since it needs to return the value the
2454 	interrupted syscall returns, which is potentially 64 bits wide.
2455 
2456 	\param userSignalFrameData The signal frame data created for the signal
2457 		handler. Potentially some data (e.g. registers) have been modified by
2458 		handler. Some data (e.g. registers) may have been modified by
2459 	\return In case the signal interrupted a syscall, the return value of that
2460 		syscall. Otherwise (i.e. in case of a (hardware) interrupt/exception)
2461 		the value might need to be tailored such that after a return to userland
2462 		the restored environment is identical to the interrupted one (unless
2463 		explicitly modified). E.g. for x86 to achieve that, the return value
2464 		must contain the eax|edx values of the interrupted environment.
2465 */
2466 int64
2467 _user_restore_signal_frame(struct signal_frame_data* userSignalFrameData)
2468 {
2469 	syscall_64_bit_return_value();
2470 
2471 	Thread *thread = thread_get_current_thread();
2472 
2473 	// copy the signal frame data from userland
2474 	signal_frame_data signalFrameData;
2475 	if (userSignalFrameData == NULL || !IS_USER_ADDRESS(userSignalFrameData)
2476 		|| user_memcpy(&signalFrameData, userSignalFrameData,
2477 			sizeof(signalFrameData)) != B_OK) {
2478 		// We failed to copy the signal frame data from userland. This is a
2479 		// serious problem. Kill the thread.
2480 		dprintf("_user_restore_signal_frame(): thread %" B_PRId32 ": Failed to "
2481 			"copy signal frame data (%p) from userland. Killing thread...\n",
2482 			thread->id, userSignalFrameData);
2483 		kill_thread(thread->id);
2484 		return B_BAD_ADDRESS;
2485 	}
2486 
2487 	// restore the signal block mask
2488 	InterruptsSpinLocker locker(thread->team->signal_lock);
2489 
2490 	thread->sig_block_mask
2491 		= signalFrameData.context.uc_sigmask & BLOCKABLE_SIGNALS;
2492 	update_current_thread_signals_flag();
2493 
2494 	locker.Unlock();
2495 
2496 	// restore the syscall restart related thread flags and the syscall restart
2497 	// parameters
2498 	atomic_and(&thread->flags,
2499 		~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
2500 	atomic_or(&thread->flags, signalFrameData.thread_flags
2501 		& (THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
2502 
2503 	memcpy(thread->syscall_restart.parameters,
2504 		signalFrameData.syscall_restart_parameters,
2505 		sizeof(thread->syscall_restart.parameters));
2506 
2507 	// restore the previously stored Thread::user_signal_context
2508 	thread->user_signal_context = signalFrameData.context.uc_link;
2509 	if (thread->user_signal_context != NULL
2510 		&& !IS_USER_ADDRESS(thread->user_signal_context)) {
2511 		thread->user_signal_context = NULL;
2512 	}
2513 
2514 	// let the architecture specific code restore the registers
2515 	return arch_restore_signal_frame(&signalFrameData);
2516 }
2517