xref: /haiku/src/system/kernel/signal.cpp (revision 125183f9e5c136781f71c879faaeab43fdc3ea7b)
/*
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
 *
 * Distributed under the terms of the MIT License.
 */


/*! POSIX signal handling routines */


#include <ksignal.h>

#include <errno.h>
#include <stddef.h>
#include <string.h>

#include <OS.h>
#include <KernelExport.h>

#include <cpu.h>
#include <debug.h>
#include <kernel.h>
#include <kscheduler.h>
#include <sem.h>
#include <syscall_restart.h>
#include <syscall_utils.h>
#include <team.h>
#include <thread.h>
#include <tracing.h>
#include <user_debugger.h>
#include <user_thread.h>
#include <util/AutoLock.h>


//#define TRACE_SIGNAL
#ifdef TRACE_SIGNAL
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


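// Mask helpers: SIGNAL_TO_MASK() and KILL_SIGNALS come from <ksignal.h>.
// SIGNAL_TO_MASK(signal) yields the mask bit for the given signal number,
// and KILL_SIGNALS covers SIGKILL and SIGKILLTHR. BLOCKABLE_SIGNALS thus
// excludes exactly the signals that may never be blocked.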
#define BLOCKABLE_SIGNALS		(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)))
#define STOP_SIGNALS \
	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
#define DEFAULT_IGNORE_SIGNALS \
	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
	| SIGNAL_TO_MASK(SIGCONT))
#define NON_DEFERRABLE_SIGNALS	\
	(KILL_SIGNALS				\
	| SIGNAL_TO_MASK(SIGILL)	\
	| SIGNAL_TO_MASK(SIGFPE)	\
	| SIGNAL_TO_MASK(SIGSEGV))


const char * const sigstr[NSIG] = {
	"NONE", "HUP", "INT", "QUIT", "ILL", "CHLD", "ABRT", "PIPE",
	"FPE", "KILL", "STOP", "SEGV", "CONT", "TSTP", "ALRM", "TERM",
	"TTIN", "TTOU", "USR1", "USR2", "WINCH", "KILLTHR", "TRAP",
	"POLL", "PROF", "SYS", "URG", "VTALRM", "XCPU", "XFSZ"
};


static status_t deliver_signal(struct thread *thread, uint signal,
	uint32 flags);



// #pragma mark - signal tracing


#if SIGNAL_TRACING

namespace SignalTracing {


class HandleSignals : public AbstractTraceEntry {
	public:
		HandleSignals(uint32 signals)
			:
			fSignals(signals)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal handle:  0x%lx", fSignals);
		}

	private:
		uint32		fSignals;
};


class ExecuteSignalHandler : public AbstractTraceEntry {
	public:
		ExecuteSignalHandler(int signal, struct sigaction* handler)
			:
			fSignal(signal),
			fHandler((void*)handler->sa_handler)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal exec handler: signal: %d, handler: %p",
				fSignal, fHandler);
		}

	private:
		int		fSignal;
		void*	fHandler;
};


class SendSignal : public AbstractTraceEntry {
	public:
		SendSignal(pid_t target, uint32 signal, uint32 flags)
			:
			fTarget(target),
			fSignal(signal),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal send: target: %ld, signal: %lu (%s), "
				"flags: 0x%lx", fTarget, fSignal,
				(fSignal < NSIG ? sigstr[fSignal] : "invalid"), fFlags);
		}

	private:
		pid_t	fTarget;
		uint32	fSignal;
		uint32	fFlags;
};


class SigAction : public AbstractTraceEntry {
	public:
		SigAction(struct thread* thread, uint32 signal,
			const struct sigaction* act)
			:
			fThread(thread->id),
			fSignal(signal),
			fAction(*act)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal action: thread: %ld, signal: %lu (%s), "
				"action: {handler: %p, flags: 0x%x, mask: 0x%lx}",
				fThread, fSignal,
				(fSignal < NSIG ? sigstr[fSignal] : "invalid"),
				fAction.sa_handler, fAction.sa_flags, fAction.sa_mask);
		}

	private:
		thread_id			fThread;
		uint32				fSignal;
		struct sigaction	fAction;
};


class SigProcMask : public AbstractTraceEntry {
	public:
		SigProcMask(int how, sigset_t mask)
			:
			fHow(how),
			fMask(mask),
			fOldMask(thread_get_current_thread()->sig_block_mask)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			const char* how = "invalid";
			switch (fHow) {
				case SIG_BLOCK:
					how = "block";
					break;
				case SIG_UNBLOCK:
					how = "unblock";
					break;
				case SIG_SETMASK:
					how = "set";
					break;
			}

			out.Print("signal proc mask: %s 0x%lx, old mask: 0x%lx", how, fMask,
				fOldMask);
		}

	private:
		int			fHow;
		sigset_t	fMask;
		sigset_t	fOldMask;
};


class SigSuspend : public AbstractTraceEntry {
	public:
		SigSuspend(sigset_t mask)
			:
			fMask(mask),
			fOldMask(thread_get_current_thread()->sig_block_mask)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal suspend: %#" B_PRIx32 ", old mask: %#" B_PRIx32,
				fMask, fOldMask);
		}

	private:
		sigset_t	fMask;
		sigset_t	fOldMask;
};


class SigSuspendDone : public AbstractTraceEntry {
	public:
		SigSuspendDone()
			:
			fSignals(thread_get_current_thread()->sig_pending)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal suspend done: %#" B_PRIx32, fSignals);
		}

	private:
		uint32		fSignals;
};

}	// namespace SignalTracing

#	define T(x)	new(std::nothrow) SignalTracing::x

#else
#	define T(x)
#endif	// SIGNAL_TRACING


// #pragma mark -


/*!	Updates the thread::flags field according to what signals are pending.
	Interrupts must be disabled and the thread lock must be held.
*/
static void
update_thread_signals_flag(struct thread* thread)
{
	sigset_t mask = ~atomic_get(&thread->sig_block_mask)
		| thread->sig_temp_enabled;
	if (atomic_get(&thread->sig_pending) & mask)
		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
}


void
update_current_thread_signals_flag()
{
	InterruptsSpinLocker locker(gThreadSpinlock);

	update_thread_signals_flag(thread_get_current_thread());
}


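/*!	Checks the debugger's per-thread ignore masks for \a signal and, if the
	signal isn't ignored, forwards it to the debugger.
	\return \c false if the debugger has taken over the signal and it should
	not be handled by the kernel, \c true otherwise.
*/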
static bool
notify_debugger(struct thread *thread, int signal, struct sigaction *handler,
	bool deadly)
{
	uint64 signalMask = SIGNAL_TO_MASK(signal);

	// first check the ignore signal masks the debugger specified for the thread

	if (atomic_get(&thread->debug_info.ignore_signals_once) & signalMask) {
		atomic_and(&thread->debug_info.ignore_signals_once, ~signalMask);
		return true;
	}

	if (atomic_get(&thread->debug_info.ignore_signals) & signalMask)
		return true;

	// deliver the event
	return user_debug_handle_signal(signal, handler, deadly);
}


/*! Actually handles the signal, i.e. the thread will exit, a custom signal
	handler is prepared, or whatever the signal demands.
*/
bool
handle_signals(struct thread *thread)
{
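	// sig_temp_enabled holds signals that sigsuspend() unblocked only
	// temporarily; honor them exactly once, then reset the set.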
	uint32 signalMask = atomic_get(&thread->sig_pending)
		& (~atomic_get(&thread->sig_block_mask) | thread->sig_temp_enabled);
	thread->sig_temp_enabled = 0;

	// If SIGKILL or SIGKILLTHR is pending, we ignore all other signals.
	// Otherwise, check whether the thread should stop for debugging.
	if (signalMask & KILL_SIGNALS) {
		signalMask &= KILL_SIGNALS;
	} else if (thread->debug_info.flags & B_THREAD_DEBUG_STOP) {
		user_debug_stop_thread();
	}

	if (signalMask == 0)
		return false;

	if (thread->user_thread->defer_signals > 0
		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0) {
		thread->user_thread->pending_signals = signalMask;
		return false;
	}

	thread->user_thread->pending_signals = 0;

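	// Syscall-restart bookkeeping: reading THREAD_FLAGS_DONT_RESTART_SYSCALL
	// also clears it. That flag (set e.g. by a SIGCONT sent with
	// SIGNAL_FLAG_DONT_RESTART_SYSCALL) vetoes restarting, while
	// THREAD_FLAGS_ALWAYS_RESTART_SYSCALL forces a restart even without
	// SA_RESTART in the handler's flags.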
	uint32 restartFlags = atomic_and(&thread->flags,
		~THREAD_FLAGS_DONT_RESTART_SYSCALL);
	bool alwaysRestart
		= (restartFlags & THREAD_FLAGS_ALWAYS_RESTART_SYSCALL) != 0;
	bool restart = alwaysRestart
		|| (restartFlags & THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;

	T(HandleSignals(signalMask));

	for (int32 i = 0; i < NSIG; i++) {
		bool debugSignal;
		int32 signal = i + 1;

		if ((signalMask & SIGNAL_TO_MASK(signal)) == 0)
			continue;

		// clear the signal that we will handle
		atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));

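		// notify the debugger about signals only if one is installed and it
		// asked for signal events, i.e. both flags are set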
		debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
				& (B_TEAM_DEBUG_SIGNALS | B_TEAM_DEBUG_DEBUGGER_INSTALLED));

		// TODO: since sigaction_etc() could clobber the fields at any time,
		//		we should actually copy the relevant fields atomically before
		//		accessing them (only the debugger is calling sigaction_etc()
		//		right now).
		//		Update: sigaction_etc() is only used by the userland debugger
		//		support. We can just as well restrict getting/setting signal
		//		handlers to work only when the respective thread is stopped.
		//		Then sigaction() could be used instead and we could get rid of
		//		sigaction_etc().
		struct sigaction* handler = &thread->sig_action[i];

		TRACE(("Thread 0x%lx received signal %s\n", thread->id, sigstr[signal]));

		if (handler->sa_handler == SIG_IGN) {
			// signal is to be ignored
			// ToDo: apply zombie cleaning on SIGCHLD

			// notify the debugger
			if (debugSignal)
				notify_debugger(thread, signal, handler, false);
			continue;
		} else if (handler->sa_handler == SIG_DFL) {
			// default signal behaviour
			switch (signal) {
				case SIGCHLD:
				case SIGWINCH:
				case SIGURG:
					// notify the debugger
					if (debugSignal)
						notify_debugger(thread, signal, handler, false);
					continue;

				case SIGCONT:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_CONTINUED, signal, false);

						// The standard states that the system *may* send a
						// SIGCHLD when a child is continued. I haven't found
						// a good reason why we would want to, though.
					}
					continue;

				case SIGSTOP:
				case SIGTSTP:
				case SIGTTIN:
				case SIGTTOU:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					thread->next_state = B_THREAD_SUSPENDED;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_STOPPED, signal, false);

						// send a SIGCHLD to the parent (unless it has
						// SA_NOCLDSTOP set)
						SpinLocker _(gThreadSpinlock);
						struct thread* parentThread
							= thread->team->parent->main_thread;
						struct sigaction& parentHandler
							= parentThread->sig_action[SIGCHLD - 1];
						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0)
							deliver_signal(parentThread, SIGCHLD, 0);
					}

					return true;

				case SIGSEGV:
				case SIGFPE:
				case SIGILL:
				case SIGTRAP:
				case SIGABRT:
					// If this is the main thread, we just fall through and let
					// this signal kill the team. Otherwise we send a SIGKILL to
					// the main thread first, since the signal will kill this
					// thread only.
					if (thread != thread->team->main_thread)
						send_signal(thread->team->main_thread->id, SIGKILL);
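					// fall through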
				case SIGQUIT:
				case SIGPOLL:
				case SIGPROF:
				case SIGSYS:
				case SIGVTALRM:
				case SIGXCPU:
				case SIGXFSZ:
					TRACE(("Shutting down thread 0x%lx due to signal #%ld\n",
						thread->id, signal));
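					// fall through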
				case SIGKILL:
				case SIGKILLTHR:
				default:
					// if the thread exited normally, the exit reason is already set
					if (thread->exit.reason != THREAD_RETURN_EXIT) {
						thread->exit.reason = THREAD_RETURN_INTERRUPTED;
						thread->exit.signal = (uint16)signal;
					}

					// notify the debugger
					if (debugSignal && signal != SIGKILL && signal != SIGKILLTHR
						&& !notify_debugger(thread, signal, handler, true))
						continue;

					thread_exit();
						// won't return
			}
		}

		// User defined signal handler

		// notify the debugger
		if (debugSignal && !notify_debugger(thread, signal, handler, false))
			continue;

		if (!restart
				|| (!alwaysRestart && (handler->sa_flags & SA_RESTART) == 0)) {
			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
		}

		T(ExecuteSignalHandler(signal, handler));

		TRACE(("### Setting up custom signal handler frame...\n"));
		arch_setup_signal_frame(thread, handler, signal,
			atomic_get(&thread->sig_block_mask));

		if (handler->sa_flags & SA_ONESHOT)
			handler->sa_handler = SIG_DFL;
		if ((handler->sa_flags & SA_NOMASK) == 0) {
			// Update the block mask while the signal handler is running - it
			// will be automatically restored when the signal frame is left.
			atomic_or(&thread->sig_block_mask,
				(handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGNALS);
		}

		update_current_thread_signals_flag();

		return false;
	}

	// clear syscall restart thread flag, if we're not supposed to restart the
	// syscall
	if (!restart)
		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

	update_current_thread_signals_flag();

	return false;
}


bool
is_kill_signal_pending(void)
{
	return (atomic_get(&thread_get_current_thread()->sig_pending)
		& KILL_SIGNALS) != 0;
}


bool
is_signal_blocked(int signal)
{
	return (atomic_get(&thread_get_current_thread()->sig_block_mask)
		& SIGNAL_TO_MASK(signal)) != 0;
}

/*!	Delivers the \a signal to the \a thread, but doesn't handle the signal -
	it just makes sure the thread gets the signal, i.e. unblocks it if needed.
	This function must be called with interrupts disabled and the
	thread lock held.
*/
static status_t
deliver_signal(struct thread *thread, uint signal, uint32 flags)
{
	if (flags & B_CHECK_PERMISSION) {
		// ToDo: introduce euid & uid fields to the team and check permission
	}

	if (signal == 0)
		return B_OK;

	if (thread->team == team_get_kernel_team()) {
		// Signals to kernel threads will only wake them up
		if (thread->state == B_THREAD_SUSPENDED)
			scheduler_enqueue_in_run_queue(thread);
		return B_OK;
	}

	atomic_or(&thread->sig_pending, SIGNAL_TO_MASK(signal));

	switch (signal) {
		case SIGKILL:
		{
			// Killing a team requires killing its main thread, too: make
			// SIGKILLTHR pending for it as well
			struct thread *mainThread = thread->team->main_thread;
			atomic_or(&mainThread->sig_pending, SIGNAL_TO_MASK(SIGKILLTHR));

			// Wake up main thread
			if (mainThread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(mainThread);
			else
				thread_interrupt(mainThread, true);

			update_thread_signals_flag(mainThread);

			// Supposed to fall through
		}
		case SIGKILLTHR:
			// Wake up suspended threads and interrupt waiting ones
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);
			else
				thread_interrupt(thread, true);
			break;

		case SIGCONT:
			// Wake up thread if it was suspended
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);

			if ((flags & SIGNAL_FLAG_DONT_RESTART_SYSCALL) != 0)
				atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);

			atomic_and(&thread->sig_pending, ~STOP_SIGNALS);
				// remove any pending stop signals
			break;

		default:
			if (thread->sig_pending
				& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD))) {
				// Interrupt thread if it was waiting
				thread_interrupt(thread, false);
			}
			break;
	}

	update_thread_signals_flag(thread);

	return B_OK;
}


int
send_signal_etc(pid_t id, uint signal, uint32 flags)
{
	status_t status = B_BAD_THREAD_ID;
	struct thread *thread;
	cpu_status state = 0;

	if (signal < 0 || signal > MAX_SIGNO)
		return B_BAD_VALUE;

	T(SendSignal(id, signal, flags));

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		state = disable_interrupts();

	if (id > 0) {
		// send a signal to the specified thread

		GRAB_THREAD_LOCK();

		thread = thread_get_thread_struct_locked(id);
		if (thread != NULL)
			status = deliver_signal(thread, signal, flags);
	} else {
		// send a signal to the specified process group
		// (the absolute value of the id)

		struct process_group *group;

		// TODO: handle -1 correctly
		if (id == 0 || id == -1) {
			// send a signal to the current team
			id = thread_get_current_thread()->team->id;
		} else
			id = -id;

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			GRAB_TEAM_LOCK();

		group = team_get_process_group_locked(NULL, id);
		if (group != NULL) {
			struct team *team, *next;

			// Send a signal to all teams in this process group

			for (team = group->teams; team != NULL; team = next) {
				next = team->group_next;
				id = team->id;

				GRAB_THREAD_LOCK();

				thread = thread_get_thread_struct_locked(id);
				if (thread != NULL) {
					// we don't stop because of an error sending the signal; we
					// rather want to send as many signals as possible
					status = deliver_signal(thread, signal, flags);
				}

				RELEASE_THREAD_LOCK();
			}
		}

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			RELEASE_TEAM_LOCK();

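		// re-acquire the thread lock; the common exit path below expects it
		// to be held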
		GRAB_THREAD_LOCK();
	}

	if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0)
		scheduler_reschedule_if_necessary_locked();

	RELEASE_THREAD_LOCK();

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		restore_interrupts(state);

	return status;
}


int
send_signal(pid_t threadID, uint signal)
{
	// The BeBook states that this function isn't exported for drivers,
	// but of course, it is.
	return send_signal_etc(threadID, signal, 0);
}


int
has_signals_pending(void *_thread)
{
	struct thread *thread = (struct thread *)_thread;
	if (thread == NULL)
		thread = thread_get_current_thread();

	return atomic_get(&thread->sig_pending)
		& ~atomic_get(&thread->sig_block_mask);
}


static int
sigprocmask_internal(int how, const sigset_t *set, sigset_t *oldSet)
{
	struct thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);

	if (set != NULL) {
		T(SigProcMask(how, *set));

		switch (how) {
			case SIG_BLOCK:
				atomic_or(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			case SIG_UNBLOCK:
				atomic_and(&thread->sig_block_mask, ~*set);
				break;
			case SIG_SETMASK:
				atomic_set(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			default:
				return B_BAD_VALUE;
		}

		update_current_thread_signals_flag();
	}

	if (oldSet != NULL)
		*oldSet = oldMask;

	return B_OK;
}


int
sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
{
	RETURN_AND_SET_ERRNO(sigprocmask_internal(how, set, oldSet));
}


/*!	\brief sigaction() for the specified thread.
	A \a threadID < 0 specifies the current thread.
*/
static status_t
sigaction_etc_internal(thread_id threadID, int signal, const struct sigaction *act,
	struct sigaction *oldAction)
{
	struct thread *thread;
	cpu_status state;
	status_t error = B_OK;

	if (signal < 1 || signal > MAX_SIGNO
		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	thread = (threadID < 0
		? thread_get_current_thread()
		: thread_get_thread_struct_locked(threadID));

	if (thread) {
		if (oldAction) {
			// save previous sigaction structure
			memcpy(oldAction, &thread->sig_action[signal - 1],
				sizeof(struct sigaction));
		}

		if (act) {
			T(SigAction(thread, signal, act));

			// set new sigaction structure
			memcpy(&thread->sig_action[signal - 1], act,
				sizeof(struct sigaction));
			thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
		}

		if (act && act->sa_handler == SIG_IGN) {
			// remove pending signal if it should now be ignored
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		} else if (act && act->sa_handler == SIG_DFL
			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0) {
			// remove pending signal for those signals whose default
			// action is to ignore them
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		}
	} else
		error = B_BAD_THREAD_ID;

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return error;
}


int
sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
	struct sigaction *oldAction)
{
	RETURN_AND_SET_ERRNO(sigaction_etc_internal(threadID, signal, act,
		oldAction));
}


int
sigaction(int signal, const struct sigaction *act, struct sigaction *oldAction)
{
	return sigaction_etc(-1, signal, act, oldAction);
}


/*!	Timer hook that delivers a SIGALRM to the thread that set the alarm.
	Deliberately does not reschedule, since it runs in interrupt context.
*/
static int32
alarm_event(timer *t)
{
	// The hook can be called from any context, but we have to
	// deliver the signal to the thread that originally called
	// set_alarm().
	// Since thread->alarm is this timer structure, we can just
	// cast it back - ugly but it works for now
	struct thread *thread = (struct thread *)((uint8 *)t
		- offsetof(struct thread, alarm));
		// ToDo: investigate adding one user parameter to the timer structure to fix this hack

	TRACE(("alarm_event: thread = %p\n", thread));
	send_signal_etc(thread->id, SIGALRM, B_DO_NOT_RESCHEDULE);

	return B_HANDLED_INTERRUPT;
}


/*!	Sets the alarm timer for the current thread. The timer fires at the
	specified time in the future, periodically or just once, as determined
	by \a mode.
	\return the time left until a previously set alarm would have fired.
*/
bigtime_t
set_alarm(bigtime_t time, uint32 mode)
{
	struct thread *thread = thread_get_current_thread();
	bigtime_t remainingTime = 0;

	ASSERT(B_ONE_SHOT_RELATIVE_ALARM == B_ONE_SHOT_RELATIVE_TIMER);
		// just to be sure no one changes the headers some day

	TRACE(("set_alarm: thread = %p\n", thread));

	if (thread->alarm.period)
		remainingTime = (bigtime_t)thread->alarm.schedule_time - system_time();

	cancel_timer(&thread->alarm);

	if (time != B_INFINITE_TIMEOUT)
		add_timer(&thread->alarm, &alarm_event, time, mode);
	else {
		// this marks the alarm as canceled (for returning the remaining time)
		thread->alarm.period = 0;
	}

	return remainingTime;
}


/*!	Waits for one of the specified signals and returns it in \a _signal.
*/
static status_t
sigwait_internal(const sigset_t *set, int *_signal)
{
	sigset_t requestedSignals = *set & BLOCKABLE_SIGNALS;
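		// only blockable signals can be waited for; SIGKILL[THR] and SIGSTOP
		// keep their usual semantics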

	struct thread* thread = thread_get_current_thread();

	while (true) {
		sigset_t pendingSignals = atomic_get(&thread->sig_pending);
		sigset_t blockedSignals = atomic_get(&thread->sig_block_mask);
		sigset_t pendingRequestedSignals = pendingSignals & requestedSignals;
		if (pendingRequestedSignals != 0) {
			// select the lowest pending requested signal to return in _signal
			for (int signal = 1; signal < NSIG; signal++) {
				if ((SIGNAL_TO_MASK(signal) & pendingRequestedSignals) != 0) {
					atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
					*_signal = signal;
					return B_OK;
				}
			}
		}

		if ((pendingSignals & ~blockedSignals) != 0) {
			// Non-blocked signals are pending -- return to let them be handled.
			return B_INTERRUPTED;
		}

		// No signals yet. Set the signal block mask to not include the
		// requested mask and wait until we're interrupted.
		atomic_set(&thread->sig_block_mask,
			blockedSignals & ~(requestedSignals & BLOCKABLE_SIGNALS));

		while (!has_signals_pending(thread)) {
			thread_prepare_to_block(thread, B_CAN_INTERRUPT,
				THREAD_BLOCK_TYPE_SIGNAL, NULL);
			thread_block();
		}

		// restore the original block mask
		atomic_set(&thread->sig_block_mask, blockedSignals);

		update_current_thread_signals_flag();
	}
}


int
sigwait(const sigset_t *set, int *_signal)
{
	RETURN_AND_SET_ERRNO(sigwait_internal(set, _signal));
}


/*!	Replaces the current signal block mask and waits for any event to happen.
	Before returning, the original signal block mask is restored.
*/
static status_t
sigsuspend_internal(const sigset_t *mask)
{
	T(SigSuspend(*mask));

	struct thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);

	// Set the new block mask and block until interrupted.

	atomic_set(&thread->sig_block_mask, *mask & BLOCKABLE_SIGNALS);

	while (!has_signals_pending(thread)) {
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SIGNAL, NULL);
		thread_block();
	}

	// restore the original block mask
	atomic_set(&thread->sig_block_mask, oldMask);

	thread->sig_temp_enabled = ~*mask;
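		// Signals that are deliverable only under the temporary mask must
		// still be handled after the original mask has been restored;
		// handle_signals() consumes sig_temp_enabled exactly once.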

	update_current_thread_signals_flag();

	T(SigSuspendDone());

	// we're not supposed to actually succeed
	return B_INTERRUPTED;
}


int
sigsuspend(const sigset_t *mask)
{
	RETURN_AND_SET_ERRNO(sigsuspend_internal(mask));
}


static status_t
sigpending_internal(sigset_t *set)
{
	struct thread *thread = thread_get_current_thread();

	if (set == NULL)
		return B_BAD_VALUE;

	*set = atomic_get(&thread->sig_pending);
	return B_OK;
}


int
sigpending(sigset_t *set)
{
	RETURN_AND_SET_ERRNO(sigpending_internal(set));
}


// #pragma mark - syscalls


bigtime_t
_user_set_alarm(bigtime_t time, uint32 mode)
{
	syscall_64_bit_return_value();

	return set_alarm(time, mode);
}


status_t
_user_send_signal(pid_t team, uint signal)
{
	return send_signal_etc(team, signal, B_CHECK_PERMISSION);
}


status_t
_user_sigprocmask(int how, const sigset_t *userSet, sigset_t *userOldSet)
{
	sigset_t set, oldSet;
	status_t status;

	if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
				sizeof(sigset_t)) < B_OK))
		return B_BAD_ADDRESS;

	status = sigprocmask_internal(how, userSet ? &set : NULL,
		userOldSet ? &oldSet : NULL);

	// copy old set if asked for
	if (status >= B_OK && userOldSet != NULL
		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_sigaction(int signal, const struct sigaction *userAction,
	struct sigaction *userOldAction)
{
	struct sigaction act, oact;
	status_t status;

	if ((userAction != NULL && user_memcpy(&act, userAction,
				sizeof(struct sigaction)) < B_OK)
		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction,
				sizeof(struct sigaction)) < B_OK))
		return B_BAD_ADDRESS;

	status = sigaction(signal, userAction ? &act : NULL,
		userOldAction ? &oact : NULL);

	// only copy the old action if a pointer has been given
	if (status >= B_OK && userOldAction != NULL
		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_sigwait(const sigset_t *userSet, int *_userSignal)
{
	if (userSet == NULL || _userSignal == NULL)
		return B_BAD_VALUE;

	sigset_t set;
	if (user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	int signal;
	status_t status = sigwait_internal(&set, &signal);
	if (status == B_INTERRUPTED) {
		// make sure we'll be restarted
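		// (a signal we didn't wait for arrived; after its handler has run,
		// the syscall is restarted instead of failing with B_INTERRUPTED)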
		struct thread* thread = thread_get_current_thread();
		atomic_or(&thread->flags,
			THREAD_FLAGS_ALWAYS_RESTART_SYSCALL | THREAD_FLAGS_RESTART_SYSCALL);
		return status;
	}

	return user_memcpy(_userSignal, &signal, sizeof(int));
}


status_t
_user_sigsuspend(const sigset_t *userMask)
{
	sigset_t mask;

	if (userMask == NULL)
		return B_BAD_VALUE;
	if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return sigsuspend_internal(&mask);
}


status_t
_user_sigpending(sigset_t *userSet)
{
	sigset_t set;
	int status;

	if (userSet == NULL)
		return B_BAD_VALUE;
	if (!IS_USER_ADDRESS(userSet))
		return B_BAD_ADDRESS;

	status = sigpending_internal(&set);
	if (status == B_OK
		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_set_signal_stack(const stack_t *newUserStack, stack_t *oldUserStack)
{
	struct thread *thread = thread_get_current_thread();
	struct stack_t newStack, oldStack;
	bool onStack = false;

	if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
				sizeof(stack_t)) < B_OK)
		|| (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
				sizeof(stack_t)) < B_OK))
		return B_BAD_ADDRESS;

	if (thread->signal_stack_enabled) {
		// determine whether the user thread is currently
		// on the active signal stack
		onStack = arch_on_signal_stack(thread);
	}

	if (oldUserStack != NULL) {
		oldStack.ss_sp = (void *)thread->signal_stack_base;
		oldStack.ss_size = thread->signal_stack_size;
		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
			| (onStack ? SS_ONSTACK : 0);
	}

	if (newUserStack != NULL) {
		// no flags other than SS_DISABLE are allowed
		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
			return B_BAD_VALUE;

		if ((newStack.ss_flags & SS_DISABLE) == 0) {
			// check if the size is valid
			if (newStack.ss_size < MINSIGSTKSZ)
				return B_NO_MEMORY;
			if (onStack)
				return B_NOT_ALLOWED;
			if (!IS_USER_ADDRESS(newStack.ss_sp))
				return B_BAD_VALUE;

			thread->signal_stack_base = (addr_t)newStack.ss_sp;
			thread->signal_stack_size = newStack.ss_size;
			thread->signal_stack_enabled = true;
		} else
			thread->signal_stack_enabled = false;
	}

	// only copy the old stack info if a pointer has been given
	if (oldUserStack != NULL
		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}

1175