xref: /haiku/src/system/kernel/signal.cpp (revision cf02b29e4e0dd6d61c4bb25fcc8620e99d4908bf)
/*
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
 *
 * Distributed under the terms of the MIT License.
 */

/*! POSIX signal handling routines */

#include <ksignal.h>

#include <stddef.h>
#include <string.h>

#include <OS.h>
#include <KernelExport.h>

#include <debug.h>
#include <kernel.h>
#include <kscheduler.h>
#include <sem.h>
#include <syscall_restart.h>
#include <team.h>
#include <thread.h>
#include <tracing.h>
#include <user_debugger.h>
#include <user_thread.h>
#include <util/AutoLock.h>


//#define TRACE_SIGNAL
#ifdef TRACE_SIGNAL
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#define BLOCKABLE_SIGNALS		(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)))
#define STOP_SIGNALS \
	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
#define DEFAULT_IGNORE_SIGNALS \
	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
	| SIGNAL_TO_MASK(SIGCONT))
#define NON_DEFERRABLE_SIGNALS	\
	(KILL_SIGNALS				\
	| SIGNAL_TO_MASK(SIGILL)	\
	| SIGNAL_TO_MASK(SIGFPE)	\
	| SIGNAL_TO_MASK(SIGSEGV))
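

// Illustrative note (added; not in the original source): SIGNAL_TO_MASK()
// yields the mask bit for a signal, so signal sets compose with plain bitwise
// operators, e.g.:
//
//	sigset_t mask = SIGNAL_TO_MASK(SIGINT) | SIGNAL_TO_MASK(SIGTERM);
//	if ((mask & SIGNAL_TO_MASK(SIGINT)) != 0)
//		;	// SIGINT is in the set
//	mask &= ~SIGNAL_TO_MASK(SIGTERM);	// remove SIGTERM again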


const char * const sigstr[NSIG] = {
	"NONE", "HUP", "INT", "QUIT", "ILL", "CHLD", "ABRT", "PIPE",
	"FPE", "KILL", "STOP", "SEGV", "CONT", "TSTP", "ALRM", "TERM",
	"TTIN", "TTOU", "USR1", "USR2", "WINCH", "KILLTHR", "TRAP",
	"POLL", "PROF", "SYS", "URG", "VTALRM", "XCPU", "XFSZ"
};


static status_t deliver_signal(struct thread *thread, uint signal,
	uint32 flags, bool& reschedule);


// #pragma mark - signal tracing


#if SIGNAL_TRACING

namespace SignalTracing {


class HandleSignals : public AbstractTraceEntry {
	public:
		HandleSignals(uint32 signals)
			:
			fSignals(signals)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal handle:  0x%lx", fSignals);
		}

	private:
		uint32		fSignals;
};


class ExecuteSignalHandler : public AbstractTraceEntry {
	public:
		ExecuteSignalHandler(int signal, struct sigaction* handler)
			:
			fSignal(signal),
			fHandler((void*)handler->sa_handler)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal exec handler: signal: %d, handler: %p",
				fSignal, fHandler);
		}

	private:
		int		fSignal;
		void*	fHandler;
};


class SendSignal : public AbstractTraceEntry {
	public:
		SendSignal(pid_t target, uint32 signal, uint32 flags)
			:
			fTarget(target),
			fSignal(signal),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal send: target: %ld, signal: %lu (%s), "
				"flags: 0x%lx", fTarget, fSignal,
				(fSignal < NSIG ? sigstr[fSignal] : "invalid"), fFlags);
		}

	private:
		pid_t	fTarget;
		uint32	fSignal;
		uint32	fFlags;
};


class SigAction : public AbstractTraceEntry {
	public:
		SigAction(struct thread* thread, uint32 signal,
			const struct sigaction* act)
			:
			fThread(thread->id),
			fSignal(signal),
			fAction(*act)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("signal action: thread: %ld, signal: %lu (%s), "
				"action: {handler: %p, flags: 0x%x, mask: 0x%lx}",
				fThread, fSignal,
				(fSignal < NSIG ? sigstr[fSignal] : "invalid"),
				fAction.sa_handler, fAction.sa_flags, fAction.sa_mask);
		}

	private:
		thread_id			fThread;
		uint32				fSignal;
		struct sigaction	fAction;
};


class SigProcMask : public AbstractTraceEntry {
	public:
		SigProcMask(int how, sigset_t mask)
			:
			fHow(how),
			fMask(mask),
			fOldMask(thread_get_current_thread()->sig_block_mask)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			const char* how = "invalid";
			switch (fHow) {
				case SIG_BLOCK:
					how = "block";
					break;
				case SIG_UNBLOCK:
					how = "unblock";
					break;
				case SIG_SETMASK:
					how = "set";
					break;
			}

			out.Print("signal proc mask: %s 0x%lx, old mask: 0x%lx", how, fMask,
				fOldMask);
		}

	private:
		int			fHow;
		sigset_t	fMask;
		sigset_t	fOldMask;
};

}	// namespace SignalTracing

#	define T(x)	new(std::nothrow) SignalTracing::x

#else
#	define T(x)
#endif	// SIGNAL_TRACING


// #pragma mark -


/*!	Updates the thread::flags field according to what signals are pending.
	Interrupts must be disabled and the thread lock must be held.
*/
static void
update_thread_signals_flag(struct thread* thread)
{
	if (atomic_get(&thread->sig_pending) & ~atomic_get(&thread->sig_block_mask))
		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
}


void
update_current_thread_signals_flag()
{
	InterruptsSpinLocker locker(gThreadSpinlock);

	update_thread_signals_flag(thread_get_current_thread());
}


static bool
notify_debugger(struct thread *thread, int signal, struct sigaction *handler,
	bool deadly)
{
	uint64 signalMask = SIGNAL_TO_MASK(signal);

	// first check the ignore signal masks the debugger specified for the thread

	if (atomic_get(&thread->debug_info.ignore_signals_once) & signalMask) {
		atomic_and(&thread->debug_info.ignore_signals_once, ~signalMask);
		return true;
	}

	if (atomic_get(&thread->debug_info.ignore_signals) & signalMask)
		return true;

	// deliver the event
	return user_debug_handle_signal(signal, handler, deadly);
}


/*!	Actually handles the signal - i.e. the thread will exit, a custom signal
	handler is prepared, or whatever the signal demands. Returns \c true if
	the thread is set to suspend itself (job control stop), in which case the
	caller is expected to reschedule.
*/
bool
handle_signals(struct thread *thread)
{
	uint32 signalMask = atomic_get(&thread->sig_pending)
		& ~atomic_get(&thread->sig_block_mask);

	// If SIGKILL[THR] are pending, we ignore other signals.
	// Otherwise check whether the thread shall stop for debugging.
	if (signalMask & KILL_SIGNALS) {
		signalMask &= KILL_SIGNALS;
	} else if (thread->debug_info.flags & B_THREAD_DEBUG_STOP) {
		user_debug_stop_thread();
	}

	if (signalMask == 0)
		return false;

	if (thread->user_thread->defer_signals > 0
		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0) {
		thread->user_thread->pending_signals = signalMask;
		return false;
	}

	thread->user_thread->pending_signals = 0;

	bool restart = (atomic_and(&thread->flags,
			~THREAD_FLAGS_DONT_RESTART_SYSCALL)
		& THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;

	T(HandleSignals(signalMask));

	for (int32 i = 0; i < NSIG; i++) {
		bool debugSignal;
		int32 signal = i + 1;

		if ((signalMask & SIGNAL_TO_MASK(signal)) == 0)
			continue;

		// clear the signal that we will handle
		atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));

		debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
				& (B_TEAM_DEBUG_SIGNALS | B_TEAM_DEBUG_DEBUGGER_INSTALLED));

		// TODO: since sigaction_etc() could clobber the fields at any time,
		//		we should actually copy the relevant fields atomically before
		//		accessing them (only the debugger is calling sigaction_etc()
		//		right now).
		//		Update: sigaction_etc() is only used by the userland debugger
		//		support. We can just as well restrict getting/setting signal
		//		handlers to work only when the respective thread is stopped.
		//		Then sigaction() could be used instead and we could get rid of
		//		sigaction_etc().
		struct sigaction* handler = &thread->sig_action[i];

		TRACE(("Thread 0x%lx received signal %s\n", thread->id, sigstr[signal]));

		if (handler->sa_handler == SIG_IGN) {
			// signal is to be ignored
			// ToDo: apply zombie cleaning on SIGCHLD

			// notify the debugger
			if (debugSignal)
				notify_debugger(thread, signal, handler, false);
			continue;
		} else if (handler->sa_handler == SIG_DFL) {
			// default signal behaviour
			switch (signal) {
				case SIGCHLD:
				case SIGWINCH:
				case SIGURG:
					// notify the debugger
					if (debugSignal)
						notify_debugger(thread, signal, handler, false);
					continue;

				case SIGCONT:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_CONTINUED, signal, false);

						// The standard states that the system *may* send a
						// SIGCHLD when a child is continued. I haven't found
						// a good reason why we would want to, though.
					}
					continue;

				case SIGSTOP:
				case SIGTSTP:
				case SIGTTIN:
				case SIGTTOU:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					thread->next_state = B_THREAD_SUSPENDED;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_STOPPED, signal, false);

						// send a SIGCHLD to the parent (unless it has
						// SA_NOCLDSTOP set)
						SpinLocker _(gThreadSpinlock);
						struct thread* parentThread
							= thread->team->parent->main_thread;
						struct sigaction& parentHandler
							= parentThread->sig_action[SIGCHLD - 1];
						// TODO: do we need to worry about rescheduling here?
						bool unused = false;
						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
							deliver_signal(parentThread, SIGCHLD, 0,
								unused);
						}
					}

					return true;

				case SIGSEGV:
				case SIGFPE:
				case SIGILL:
				case SIGTRAP:
					// If this is the main thread, we just fall through and let
					// this signal kill the team. Otherwise we send a SIGKILL to
					// the main thread first, since the signal will kill this
					// thread only.
					if (thread != thread->team->main_thread)
						send_signal(thread->team->main_thread->id, SIGKILL);
					// fall through
				case SIGQUIT:
				case SIGABRT:
				case SIGPOLL:
				case SIGPROF:
				case SIGSYS:
				case SIGVTALRM:
				case SIGXCPU:
				case SIGXFSZ:
					TRACE(("Shutting down thread 0x%lx due to signal #%ld\n",
						thread->id, signal));
					// fall through
				case SIGKILL:
				case SIGKILLTHR:
				default:
					// if the thread exited normally, the exit reason is already set
					if (thread->exit.reason != THREAD_RETURN_EXIT) {
						thread->exit.reason = THREAD_RETURN_INTERRUPTED;
						thread->exit.signal = (uint16)signal;
					}

					// notify the debugger
					if (debugSignal && signal != SIGKILL && signal != SIGKILLTHR
						&& !notify_debugger(thread, signal, handler, true))
						continue;

					thread_exit();
						// won't return
			}
		}

		// User defined signal handler

		// notify the debugger
		if (debugSignal && !notify_debugger(thread, signal, handler, false))
			continue;

		if (!restart || (handler->sa_flags & SA_RESTART) == 0)
			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

		T(ExecuteSignalHandler(signal, handler));

		TRACE(("### Setting up custom signal handler frame...\n"));
		arch_setup_signal_frame(thread, handler, signal,
			atomic_get(&thread->sig_block_mask));

		if (handler->sa_flags & SA_ONESHOT)
			handler->sa_handler = SIG_DFL;
		if ((handler->sa_flags & SA_NOMASK) == 0) {
			// Update the block mask while the signal handler is running - it
			// will be automatically restored when the signal frame is left.
			atomic_or(&thread->sig_block_mask,
				(handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGNALS);
		}

		update_current_thread_signals_flag();

		return false;
	}

	// clear syscall restart thread flag, if we're not supposed to restart the
	// syscall
	if (!restart)
		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

	update_current_thread_signals_flag();

	return false;
}


bool
is_kill_signal_pending(void)
{
	return (atomic_get(&thread_get_current_thread()->sig_pending)
		& KILL_SIGNALS) != 0;
}


bool
is_signal_blocked(int signal)
{
	return (atomic_get(&thread_get_current_thread()->sig_block_mask)
		& SIGNAL_TO_MASK(signal)) != 0;
}


/*!	Delivers the \a signal to the \a thread, but doesn't handle the signal -
	it just makes sure the thread gets the signal, i.e. unblocks it if needed.
	This function must be called with interrupts disabled and the
	thread lock held.
*/
static status_t
deliver_signal(struct thread *thread, uint signal, uint32 flags,
	bool& reschedule)
{
	if (flags & B_CHECK_PERMISSION) {
		// ToDo: introduce euid & uid fields to the team and check permission
	}

	if (signal == 0)
		return B_OK;

	if (thread->team == team_get_kernel_team()) {
		// Signals to kernel threads will only wake them up
		if (thread->state == B_THREAD_SUSPENDED)
			reschedule |= scheduler_enqueue_in_run_queue(thread);
		return B_OK;
	}

	atomic_or(&thread->sig_pending, SIGNAL_TO_MASK(signal));

	switch (signal) {
		case SIGKILL:
		{
			// Forward KILLTHR to the main thread of the team
			struct thread *mainThread = thread->team->main_thread;
			atomic_or(&mainThread->sig_pending, SIGNAL_TO_MASK(SIGKILLTHR));

			// Wake up main thread
			if (mainThread->state == B_THREAD_SUSPENDED)
				reschedule |= scheduler_enqueue_in_run_queue(mainThread);
			else
				thread_interrupt(mainThread, true);

			update_thread_signals_flag(mainThread);

			// Supposed to fall through
		}
		case SIGKILLTHR:
			// Wake up suspended threads and interrupt waiting ones
			if (thread->state == B_THREAD_SUSPENDED)
				reschedule |= scheduler_enqueue_in_run_queue(thread);
			else
				thread_interrupt(thread, true);
			break;

		case SIGCONT:
			// Wake up thread if it was suspended
			if (thread->state == B_THREAD_SUSPENDED)
				reschedule |= scheduler_enqueue_in_run_queue(thread);

			if ((flags & SIGNAL_FLAG_DONT_RESTART_SYSCALL) != 0)
				atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);

			atomic_and(&thread->sig_pending, ~STOP_SIGNALS);
				// remove any pending stop signals
			break;

		default:
			if (thread->sig_pending
				& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD))) {
				// Interrupt thread if it was waiting
				thread_interrupt(thread, false);
			}
			break;
	}

	update_thread_signals_flag(thread);

	return B_OK;
}
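

/*	Caller sketch (added illustration): deliver_signal() requires interrupts
	disabled and the thread lock held. send_signal_etc() below establishes
	exactly this context:

		cpu_status state = disable_interrupts();
		GRAB_THREAD_LOCK();

		bool reschedule = false;
		deliver_signal(thread, signal, flags, reschedule);

		RELEASE_THREAD_LOCK();
		restore_interrupts(state);
*/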


int
send_signal_etc(pid_t id, uint signal, uint32 flags)
{
	status_t status = B_BAD_THREAD_ID;
	struct thread *thread;
	cpu_status state = 0;
	bool reschedule = false;

	if (signal < 0 || signal > MAX_SIGNO)
		return B_BAD_VALUE;

	T(SendSignal(id, signal, flags));

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		state = disable_interrupts();

	if (id > 0) {
		// send a signal to the specified thread

		GRAB_THREAD_LOCK();

		thread = thread_get_thread_struct_locked(id);
		if (thread != NULL) {
			status = deliver_signal(thread, signal, flags,
				reschedule);
		}
	} else {
		// send a signal to the specified process group
		// (the absolute value of the id)

		struct process_group *group;

		// TODO: handle -1 correctly
		if (id == 0 || id == -1) {
			// send a signal to the current team
			id = thread_get_current_thread()->team->id;
		} else
			id = -id;

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			GRAB_TEAM_LOCK();

		group = team_get_process_group_locked(NULL, id);
		if (group != NULL) {
			struct team *team, *next;

			// Send a signal to all teams in this process group

			for (team = group->teams; team != NULL; team = next) {
				next = team->group_next;
				id = team->id;

				GRAB_THREAD_LOCK();

				thread = thread_get_thread_struct_locked(id);
				if (thread != NULL) {
					// We don't stop because of an error sending the signal;
					// rather, we want to send as many signals as possible.
					status = deliver_signal(thread, signal, flags,
						reschedule);
				}

				RELEASE_THREAD_LOCK();
			}
		}

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			RELEASE_TEAM_LOCK();

		GRAB_THREAD_LOCK();
	}

	if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0
		&& reschedule)
		scheduler_reschedule();

	RELEASE_THREAD_LOCK();

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		restore_interrupts(state);

	return status;
}


int
send_signal(pid_t threadID, uint signal)
{
	// The BeBook states that this function wouldn't be exported
	// for drivers, but of course, it is.
	return send_signal_etc(threadID, signal, 0);
}
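

/*	Usage sketch (added illustration): from timer or interrupt context, pass
	B_DO_NOT_RESCHEDULE and request the reschedule separately, as
	alarm_event() below does:

		send_signal_etc(thread->id, SIGALRM, B_DO_NOT_RESCHEDULE);
		return B_INVOKE_SCHEDULER;
*/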


int
has_signals_pending(void *_thread)
{
	struct thread *thread = (struct thread *)_thread;
	if (thread == NULL)
		thread = thread_get_current_thread();

	return atomic_get(&thread->sig_pending)
		& ~atomic_get(&thread->sig_block_mask);
}


int
sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
{
	struct thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);

	if (set != NULL) {
		T(SigProcMask(how, *set));

		switch (how) {
			case SIG_BLOCK:
				atomic_or(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			case SIG_UNBLOCK:
				atomic_and(&thread->sig_block_mask, ~*set);
				break;
			case SIG_SETMASK:
				atomic_set(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
				break;
			default:
				return B_BAD_VALUE;
		}

		update_current_thread_signals_flag();
	}

	if (oldSet != NULL)
		*oldSet = oldMask;

	return B_OK;
}
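

/*	Usage sketch (added illustration, userland-style code for the POSIX
	interface this implements): temporarily block SIGINT around a critical
	section:

		sigset_t blockSet, oldSet;
		sigemptyset(&blockSet);
		sigaddset(&blockSet, SIGINT);

		sigprocmask(SIG_BLOCK, &blockSet, &oldSet);
		// ... SIGINT stays pending here instead of being handled ...
		sigprocmask(SIG_SETMASK, &oldSet, NULL);

	Note that the masks are filtered through BLOCKABLE_SIGNALS above, so the
	kill signals and SIGSTOP can never be blocked.
*/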


/*!	\brief sigaction() for the specified thread.
	A \a threadID < 0 specifies the current thread.
*/
int
sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
	struct sigaction *oldAction)
{
	struct thread *thread;
	cpu_status state;
	status_t error = B_OK;

	if (signal < 1 || signal > MAX_SIGNO
		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	thread = (threadID < 0
		? thread_get_current_thread()
		: thread_get_thread_struct_locked(threadID));

	if (thread) {
		if (oldAction) {
			// save previous sigaction structure
			memcpy(oldAction, &thread->sig_action[signal - 1],
				sizeof(struct sigaction));
		}

		if (act) {
			T(SigAction(thread, signal, act));

			// set new sigaction structure
			memcpy(&thread->sig_action[signal - 1], act,
				sizeof(struct sigaction));
			thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
		}

		if (act && act->sa_handler == SIG_IGN) {
			// remove pending signal if it should now be ignored
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		} else if (act && act->sa_handler == SIG_DFL
			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0) {
			// remove pending signal for those signals whose default
			// action is to ignore them
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		}
	} else
		error = B_BAD_THREAD_ID;

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return error;
}


int
sigaction(int signal, const struct sigaction *act, struct sigaction *oldAction)
{
	return sigaction_etc(-1, signal, act, oldAction);
}
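

/*	Usage sketch (added illustration, userland-style code): install a handler
	that lets interrupted syscalls be restarted:

		static void
		handle_usr1(int signal)
		{
			// async-signal-safe work only
		}

		struct sigaction action;
		memset(&action, 0, sizeof(action));
		action.sa_handler = handle_usr1;
		action.sa_flags = SA_RESTART;
		sigaction(SIGUSR1, &action, NULL);
*/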


/*! Triggers a SIGALRM to the thread that issued the timer and reschedules */
static int32
alarm_event(timer *t)
{
	// The hook can be called from any context, but we have to
	// deliver the signal to the thread that originally called
	// set_alarm().
	// Since thread->alarm is this timer structure, we can just
	// cast it back - ugly but it works for now
	struct thread *thread = (struct thread *)((uint8 *)t
		- offsetof(struct thread, alarm));
		// ToDo: investigate adding one user parameter to the timer structure to fix this hack

	TRACE(("alarm_event: thread = %p\n", thread));
	send_signal_etc(thread->id, SIGALRM, B_DO_NOT_RESCHEDULE);

	return B_INVOKE_SCHEDULER;
}


/*!	Sets the alarm timer for the current thread. The timer fires at the
	specified time in the future, periodically or just once, as determined
	by \a mode.
	\return the time left until a previously set alarm would have fired.
*/
bigtime_t
set_alarm(bigtime_t time, uint32 mode)
{
	struct thread *thread = thread_get_current_thread();
	bigtime_t remainingTime = 0;

	ASSERT(B_ONE_SHOT_RELATIVE_ALARM == B_ONE_SHOT_RELATIVE_TIMER);
		// just to be sure no one changes the headers some day

	TRACE(("set_alarm: thread = %p\n", thread));

	if (thread->alarm.period)
		remainingTime = (bigtime_t)thread->alarm.schedule_time - system_time();

	cancel_timer(&thread->alarm);

	if (time != B_INFINITE_TIMEOUT)
		add_timer(&thread->alarm, &alarm_event, time, mode);
	else {
		// this marks the alarm as canceled (for returning the remaining time)
		thread->alarm.period = 0;
	}

	return remainingTime;
}
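

/*	Usage sketch (added illustration): bigtime_t is in microseconds, so this
	arms a one-shot alarm one second from now; alarm_event() above then
	delivers SIGALRM to the calling thread:

		set_alarm(1000000, B_ONE_SHOT_RELATIVE_ALARM);

		// passing B_INFINITE_TIMEOUT cancels a pending alarm
		set_alarm(B_INFINITE_TIMEOUT, B_ONE_SHOT_RELATIVE_ALARM);
*/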


/*!	Wait for the specified signals, and return the signal retrieved in
	\a _signal.
*/
int
sigwait(const sigset_t *set, int *_signal)
{
	struct thread *thread = thread_get_current_thread();

	while (!has_signals_pending(thread)) {
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SIGNAL, NULL);
		thread_block();
	}

	int signalsPending = atomic_get(&thread->sig_pending) & *set;

	update_current_thread_signals_flag();

	if (signalsPending) {
		// select the lowest pending signal to return in _signal
		for (int signal = 1; signal < NSIG; signal++) {
			if ((SIGNAL_TO_MASK(signal) & signalsPending) != 0) {
				*_signal = signal;
				return B_OK;
			}
		}
	}

	return B_INTERRUPTED;
}
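

/*	Usage sketch (added illustration): wait for SIGUSR1 or SIGUSR2
	synchronously instead of installing handlers. Note that this kernel
	variant returns B_OK/B_INTERRUPTED rather than the POSIX errno protocol:

		sigset_t set = SIGNAL_TO_MASK(SIGUSR1) | SIGNAL_TO_MASK(SIGUSR2);
		int signal;
		if (sigwait(&set, &signal) == B_OK)
			dprintf("got SIG%s\n", sigstr[signal]);
*/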


/*!	Replaces the current signal block mask and waits for any event to happen.
	Before returning, the original signal block mask is restored.
*/
int
sigsuspend(const sigset_t *mask)
{
	struct thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);

	// Set the new block mask and block until interrupted.

	atomic_set(&thread->sig_block_mask, *mask & BLOCKABLE_SIGNALS);

	while (!has_signals_pending(thread)) {
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SIGNAL, NULL);
		thread_block();
	}

	// restore the original block mask
	atomic_set(&thread->sig_block_mask, oldMask);

	update_current_thread_signals_flag();

	// we're not supposed to actually succeed
	return B_INTERRUPTED;
}
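

/*	Usage sketch (added illustration): the classic race-free wait - block the
	signal first, then atomically unblock it and wait with the old mask
	(gQuitRequested is a hypothetical flag set by the handler):

		sigset_t blockSet = SIGNAL_TO_MASK(SIGUSR1);
		sigset_t oldMask;
		sigprocmask(SIG_BLOCK, &blockSet, &oldMask);

		while (!gQuitRequested)
			sigsuspend(&oldMask);
				// SIGUSR1 cannot slip in between the check and the wait

		sigprocmask(SIG_SETMASK, &oldMask, NULL);
*/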


int
sigpending(sigset_t *set)
{
	struct thread *thread = thread_get_current_thread();

	if (set == NULL)
		return B_BAD_VALUE;

	*set = atomic_get(&thread->sig_pending);
	return B_OK;
}


//	#pragma mark -


bigtime_t
_user_set_alarm(bigtime_t time, uint32 mode)
{
	syscall_64_bit_return_value();

	return set_alarm(time, mode);
}


status_t
_user_send_signal(pid_t team, uint signal)
{
	return send_signal_etc(team, signal, B_CHECK_PERMISSION);
}


status_t
_user_sigprocmask(int how, const sigset_t *userSet, sigset_t *userOldSet)
{
	sigset_t set, oldSet;
	status_t status;

	if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
				sizeof(sigset_t)) < B_OK))
		return B_BAD_ADDRESS;

	status = sigprocmask(how, userSet ? &set : NULL,
		userOldSet ? &oldSet : NULL);

	// copy old set if asked for
	if (status >= B_OK && userOldSet != NULL
		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_sigaction(int signal, const struct sigaction *userAction,
	struct sigaction *userOldAction)
{
	struct sigaction act, oact;
	status_t status;

	if ((userAction != NULL && user_memcpy(&act, userAction,
				sizeof(struct sigaction)) < B_OK)
		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction,
				sizeof(struct sigaction)) < B_OK))
		return B_BAD_ADDRESS;

	status = sigaction(signal, userAction ? &act : NULL,
		userOldAction ? &oact : NULL);

	// only copy the old action if a pointer has been given
	if (status >= B_OK && userOldAction != NULL
		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_sigwait(const sigset_t *userSet, int *_userSignal)
{
	if (userSet == NULL || _userSignal == NULL)
		return B_BAD_VALUE;

	sigset_t set;
	if (user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	int signal;
	status_t status = sigwait(&set, &signal);
	if (status < B_OK)
		return syscall_restart_handle_post(status);

	return user_memcpy(_userSignal, &signal, sizeof(int));
}


status_t
_user_sigsuspend(const sigset_t *userMask)
{
	sigset_t mask;

	if (userMask == NULL)
		return B_BAD_VALUE;
	if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return sigsuspend(&mask);
}


status_t
_user_sigpending(sigset_t *userSet)
{
	sigset_t set;
	int status;

	if (userSet == NULL)
		return B_BAD_VALUE;
	if (!IS_USER_ADDRESS(userSet))
		return B_BAD_ADDRESS;

	status = sigpending(&set);
	if (status == B_OK
		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_set_signal_stack(const stack_t *newUserStack, stack_t *oldUserStack)
{
	struct thread *thread = thread_get_current_thread();
	struct stack_t newStack, oldStack;
	bool onStack = false;

	if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
				sizeof(stack_t)) < B_OK)
		|| (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
				sizeof(stack_t)) < B_OK))
		return B_BAD_ADDRESS;

	if (thread->signal_stack_enabled) {
		// determine whether or not the user thread is currently
		// on the active signal stack
		onStack = arch_on_signal_stack(thread);
	}

	if (oldUserStack != NULL) {
		oldStack.ss_sp = (void *)thread->signal_stack_base;
		oldStack.ss_size = thread->signal_stack_size;
		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
			| (onStack ? SS_ONSTACK : 0);
	}

	if (newUserStack != NULL) {
		// no flags other than SS_DISABLE are allowed
		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
			return B_BAD_VALUE;

		if ((newStack.ss_flags & SS_DISABLE) == 0) {
			// check if the size is valid
			if (newStack.ss_size < MINSIGSTKSZ)
				return B_NO_MEMORY;
			if (onStack)
				return B_NOT_ALLOWED;
			if (!IS_USER_ADDRESS(newStack.ss_sp))
				return B_BAD_VALUE;

			thread->signal_stack_base = (addr_t)newStack.ss_sp;
			thread->signal_stack_size = newStack.ss_size;
			thread->signal_stack_enabled = true;
		} else
			thread->signal_stack_enabled = false;
	}

	// only copy the old stack info if a pointer has been given
	if (oldUserStack != NULL
		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
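

/*	Usage sketch (added illustration, userland-style code for the
	sigaltstack() interface this syscall backs):

		stack_t stack;
		stack.ss_sp = malloc(MINSIGSTKSZ);
		stack.ss_size = MINSIGSTKSZ;
		stack.ss_flags = 0;
		sigaltstack(&stack, NULL);

	Passing SS_DISABLE in ss_flags disables the alternate stack again;
	replacing it while running on it fails with B_NOT_ALLOWED.
*/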