xref: /haiku/src/system/kernel/signal.cpp (revision e6b30aee0fd7a23d6a6baab9f3718945a0cd838a)
1 /*
2  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
3  * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
4  *
5  * Distributed under the terms of the MIT License.
6  */
7 
8 /* POSIX signals handling routines */
9 
10 #include <stddef.h>
11 #include <string.h>
12 
13 #include <OS.h>
14 #include <KernelExport.h>
15 
16 #include <condition_variable.h>
17 #include <debug.h>
18 #include <kernel.h>
19 #include <kscheduler.h>
20 #include <ksignal.h>
21 #include <sem.h>
22 #include <team.h>
23 #include <thread.h>
24 #include <tracing.h>
25 #include <user_debugger.h>
26 #include <util/AutoLock.h>
27 
28 
29 //#define TRACE_SIGNAL
30 #ifdef TRACE_SIGNAL
31 #	define TRACE(x) dprintf x
32 #else
33 #	define TRACE(x) ;
34 #endif
35 
36 
37 #define BLOCKABLE_SIGNALS		(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)))
38 #define STOP_SIGNALS \
39 	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
40 	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
41 #define DEFAULT_IGNORE_SIGNALS \
42 	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
43 	| SIGNAL_TO_MASK(SIGCONT))
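// Reader's note (restating what the macros above imply, not new policy):
// SIGNAL_TO_MASK(signal) is used throughout this file as the single-bit mask
// for a signal number, so BLOCKABLE_SIGNALS covers every signal except
// SIGKILL, SIGKILLTHR, and SIGSTOP - those can never be masked out via
// sigprocmask() nor be given a handler via sigaction_etc() (see below).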
44 
45 
46 const char * const sigstr[NSIG] = {
47 	"NONE", "HUP", "INT", "QUIT", "ILL", "CHLD", "ABRT", "PIPE",
48 	"FPE", "KILL", "STOP", "SEGV", "CONT", "TSTP", "ALRM", "TERM",
49 	"TTIN", "TTOU", "USR1", "USR2", "WINCH", "KILLTHR", "TRAP",
50 	"POLL", "PROF", "SYS", "URG", "VTALRM", "XCPU", "XFSZ"
51 };
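// sigstr[] is indexed directly by signal number (e.g. sigstr[SIGKILL] is
// "KILL"); slot 0 is only a placeholder, since there is no signal 0.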
52 
53 
54 static status_t deliver_signal(struct thread *thread, uint signal,
55 	uint32 flags);
56 
57 
58 
59 // #pragma mark - signal tracing
60 
61 
62 #ifdef SIGNAL_TRACING
63 
64 namespace SignalTracing {
65 
66 
67 class HandleSignals : public AbstractTraceEntry {
68 	public:
69 		HandleSignals(uint32 signals)
70 			:
71 			fSignals(signals)
72 		{
73 			Initialized();
74 		}
75 
76 		virtual void AddDump(TraceOutput& out)
77 		{
78 			out.Print("signal handle:  0x%lx", fSignals);
79 		}
80 
81 	private:
82 		uint32		fSignals;
83 };
84 
85 
86 class SendSignal : public AbstractTraceEntry {
87 	public:
88 		SendSignal(pid_t target, uint32 signal, uint32 flags)
89 			:
90 			fTarget(target),
91 			fSignal(signal),
92 			fFlags(flags)
93 		{
94 			Initialized();
95 		}
96 
97 		virtual void AddDump(TraceOutput& out)
98 		{
99 			out.Print("signal send: target: %ld, signal: %lu (%s), "
100 				"flags: 0x%lx", fTarget, fSignal,
101 				(fSignal < NSIG ? sigstr[fSignal] : "invalid"), fFlags);
102 		}
103 
104 	private:
105 		pid_t	fTarget;
106 		uint32	fSignal;
107 		uint32	fFlags;
108 };
109 
110 
111 class SigAction : public AbstractTraceEntry {
112 	public:
113 		SigAction(struct thread* thread, uint32 signal,
114 			const struct sigaction* act)
115 			:
116 			fThread(thread->id),
117 			fSignal(signal),
118 			fAction(*act)
119 		{
120 			Initialized();
121 		}
122 
123 		virtual void AddDump(TraceOutput& out)
124 		{
125 			out.Print("signal action: thread: %ld, signal: %lu (%s), "
126 				"action: {handler: %p, flags: 0x%x, mask: 0x%lx}",
127 				fThread, fSignal,
128 				(fSignal < NSIG ? sigstr[fSignal] : "invalid"),
129 				fAction.sa_handler, fAction.sa_flags, fAction.sa_mask);
130 		}
131 
132 	private:
133 		thread_id			fThread;
134 		uint32				fSignal;
135 		struct sigaction	fAction;
136 };
137 
138 
139 class SigProcMask : public AbstractTraceEntry {
140 	public:
141 		SigProcMask(int how, sigset_t mask)
142 			:
143 			fHow(how),
144 			fMask(mask),
145 			fOldMask(thread_get_current_thread()->sig_block_mask)
146 		{
147 			Initialized();
148 		}
149 
150 		virtual void AddDump(TraceOutput& out)
151 		{
152 			const char* how = "invalid";
153 			switch (fHow) {
154 				case SIG_BLOCK:
155 					how = "block";
156 					break;
157 				case SIG_UNBLOCK:
158 					how = "unblock";
159 					break;
160 				case SIG_SETMASK:
161 					how = "set";
162 					break;
163 			}
164 
165 			out.Print("signal proc mask: %s 0x%lx, old mask: 0x%lx", how, fMask,
166 				fOldMask);
167 		}
168 
169 	private:
170 		int			fHow;
171 		sigset_t	fMask;
172 		sigset_t	fOldMask;
173 };
174 
175 }	// namespace SignalTracing
176 
177 #	define T(x)	new(std::nothrow) SignalTracing::x
178 
179 #else
180 #	define T(x)
181 #endif	// SIGNAL_TRACING
182 
183 
184 // #pragma mark -
185 
186 
187 /*!	Updates the thread::flags field according to what signals are pending.
188 	Interrupts must be disabled and the thread lock must be held.
189 */
190 static void
191 update_thread_signals_flag(struct thread* thread)
192 {
193 	if (atomic_get(&thread->sig_pending) & ~atomic_get(&thread->sig_block_mask))
194 		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
195 	else
196 		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
197 }
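// Note: THREAD_FLAGS_SIGNALS_PENDING is presumably what the architecture
// specific kernel-exit code checks before calling handle_signals() - cf. the
// remark in sigsuspend() below that signals are only handled upon kernel exit.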
198 
199 
200 static void
201 update_current_thread_signals_flag()
202 {
203 	InterruptsSpinLocker locker(thread_spinlock);
204 
205 	update_thread_signals_flag(thread_get_current_thread());
206 }
207 
208 
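/*!	Asks the debugger whether \a signal shall be handled, unless the debugger
	has marked the signal as to-be-ignored for this thread. Returns \c true if
	normal handling shall proceed, \c false if the signal shall be skipped for
	now - this is the contract as inferred from the callers in handle_signals().
*/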
209 static bool
210 notify_debugger(struct thread *thread, int signal, struct sigaction *handler,
211 	bool deadly)
212 {
213 	uint64 signalMask = SIGNAL_TO_MASK(signal);
214 
215 	// first check the ignore signal masks the debugger specified for the thread
216 
217 	if (atomic_get(&thread->debug_info.ignore_signals_once) & signalMask) {
218 		atomic_and(&thread->debug_info.ignore_signals_once, ~signalMask);
219 		return true;
220 	}
221 
222 	if (atomic_get(&thread->debug_info.ignore_signals) & signalMask)
223 		return true;
224 
225 	// deliver the event
226 	return user_debug_handle_signal(signal, handler, deadly);
227 }
228 
229 
230 /*! Actually handles the signal - i.e. the thread will exit, a custom signal
231 	handler is prepared, or whatever the signal demands - and returns whether
232 	the caller needs to reschedule. */
233 bool
234 handle_signals(struct thread *thread)
235 {
236 	uint32 signalMask = atomic_get(&thread->sig_pending)
237 		& ~atomic_get(&thread->sig_block_mask);
238 	struct sigaction *handler;
239 	bool reschedule = false;
240 	bool restart = false;
241 	int32 i;
242 
243 	// If SIGKILL[THR] are pending, we ignore other signals.
244 	// Otherwise check whether the thread shall stop for debugging.
245 	if (signalMask & KILL_SIGNALS) {
246 		signalMask &= KILL_SIGNALS;
247 	} else if (thread->debug_info.flags & B_THREAD_DEBUG_STOP) {
248 		user_debug_stop_thread();
249 	}
250 
251 	if (signalMask == 0)
252 		return false;
253 
254 	T(HandleSignals(signalMask));
255 
256 	for (i = 0; i < NSIG; i++) {
257 		bool debugSignal;
258 		int32 signal = i + 1;
259 
260 		if ((signalMask & SIGNAL_TO_MASK(signal)) == 0)
261 			continue;
262 
263 		// clear the signal that we will handle
264 		atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
265 
266 		debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
267 				& (B_TEAM_DEBUG_SIGNALS | B_TEAM_DEBUG_DEBUGGER_INSTALLED));
268 
269 		// TODO: since sigaction_etc() could clobber the fields at any time,
270 		//		we should actually copy the relevant fields atomically before
271 		//		accessing them (only the debugger is calling sigaction_etc()
272 		//		right now).
273 		//		Update: sigaction_etc() is only used by the userland debugger
274 		//		support. We can just as well restrict getting/setting signal
275 		//		handlers to work only when the respective thread is stopped.
276 		//		Then sigaction() could be used instead and we could get rid of
277 		//		sigaction_etc().
278 		handler = &thread->sig_action[i];
279 
280 		TRACE(("Thread 0x%lx received signal %s\n", thread->id, sigstr[signal]));
281 
282 		if ((handler->sa_flags & SA_RESTART) != 0)
283 			restart = true;
284 
285 		if (handler->sa_handler == SIG_IGN) {
286 			// signal is to be ignored
287 			// ToDo: apply zombie cleaning on SIGCHLD
288 
289 			// notify the debugger
290 			if (debugSignal)
291 				notify_debugger(thread, signal, handler, false);
292 			continue;
293 		}
294 		if (handler->sa_handler == SIG_DFL) {
295 			// default signal behaviour
296 			switch (signal) {
297 				case SIGCHLD:
298 				case SIGWINCH:
299 				case SIGURG:
300 					// notify the debugger
301 					if (debugSignal)
302 						notify_debugger(thread, signal, handler, false);
303 					continue;
304 
305 				case SIGCONT:
306 					// notify the debugger
307 					if (debugSignal
308 						&& !notify_debugger(thread, signal, handler, false))
309 						continue;
310 
311 					// notify threads waiting for team state changes
312 					if (thread == thread->team->main_thread) {
313 						InterruptsSpinLocker locker(team_spinlock);
314 						team_set_job_control_state(thread->team,
315 							JOB_CONTROL_STATE_CONTINUED, signal, false);
316 
317 						// The standard states that the system *may* send a
318 						// SIGCHLD when a child is continued. I haven't found
319 						// a good reason why we would want to, though.
320 					}
321 					continue;
322 
323 				case SIGSTOP:
324 				case SIGTSTP:
325 				case SIGTTIN:
326 				case SIGTTOU:
327 					// notify the debugger
328 					if (debugSignal
329 						&& !notify_debugger(thread, signal, handler, false))
330 						continue;
331 
332 					thread->next_state = B_THREAD_SUSPENDED;
333 					reschedule = true;
334 
335 					// notify threads waiting for team state changes
336 					if (thread == thread->team->main_thread) {
337 						InterruptsSpinLocker locker(team_spinlock);
338 						team_set_job_control_state(thread->team,
339 							JOB_CONTROL_STATE_STOPPED, signal, false);
340 
341 						// send a SIGCHLD to the parent (unless it has
342 						// SA_NOCLDSTOP set)
343 						SpinLocker _(thread_spinlock);
344 						struct thread* parentThread
345 							= thread->team->parent->main_thread;
346 						struct sigaction& parentHandler
347 							= parentThread->sig_action[SIGCHLD - 1];
348 						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0)
349 							deliver_signal(parentThread, SIGCHLD, 0);
350 					}
351 					continue;
352 
353 				case SIGQUIT:
354 				case SIGILL:
355 				case SIGTRAP:
356 				case SIGABRT:
357 				case SIGFPE:
358 				case SIGSEGV:
359 				case SIGPOLL:
360 				case SIGPROF:
361 				case SIGSYS:
362 				case SIGVTALRM:
363 				case SIGXCPU:
364 				case SIGXFSZ:
365 					TRACE(("Shutting down thread 0x%lx due to signal #%ld\n",
366 						thread->id, signal));
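					// fall through: these signals share the kill path below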
367 				case SIGKILL:
368 				case SIGKILLTHR:
369 				default:
370 					// if the thread exited normally, the exit reason is already set
371 					if (thread->exit.reason != THREAD_RETURN_EXIT) {
372 						thread->exit.reason = THREAD_RETURN_INTERRUPTED;
373 						thread->exit.signal = (uint16)signal;
374 					}
375 
376 					// notify the debugger
377 					if (debugSignal && signal != SIGKILL && signal != SIGKILLTHR
378 						&& !notify_debugger(thread, signal, handler, true))
379 						continue;
380 
381 					thread_exit();
382 						// won't return
383 			}
384 		}
385 
386 		// notify the debugger
387 		if (debugSignal && !notify_debugger(thread, signal, handler, false))
388 			continue;
389 
390 		// User defined signal handler
391 		TRACE(("### Setting up custom signal handler frame...\n"));
392 		arch_setup_signal_frame(thread, handler, signal, atomic_get(&thread->sig_block_mask));
393 
394 		if (handler->sa_flags & SA_ONESHOT)
395 			handler->sa_handler = SIG_DFL;
396 		if ((handler->sa_flags & SA_NOMASK) == 0) {
397 			// Update the block mask while the signal handler is running - it
398 			// will be automatically restored when the signal frame is left.
399 			atomic_or(&thread->sig_block_mask,
400 				(handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGNALS);
401 		}
402 
403 		update_current_thread_signals_flag();
404 
405 		return reschedule;
406 	}
407 
408 	// only restart if SA_RESTART was set on at least one handler
409 	if (restart)
410 		arch_check_syscall_restart(thread);
411 
412 	update_current_thread_signals_flag();
413 
414 	return reschedule;
415 }
416 
417 
418 bool
419 is_kill_signal_pending(void)
420 {
421 	return (atomic_get(&thread_get_current_thread()->sig_pending)
422 		& KILL_SIGNALS) != 0;
423 }
424 
425 
426 bool
427 is_signal_blocked(int signal)
428 {
429 	return (atomic_get(&thread_get_current_thread()->sig_block_mask)
430 		& SIGNAL_TO_MASK(signal)) != 0;
431 }
432 
433 
434 /*!	Tries to interrupt a thread waiting for a semaphore or a condition variable.
435 	Interrupts must be disabled and the thread lock must be held.
436 */
437 static status_t
438 signal_interrupt_thread(struct thread* thread)
439 {
440 	if (thread->sem.blocking >= 0)
441 		return sem_interrupt_thread(thread);
442 	else if (thread->condition_variable_entry)
443 		return condition_variable_interrupt_thread(thread);
444 	return B_BAD_VALUE;
445 }
446 
447 
448 /*!	Delivers the \a signal to the \a thread, but doesn't handle the signal -
449 	it just makes sure the thread gets the signal, i.e. unblocks it if needed.
450 	This function must be called with interrupts disabled and the
451 	thread lock held.
452 */
453 static status_t
454 deliver_signal(struct thread *thread, uint signal, uint32 flags)
455 {
456 	if (flags & B_CHECK_PERMISSION) {
457 		// ToDo: introduce euid & uid fields to the team and check permission
458 	}
459 
460 	if (signal == 0)
461 		return B_OK;
462 
463 	if (thread->team == team_get_kernel_team()) {
464 		// Signals to kernel threads will only wake them up
465 		if (thread->state == B_THREAD_SUSPENDED)
466 			scheduler_enqueue_in_run_queue(thread);
467 		return B_OK;
468 	}
469 
470 	atomic_or(&thread->sig_pending, SIGNAL_TO_MASK(signal));
471 
472 	switch (signal) {
473 		case SIGKILL:
474 		{
475 			struct thread *mainThread = thread->team->main_thread;
476 			// Forward SIGKILLTHR to the main thread of the team
477 
478 			atomic_or(&mainThread->sig_pending, SIGNAL_TO_MASK(SIGKILLTHR));
479 			// Wake up main thread
480 			if (mainThread->state == B_THREAD_SUSPENDED)
481 				scheduler_enqueue_in_run_queue(mainThread);
482 			else if (mainThread->state == B_THREAD_WAITING)
483 				signal_interrupt_thread(mainThread);
484 
485 			// Supposed to fall through
486 		}
487 		case SIGKILLTHR:
488 			// Wake up suspended threads and interrupt waiting ones
489 			if (thread->state == B_THREAD_SUSPENDED)
490 				scheduler_enqueue_in_run_queue(thread);
491 			else if (thread->state == B_THREAD_WAITING)
492 				signal_interrupt_thread(thread);
493 			break;
494 
495 		case SIGCONT:
496 			// Wake up thread if it was suspended
497 			if (thread->state == B_THREAD_SUSPENDED)
498 				scheduler_enqueue_in_run_queue(thread);
499 
500 			atomic_and(&thread->sig_pending, ~STOP_SIGNALS);
501 				// remove any pending stop signals
502 			break;
503 
504 		default:
505 			if (thread->sig_pending
506 				& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD))) {
507 				// Interrupt thread if it was waiting
508 				if (thread->state == B_THREAD_WAITING)
509 					signal_interrupt_thread(thread);
510 			}
511 			break;
512 	}
513 
514 	update_thread_signals_flag(thread);
515 
516 	return B_OK;
517 }
518 
519 
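/*!	Sends \a signal to the thread or process group identified by \a id. This
	is only a summary of the implementation below: a positive \a id addresses
	a single thread; \a id == 0 (and, for now, -1 - see the TODO) is replaced
	by the current team's ID and, like any other negative \a id (negated),
	treated as a process group ID, in which case the signal is delivered to
	the main thread of every team in that group. \a signal may be 0 to merely
	check that the target exists. \a flags is a combination of
	B_CHECK_PERMISSION, B_DO_NOT_RESCHEDULE, and SIGNAL_FLAG_TEAMS_LOCKED
	(the latter meaning the caller already holds the team lock with interrupts
	disabled). Returns B_OK, B_BAD_VALUE for an invalid signal, or
	B_BAD_THREAD_ID if no target was found.
*/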
520 int
521 send_signal_etc(pid_t id, uint signal, uint32 flags)
522 {
523 	status_t status = B_BAD_THREAD_ID;
524 	struct thread *thread;
525 	cpu_status state = 0;
526 
527 	if (signal < 0 || signal > MAX_SIGNO)
528 		return B_BAD_VALUE;
529 
530 	T(SendSignal(id, signal, flags));
531 
532 	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
533 		state = disable_interrupts();
534 
535 	if (id > 0) {
536 		// send a signal to the specified thread
537 
538 		GRAB_THREAD_LOCK();
539 
540 		thread = thread_get_thread_struct_locked(id);
541 		if (thread != NULL)
542 			status = deliver_signal(thread, signal, flags);
543 	} else {
544 		// send a signal to the specified process group
545 		// (the absolute value of the id)
546 
547 		struct process_group *group;
548 
549 		// TODO: handle -1 correctly
550 		if (id == 0 || id == -1) {
551 			// send a signal to the current team
552 			id = thread_get_current_thread()->team->id;
553 		} else
554 			id = -id;
555 
556 		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
557 			GRAB_TEAM_LOCK();
558 
559 		group = team_get_process_group_locked(NULL, id);
560 		if (group != NULL) {
561 			struct team *team, *next;
562 
563 			// Send a signal to all teams in this process group
564 
565 			for (team = group->teams; team != NULL; team = next) {
566 				next = team->group_next;
567 				id = team->id;
568 
569 				GRAB_THREAD_LOCK();
570 
571 				thread = thread_get_thread_struct_locked(id);
572 				if (thread != NULL) {
573 					// we don't stop because of an error sending the signal;
574 					// rather, we want to deliver it to as many teams as possible
575 					status = deliver_signal(thread, signal, flags);
576 				}
577 
578 				RELEASE_THREAD_LOCK();
579 			}
580 		}
581 
582 		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
583 			RELEASE_TEAM_LOCK();
584 
585 		GRAB_THREAD_LOCK();
586 	}
587 
588 	// ToDo: maybe the scheduler should only be invoked if there is reason to do it?
589 	//	(i.e. deliver_signal() moved some threads into the run queue?)
590 	if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0)
591 		scheduler_reschedule();
592 
593 	RELEASE_THREAD_LOCK();
594 
595 	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
596 		restore_interrupts(state);
597 
598 	return status;
599 }
600 
601 
602 int
603 send_signal(pid_t threadID, uint signal)
604 {
605 	// The BeBook states that this function is not exported
606 	// for drivers, but it is.
607 	return send_signal_etc(threadID, signal, 0);
608 }
609 
610 
611 int
612 has_signals_pending(void *_thread)
613 {
614 	struct thread *thread = (struct thread *)_thread;
615 	if (thread == NULL)
616 		thread = thread_get_current_thread();
617 
618 	return atomic_get(&thread->sig_pending) & ~atomic_get(&thread->sig_block_mask);
619 }
620 
621 
622 int
623 sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
624 {
625 	struct thread *thread = thread_get_current_thread();
626 	sigset_t oldMask = atomic_get(&thread->sig_block_mask);
627 
628 	if (set != NULL) {
629 		T(SigProcMask(how, *set));
630 
631 		switch (how) {
632 			case SIG_BLOCK:
633 				atomic_or(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
634 				break;
635 			case SIG_UNBLOCK:
636 				atomic_and(&thread->sig_block_mask, ~*set);
637 				break;
638 			case SIG_SETMASK:
639 				atomic_set(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
640 				break;
641 			default:
642 				return B_BAD_VALUE;
643 		}
644 
645 		update_current_thread_signals_flag();
646 	}
647 
648 	if (oldSet != NULL)
649 		*oldSet = oldMask;
650 
651 	return B_OK;
652 }
653 
654 
655 /**	\brief sigaction() for the specified thread.
656  *
657  *	A \a threadID < 0 specifies the current thread.
658  *
659  */
660 
661 int
662 sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
663 	struct sigaction *oldAction)
664 {
665 	struct thread *thread;
666 	cpu_status state;
667 	status_t error = B_OK;
668 
669 	if (signal < 1 || signal > MAX_SIGNO
670 		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
671 		return B_BAD_VALUE;
672 
673 	state = disable_interrupts();
674 	GRAB_THREAD_LOCK();
675 
676 	thread = (threadID < 0
677 		? thread_get_current_thread()
678 		: thread_get_thread_struct_locked(threadID));
679 
680 	if (thread) {
681 		if (oldAction) {
682 			// save previous sigaction structure
683 			memcpy(oldAction, &thread->sig_action[signal - 1],
684 				sizeof(struct sigaction));
685 		}
686 
687 		if (act) {
688 			T(SigAction(thread, signal, act));
689 
690 			// set new sigaction structure
691 			memcpy(&thread->sig_action[signal - 1], act,
692 				sizeof(struct sigaction));
693 			thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
694 		}
695 
696 		if (act && act->sa_handler == SIG_IGN) {
697 			// remove pending signal if it should now be ignored
698 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
699 		} else if (act && act->sa_handler == SIG_DFL
700 			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0) {
701 			// remove pending signal for those signals whose default
702 			// action is to ignore them
703 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
704 		}
705 	} else
706 		error = B_BAD_THREAD_ID;
707 
708 	RELEASE_THREAD_LOCK();
709 	restore_interrupts(state);
710 
711 	return error;
712 }
713 
714 
715 int
716 sigaction(int signal, const struct sigaction *act, struct sigaction *oldAction)
717 {
718 	return sigaction_etc(-1, signal, act, oldAction);
719 }
720 
721 
722 /** Sends a SIGALRM to the thread that set the alarm and requests a reschedule */
723 
724 static int32
725 alarm_event(timer *t)
726 {
727 	// The hook can be called from any context, but we have to
728 	// deliver the signal to the thread that originally called
729 	// set_alarm().
730 	// Since thread->alarm is this timer structure, we can just
731 	// cast it back - ugly but it works for now
732 	struct thread *thread = (struct thread *)((uint8 *)t - offsetof(struct thread, alarm));
733 		// ToDo: investigate adding one user parameter to the timer structure to fix this hack
734 
735 	TRACE(("alarm_event: thread = %p\n", thread));
736 	send_signal_etc(thread->id, SIGALRM, B_DO_NOT_RESCHEDULE);
737 
738 	return B_INVOKE_SCHEDULER;
739 }
740 
741 
742 /** Sets the alarm timer for the current thread. The timer fires at the
743  *	specified time in the future, periodically or just once, as determined
744  *	by \a mode.
745  *	\return the time left until a previously set alarm would have fired.
746  */
747 
748 bigtime_t
749 set_alarm(bigtime_t time, uint32 mode)
750 {
751 	struct thread *thread = thread_get_current_thread();
752 	bigtime_t remainingTime = 0;
753 
754 	ASSERT(B_ONE_SHOT_RELATIVE_ALARM == B_ONE_SHOT_RELATIVE_TIMER);
755 		// just to be sure no one changes the headers some day
756 
757 	TRACE(("set_alarm: thread = %p\n", thread));
758 
759 	if (thread->alarm.period)
760 		remainingTime = (bigtime_t)thread->alarm.entry.key - system_time();
761 
762 	cancel_timer(&thread->alarm);
763 
764 	if (time != B_INFINITE_TIMEOUT)
765 		add_timer(&thread->alarm, &alarm_event, time, mode);
766 	else {
767 		// this marks the alarm as canceled (for returning the remaining time)
768 		thread->alarm.period = 0;
769 	}
770 
771 	return remainingTime;
772 }
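/*	Usage sketch (illustrative only, not taken from a caller in this file;
	bigtime_t values are microseconds):

		set_alarm(1000000, B_ONE_SHOT_RELATIVE_ALARM);
			// deliver SIGALRM to the calling thread in about one second
		set_alarm(B_INFINITE_TIMEOUT, B_ONE_SHOT_RELATIVE_ALARM);
			// cancel any alarm that is still pending
*/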
773 
774 
775 /**	Replaces the current signal block mask and waits for a signal to arrive.
776  *	Before returning, the original signal block mask is restored.
777  */
778 
779 int
780 sigsuspend(const sigset_t *mask)
781 {
782 	struct thread *thread = thread_get_current_thread();
783 	sigset_t oldMask = atomic_get(&thread->sig_block_mask);
784 	cpu_status state;
785 
786 	// set the new block mask and suspend ourselves - we cannot use
787 	// SIGSTOP for this, as signals are only handled upon kernel exit
788 
789 	atomic_set(&thread->sig_block_mask, *mask);
790 
791 	while (true) {
792 		thread->next_state = B_THREAD_SUSPENDED;
793 
794 		state = disable_interrupts();
795 		GRAB_THREAD_LOCK();
796 
797 		update_thread_signals_flag(thread);
798 
799 		scheduler_reschedule();
800 
801 		RELEASE_THREAD_LOCK();
802 		restore_interrupts(state);
803 
804 		if (has_signals_pending(thread))
805 			break;
806 	}
807 
808 	// restore the original block mask
809 	atomic_set(&thread->sig_block_mask, oldMask);
810 
811 	update_current_thread_signals_flag();
812 
813 	// we're not supposed to actually succeed
814 	// ToDo: could this get us into trouble with SA_RESTART handlers?
815 	return B_INTERRUPTED;
816 }
817 
818 
819 int
820 sigpending(sigset_t *set)
821 {
822 	struct thread *thread = thread_get_current_thread();
823 
824 	if (set == NULL)
825 		return B_BAD_VALUE;
826 
827 	*set = atomic_get(&thread->sig_pending);
828 	return B_OK;
829 }
830 
831 
832 //	#pragma mark -
833 
834 
835 bigtime_t
836 _user_set_alarm(bigtime_t time, uint32 mode)
837 {
838 	syscall_64_bit_return_value();
839 
840 	return set_alarm(time, mode);
841 }
842 
843 
844 status_t
845 _user_send_signal(pid_t team, uint signal)
846 {
847 	return send_signal_etc(team, signal, B_CHECK_PERMISSION);
848 }
849 
850 
851 status_t
852 _user_sigprocmask(int how, const sigset_t *userSet, sigset_t *userOldSet)
853 {
854 	sigset_t set, oldSet;
855 	status_t status;
856 
857 	if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
858 		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
859 				sizeof(sigset_t)) < B_OK))
860 		return B_BAD_ADDRESS;
861 
862 	status = sigprocmask(how, userSet ? &set : NULL,
863 		userOldSet ? &oldSet : NULL);
864 
865 	// copy old set if asked for
866 	if (status >= B_OK && userOldSet != NULL
867 		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
868 		return B_BAD_ADDRESS;
869 
870 	return status;
871 }
872 
873 
874 status_t
875 _user_sigaction(int signal, const struct sigaction *userAction,
876 	struct sigaction *userOldAction)
877 {
878 	struct sigaction act, oact;
879 	status_t status;
880 
881 	if ((userAction != NULL && user_memcpy(&act, userAction,
882 				sizeof(struct sigaction)) < B_OK)
883 		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction,
884 				sizeof(struct sigaction)) < B_OK))
885 		return B_BAD_ADDRESS;
886 
887 	status = sigaction(signal, userAction ? &act : NULL,
888 		userOldAction ? &oact : NULL);
889 
890 	// only copy the old action if a pointer has been given
891 	if (status >= B_OK && userOldAction != NULL
892 		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
893 		return B_BAD_ADDRESS;
894 
895 	return status;
896 }
897 
898 
899 status_t
900 _user_sigsuspend(const sigset_t *userMask)
901 {
902 	sigset_t mask;
903 
904 	if (userMask == NULL)
905 		return B_BAD_VALUE;
906 	if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
907 		return B_BAD_ADDRESS;
908 
909 	return sigsuspend(&mask);
910 }
911 
912 
913 status_t
914 _user_sigpending(sigset_t *userSet)
915 {
916 	sigset_t set;
917 	status_t status;
918 
919 	if (userSet == NULL)
920 		return B_BAD_VALUE;
921 	if (!IS_USER_ADDRESS(userSet))
922 		return B_BAD_ADDRESS;
923 
924 	status = sigpending(&set);
925 	if (status == B_OK
926 		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
927 		return B_BAD_ADDRESS;
928 
929 	return status;
930 }
931 
932 
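/*!	Presumably the backend of the userland sigaltstack() call: installs or
	disables an alternate signal stack for the calling thread and optionally
	returns the previous one, following the usual POSIX rules (SS_DISABLE to
	turn the stack off, SS_ONSTACK reported while running on it, ss_size of
	at least MINSIGSTKSZ, and no change allowed while the stack is in use).
*/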
933 status_t
934 _user_set_signal_stack(const stack_t *newUserStack, stack_t *oldUserStack)
935 {
936 	struct thread *thread = thread_get_current_thread();
937 	struct stack_t newStack, oldStack;
938 	bool onStack = false;
939 
940 	if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
941 				sizeof(stack_t)) < B_OK)
942 		|| (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
943 				sizeof(stack_t)) < B_OK))
944 		return B_BAD_ADDRESS;
945 
946 	if (thread->signal_stack_enabled) {
947 		// determine whether or not the user thread is currently
948 		// on the active signal stack
949 		onStack = arch_on_signal_stack(thread);
950 	}
951 
952 	if (oldUserStack != NULL) {
953 		oldStack.ss_sp = (void *)thread->signal_stack_base;
954 		oldStack.ss_size = thread->signal_stack_size;
955 		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
956 			| (onStack ? SS_ONSTACK : 0);
957 	}
958 
959 	if (newUserStack != NULL) {
960 		// no flags other than SS_DISABLE are allowed
961 		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
962 			return B_BAD_VALUE;
963 
964 		if ((newStack.ss_flags & SS_DISABLE) == 0) {
965 			// check if the size is valid
966 			if (newStack.ss_size < MINSIGSTKSZ)
967 				return B_NO_MEMORY;
968 			if (onStack)
969 				return B_NOT_ALLOWED;
970 			if (!IS_USER_ADDRESS(newStack.ss_sp))
971 				return B_BAD_VALUE;
972 
973 			thread->signal_stack_base = (addr_t)newStack.ss_sp;
974 			thread->signal_stack_size = newStack.ss_size;
975 			thread->signal_stack_enabled = true;
976 		} else
977 			thread->signal_stack_enabled = false;
978 	}
979 
980 	// only copy the old stack info if a pointer has been given
981 	if (oldUserStack != NULL
982 		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
983 		return B_BAD_ADDRESS;
984 
985 	return B_OK;
986 }
987 
988