xref: /haiku/src/system/kernel/signal.cpp (revision 020cbad9d40235a2c50a81a42d69912a5ff8fbc4)
1 /*
2  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
3  * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
4  *
5  * Distributed under the terms of the MIT License.
6  */
7 
8 /* POSIX signal handling routines */
9 
10 #include <stddef.h>
11 #include <string.h>
12 
13 #include <OS.h>
14 #include <KernelExport.h>
15 
16 #include <condition_variable.h>
17 #include <debug.h>
18 #include <kernel.h>
19 #include <kscheduler.h>
20 #include <ksignal.h>
21 #include <sem.h>
22 #include <team.h>
23 #include <thread.h>
24 #include <tracing.h>
25 #include <user_debugger.h>
26 #include <util/AutoLock.h>
27 
28 
29 //#define TRACE_SIGNAL
30 #ifdef TRACE_SIGNAL
31 #	define TRACE(x) dprintf x
32 #else
33 #	define TRACE(x) ;
34 #endif
35 
36 
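// signals that may be blocked: everything except the kill signals and SIGSTOP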
37 #define BLOCKABLE_SIGNALS		(~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP)))
38 #define STOP_SIGNALS \
39 	(SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
40 	| SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
41 #define DEFAULT_IGNORE_SIGNALS \
42 	(SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
43 	| SIGNAL_TO_MASK(SIGCONT))
44 
45 
46 const char * const sigstr[NSIG] = {
47 	"NONE", "HUP", "INT", "QUIT", "ILL", "CHLD", "ABRT", "PIPE",
48 	"FPE", "KILL", "STOP", "SEGV", "CONT", "TSTP", "ALRM", "TERM",
49 	"TTIN", "TTOU", "USR1", "USR2", "WINCH", "KILLTHR", "TRAP",
50 	"POLL", "PROF", "SYS", "URG", "VTALRM", "XCPU", "XFSZ"
51 };
52 
53 
54 static status_t deliver_signal(struct thread *thread, uint signal,
55 	uint32 flags);
56 
57 
58 
59 // #pragma mark - signal tracing
60 
61 
62 #ifdef SIGNAL_TRACING
63 
64 namespace SignalTracing {
65 
66 
67 class HandleSignals : public AbstractTraceEntry {
68 	public:
69 		HandleSignals(uint32 signals)
70 			:
71 			fSignals(signals)
72 		{
73 			Initialized();
74 		}
75 
76 		virtual void AddDump(TraceOutput& out)
77 		{
78 			out.Print("signal handle:  0x%lx", fSignals);
79 		}
80 
81 	private:
82 		uint32		fSignals;
83 };
84 
85 
86 class SendSignal : public AbstractTraceEntry {
87 	public:
88 		SendSignal(pid_t target, uint32 signal, uint32 flags)
89 			:
90 			fTarget(target),
91 			fSignal(signal),
92 			fFlags(flags)
93 		{
94 			Initialized();
95 		}
96 
97 		virtual void AddDump(TraceOutput& out)
98 		{
99 			out.Print("signal send: target: %ld, signal: %lu (%s), "
100 				"flags: 0x%lx", fTarget, fSignal,
101 				(fSignal < NSIG ? sigstr[fSignal] : "invalid"), fFlags);
102 		}
103 
104 	private:
105 		pid_t	fTarget;
106 		uint32	fSignal;
107 		uint32	fFlags;
108 };
109 
110 
111 class SigAction : public AbstractTraceEntry {
112 	public:
113 		SigAction(struct thread* thread, uint32 signal,
114 			const struct sigaction* act)
115 			:
116 			fThread(thread->id),
117 			fSignal(signal),
118 			fAction(*act)
119 		{
120 			Initialized();
121 		}
122 
123 		virtual void AddDump(TraceOutput& out)
124 		{
125 			out.Print("signal action: thread: %ld, signal: %lu (%s), "
126 				"action: {handler: %p, flags: 0x%x, mask: 0x%lx}",
127 				fThread, fSignal,
128 				(fSignal < NSIG ? sigstr[fSignal] : "invalid"),
129 				fAction.sa_handler, fAction.sa_flags, fAction.sa_mask);
130 		}
131 
132 	private:
133 		thread_id			fThread;
134 		uint32				fSignal;
135 		struct sigaction	fAction;
136 };
137 
138 
139 class SigProcMask : public AbstractTraceEntry {
140 	public:
141 		SigProcMask(int how, sigset_t mask)
142 			:
143 			fHow(how),
144 			fMask(mask),
145 			fOldMask(thread_get_current_thread()->sig_block_mask)
146 		{
147 			Initialized();
148 		}
149 
150 		virtual void AddDump(TraceOutput& out)
151 		{
152 			const char* how = "invalid";
153 			switch (fHow) {
154 				case SIG_BLOCK:
155 					how = "block";
156 					break;
157 				case SIG_UNBLOCK:
158 					how = "unblock";
159 					break;
160 				case SIG_SETMASK:
161 					how = "set";
162 					break;
163 			}
164 
165 			out.Print("signal proc mask: %s 0x%lx, old mask: 0x%lx", how, fMask,
166 				fOldMask);
167 		}
168 
169 	private:
170 		int			fHow;
171 		sigset_t	fMask;
172 		sigset_t	fOldMask;
173 };
174 
175 }	// namespace SignalTracing
176 
177 #	define T(x)	new(std::nothrow) SignalTracing::x
178 
179 #else
180 #	define T(x)
181 #endif	// SIGNAL_TRACING
182 
183 
184 // #pragma mark -
185 
186 
187 /*!	Updates the thread::flags field according to what signals are pending.
188 	Interrupts must be disabled and the thread lock must be held.
189 */
190 static void
191 update_thread_signals_flag(struct thread* thread)
192 {
193 	if (atomic_get(&thread->sig_pending) & ~atomic_get(&thread->sig_block_mask))
194 		atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
195 	else
196 		atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
197 }
198 
199 
200 static void
201 update_current_thread_signals_flag()
202 {
203 	InterruptsSpinLocker locker(thread_spinlock);
204 
205 	update_thread_signals_flag(thread_get_current_thread());
206 }
207 
208 
209 static bool
210 notify_debugger(struct thread *thread, int signal, struct sigaction *handler,
211 	bool deadly)
212 {
213 	uint64 signalMask = SIGNAL_TO_MASK(signal);
214 
215 	// first check the ignore signal masks the debugger specified for the thread
216 
217 	if (atomic_get(&thread->debug_info.ignore_signals_once) & signalMask) {
218 		atomic_and(&thread->debug_info.ignore_signals_once, ~signalMask);
219 		return true;
220 	}
221 
222 	if (atomic_get(&thread->debug_info.ignore_signals) & signalMask)
223 		return true;
224 
225 	// deliver the event
226 	return user_debug_handle_signal(signal, handler, deadly);
227 }
228 
229 
230 /*! Actually handles the signal - i.e. the thread will exit, a custom signal
231 	handler is prepared, or whatever the signal demands.
232 */
233 bool
234 handle_signals(struct thread *thread)
235 {
236 	uint32 signalMask = atomic_get(&thread->sig_pending)
237 		& ~atomic_get(&thread->sig_block_mask);
238 	struct sigaction *handler;
239 	bool reschedule = false;
240 	bool restart = false;
241 	int32 i;
242 
243 	// If SIGKILL[THR] are pending, we ignore other signals.
244 	// Otherwise, check whether the thread should stop for debugging.
245 	if (signalMask & KILL_SIGNALS) {
246 		signalMask &= KILL_SIGNALS;
247 	} else if (thread->debug_info.flags & B_THREAD_DEBUG_STOP) {
248 		user_debug_stop_thread();
249 	}
250 
251 	if (signalMask == 0)
252 		return false;
253 
254 	T(HandleSignals(signalMask));
255 
256 	for (i = 0; i < NSIG; i++) {
257 		bool debugSignal;
258 		int32 signal = i + 1;
259 
260 		if ((signalMask & SIGNAL_TO_MASK(signal)) == 0)
261 			continue;
262 
263 		// clear the signal that we will handle
264 		atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
265 
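		// the debugger is only notified if the team has a debugger installed
		// and signal notifications enabled, i.e. both debug flags are set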
266 		debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
267 				& (B_TEAM_DEBUG_SIGNALS | B_TEAM_DEBUG_DEBUGGER_INSTALLED));
268 
269 		// TODO: since sigaction_etc() could clobber the fields at any time,
270 		//		we should actually copy the relevant fields atomically before
271 		//		accessing them (only the debugger is calling sigaction_etc()
272 		//		right now).
273 		//		Update: sigaction_etc() is only used by the userland debugger
274 		//		support. We can just as well restrict getting/setting signal
275 		//		handlers to work only when the respective thread is stopped.
276 		//		Then sigaction() could be used instead and we could get rid of
277 		//		sigaction_etc().
278 		handler = &thread->sig_action[i];
279 
280 		TRACE(("Thread 0x%lx received signal %s\n", thread->id, sigstr[signal]));
281 
282 		if ((handler->sa_flags & SA_RESTART) != 0)
283 			restart = true;
284 
285 		if (handler->sa_handler == SIG_IGN) {
286 			// signal is to be ignored
287 			// ToDo: apply zombie cleaning on SIGCHLD
288 
289 			// notify the debugger
290 			if (debugSignal)
291 				notify_debugger(thread, signal, handler, false);
292 			continue;
293 		}
294 		if (handler->sa_handler == SIG_DFL) {
295 			// default signal behaviour
296 			switch (signal) {
297 				case SIGCHLD:
298 				case SIGWINCH:
299 				case SIGURG:
300 					// notify the debugger
301 					if (debugSignal)
302 						notify_debugger(thread, signal, handler, false);
303 					continue;
304 
305 				case SIGCONT:
306 					// notify the debugger
307 					if (debugSignal
308 						&& !notify_debugger(thread, signal, handler, false))
309 						continue;
310 
311 					// notify threads waiting for team state changes
312 					if (thread == thread->team->main_thread) {
313 						InterruptsSpinLocker locker(team_spinlock);
314 						team_set_job_control_state(thread->team,
315 							JOB_CONTROL_STATE_CONTINUED, signal, false);
316 
317 						// The standard states that the system *may* send a
318 						// SIGCHLD when a child is continued. I haven't found
319 						// a good reason why we would want to, though.
320 					}
321 					continue;
322 
323 				case SIGSTOP:
324 				case SIGTSTP:
325 				case SIGTTIN:
326 				case SIGTTOU:
327 					// notify the debugger
328 					if (debugSignal
329 						&& !notify_debugger(thread, signal, handler, false))
330 						continue;
331 
332 					thread->next_state = B_THREAD_SUSPENDED;
333 					reschedule = true;
334 
335 					// notify threads waiting for team state changes
336 					if (thread == thread->team->main_thread) {
337 						InterruptsSpinLocker locker(team_spinlock);
338 						team_set_job_control_state(thread->team,
339 							JOB_CONTROL_STATE_STOPPED, signal, false);
340 
341 						// send a SIGCHLD to the parent, unless it has
342 						// SA_NOCLDSTOP set
343 						SpinLocker _(thread_spinlock);
344 						struct thread* parentThread
345 							= thread->team->parent->main_thread;
346 						struct sigaction& parentHandler
347 							= parentThread->sig_action[SIGCHLD - 1];
348 						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0)
349 							deliver_signal(parentThread, SIGCHLD, 0);
350 					}
351 					continue;
352 
353 				case SIGQUIT:
354 				case SIGILL:
355 				case SIGTRAP:
356 				case SIGABRT:
357 				case SIGFPE:
358 				case SIGSEGV:
359 				case SIGPOLL:
360 				case SIGPROF:
361 				case SIGSYS:
362 				case SIGVTALRM:
363 				case SIGXCPU:
364 				case SIGXFSZ:
365 					TRACE(("Shutting down thread 0x%lx due to signal #%ld\n",
366 						thread->id, signal));
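					// fall through: these deadly signals are handled like
					// SIGKILL/SIGKILLTHR below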
367 				case SIGKILL:
368 				case SIGKILLTHR:
369 				default:
370 					// if the thread exited normally, the exit reason is already set
371 					if (thread->exit.reason != THREAD_RETURN_EXIT) {
372 						thread->exit.reason = THREAD_RETURN_INTERRUPTED;
373 						thread->exit.signal = (uint16)signal;
374 					}
375 
376 					// notify the debugger
377 					if (debugSignal && signal != SIGKILL && signal != SIGKILLTHR
378 						&& !notify_debugger(thread, signal, handler, true))
379 						continue;
380 
381 					thread_exit();
382 						// won't return
383 			}
384 		}
385 
386 		// notify the debugger
387 		if (debugSignal && !notify_debugger(thread, signal, handler, false))
388 			continue;
389 
390 		// User defined signal handler
391 		TRACE(("### Setting up custom signal handler frame...\n"));
392 		arch_setup_signal_frame(thread, handler, signal, atomic_get(&thread->sig_block_mask));
393 
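		// SA_ONESHOT: the handler is reset to the default action after its
		// first invocation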
394 		if (handler->sa_flags & SA_ONESHOT)
395 			handler->sa_handler = SIG_DFL;
396 		if ((handler->sa_flags & SA_NOMASK) == 0) {
397 			// Update the block mask while the signal handler is running - it
398 			// will be automatically restored when the signal frame is left.
399 			atomic_or(&thread->sig_block_mask,
400 				(handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGNALS);
401 		}
402 
403 		update_current_thread_signals_flag();
404 
405 		return reschedule;
406 	}
407 
408 	// only restart if SA_RESTART was set on at least one handler
409 	if (restart)
410 		arch_check_syscall_restart(thread);
411 
412 	update_current_thread_signals_flag();
413 
414 	return reschedule;
415 }
416 
417 
418 bool
419 is_kill_signal_pending(void)
420 {
421 	return (atomic_get(&thread_get_current_thread()->sig_pending)
422 		& KILL_SIGNALS) != 0;
423 }
424 
425 
426 bool
427 is_signal_blocked(int signal)
428 {
429 	return (atomic_get(&thread_get_current_thread()->sig_block_mask)
430 		& SIGNAL_TO_MASK(signal)) != 0;
431 }
432 
433 
434 /*!	Tries to interrupt a thread waiting for a semaphore or a condition variable.
435 	Interrupts must be disabled and the thread lock must be held.
436 */
437 static status_t
438 signal_interrupt_thread(struct thread* thread)
439 {
440 	if (thread->sem.blocking >= 0)
441 		return sem_interrupt_thread(thread);
442 	else if (thread->condition_variable_entry)
443 		return condition_variable_interrupt_thread(thread);
444 	return B_BAD_VALUE;
445 }
446 
447 
448 /*!	Delivers the \a signal to the \a thread, but doesn't handle the signal -
449 	it just makes sure the thread gets the signal, i.e. unblocks it if needed.
450 	This function must be called with interrupts disabled and the
451 	thread lock held.
452 */
453 static status_t
454 deliver_signal(struct thread *thread, uint signal, uint32 flags)
455 {
456 	if (flags & B_CHECK_PERMISSION) {
457 		// ToDo: introduce euid & uid fields to the team and check permission
458 	}
459 
460 	if (signal == 0)
461 		return B_OK;
462 
463 	if (thread->team == team_get_kernel_team()) {
464 		// Signals to kernel threads will only wake them up
465 		if (thread->state == B_THREAD_SUSPENDED) {
466 			thread->state = thread->next_state = B_THREAD_READY;
467 			scheduler_enqueue_in_run_queue(thread);
468 		}
469 		return B_OK;
470 	}
471 
472 	atomic_or(&thread->sig_pending, SIGNAL_TO_MASK(signal));
473 
474 	switch (signal) {
475 		case SIGKILL:
476 		{
477 			struct thread *mainThread = thread->team->main_thread;
478 			// Forward KILLTHR to the main thread of the team
479 
480 			atomic_or(&mainThread->sig_pending, SIGNAL_TO_MASK(SIGKILLTHR));
481 			// Wake up main thread
482 			if (mainThread->state == B_THREAD_SUSPENDED) {
483 				mainThread->state = mainThread->next_state = B_THREAD_READY;
484 				scheduler_enqueue_in_run_queue(mainThread);
485 			} else if (mainThread->state == B_THREAD_WAITING)
486 				signal_interrupt_thread(mainThread);
487 
488 			// Supposed to fall through
489 		}
490 		case SIGKILLTHR:
491 			// Wake up suspended threads and interrupt waiting ones
492 			if (thread->state == B_THREAD_SUSPENDED) {
493 				thread->state = thread->next_state = B_THREAD_READY;
494 				scheduler_enqueue_in_run_queue(thread);
495 			} else if (thread->state == B_THREAD_WAITING)
496 				signal_interrupt_thread(thread);
497 			break;
498 
499 		case SIGCONT:
500 			// Wake up thread if it was suspended
501 			if (thread->state == B_THREAD_SUSPENDED) {
502 				thread->state = thread->next_state = B_THREAD_READY;
503 				scheduler_enqueue_in_run_queue(thread);
504 			}
505 
506 			atomic_and(&thread->sig_pending, ~STOP_SIGNALS);
507 				// remove any pending stop signals
508 			break;
509 
510 		default:
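			// only interrupt a waiting thread if a pending signal is now
			// unblocked; a pending SIGCHLD interrupts the wait even if blocked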
511 			if (thread->sig_pending
512 				& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD))) {
513 				// Interrupt thread if it was waiting
514 				if (thread->state == B_THREAD_WAITING)
515 					signal_interrupt_thread(thread);
516 			}
517 			break;
518 	}
519 
520 	update_thread_signals_flag(thread);
521 
522 	return B_OK;
523 }
524 
525 
526 int
527 send_signal_etc(pid_t id, uint signal, uint32 flags)
528 {
529 	status_t status = B_BAD_THREAD_ID;
530 	struct thread *thread;
531 	cpu_status state = 0;
532 
533 	if (signal < 0 || signal > MAX_SIGNO)
534 		return B_BAD_VALUE;
535 
536 	T(SendSignal(id, signal, flags));
537 
538 	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
539 		state = disable_interrupts();
540 
541 	if (id > 0) {
542 		// send a signal to the specified thread
543 
544 		GRAB_THREAD_LOCK();
545 
546 		thread = thread_get_thread_struct_locked(id);
547 		if (thread != NULL)
548 			status = deliver_signal(thread, signal, flags);
549 	} else {
550 		// send a signal to the specified process group
551 		// (the absolute value of the id)
552 
553 		struct process_group *group;
554 
555 		// TODO: handle -1 correctly
556 		if (id == 0 || id == -1) {
557 			// send a signal to the current team
558 			id = thread_get_current_thread()->team->id;
559 		} else
560 			id = -id;
561 
562 		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
563 			GRAB_TEAM_LOCK();
564 
565 		group = team_get_process_group_locked(NULL, id);
566 		if (group != NULL) {
567 			struct team *team, *next;
568 
569 			// Send a signal to all teams in this process group
570 
571 			for (team = group->teams; team != NULL; team = next) {
572 				next = team->group_next;
573 				id = team->id;
574 
575 				GRAB_THREAD_LOCK();
576 
577 				thread = thread_get_thread_struct_locked(id);
578 				if (thread != NULL) {
579 					// we don't stop because of an error sending the signal; we
580 					// rather want to send as many signals as possible
581 					status = deliver_signal(thread, signal, flags);
582 				}
583 
584 				RELEASE_THREAD_LOCK();
585 			}
586 		}
587 
588 		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
589 			RELEASE_TEAM_LOCK();
590 
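		// re-acquire the thread lock for the common reschedule/unlock path below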
591 		GRAB_THREAD_LOCK();
592 	}
593 
594 	// ToDo: maybe the scheduler should only be invoked if there is reason to do it?
595 	//	(i.e. deliver_signal() moved some threads into the run queue?)
596 	if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0)
597 		scheduler_reschedule();
598 
599 	RELEASE_THREAD_LOCK();
600 
601 	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
602 		restore_interrupts(state);
603 
604 	return status;
605 }
606 
607 
608 int
609 send_signal(pid_t threadID, uint signal)
610 {
611 	// The BeBook states that this function wouldn't be exported
612 	// for drivers, but of course it is.
613 	return send_signal_etc(threadID, signal, 0);
614 }
615 
616 
617 int
618 has_signals_pending(void *_thread)
619 {
620 	struct thread *thread = (struct thread *)_thread;
621 	if (thread == NULL)
622 		thread = thread_get_current_thread();
623 
624 	return atomic_get(&thread->sig_pending) & ~atomic_get(&thread->sig_block_mask);
625 }
626 
627 
628 int
629 sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
630 {
631 	struct thread *thread = thread_get_current_thread();
632 	sigset_t oldMask = atomic_get(&thread->sig_block_mask);
633 
634 	if (set != NULL) {
635 		T(SigProcMask(how, *set));
636 
637 		switch (how) {
638 			case SIG_BLOCK:
639 				atomic_or(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
640 				break;
641 			case SIG_UNBLOCK:
642 				atomic_and(&thread->sig_block_mask, ~*set);
643 				break;
644 			case SIG_SETMASK:
645 				atomic_set(&thread->sig_block_mask, *set & BLOCKABLE_SIGNALS);
646 				break;
647 			default:
648 				return B_BAD_VALUE;
649 		}
650 
651 		update_current_thread_signals_flag();
652 	}
653 
654 	if (oldSet != NULL)
655 		*oldSet = oldMask;
656 
657 	return B_OK;
658 }
659 
660 
661 /**	\brief sigaction() for the specified thread.
662  *
663  *	A \a threadID < 0 specifies the current thread.
664  *
665  */
666 
667 int
668 sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
669 	struct sigaction *oldAction)
670 {
671 	struct thread *thread;
672 	cpu_status state;
673 	status_t error = B_OK;
674 
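	// the action of signals that can never be blocked cannot be changed either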
675 	if (signal < 1 || signal > MAX_SIGNO
676 		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
677 		return B_BAD_VALUE;
678 
679 	state = disable_interrupts();
680 	GRAB_THREAD_LOCK();
681 
682 	thread = (threadID < 0
683 		? thread_get_current_thread()
684 		: thread_get_thread_struct_locked(threadID));
685 
686 	if (thread) {
687 		if (oldAction) {
688 			// save previous sigaction structure
689 			memcpy(oldAction, &thread->sig_action[signal - 1],
690 				sizeof(struct sigaction));
691 		}
692 
693 		if (act) {
694 			T(SigAction(thread, signal, act));
695 
696 			// set new sigaction structure
697 			memcpy(&thread->sig_action[signal - 1], act,
698 				sizeof(struct sigaction));
699 			thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
700 		}
701 
702 		if (act && act->sa_handler == SIG_IGN) {
703 			// remove pending signal if it should now be ignored
704 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
705 		} else if (act && act->sa_handler == SIG_DFL
706 			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0) {
707 			// remove pending signal for those signals whose default
708 			// action is to ignore them
709 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
710 		}
711 	} else
712 		error = B_BAD_THREAD_ID;
713 
714 	RELEASE_THREAD_LOCK();
715 	restore_interrupts(state);
716 
717 	return error;
718 }
719 
720 
721 int
722 sigaction(int signal, const struct sigaction *act, struct sigaction *oldAction)
723 {
724 	return sigaction_etc(-1, signal, act, oldAction);
725 }
726 
727 
728 /** Sends a SIGALRM to the thread that set the alarm and reschedules */
729 
730 static int32
731 alarm_event(timer *t)
732 {
733 	// The hook can be called from any context, but we have to
734 	// deliver the signal to the thread that originally called
735 	// set_alarm().
736 	// Since thread->alarm is this timer structure, we can just
737 	// cast it back - ugly but it works for now
738 	struct thread *thread = (struct thread *)((uint8 *)t - offsetof(struct thread, alarm));
739 		// ToDo: investigate adding one user parameter to the timer structure to fix this hack
740 
741 	TRACE(("alarm_event: thread = %p\n", thread));
742 	send_signal_etc(thread->id, SIGALRM, B_DO_NOT_RESCHEDULE);
743 
744 	return B_INVOKE_SCHEDULER;
745 }
746 
747 
748 /** Sets the alarm timer for the current thread. The timer fires at the
749  *	specified time in the future, periodically or just once, as determined
750  *	by \a mode.
751  *	\return the time left until a previously set alarm would have fired.
752  */
753 
754 bigtime_t
755 set_alarm(bigtime_t time, uint32 mode)
756 {
757 	struct thread *thread = thread_get_current_thread();
758 	bigtime_t remainingTime = 0;
759 
760 	ASSERT(B_ONE_SHOT_RELATIVE_ALARM == B_ONE_SHOT_RELATIVE_TIMER);
761 		// just to be sure no one changes the headers some day
762 
763 	TRACE(("set_alarm: thread = %p\n", thread));
764 
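	// entry.key holds the absolute time at which the previously set alarm
	// would fire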
765 	if (thread->alarm.period)
766 		remainingTime = (bigtime_t)thread->alarm.entry.key - system_time();
767 
768 	cancel_timer(&thread->alarm);
769 
770 	if (time != B_INFINITE_TIMEOUT)
771 		add_timer(&thread->alarm, &alarm_event, time, mode);
772 	else {
773 		// this marks the alarm as canceled (for returning the remaining time)
774 		thread->alarm.period = 0;
775 	}
776 
777 	return remainingTime;
778 }
779 
780 
781 /**	Replaces the current signal block mask and waits for any event to happen.
782  *	Before returning, the original signal block mask is restored.
783  */
784 
785 int
786 sigsuspend(const sigset_t *mask)
787 {
788 	struct thread *thread = thread_get_current_thread();
789 	sigset_t oldMask = atomic_get(&thread->sig_block_mask);
790 	cpu_status state;
791 
792 	// set the new block mask and suspend ourselves - we cannot use
793 	// SIGSTOP for this, as signals are only handled upon kernel exit
794 
795 	atomic_set(&thread->sig_block_mask, *mask);
796 
797 	while (true) {
798 		thread->next_state = B_THREAD_SUSPENDED;
799 
800 		state = disable_interrupts();
801 		GRAB_THREAD_LOCK();
802 
803 		update_thread_signals_flag(thread);
804 
805 		scheduler_reschedule();
806 
807 		RELEASE_THREAD_LOCK();
808 		restore_interrupts(state);
809 
810 		if (has_signals_pending(thread))
811 			break;
812 	}
813 
814 	// restore the original block mask
815 	atomic_set(&thread->sig_block_mask, oldMask);
816 
817 	update_current_thread_signals_flag();
818 
819 	// we're not supposed to actually succeed
820 	// ToDo: could this get us into trouble with SA_RESTART handlers?
821 	return B_INTERRUPTED;
822 }
823 
824 
825 int
826 sigpending(sigset_t *set)
827 {
828 	struct thread *thread = thread_get_current_thread();
829 
830 	if (set == NULL)
831 		return B_BAD_VALUE;
832 
833 	*set = atomic_get(&thread->sig_pending);
834 	return B_OK;
835 }
836 
837 
838 //	#pragma mark -
839 
840 
841 bigtime_t
842 _user_set_alarm(bigtime_t time, uint32 mode)
843 {
844 	syscall_64_bit_return_value();
845 
846 	return set_alarm(time, mode);
847 }
848 
849 
850 status_t
851 _user_send_signal(pid_t team, uint signal)
852 {
853 	return send_signal_etc(team, signal, B_CHECK_PERMISSION);
854 }
855 
856 
857 status_t
858 _user_sigprocmask(int how, const sigset_t *userSet, sigset_t *userOldSet)
859 {
860 	sigset_t set, oldSet;
861 	status_t status;
862 
863 	if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
864 		|| (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
865 				sizeof(sigset_t)) < B_OK))
866 		return B_BAD_ADDRESS;
867 
868 	status = sigprocmask(how, userSet ? &set : NULL,
869 		userOldSet ? &oldSet : NULL);
870 
871 	// copy old set if asked for
872 	if (status >= B_OK && userOldSet != NULL
873 		&& user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
874 		return B_BAD_ADDRESS;
875 
876 	return status;
877 }
878 
879 
880 status_t
881 _user_sigaction(int signal, const struct sigaction *userAction,
882 	struct sigaction *userOldAction)
883 {
884 	struct sigaction act, oact;
885 	status_t status;
886 
887 	if ((userAction != NULL && user_memcpy(&act, userAction,
888 				sizeof(struct sigaction)) < B_OK)
889 		|| (userOldAction != NULL && user_memcpy(&oact, userOldAction,
890 				sizeof(struct sigaction)) < B_OK))
891 		return B_BAD_ADDRESS;
892 
893 	status = sigaction(signal, userAction ? &act : NULL,
894 		userOldAction ? &oact : NULL);
895 
896 	// only copy the old action if a pointer has been given
897 	if (status >= B_OK && userOldAction != NULL
898 		&& user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
899 		return B_BAD_ADDRESS;
900 
901 	return status;
902 }
903 
904 
905 status_t
906 _user_sigsuspend(const sigset_t *userMask)
907 {
908 	sigset_t mask;
909 
910 	if (userMask == NULL)
911 		return B_BAD_VALUE;
912 	if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
913 		return B_BAD_ADDRESS;
914 
915 	return sigsuspend(&mask);
916 }
917 
918 
919 status_t
920 _user_sigpending(sigset_t *userSet)
921 {
922 	sigset_t set;
923 	int status;
924 
925 	if (userSet == NULL)
926 		return B_BAD_VALUE;
927 	if (!IS_USER_ADDRESS(userSet))
928 		return B_BAD_ADDRESS;
929 
930 	status = sigpending(&set);
931 	if (status == B_OK
932 		&& user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
933 		return B_BAD_ADDRESS;
934 
935 	return status;
936 }
937 
938 
939 status_t
940 _user_set_signal_stack(const stack_t *newUserStack, stack_t *oldUserStack)
941 {
942 	struct thread *thread = thread_get_current_thread();
943 	struct stack_t newStack, oldStack;
944 	bool onStack = false;
945 
946 	if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
947 				sizeof(stack_t)) < B_OK)
948 		|| (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
949 				sizeof(stack_t)) < B_OK))
950 		return B_BAD_ADDRESS;
951 
952 	if (thread->signal_stack_enabled) {
953 		// determine whether or not the user thread is currently
954 		// on the active signal stack
955 		onStack = arch_on_signal_stack(thread);
956 	}
957 
958 	if (oldUserStack != NULL) {
959 		oldStack.ss_sp = (void *)thread->signal_stack_base;
960 		oldStack.ss_size = thread->signal_stack_size;
961 		oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
962 			| (onStack ? SS_ONSTACK : 0);
963 	}
964 
965 	if (newUserStack != NULL) {
966 		// no flags other than SS_DISABLE are allowed
967 		if ((newStack.ss_flags & ~SS_DISABLE) != 0)
968 			return B_BAD_VALUE;
969 
970 		if ((newStack.ss_flags & SS_DISABLE) == 0) {
971 			// check if the size is valid
972 			if (newStack.ss_size < MINSIGSTKSZ)
973 				return B_NO_MEMORY;
974 			if (onStack)
975 				return B_NOT_ALLOWED;
976 			if (!IS_USER_ADDRESS(newStack.ss_sp))
977 				return B_BAD_VALUE;
978 
979 			thread->signal_stack_base = (addr_t)newStack.ss_sp;
980 			thread->signal_stack_size = newStack.ss_size;
981 			thread->signal_stack_enabled = true;
982 		} else
983 			thread->signal_stack_enabled = false;
984 	}
985 
986 	// only copy the old stack info if a pointer has been given
987 	if (oldUserStack != NULL
988 		&& user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
989 		return B_BAD_ADDRESS;
990 
991 	return B_OK;
992 }
993 
994