xref: /haiku/headers/private/kernel/thread_types.h (revision f7a85eea1500ce588305b7a27f6c36069cc7620c)
/*
 * Copyright 2004-2016, Haiku, Inc.
 * Distributed under the terms of the MIT License.
 *
 * Thread definition and structures
 */
#ifndef _KERNEL_THREAD_TYPES_H
#define _KERNEL_THREAD_TYPES_H


#ifndef _ASSEMBLER

#include <pthread.h>

#include <arch/thread_types.h>
#include <condition_variable.h>
#include <heap.h>
#include <ksignal.h>
#include <lock.h>
#include <smp.h>
#include <thread_defs.h>
#include <timer.h>
#include <UserTimer.h>
#include <user_debugger.h>
#include <util/DoublyLinkedList.h>
#include <util/KernelReferenceable.h>
#include <util/list.h>

#include <SupportDefs.h>


enum additional_thread_state {
	THREAD_STATE_FREE_ON_RESCHED = 7, // free the thread structure upon reschedule
//	THREAD_STATE_BIRTH	// thread is being created
};

#define THREAD_MIN_SET_PRIORITY				B_LOWEST_ACTIVE_PRIORITY
#define THREAD_MAX_SET_PRIORITY				B_REAL_TIME_PRIORITY

enum team_state {
	TEAM_STATE_NORMAL,		// normal state
	TEAM_STATE_BIRTH,		// being constructed
	TEAM_STATE_SHUTDOWN,	// still lives, but is going down
	TEAM_STATE_DEATH		// only the Team object still exists, threads are
							// gone
};

#define	TEAM_FLAG_EXEC_DONE	0x01
	// team has executed exec*()
#define	TEAM_FLAG_DUMP_CORE	0x02
	// a core dump is in progress

typedef enum job_control_state {
	JOB_CONTROL_STATE_NONE,
	JOB_CONTROL_STATE_STOPPED,
	JOB_CONTROL_STATE_CONTINUED,
	JOB_CONTROL_STATE_DEAD
} job_control_state;


struct cpu_ent;
struct image;					// defined in image.c
struct io_context;
struct realtime_sem_context;	// defined in realtime_sem.cpp
struct select_info;
struct user_thread;				// defined in libroot/user_thread.h
struct VMAddressSpace;
struct user_mutex_context;		// defined in user_mutex.cpp
struct xsi_sem_context;			// defined in xsi_semaphore.cpp

namespace Scheduler {
	struct ThreadData;
}

namespace BKernel {
	struct Team;
	struct Thread;
	struct ProcessGroup;
}


struct thread_death_entry {
	struct list_link	link;
	thread_id			thread;
	status_t			status;
};

struct team_loading_info {
	ConditionVariable	condition;
	status_t			result;		// the result of the loading
};

struct team_watcher {
	struct list_link	link;
	void				(*hook)(team_id team, void *data);
	void				*data;
};
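
// Illustrative sketch (not part of this header): team_watcher entries back the
// start/stop_watching_team() API declared in team.h, which invokes "hook" with
// "data" when the watched team goes away. Assuming that API, usage looks
// roughly like:
//
//	static void
//	my_team_gone_hook(team_id team, void* data)
//	{
//		// called with the cookie registered below
//	}
//
//	start_watching_team(teamID, &my_team_gone_hook, myCookie);
//	// ...
//	stop_watching_team(teamID, &my_team_gone_hook, myCookie);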


#define MAX_DEAD_CHILDREN	32
	// this is a soft limit for the number of child death entries in a team


struct job_control_entry : DoublyLinkedListLinkImpl<job_control_entry> {
	job_control_state	state;		// current team job control state
	thread_id			thread;		// main thread ID == team ID
	uint16				signal;		// signal causing the current state
	bool				has_group_ref;
	uid_t				signaling_user;

	// valid while state != JOB_CONTROL_STATE_DEAD
	BKernel::Team*		team;

	// valid when state == JOB_CONTROL_STATE_DEAD
	pid_t				group_id;
	status_t			status;
	uint16				reason;		// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>
	bigtime_t			user_time;
	bigtime_t			kernel_time;

	job_control_entry();
	~job_control_entry();

	void InitDeadState();

	job_control_entry& operator=(const job_control_entry& other);
};

typedef DoublyLinkedList<job_control_entry> JobControlEntryList;
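
// Illustrative sketch: consumers must check "state" before deciding which of
// the overlapping fields above are meaningful, e.g.:
//
//	if (entry->state == JOB_CONTROL_STATE_DEAD) {
//		// the Team is gone; use the copied group_id/status/reason/... fields
//		status_t exitStatus = entry->status;
//	} else {
//		// entry->team is still valid here
//	}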

struct team_job_control_children {
	JobControlEntryList		entries;
};

struct team_dead_children : team_job_control_children {
	ConditionVariable	condition_variable;
	uint32				count;
	bigtime_t			kernel_time;
	bigtime_t			user_time;
};


struct team_death_entry {
	int32				remaining_threads;
	ConditionVariable	condition;
};


struct free_user_thread {
	struct free_user_thread*	next;
	struct user_thread*			thread;
};


class AssociatedDataOwner;

class AssociatedData : public BReferenceable,
	public DoublyLinkedListLinkImpl<AssociatedData> {
public:
								AssociatedData();
	virtual						~AssociatedData();

			AssociatedDataOwner* Owner() const
									{ return fOwner; }
			void				SetOwner(AssociatedDataOwner* owner)
									{ fOwner = owner; }

	virtual	void				OwnerDeleted(AssociatedDataOwner* owner);

private:
			AssociatedDataOwner* fOwner;
};


class AssociatedDataOwner {
public:
								AssociatedDataOwner();
								~AssociatedDataOwner();

			bool				AddData(AssociatedData* data);
			bool				RemoveData(AssociatedData* data);

			void				PrepareForDeletion();

private:
			typedef DoublyLinkedList<AssociatedData> DataList;

private:

			mutex				fLock;
			DataList			fList;
};
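
// Illustrative sketch: a subsystem can attach reference-counted data to an
// owner (such as a Team, which inherits AssociatedDataOwner) and react to the
// owner's deletion:
//
//	struct MyData : AssociatedData {
//		virtual void OwnerDeleted(AssociatedDataOwner* owner)
//		{
//			// the owner is going away; drop any back pointers to it
//		}
//	};
//
//	MyData* data = new MyData;
//	if (!team->AddData(data))
//		data->ReleaseReference();	// owner was already being deleted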


typedef int32 (*thread_entry_func)(thread_func, void *);


namespace BKernel {


struct GroupsArray : KernelReferenceable {
	int		count;
	gid_t	groups[];
};
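
// Illustrative sketch: "groups" is a flexible array member, so an instance is
// allocated with the gid_t array inline and constructed via placement new
// (a typical creation pattern, not a fixed API):
//
//	GroupsArray* array = (GroupsArray*)malloc(
//		sizeof(GroupsArray) + count * sizeof(gid_t));
//	if (array != NULL) {
//		new(array) GroupsArray;
//		array->count = count;
//	}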


template<typename IDType>
struct TeamThreadIteratorEntry
	: DoublyLinkedListLinkImpl<TeamThreadIteratorEntry<IDType> > {
	typedef IDType	id_type;
	typedef TeamThreadIteratorEntry<id_type> iterator_type;

	id_type	id;			// -1 for iterator entries, >= 0 for actual elements
	bool	visible;	// the entry is publicly visible
};


struct Team : TeamThreadIteratorEntry<team_id>, KernelReferenceable,
		AssociatedDataOwner {
	DoublyLinkedListLink<Team>	global_list_link;
	Team			*hash_next;		// next in hash
	Team			*siblings_next;	// next in parent's list; protected by
									// parent's fLock
	Team			*parent;		// write-protected by both parent (if any)
									// and this team's fLock
	Team			*children;		// protected by this team's fLock;
									// adding/removing a child also requires the
									// child's fLock
	Team			*group_next;	// protected by the group's lock

	int64			serial_number;	// immutable after adding team to hash

	// process group info -- write-protected by both the group's lock, the
	// team's lock, and the team's parent's lock
	pid_t			group_id;
	pid_t			session_id;
	ProcessGroup	*group;

	int				num_threads;	// number of threads in this team
	int				state;			// current team state, see above
	int32			flags;
	struct io_context *io_context;
	struct user_mutex_context *user_mutex_context;
	struct realtime_sem_context	*realtime_sem_context;
	struct xsi_sem_context *xsi_sem_context;
	struct team_death_entry *death_entry;	// protected by fLock
	struct list		dead_threads;

	// protected by the team's fLock
	team_dead_children dead_children;
	team_job_control_children stopped_children;
	team_job_control_children continued_children;

	// protected by the parent team's fLock
	struct job_control_entry* job_control_entry;

	VMAddressSpace	*address_space;
	Thread			*main_thread;	// protected by fLock, immutable
									// after first set
	Thread			*thread_list;	// protected by fLock, signal_lock and
									// gThreadCreationLock
	struct team_loading_info *loading_info;	// protected by fLock
	struct list		image_list;		// protected by sImageMutex
	struct list		watcher_list;
	struct list		sem_list;		// protected by sSemsSpinlock
	struct list		port_list;		// protected by sPortsLock
	struct arch_team arch_info;

	addr_t			user_data;
	area_id			user_data_area;
	size_t			user_data_size;
	size_t			used_user_data;
	struct free_user_thread* free_user_threads;

	void*			commpage_address;

	struct team_debug_info debug_info;

	bigtime_t		start_time;

	// protected by time_lock
	bigtime_t		dead_threads_kernel_time;
	bigtime_t		dead_threads_user_time;
	bigtime_t		cpu_clock_offset;
	spinlock		time_lock;

	// user group information; protected by fLock
	uid_t			saved_set_uid;
	uid_t			real_uid;
	uid_t			effective_uid;
	gid_t			saved_set_gid;
	gid_t			real_gid;
	gid_t			effective_gid;
	BReference<GroupsArray> supplementary_groups;

	// Exit status information. Set when the first terminal event occurs,
	// immutable afterwards. Protected by fLock.
	struct {
		uint16		reason;			// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>
		uint16		signal;			// signal killing the team
		uid_t		signaling_user;	// real UID of the signal sender
		status_t	status;			// exit status, if normal team exit
		bool		initialized;	// true when the state has been initialized
	} exit;

	spinlock		signal_lock;

public:
								~Team();

	static	Team*				Create(team_id id, const char* name,
									bool kernel);
	static	Team*				Get(team_id id);
	static	Team*				GetAndLock(team_id id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			void				LockTeamAndParent(bool dontLockParentIfKernel);
			void				UnlockTeamAndParent();
			void				LockTeamAndProcessGroup();
			void				UnlockTeamAndProcessGroup();
			void				LockTeamParentAndProcessGroup();
			void				UnlockTeamParentAndProcessGroup();
			void				LockProcessGroup()
									{ LockTeamAndProcessGroup(); Unlock(); }

			const char*			Name() const	{ return fName; }
			void				SetName(const char* name);

			const char*			Args() const	{ return fArgs; }
			void				SetArgs(const char* args);
			void				SetArgs(const char* path,
									const char* const* otherArgs,
									int otherArgCount);

			BKernel::QueuedSignalsCounter* QueuedSignalsCounter() const
									{ return fQueuedSignalsCounter; }
			sigset_t			PendingSignals() const
									{ return fPendingSignals.AllSignals(); }

			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			struct sigaction&	SignalActionFor(int32 signal)
									{ return fSignalActions[signal - 1]; }
			void				InheritSignalActions(Team* parent);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			bool				CheckAddUserDefinedTimer();
			void				UserDefinedTimersRemoved(int32 count);

			void				UserTimerActivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerActivated(TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				UserTimerDeactivated(
									TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
									// both total and user CPU timers
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			bool				HasActiveUserTimeUserTimers() const
									{ return !fUserTimeUserTimers.IsEmpty(); }
			TeamTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }
	inline	TeamUserTimeUserTimerList::ConstIterator
									UserTimeUserTimerIterator() const;

			bigtime_t			CPUTime(bool ignoreCurrentRun,
									Thread* lockedThread = NULL) const;
			bigtime_t			UserCPUTime() const;

			ConditionVariable*	CoreDumpCondition() const
									{ return fCoreDumpCondition; }
			void				SetCoreDumpCondition(
									ConditionVariable* condition)
									{ fCoreDumpCondition = condition; }
private:
								Team(team_id id, bool kernel);

private:
			mutex				fLock;
			char				fName[B_OS_NAME_LENGTH];
			char				fArgs[64];
									// contents for the team_info::args field

			BKernel::QueuedSignalsCounter* fQueuedSignalsCounter;
			BKernel::PendingSignals	fPendingSignals;
									// protected by signal_lock
			struct sigaction	fSignalActions[MAX_SIGNAL_NUMBER];
									// indexed signal - 1, protected by fLock

			UserTimerList		fUserTimers;			// protected by fLock
			TeamTimeUserTimerList fCPUTimeUserTimers;
									// protected by scheduler lock
			TeamUserTimeUserTimerList fUserTimeUserTimers;
			int32				fUserDefinedTimerCount;	// accessed atomically

			ConditionVariable*	fCoreDumpCondition;
									// protected by fLock
};
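
// Illustrative sketch: the usual pattern is to acquire a reference and the
// team's lock together, and to release both when done:
//
//	Team* team = Team::GetAndLock(id);
//	if (team != NULL) {
//		// ... access fields protected by fLock ...
//		team->UnlockAndReleaseReference();
//	}
//
// When the parent or the process group is involved, the combined helpers
// (LockTeamAndParent(), LockTeamAndProcessGroup(), ...) should be used rather
// than taking the individual locks manually, so the lock ordering they
// implement is respected.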


struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable {
	int32			flags;			// summary of events relevant in interrupt
									// handlers (signals pending, user debugging
									// enabled, etc.)
	int64			serial_number;	// immutable after adding thread to hash
	Thread			*hash_next;		// protected by thread hash lock
	Thread			*team_next;		// protected by team lock and fLock
	char			name[B_OS_NAME_LENGTH];	// protected by fLock
	bool			going_to_suspend;	// protected by scheduler lock
	int32			priority;		// protected by scheduler lock
	int32			io_priority;	// protected by fLock
	int32			state;			// protected by scheduler lock
	struct cpu_ent	*cpu;			// protected by scheduler lock
	struct cpu_ent	*previous_cpu;	// protected by scheduler lock
	CPUSet			cpumask;
	int32			pinned_to_cpu;	// only accessed by this thread or in the
									// scheduler, when thread is not running
	spinlock		scheduler_lock;

	sigset_t		sig_block_mask;	// protected by team->signal_lock,
									// only modified by the thread itself
	sigset_t		sigsuspend_original_unblocked_mask;
		// non-0 after a return from _user_sigsuspend(), containing the inverted
		// original signal mask, reset in handle_signals(); only accessed by
		// this thread
	sigset_t		old_sig_block_mask;
		// the old sig_block_mask to be restored when returning to userland
		// when THREAD_FLAGS_OLD_SIGMASK is set

	ucontext_t*		user_signal_context;	// only accessed by this thread
	addr_t			signal_stack_base;		// only accessed by this thread
	size_t			signal_stack_size;		// only accessed by this thread
	bool			signal_stack_enabled;	// only accessed by this thread

	bool			in_kernel;		// protected by time_lock, only written by
									// this thread
	bool			has_yielded;	// protected by scheduler lock
	Scheduler::ThreadData*	scheduler_data; // protected by scheduler lock

	struct user_thread*	user_thread;	// write-protected by fLock, only
										// modified by the thread itself and
										// thus freely readable by it

	void			(*cancel_function)(int);

	struct {
		uint8		parameters[SYSCALL_RESTART_PARAMETER_SIZE];
	} syscall_restart;

	struct {
		status_t	status;				// current wait status
		uint32		flags;				// interruptible flags
		uint32		type;				// type of the object waited on
		const void*	object;				// pointer to the object waited on
		timer		unblock_timer;		// timer for block with timeout
	} wait;

	struct {
		sem_id		write_sem;	// acquired by writers before writing
		sem_id		read_sem;	// released by writers after writing, acquired
								// by this thread when reading
		thread_id	sender;
		int32		code;
		size_t		size;
		void*		buffer;
	} msg;	// write_sem/read_sem are protected by fLock when accessed by
			// others, the other fields are protected by write_sem/read_sem

	void			(*fault_handler)(void);
	jmp_buf			fault_handler_state;
	int32			page_faults_allowed;
		/* this field may only stay in debug builds in the future */

	BKernel::Team	*team;	// protected by team lock, thread lock, scheduler
							// lock, team_lock
	rw_spinlock		team_lock;

	struct {
		sem_id		sem;		// immutable after thread creation
		status_t	status;		// accessed only by this thread
		struct list	waiters;	// protected by fLock
	} exit;

	struct select_info *select_infos;	// protected by fLock

	struct thread_debug_info debug_info;

	// stack
	area_id			kernel_stack_area;	// immutable after thread creation
	addr_t			kernel_stack_base;	// immutable after thread creation
	addr_t			kernel_stack_top;	// immutable after thread creation
	area_id			user_stack_area;	// protected by thread lock
	addr_t			user_stack_base;	// protected by thread lock
	size_t			user_stack_size;	// protected by thread lock

	addr_t			user_local_storage;
		// usually allocated at the safe side of the stack
	int				kernel_errno;
		// kernel "errno" differs from its userspace alter ego

	// user_time, kernel_time, and last_time are only written by the thread
	// itself, so they can be read by the thread without lock. Holding the
	// scheduler lock and checking that the thread does not run also guarantees
	// that the times will not change.
	spinlock		time_lock;
	bigtime_t		user_time;			// protected by time_lock
	bigtime_t		kernel_time;		// protected by time_lock
	bigtime_t		last_time;			// protected by time_lock
	bigtime_t		cpu_clock_offset;	// protected by time_lock

	void			(*post_interrupt_callback)(void*);
	void*			post_interrupt_data;

#if KDEBUG_RW_LOCK_DEBUG
	rw_lock*		held_read_locks[64] = {}; // only modified by this thread
#endif

	// architecture dependent section
	struct arch_thread arch_info;

public:
								Thread() {}
									// dummy for the idle threads
								Thread(const char *name, thread_id threadID,
									struct cpu_ent *cpu);
								~Thread();

	static	status_t			Create(const char* name, Thread*& _thread);

	static	Thread*				Get(thread_id id);
	static	Thread*				GetAndLock(thread_id id);
	static	Thread*				GetDebug(thread_id id);
									// in kernel debugger only

	static	bool				IsAlive(thread_id id);

			void*				operator new(size_t size);
			void*				operator new(size_t, void* pointer);
			void				operator delete(void* pointer, size_t size);

			status_t			Init(bool idleThread);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			bool				IsAlive() const;

			bool				IsRunning() const
									{ return cpu != NULL; }
									// scheduler lock must be held

			sigset_t			ThreadPendingSignals() const
									{ return fPendingSignals.AllSignals(); }
	inline	sigset_t			AllPendingSignals() const;
			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			void				UserTimerActivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			ThreadTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }

	inline	bigtime_t			CPUTime(bool ignoreCurrentRun) const;

private:
			mutex				fLock;

			BKernel::PendingSignals	fPendingSignals;
									// protected by team->signal_lock

			UserTimerList		fUserTimers;			// protected by fLock
			ThreadTimeUserTimerList fCPUTimeUserTimers;
									// protected by time_lock
};


struct ProcessSession : BReferenceable {
	pid_t				id;
	void*				controlling_tty;
	pid_t				foreground_group;

public:
								ProcessSession(pid_t id);
								~ProcessSession();

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

private:
			mutex				fLock;
};


struct ProcessGroup : KernelReferenceable {
	struct ProcessGroup *next;		// next in hash
	pid_t				id;
	BKernel::Team		*teams;

public:
								ProcessGroup(pid_t id);
								~ProcessGroup();

	static	ProcessGroup*		Get(pid_t id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			ProcessSession*		Session() const
									{ return fSession; }
			void				Publish(ProcessSession* session);
			void				PublishLocked(ProcessSession* session);

			bool				IsOrphaned() const;

			void				ScheduleOrphanedCheck();
			void				UnsetOrphanedCheck();

public:
			SinglyLinkedListLink<ProcessGroup> fOrphanedCheckListLink;

private:
			mutex				fLock;
			ProcessSession*		fSession;
			bool				fInOrphanedCheckList;	// protected by
														// sOrphanedCheckLock
};

typedef SinglyLinkedList<ProcessGroup,
	SinglyLinkedListMemberGetLink<ProcessGroup,
		&ProcessGroup::fOrphanedCheckListLink> > ProcessGroupList;


/*!	\brief Allows iterating through all teams.
*/
struct TeamListIterator {
								TeamListIterator();
								~TeamListIterator();

			Team*				Next();

private:
			TeamThreadIteratorEntry<team_id> fEntry;
};
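
// Illustrative sketch, assuming Next() returns a referenced team (see its
// implementation in team.cpp for the exact reference semantics):
//
//	TeamListIterator iterator;
//	while (Team* team = iterator.Next()) {
//		// ... inspect the team ...
//		team->ReleaseReference();
//	}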


/*!	\brief Allows iterating through all threads.
*/
struct ThreadListIterator {
								ThreadListIterator();
								~ThreadListIterator();

			Thread*				Next();

private:
			TeamThreadIteratorEntry<thread_id> fEntry;
};


inline int32
Team::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Team::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}


inline TeamUserTimeUserTimerList::ConstIterator
Team::UserTimeUserTimerIterator() const
{
	return fUserTimeUserTimers.GetIterator();
}


inline sigset_t
Thread::AllPendingSignals() const
{
	return fPendingSignals.AllSignals() | team->PendingSignals();
}


inline int32
Thread::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Thread::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}


/*!	Returns the thread's current total CPU time (kernel + user + offset).

	The caller must hold \c time_lock.

	\param ignoreCurrentRun If \c true and the thread is currently running,
		don't add the time since the last time \c last_time was updated. Should
		be used in "thread unscheduled" scheduler callbacks, since although the
		thread is still running at that time, its time has already been stopped.
	\return The thread's current total CPU time.
*/
inline bigtime_t
Thread::CPUTime(bool ignoreCurrentRun) const
{
	bigtime_t time = user_time + kernel_time + cpu_clock_offset;

	// If currently running, also add the time since the last check, unless
	// requested otherwise.
	if (!ignoreCurrentRun && last_time != 0)
		time += system_time() - last_time;

	return time;
}
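
// Caller sketch (illustrative), assuming the InterruptsSpinLocker helper from
// util/AutoLock.h to satisfy the time_lock requirement above:
//
//	InterruptsSpinLocker timeLocker(thread->time_lock);
//	bigtime_t cpuTime = thread->CPUTime(false);
//	timeLocker.Unlock();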


}	// namespace BKernel


using BKernel::Team;
using BKernel::TeamListIterator;
using BKernel::Thread;
using BKernel::ThreadListIterator;
using BKernel::ProcessSession;
using BKernel::ProcessGroup;
using BKernel::ProcessGroupList;


#endif	// !_ASSEMBLER


// bits for the thread::flags field
#define	THREAD_FLAGS_SIGNALS_PENDING		0x0001
	// unblocked signals are pending (computed flag for optimization purposes)
#define	THREAD_FLAGS_DEBUG_THREAD			0x0002
	// forces the thread into the debugger as soon as possible (set by
	// debug_thread())
#define	THREAD_FLAGS_SINGLE_STEP			0x0004
	// indicates that the thread is in single-step mode (in userland)
#define	THREAD_FLAGS_DEBUGGER_INSTALLED		0x0008
	// a debugger is installed for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_DEFINED	0x0010
	// hardware breakpoints are defined for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_INSTALLED	0x0020
	// breakpoints are currently installed for the thread (i.e. the hardware is
	// actually set up to trigger debug events for them)
#define	THREAD_FLAGS_64_BIT_SYSCALL_RETURN	0x0040
	// set by 64 bit return value syscalls
#define	THREAD_FLAGS_RESTART_SYSCALL		0x0080
	// set by handle_signals(), if the current syscall shall be restarted
#define	THREAD_FLAGS_DONT_RESTART_SYSCALL	0x0100
	// explicitly disables automatic syscall restarts (e.g. resume_thread())
#define	THREAD_FLAGS_ALWAYS_RESTART_SYSCALL	0x0200
	// force syscall restart, even if a signal handler without SA_RESTART was
	// invoked (e.g. sigwait())
#define	THREAD_FLAGS_SYSCALL_RESTARTED		0x0400
	// the current syscall has been restarted
#define	THREAD_FLAGS_SYSCALL				0x0800
	// the thread is currently in a syscall; set/reset only for certain
	// functions (e.g. ioctl()) to allow inner functions to discriminate
	// whether e.g. parameters were passed from userland or kernel
#define	THREAD_FLAGS_TRAP_FOR_CORE_DUMP		0x1000
	// core dump in progress; the thread shall not exit the kernel to userland,
	// but shall invoke core_dump_trap_thread() instead.
#ifdef _COMPAT_MODE
#define	THREAD_FLAGS_COMPAT_MODE			0x2000
	// the thread runs in compatibility mode (for instance IA32 on x86_64)
#endif
#define	THREAD_FLAGS_OLD_SIGMASK			0x4000
	// the thread has an old sigmask to be restored
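
// Illustrative sketch: Thread::flags is examined from interrupt handlers, so
// it is manipulated with the atomic_*() functions from SupportDefs.h rather
// than plain reads and writes, e.g.:
//
//	atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
//	// ...
//	if ((atomic_get(&thread->flags) & THREAD_FLAGS_SIGNALS_PENDING) != 0) {
//		// unblocked signals are pending; handle them before returning to
//		// userland
//	}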

#endif	/* _KERNEL_THREAD_TYPES_H */