xref: /haiku/headers/private/kernel/thread.h (revision e85e399fd7b229b8bc92f28928a059876d7216d3)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 #ifndef _THREAD_H
10 #define _THREAD_H
11 
12 
13 #include <OS.h>
14 #include <thread_types.h>
15 #include <arch/thread.h>
16 
17 // For the thread blocking inline functions only.
18 #include <kscheduler.h>
19 #include <ksignal.h>
20 
21 
22 struct arch_fork_arg;
23 struct kernel_args;
24 struct select_info;
25 struct thread_creation_attributes;
26 
27 
28 // thread notifications
29 #define THREAD_MONITOR		'_tm_'
30 #define THREAD_ADDED		0x01
31 #define THREAD_REMOVED		0x02
32 #define THREAD_NAME_CHANGED	0x04
33 
34 
35 namespace BKernel {
36 
37 
// Kernel-internal extension of the public thread_creation_attributes:
// carries the additional parameters needed when the caller is the kernel
// itself rather than userland.
struct ThreadCreationAttributes : thread_creation_attributes {
	// when calling from kernel only
	team_id			team;					// target team; -1 by default (cf.
											// the constructor below)
	Thread*			thread;					// Thread object to use; NULL by
											// default (cf. constructor)
	sigset_t		signal_mask;
	size_t			additional_stack_size;	// additional space in the stack
											// area after the TLS region, not
											// used as thread stack
	thread_func		kernelEntry;			// kernel-side entry function
	void*			kernelArgument;			// argument passed to kernelEntry
	arch_fork_arg*	forkArgs;				// If non-NULL, the userland thread
											// will be started with this
											// register context.

public:
								ThreadCreationAttributes() {}
									// no-init constructor
								ThreadCreationAttributes(
									thread_func function, const char* name,
									int32 priority, void* arg,
									team_id team = -1, Thread* thread = NULL);

			status_t			InitFromUserAttributes(
									const thread_creation_attributes*
										userAttributes,
									char* nameBuffer);
};
65 
66 
67 }	// namespace BKernel
68 
69 using BKernel::ThreadCreationAttributes;
70 
71 
72 #ifdef __cplusplus
73 extern "C" {
74 #endif
75 
// Simple thread queue primitives (struct thread_queue).
void thread_enqueue(Thread *t, struct thread_queue *q);
Thread *thread_lookat_queue(struct thread_queue *q);
Thread *thread_dequeue(struct thread_queue *q);
Thread *thread_dequeue_id(struct thread_queue *q, thread_id id);

// Hooks invoked when the current thread crosses the kernel boundary.
void thread_at_kernel_entry(bigtime_t now);
	// called when the thread enters the kernel; \a now is presumably the
	// time of entry -- confirm with the callers
void thread_at_kernel_exit(void);
void thread_at_kernel_exit_no_signals(void);
void thread_reset_for_exec(void);

// Subsystem initialization.
status_t thread_init(struct kernel_args *args);
status_t thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum);
void thread_yield(bool force);
void thread_exit(void);

// Thread table statistics.
int32 thread_max_threads(void);
int32 thread_used_threads(void);

const char* thread_state_to_text(Thread* thread, int32 state);
	// textual representation of a thread state

// I/O priority; the setter takes no ID, so it acts on the calling thread.
int32 thread_get_io_priority(thread_id id);
void thread_set_io_priority(int32 priority);

// The current thread is retrieved via an architecture-specific accessor.
#define thread_get_current_thread arch_thread_get_current_thread
101 
102 static thread_id thread_get_current_thread_id(void);
103 static inline thread_id
104 thread_get_current_thread_id(void)
105 {
106 	Thread *thread = thread_get_current_thread();
107 	return thread ? thread->id : 0;
108 }
109 
110 static inline bool
111 thread_is_idle_thread(Thread *thread)
112 {
113 	return thread->priority == B_IDLE_PRIORITY;
114 }
115 
// Thread ID allocation.
thread_id allocate_thread_id();
thread_id peek_next_thread_id();

// Thread creation and the transition to userland.
status_t thread_enter_userspace_new_team(Thread* thread, addr_t entryFunction,
	void* argument1, void* argument2);
status_t thread_create_user_stack(Team* team, Thread* thread, void* stackBase,
	size_t stackSize, size_t additionalSize);
thread_id thread_create_thread(const ThreadCreationAttributes& attributes,
	bool kernel);

thread_id spawn_kernel_thread_etc(thread_func, const char *name, int32 priority,
	void *args, team_id team);
status_t wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
	status_t *_returnCode);

// select() support for thread objects.
status_t select_thread(int32 object, struct select_info *info, bool kernel);
status_t deselect_thread(int32 object, struct select_info *info, bool kernel);

#define syscall_64_bit_return_value() arch_syscall_64_bit_return_value()

// Blocking functions; see the thread_prepare_to_block() documentation below
// for the full blocking/unblocking protocol.
status_t thread_block();
status_t thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout);
status_t thread_block_with_timeout_locked(uint32 timeoutFlags,
			bigtime_t timeout);
140 
141 // used in syscalls.c
142 status_t _user_set_thread_priority(thread_id thread, int32 newPriority);
143 status_t _user_rename_thread(thread_id thread, const char *name);
144 status_t _user_suspend_thread(thread_id thread);
145 status_t _user_resume_thread(thread_id thread);
146 status_t _user_rename_thread(thread_id thread, const char *name);
147 thread_id _user_spawn_thread(struct thread_creation_attributes* attributes);
148 status_t _user_wait_for_thread(thread_id id, status_t *_returnCode);
149 status_t _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags,
150 	bigtime_t* _remainingTime);
151 status_t _user_kill_thread(thread_id thread);
152 status_t _user_cancel_thread(thread_id threadID, void (*cancelFunction)(int));
153 void _user_thread_yield(void);
154 void _user_exit_thread(status_t return_value);
155 bool _user_has_data(thread_id thread);
156 status_t _user_send_data(thread_id thread, int32 code, const void *buffer, size_t buffer_size);
157 status_t _user_receive_data(thread_id *_sender, void *buffer, size_t buffer_size);
158 thread_id _user_find_thread(const char *name);
159 status_t _user_get_thread_info(thread_id id, thread_info *info);
160 status_t _user_get_next_thread_info(team_id team, int32 *cookie, thread_info *info);
161 
162 status_t _user_block_thread(uint32 flags, bigtime_t timeout);
163 status_t _user_unblock_thread(thread_id thread, status_t status);
164 status_t _user_unblock_threads(thread_id* threads, uint32 count,
165 	status_t status);
166 
167 // ToDo: these don't belong here
168 struct rlimit;
169 int _user_getrlimit(int resource, struct rlimit * rlp);
170 int _user_setrlimit(int resource, const struct rlimit * rlp);
171 
172 #ifdef __cplusplus
173 }
174 #endif
175 
176 
177 /*!	Checks whether the current thread would immediately be interrupted when
178 	blocking it with the given wait/interrupt flags.
179 
180 	The caller must hold the scheduler lock.
181 
182 	\param thread The current thread.
183 	\param flags Wait/interrupt flags to be considered. Relevant are:
184 		- \c B_CAN_INTERRUPT: The thread can be interrupted by any non-blocked
185 			signal. Implies \c B_KILL_CAN_INTERRUPT (specified or not).
186 		- \c B_KILL_CAN_INTERRUPT: The thread can be interrupted by a kill
187 			signal.
188 	\return \c true, if the thread would be interrupted, \c false otherwise.
189 */
190 static inline bool
191 thread_is_interrupted(Thread* thread, uint32 flags)
192 {
193 	sigset_t pendingSignals = thread->AllPendingSignals();
194 	return ((flags & B_CAN_INTERRUPT) != 0
195 			&& (pendingSignals & ~thread->sig_block_mask) != 0)
196 		|| ((flags & B_KILL_CAN_INTERRUPT) != 0
197 			&& (pendingSignals & KILL_SIGNALS) != 0);
198 }
199 
200 
201 /*!	Checks whether the given thread is currently blocked (i.e. still waiting
202 	for something).
203 
204 	If a stable answer is required, the caller must hold the scheduler lock.
	Alternatively, if waiting is not interruptible and cannot time out, holding
	the client lock that was held when calling thread_prepare_to_block() and the
	unblocking functions works as well.
208 
209 	\param thread The thread in question.
210 	\return \c true, if the thread is blocked, \c false otherwise.
211 */
212 static inline bool
213 thread_is_blocked(Thread* thread)
214 {
215 	return thread->wait.status == 1;
216 }
217 
218 
219 /*!	Prepares the current thread for waiting.
220 
221 	This is the first of two steps necessary to block the current thread
222 	(IOW, to let it wait for someone else to unblock it or optionally time out
223 	after a specified delay). The process consists of two steps to avoid race
224 	conditions in case a lock other than the scheduler lock is involved.
225 
226 	Usually the thread waits for some condition to change and this condition is
227 	something reflected in the caller's data structures which should be
228 	protected by a client lock the caller knows about. E.g. in the semaphore
229 	code that lock is a per-semaphore spinlock that protects the semaphore data,
230 	including the semaphore count and the queue of waiting threads. For certain
231 	low-level locking primitives (e.g. mutexes) that client lock is the
232 	scheduler lock itself, which simplifies things a bit.
233 
234 	If a client lock other than the scheduler lock is used, this function must
235 	be called with that lock being held. Afterwards that lock should be dropped
236 	and the function that actually blocks the thread shall be invoked
237 	(thread_block[_locked]() or thread_block_with_timeout[_locked]()). In
238 	between these two steps no functionality that uses the thread blocking API
239 	for this thread shall be used.
240 
241 	When the caller determines that the condition for unblocking the thread
242 	occurred, it calls thread_unblock_locked() to unblock the thread. At that
	time one of the locks that are held when calling thread_prepare_to_block() must
244 	be held. Usually that would be the client lock. In two cases it generally
245 	isn't, however, since the unblocking code doesn't know about the client
246 	lock: 1. When thread_block_with_timeout[_locked]() had been used and the
247 	timeout occurs. 2. When thread_prepare_to_block() had been called with one
248 	or both of the \c B_CAN_INTERRUPT or \c B_KILL_CAN_INTERRUPT flags specified
249 	and someone calls thread_interrupt() that is supposed to wake up the thread.
250 	In either of these two cases only the scheduler lock is held by the
251 	unblocking code. A timeout can only happen after
252 	thread_block_with_timeout_locked() has been called, but an interruption is
253 	possible at any time. The client code must deal with those situations.
254 
255 	Generally blocking and unblocking threads proceed in the following manner:
256 
257 	Blocking thread:
258 	- Acquire client lock.
259 	- Check client condition and decide whether blocking is necessary.
260 	- Modify some client data structure to indicate that this thread is now
261 		waiting.
262 	- Release client lock (unless client lock is the scheduler lock).
263 	- Block.
264 	- Acquire client lock (unless client lock is the scheduler lock).
265 	- Check client condition and compare with block result. E.g. if the wait was
266 		interrupted or timed out, but the client condition indicates success, it
267 		may be considered a success after all, since usually that happens when
268 		another thread concurrently changed the client condition and also tried
269 		to unblock the waiting thread. It is even necessary when that other
270 		thread changed the client data structures in a way that associate some
271 		resource with the unblocked thread, or otherwise the unblocked thread
272 		would have to reverse that here.
273 	- If still necessary -- i.e. not already taken care of by an unblocking
274 		thread -- modify some client structure to indicate that the thread is no
275 		longer waiting, so it isn't erroneously unblocked later.
276 
277 	Unblocking thread:
278 	- Acquire client lock.
279 	- Check client condition and decide whether a blocked thread can be woken
280 		up.
281 	- Check the client data structure that indicates whether one or more threads
282 		are waiting and which thread(s) need(s) to be woken up.
283 	- Unblock respective thread(s).
284 	- Possibly change some client structure, so that an unblocked thread can
285 		decide whether a concurrent timeout/interruption can be ignored, or
286 		simply so that it doesn't have to do any more cleanup.
287 
288 	Note that in the blocking thread the steps after blocking are strictly
289 	required only if timeouts or interruptions are possible. If they are not,
290 	the blocking thread can only be woken up explicitly by an unblocking thread,
291 	which could already take care of all the necessary client data structure
292 	modifications, so that the blocking thread wouldn't have to do that.
293 
294 	Note that the client lock can but does not have to be a spinlock.
295 	A mutex, a semaphore, or anything that doesn't try to use the thread
296 	blocking API for the calling thread when releasing the lock is fine.
297 	In particular that means in principle thread_prepare_to_block() can be
298 	called with interrupts enabled.
299 
300 	Care must be taken when the wait can be interrupted or can time out,
301 	especially with a client lock that uses the thread blocking API. After a
	blocked thread has been interrupted or the timeout occurred, it cannot
	acquire the client lock (or any other lock using the thread blocking API)
	without first making sure that the thread no longer appears to be
305 	waiting to other client code. Otherwise another thread could try to unblock
306 	it which could erroneously unblock the thread while already waiting on the
307 	client lock. So usually when interruptions or timeouts are possible a
308 	spinlock needs to be involved.
309 
310 	\param thread The current thread.
311 	\param flags The blocking flags. Relevant are:
312 		- \c B_CAN_INTERRUPT: The thread can be interrupted by any non-blocked
313 			signal. Implies \c B_KILL_CAN_INTERRUPT (specified or not).
314 		- \c B_KILL_CAN_INTERRUPT: The thread can be interrupted by a kill
315 			signal.
316 	\param type The type of object the thread will be blocked at. Informative/
317 		for debugging purposes. Must be one of the \c THREAD_BLOCK_TYPE_*
318 		constants. \c THREAD_BLOCK_TYPE_OTHER implies that \a object is a
319 		string.
320 	\param object The object the thread will be blocked at.  Informative/for
321 		debugging purposes.
322 */
static inline void
thread_prepare_to_block(Thread* thread, uint32 flags, uint32 type,
	const void* object)
{
	thread->wait.flags = flags;
	thread->wait.type = type;
	thread->wait.object = object;
	atomic_set(&thread->wait.status, 1);
		// Set status last (and atomically) to guarantee that the other wait
		// fields are fully initialized by the time anyone can observe the
		// thread as waiting (wait.status == 1).
}
334 
335 
336 /*!	Blocks the current thread.
337 
338 	The thread is blocked until someone else unblock it. Must be called after a
339 	call to thread_prepare_to_block(). If the thread has already been unblocked
340 	after the previous call to thread_prepare_to_block(), this function will
341 	return immediately. Cf. the documentation of thread_prepare_to_block() for
342 	more details.
343 
344 	The caller must hold the scheduler lock.
345 
346 	\param thread The current thread.
347 	\return The error code passed to the unblocking function. thread_interrupt()
348 		uses \c B_INTERRUPTED. By convention \c B_OK means that the wait was
349 		successful while another error code indicates a failure (what that means
350 		depends on the client code).
351 */
352 static inline status_t
353 thread_block_locked(Thread* thread)
354 {
355 	if (thread->wait.status == 1) {
356 		// check for signals, if interruptible
357 		if (thread_is_interrupted(thread, thread->wait.flags)) {
358 			thread->wait.status = B_INTERRUPTED;
359 		} else {
360 			thread->next_state = B_THREAD_WAITING;
361 			scheduler_reschedule();
362 		}
363 	}
364 
365 	return thread->wait.status;
366 }
367 
368 
369 /*!	Unblocks the specified blocked thread.
370 
371 	If the thread is no longer waiting (e.g. because thread_unblock_locked() has
372 	already been called in the meantime), this function does not have any
373 	effect.
374 
375 	The caller must hold the scheduler lock and the client lock (might be the
376 	same).
377 
378 	\param thread The thread to be unblocked.
379 	\param status The unblocking status. That's what the unblocked thread's
380 		call to thread_block_locked() will return.
381 */
382 static inline void
383 thread_unblock_locked(Thread* thread, status_t status)
384 {
385 	if (atomic_test_and_set(&thread->wait.status, status, 1) != 1)
386 		return;
387 
388 	// wake up the thread, if it is sleeping
389 	if (thread->state == B_THREAD_WAITING)
390 		scheduler_enqueue_in_run_queue(thread);
391 }
392 
393 
394 /*!	Interrupts the specified blocked thread, if possible.
395 
396 	The function checks whether the thread can be interrupted and, if so, calls
397 	\code thread_unblock_locked(thread, B_INTERRUPTED) \endcode. Otherwise the
398 	function is a no-op.
399 
400 	The caller must hold the scheduler lock. Normally thread_unblock_locked()
401 	also requires the client lock to be held, but in this case the caller
402 	usually doesn't know it. This implies that the client code needs to take
403 	special care, if waits are interruptible. See thread_prepare_to_block() for
404 	more information.
405 
406 	\param thread The thread to be interrupted.
407 	\param kill If \c false, the blocked thread is only interrupted, when the
408 		flag \c B_CAN_INTERRUPT was specified for the blocked thread. If
409 		\c true, it is only interrupted, when at least one of the flags
410 		\c B_CAN_INTERRUPT or \c B_KILL_CAN_INTERRUPT was specified for the
411 		blocked thread.
412 	\return \c B_OK, if the thread is interruptible and thread_unblock_locked()
413 		was called, \c B_NOT_ALLOWED otherwise. \c B_OK doesn't imply that the
414 		thread actually has been interrupted -- it could have been unblocked
415 		before already.
416 */
417 static inline status_t
418 thread_interrupt(Thread* thread, bool kill)
419 {
420 	if ((thread->wait.flags & B_CAN_INTERRUPT) != 0
421 		|| (kill && (thread->wait.flags & B_KILL_CAN_INTERRUPT) != 0)) {
422 		thread_unblock_locked(thread, B_INTERRUPTED);
423 		return B_OK;
424 	}
425 
426 	return B_NOT_ALLOWED;
427 }
428 
429 
430 static inline void
431 thread_pin_to_current_cpu(Thread* thread)
432 {
433 	thread->pinned_to_cpu++;
434 }
435 
436 
437 static inline void
438 thread_unpin_from_current_cpu(Thread* thread)
439 {
440 	thread->pinned_to_cpu--;
441 }
442 
443 
444 #endif /* _THREAD_H */
445