xref: /haiku/headers/private/kernel/thread.h (revision 13581b3d2a71545960b98fefebc5225b5bf29072)
1 /*
2  * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 #ifndef _THREAD_H
11 #define _THREAD_H
12 
13 
14 #include <OS.h>
15 
16 #include <arch/atomic.h>
17 #include <arch/thread.h>
18 // For the thread blocking inline functions only.
19 #include <kscheduler.h>
20 #include <ksignal.h>
21 #include <thread_types.h>
22 
23 
24 struct arch_fork_arg;
25 struct kernel_args;
26 struct select_info;
27 struct thread_creation_attributes;
28 
29 
30 // thread notifications
31 #define THREAD_MONITOR		'_tm_'
32 #define THREAD_ADDED		0x01
33 #define THREAD_REMOVED		0x02
34 #define THREAD_NAME_CHANGED	0x04
35 
36 
37 namespace BKernel {
38 
39 
struct ThreadCreationAttributes : thread_creation_attributes {
	// Kernel-internal extension of the userland thread_creation_attributes.
	// The following fields are set when creating a thread from kernel code
	// only (cf. InitFromUserAttributes() for the userland path).
	team_id			team;					// team to create the thread in
	Thread*			thread;					// pre-allocated Thread object,
											// if any
	sigset_t		signal_mask;			// initial signal mask of the
											// new thread
	size_t			additional_stack_size;	// additional space in the stack
											// area after the TLS region, not
											// used as thread stack
	thread_func		kernelEntry;			// kernel-side entry function
	void*			kernelArgument;			// argument passed to kernelEntry
	arch_fork_arg*	forkArgs;				// If non-NULL, the userland thread
											// will be started with this
											// register context.

public:
								ThreadCreationAttributes() {}
									// no-init constructor: leaves all fields
									// uninitialized on purpose
								ThreadCreationAttributes(
									thread_func function, const char* name,
									int32 priority, void* arg,
									team_id team = -1, Thread* thread = NULL);

			// Copies/validates attributes passed in from userland;
			// \a nameBuffer receives the copied thread name.
			status_t			InitFromUserAttributes(
									const thread_creation_attributes*
										userAttributes,
									char* nameBuffer);
};
67 
68 
69 }	// namespace BKernel
70 
71 using BKernel::ThreadCreationAttributes;
72 
73 
74 extern spinlock gThreadCreationLock;
75 
76 
77 #ifdef __cplusplus
78 extern "C" {
79 #endif
80 
81 void thread_at_kernel_entry(bigtime_t now);
82 	// called when the thread enters the kernel on behalf of the thread
83 void thread_at_kernel_exit(void);
84 void thread_at_kernel_exit_no_signals(void);
85 void thread_reset_for_exec(void);
86 
87 status_t thread_init(struct kernel_args *args);
88 status_t thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum);
89 void thread_yield(void);
90 void thread_exit(void);
91 
92 void thread_map(void (*function)(Thread* thread, void* data), void* data);
93 
94 int32 thread_max_threads(void);
95 int32 thread_used_threads(void);
96 
97 const char* thread_state_to_text(Thread* thread, int32 state);
98 
99 int32 thread_get_io_priority(thread_id id);
100 void thread_set_io_priority(int32 priority);
101 
102 #define thread_get_current_thread arch_thread_get_current_thread
103 
104 static thread_id thread_get_current_thread_id(void);
105 static inline thread_id
106 thread_get_current_thread_id(void)
107 {
108 	Thread *thread = thread_get_current_thread();
109 	return thread ? thread->id : 0;
110 }
111 
/*!	Returns whether the given thread runs at \c B_IDLE_PRIORITY,
	i.e. whether it is an idle thread.
*/
static inline bool
thread_is_idle_thread(Thread *thread)
{
	return thread->priority == B_IDLE_PRIORITY;
}
117 
118 thread_id allocate_thread_id();
119 thread_id peek_next_thread_id();
120 
121 status_t thread_enter_userspace_new_team(Thread* thread, addr_t entryFunction,
122 	void* argument1, void* argument2);
123 status_t thread_create_user_stack(Team* team, Thread* thread, void* stackBase,
124 	size_t stackSize, size_t additionalSize);
125 thread_id thread_create_thread(const ThreadCreationAttributes& attributes,
126 	bool kernel);
127 
128 thread_id spawn_kernel_thread_etc(thread_func, const char *name, int32 priority,
129 	void *args, team_id team);
130 
131 status_t select_thread(int32 object, struct select_info *info, bool kernel);
132 status_t deselect_thread(int32 object, struct select_info *info, bool kernel);
133 
134 #define syscall_64_bit_return_value() arch_syscall_64_bit_return_value()
135 
136 status_t thread_block();
137 status_t thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout);
138 void thread_unblock(Thread* thread, status_t status);
139 
140 // used in syscalls.c
141 status_t _user_set_thread_priority(thread_id thread, int32 newPriority);
142 status_t _user_rename_thread(thread_id thread, const char *name);
143 status_t _user_suspend_thread(thread_id thread);
144 status_t _user_resume_thread(thread_id thread);
145 status_t _user_rename_thread(thread_id thread, const char *name);
146 thread_id _user_spawn_thread(struct thread_creation_attributes* attributes);
147 status_t _user_wait_for_thread(thread_id id, status_t *_returnCode);
148 status_t _user_wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
149 	status_t *_returnCode);
150 status_t _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags,
151 	bigtime_t* _remainingTime);
152 status_t _user_kill_thread(thread_id thread);
153 status_t _user_cancel_thread(thread_id threadID, void (*cancelFunction)(int));
154 void _user_thread_yield(void);
155 void _user_exit_thread(status_t return_value);
156 bool _user_has_data(thread_id thread);
157 status_t _user_send_data(thread_id thread, int32 code, const void *buffer, size_t buffer_size);
158 status_t _user_receive_data(thread_id *_sender, void *buffer, size_t buffer_size);
159 thread_id _user_find_thread(const char *name);
160 status_t _user_get_thread_info(thread_id id, thread_info *info);
161 status_t _user_get_next_thread_info(team_id team, int32 *cookie, thread_info *info);
162 int _user_get_cpu();
163 status_t _user_get_thread_affinity(thread_id id, void* userMask, size_t size);
164 status_t _user_set_thread_affinity(thread_id id, const void* userMask, size_t size);
165 
166 
167 status_t _user_block_thread(uint32 flags, bigtime_t timeout);
168 status_t _user_unblock_thread(thread_id thread, status_t status);
169 status_t _user_unblock_threads(thread_id* threads, uint32 count,
170 	status_t status);
171 
172 // ToDo: these don't belong here
173 struct rlimit;
174 int _user_getrlimit(int resource, struct rlimit * rlp);
175 int _user_setrlimit(int resource, const struct rlimit * rlp);
176 
177 #ifdef __cplusplus
178 }
179 #endif
180 
181 
182 /*!	Checks whether the current thread would immediately be interrupted when
183 	blocking it with the given wait/interrupt flags.
184 
185 	The caller must hold the scheduler lock.
186 
187 	\param thread The current thread.
188 	\param flags Wait/interrupt flags to be considered. Relevant are:
189 		- \c B_CAN_INTERRUPT: The thread can be interrupted by any non-blocked
190 			signal. Implies \c B_KILL_CAN_INTERRUPT (specified or not).
191 		- \c B_KILL_CAN_INTERRUPT: The thread can be interrupted by a kill
192 			signal.
193 	\return \c true, if the thread would be interrupted, \c false otherwise.
194 */
195 static inline bool
196 thread_is_interrupted(Thread* thread, uint32 flags)
197 {
198 	sigset_t pendingSignals = thread->AllPendingSignals();
199 	return ((flags & B_CAN_INTERRUPT) != 0
200 			&& (pendingSignals & ~thread->sig_block_mask) != 0)
201 		|| ((flags & B_KILL_CAN_INTERRUPT) != 0
202 			&& (pendingSignals & KILL_SIGNALS) != 0);
203 }
204 
205 
206 /*!	Checks whether the given thread is currently blocked (i.e. still waiting
207 	for something).
208 
209 	If a stable answer is required, the caller must hold the scheduler lock.
	Alternatively, if waiting is not interruptible and cannot time out, keeping
	the client lock held when calling thread_prepare_to_block() and the
	unblocking functions works as well.
213 
214 	\param thread The thread in question.
215 	\return \c true, if the thread is blocked, \c false otherwise.
216 */
217 static inline bool
218 thread_is_blocked(Thread* thread)
219 {
220 	return atomic_get(&thread->wait.status) == 1;
221 }
222 
223 
224 /*!	Prepares the current thread for waiting.
225 
226 	This is the first of two steps necessary to block the current thread
227 	(IOW, to let it wait for someone else to unblock it or optionally time out
228 	after a specified delay). The process consists of two steps to avoid race
229 	conditions in case a lock other than the scheduler lock is involved.
230 
231 	Usually the thread waits for some condition to change and this condition is
232 	something reflected in the caller's data structures which should be
233 	protected by a client lock the caller knows about. E.g. in the semaphore
234 	code that lock is a per-semaphore spinlock that protects the semaphore data,
235 	including the semaphore count and the queue of waiting threads. For certain
236 	low-level locking primitives (e.g. mutexes) that client lock is the
237 	scheduler lock itself, which simplifies things a bit.
238 
239 	If a client lock other than the scheduler lock is used, this function must
240 	be called with that lock being held. Afterwards that lock should be dropped
241 	and the function that actually blocks the thread shall be invoked
242 	(thread_block[_locked]() or thread_block_with_timeout()). In between these
243 	two steps no functionality that uses the thread blocking API for this thread
244 	shall be used.
245 
246 	When the caller determines that the condition for unblocking the thread
247 	occurred, it calls thread_unblock_locked() to unblock the thread. At that
	time one of the locks that are held when calling thread_prepare_to_block()
	must
249 	be held. Usually that would be the client lock. In two cases it generally
250 	isn't, however, since the unblocking code doesn't know about the client
251 	lock: 1. When thread_block_with_timeout() had been used and the timeout
252 	occurs. 2. When thread_prepare_to_block() had been called with one or both
253 	of the \c B_CAN_INTERRUPT or \c B_KILL_CAN_INTERRUPT flags specified and
254 	someone calls thread_interrupt() that is supposed to wake up the thread.
255 	In either of these two cases only the scheduler lock is held by the
256 	unblocking code. A timeout can only happen after
257 	thread_block_with_timeout() has been called, but an interruption is
258 	possible at any time. The client code must deal with those situations.
259 
260 	Generally blocking and unblocking threads proceed in the following manner:
261 
262 	Blocking thread:
263 	- Acquire client lock.
264 	- Check client condition and decide whether blocking is necessary.
265 	- Modify some client data structure to indicate that this thread is now
266 		waiting.
267 	- Release client lock (unless client lock is the scheduler lock).
268 	- Block.
269 	- Acquire client lock (unless client lock is the scheduler lock).
270 	- Check client condition and compare with block result. E.g. if the wait was
271 		interrupted or timed out, but the client condition indicates success, it
272 		may be considered a success after all, since usually that happens when
273 		another thread concurrently changed the client condition and also tried
274 		to unblock the waiting thread. It is even necessary when that other
275 		thread changed the client data structures in a way that associate some
276 		resource with the unblocked thread, or otherwise the unblocked thread
277 		would have to reverse that here.
278 	- If still necessary -- i.e. not already taken care of by an unblocking
279 		thread -- modify some client structure to indicate that the thread is no
280 		longer waiting, so it isn't erroneously unblocked later.
281 
282 	Unblocking thread:
283 	- Acquire client lock.
284 	- Check client condition and decide whether a blocked thread can be woken
285 		up.
286 	- Check the client data structure that indicates whether one or more threads
287 		are waiting and which thread(s) need(s) to be woken up.
288 	- Unblock respective thread(s).
289 	- Possibly change some client structure, so that an unblocked thread can
290 		decide whether a concurrent timeout/interruption can be ignored, or
291 		simply so that it doesn't have to do any more cleanup.
292 
293 	Note that in the blocking thread the steps after blocking are strictly
294 	required only if timeouts or interruptions are possible. If they are not,
295 	the blocking thread can only be woken up explicitly by an unblocking thread,
296 	which could already take care of all the necessary client data structure
297 	modifications, so that the blocking thread wouldn't have to do that.
298 
299 	Note that the client lock can but does not have to be a spinlock.
300 	A mutex, a semaphore, or anything that doesn't try to use the thread
301 	blocking API for the calling thread when releasing the lock is fine.
302 	In particular that means in principle thread_prepare_to_block() can be
303 	called with interrupts enabled.
304 
305 	Care must be taken when the wait can be interrupted or can time out,
306 	especially with a client lock that uses the thread blocking API. After a
	blocked thread has been interrupted or the time out occurred it cannot
308 	acquire the client lock (or any other lock using the thread blocking API)
309 	without first making sure that the thread doesn't still appear to be
310 	waiting to other client code. Otherwise another thread could try to unblock
311 	it which could erroneously unblock the thread while already waiting on the
312 	client lock. So usually when interruptions or timeouts are possible a
313 	spinlock needs to be involved.
314 
315 	\param thread The current thread.
316 	\param flags The blocking flags. Relevant are:
317 		- \c B_CAN_INTERRUPT: The thread can be interrupted by any non-blocked
318 			signal. Implies \c B_KILL_CAN_INTERRUPT (specified or not).
319 		- \c B_KILL_CAN_INTERRUPT: The thread can be interrupted by a kill
320 			signal.
321 	\param type The type of object the thread will be blocked at. Informative/
322 		for debugging purposes. Must be one of the \c THREAD_BLOCK_TYPE_*
323 		constants. \c THREAD_BLOCK_TYPE_OTHER implies that \a object is a
324 		string.
325 	\param object The object the thread will be blocked at.  Informative/for
326 		debugging purposes.
327 */
static inline void
thread_prepare_to_block(Thread* thread, uint32 flags, uint32 type,
	const void* object)
{
	// Record what the thread is about to wait on. type/object are
	// informational (debugging); flags also determine interruptibility
	// (cf. thread_interrupt()).
	thread->wait.flags = flags;
	thread->wait.type = type;
	thread->wait.object = object;
	// A wait status of 1 means "still waiting"; thread_unblock_locked()
	// atomically replaces it with the wake-up status.
	atomic_set(&thread->wait.status, 1);
		// Set status last to guarantee that the other fields are initialized
		// when a thread is waiting.
}
339 
340 
341 /*!	Unblocks the specified blocked thread.
342 
343 	If the thread is no longer waiting (e.g. because thread_unblock_locked() has
344 	already been called in the meantime), this function does not have any
345 	effect.
346 
347 	The caller must hold the scheduler lock and the client lock (might be the
348 	same).
349 
350 	\param thread The thread to be unblocked.
351 	\param status The unblocking status. That's what the unblocked thread's
352 		call to thread_block_locked() will return.
353 */
354 static inline void
355 thread_unblock_locked(Thread* thread, status_t status)
356 {
357 	if (atomic_test_and_set(&thread->wait.status, status, 1) != 1)
358 		return;
359 
360 	// wake up the thread, if it is sleeping
361 	if (thread->state == B_THREAD_WAITING)
362 		scheduler_enqueue_in_run_queue(thread);
363 }
364 
365 
366 /*!	Interrupts the specified blocked thread, if possible.
367 
368 	The function checks whether the thread can be interrupted and, if so, calls
369 	\code thread_unblock_locked(thread, B_INTERRUPTED) \endcode. Otherwise the
370 	function is a no-op.
371 
372 	The caller must hold the scheduler lock. Normally thread_unblock_locked()
373 	also requires the client lock to be held, but in this case the caller
374 	usually doesn't know it. This implies that the client code needs to take
375 	special care, if waits are interruptible. See thread_prepare_to_block() for
376 	more information.
377 
378 	\param thread The thread to be interrupted.
379 	\param kill If \c false, the blocked thread is only interrupted, when the
380 		flag \c B_CAN_INTERRUPT was specified for the blocked thread. If
381 		\c true, it is only interrupted, when at least one of the flags
382 		\c B_CAN_INTERRUPT or \c B_KILL_CAN_INTERRUPT was specified for the
383 		blocked thread.
384 	\return \c B_OK, if the thread is interruptible and thread_unblock_locked()
385 		was called, \c B_NOT_ALLOWED otherwise. \c B_OK doesn't imply that the
386 		thread actually has been interrupted -- it could have been unblocked
387 		before already.
388 */
389 static inline status_t
390 thread_interrupt(Thread* thread, bool kill)
391 {
392 	if (thread_is_blocked(thread)) {
393 		if ((thread->wait.flags & B_CAN_INTERRUPT) != 0
394 			|| (kill && (thread->wait.flags & B_KILL_CAN_INTERRUPT) != 0)) {
395 			thread_unblock_locked(thread, B_INTERRUPTED);
396 			return B_OK;
397 		}
398 	}
399 
400 	return B_NOT_ALLOWED;
401 }
402 
403 
404 static inline void
405 thread_pin_to_current_cpu(Thread* thread)
406 {
407 	thread->pinned_to_cpu++;
408 }
409 
410 
411 static inline void
412 thread_unpin_from_current_cpu(Thread* thread)
413 {
414 	thread->pinned_to_cpu--;
415 }
416 
417 
418 static inline void
419 thread_prepare_suspend()
420 {
421 	Thread* thread = thread_get_current_thread();
422 	thread->going_to_suspend = true;
423 }
424 
425 
426 static inline void
427 thread_suspend(bool alreadyPrepared = false)
428 {
429 	Thread* thread = thread_get_current_thread();
430 	if (!alreadyPrepared)
431 		thread_prepare_suspend();
432 
433 	cpu_status state = disable_interrupts();
434 	acquire_spinlock(&thread->scheduler_lock);
435 
436 	if (thread->going_to_suspend)
437 		scheduler_reschedule(B_THREAD_SUSPENDED);
438 
439 	release_spinlock(&thread->scheduler_lock);
440 	restore_interrupts(state);
441 }
442 
443 
444 static inline void
445 thread_continue(Thread* thread)
446 {
447 	thread->going_to_suspend = false;
448 
449 	cpu_status state = disable_interrupts();
450 	acquire_spinlock(&thread->scheduler_lock);
451 
452 	if (thread->state == B_THREAD_SUSPENDED)
453 		scheduler_enqueue_in_run_queue(thread);
454 
455 	release_spinlock(&thread->scheduler_lock);
456 	restore_interrupts(state);
457 }
458 
459 
460 #endif /* _THREAD_H */
461