/*
 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _THREAD_H
#define _THREAD_H


#include <OS.h>

#include <arch/atomic.h>
#include <arch/thread.h>
// For the thread blocking inline functions only.
#include <kscheduler.h>
#include <ksignal.h>
#include <thread_types.h>


struct arch_fork_arg;
struct kernel_args;
struct select_info;
struct thread_creation_attributes;


// thread notifications
#define THREAD_MONITOR			'_tm_'
#define THREAD_ADDED			0x01
#define THREAD_REMOVED			0x02
#define THREAD_NAME_CHANGED		0x04


namespace BKernel {


struct ThreadCreationAttributes : thread_creation_attributes {
	// when calling from kernel only
	team_id			team;
	Thread*			thread;
	sigset_t		signal_mask;
	size_t			additional_stack_size;	// additional space in the stack
											// area after the TLS region, not
											// used as thread stack
	thread_func		kernelEntry;
	void*			kernelArgument;
	arch_fork_arg*	forkArgs;				// If non-NULL, the userland thread
											// will be started with this
											// register context.

public:
								ThreadCreationAttributes() {}
									// no-init constructor
								ThreadCreationAttributes(
									thread_func function, const char* name,
									int32 priority, void* arg,
									team_id team = -1, Thread* thread = NULL);

			status_t			InitFromUserAttributes(
									const thread_creation_attributes*
										userAttributes,
									char* nameBuffer);
};


}	// namespace BKernel

using BKernel::ThreadCreationAttributes;


extern spinlock gThreadCreationLock;


#ifdef __cplusplus
extern "C" {
#endif

void thread_at_kernel_entry(bigtime_t now);
	// called when the thread enters the kernel on its own behalf
	// (e.g. on a syscall)
void thread_at_kernel_exit(void);
void thread_at_kernel_exit_no_signals(void);
void thread_reset_for_exec(void);

status_t thread_init(struct kernel_args *args);
status_t thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum);
void thread_yield(void);
void thread_exit(void);

void thread_map(void (*function)(Thread* thread, void* data), void* data);

int32 thread_max_threads(void);
int32 thread_used_threads(void);

const char* thread_state_to_text(Thread* thread, int32 state);

int32 thread_get_io_priority(thread_id id);
void thread_set_io_priority(int32 priority);

#define thread_get_current_thread arch_thread_get_current_thread

static thread_id thread_get_current_thread_id(void);
static inline thread_id
thread_get_current_thread_id(void)
{
	Thread *thread = thread_get_current_thread();
	return thread ? thread->id : 0;
}

static inline bool
thread_is_idle_thread(Thread *thread)
{
	return thread->priority == B_IDLE_PRIORITY;
}

thread_id allocate_thread_id();
thread_id peek_next_thread_id();

status_t thread_enter_userspace_new_team(Thread* thread, addr_t entryFunction,
	void* argument1, void* argument2);
status_t thread_create_user_stack(Team* team, Thread* thread, void* stackBase,
	size_t stackSize, size_t additionalSize);
thread_id thread_create_thread(const ThreadCreationAttributes& attributes,
	bool kernel);
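
/*	A minimal sketch of in-kernel usage (the worker function and call site
	below are hypothetical): kernel code typically fills in a
	ThreadCreationAttributes via the convenience constructor and hands it to
	thread_create_thread() with \c kernel set to \c true. The new thread is
	created suspended and has to be resumed.

	\code
	static status_t
	my_worker(void* data)
	{
		// ... do the actual work ...
		return B_OK;
	}

	void
	start_my_worker()
	{
		ThreadCreationAttributes attributes(&my_worker, "my worker",
			B_NORMAL_PRIORITY, NULL);
		thread_id thread = thread_create_thread(attributes, true);
		if (thread >= 0)
			resume_thread(thread);
	}
	\endcode
*/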

thread_id spawn_kernel_thread_etc(thread_func, const char *name, int32 priority,
	void *args, team_id team);

status_t select_thread(int32 object, struct select_info *info, bool kernel);
status_t deselect_thread(int32 object, struct select_info *info, bool kernel);

#define syscall_64_bit_return_value() arch_syscall_64_bit_return_value()

status_t thread_block();
status_t thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout);
void thread_unblock(Thread* thread, status_t status);

// used in syscalls.c
status_t _user_set_thread_priority(thread_id thread, int32 newPriority);
status_t _user_rename_thread(thread_id thread, const char *name);
status_t _user_suspend_thread(thread_id thread);
status_t _user_resume_thread(thread_id thread);
thread_id _user_spawn_thread(struct thread_creation_attributes* attributes);
status_t _user_wait_for_thread(thread_id id, status_t *_returnCode);
status_t _user_wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
	status_t *_returnCode);
status_t _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags,
	bigtime_t* _remainingTime);
status_t _user_kill_thread(thread_id thread);
status_t _user_cancel_thread(thread_id threadID, void (*cancelFunction)(int));
void _user_thread_yield(void);
void _user_exit_thread(status_t return_value);
bool _user_has_data(thread_id thread);
status_t _user_send_data(thread_id thread, int32 code, const void *buffer,
	size_t buffer_size);
status_t _user_receive_data(thread_id *_sender, void *buffer,
	size_t buffer_size);
thread_id _user_find_thread(const char *name);
status_t _user_get_thread_info(thread_id id, thread_info *info);
status_t _user_get_next_thread_info(team_id team, int32 *cookie,
	thread_info *info);
int _user_get_cpu();
status_t _user_get_thread_affinity(thread_id id, void* userMask, size_t size);
status_t _user_set_thread_affinity(thread_id id, const void* userMask,
	size_t size);


status_t _user_block_thread(uint32 flags, bigtime_t timeout);
status_t _user_unblock_thread(thread_id thread, status_t status);
status_t _user_unblock_threads(thread_id* threads, uint32 count,
	status_t status);

// ToDo: these don't belong here
struct rlimit;
int _user_getrlimit(int resource, struct rlimit * rlp);
int _user_setrlimit(int resource, const struct rlimit * rlp);

#ifdef __cplusplus
}
#endif


/*!	Checks whether the current thread would immediately be interrupted when
	blocking it with the given wait/interrupt flags.

	The caller must hold the scheduler lock.

	\param thread The current thread.
	\param flags Wait/interrupt flags to be considered. Relevant are:
		- \c B_CAN_INTERRUPT: The thread can be interrupted by any non-blocked
			signal. Implies \c B_KILL_CAN_INTERRUPT (specified or not).
		- \c B_KILL_CAN_INTERRUPT: The thread can be interrupted by a kill
			signal.
	\return \c true, if the thread would be interrupted, \c false otherwise.
*/
static inline bool
thread_is_interrupted(Thread* thread, uint32 flags)
{
	sigset_t pendingSignals = thread->AllPendingSignals();
	return ((flags & B_CAN_INTERRUPT) != 0
			&& (pendingSignals & ~thread->sig_block_mask) != 0)
		|| ((flags & B_KILL_CAN_INTERRUPT) != 0
			&& (pendingSignals & KILL_SIGNALS) != 0);
}


/*!	Checks whether the given thread is currently blocked (i.e. still waiting
	for something).

	If a stable answer is required, the caller must hold the scheduler lock.
	Alternatively, if waiting is not interruptible and cannot time out, holding
	the client lock when calling thread_prepare_to_block() and the unblocking
	functions works as well.

	\param thread The thread in question.
	\return \c true, if the thread is blocked, \c false otherwise.
*/
static inline bool
thread_is_blocked(Thread* thread)
{
	return atomic_get(&thread->wait.status) == 1;
}


/*!	Prepares the current thread for waiting.

	This is the first of two steps necessary to block the current thread
	(IOW, to let it wait for someone else to unblock it or optionally time out
	after a specified delay). The process consists of two steps to avoid race
	conditions in case a lock other than the scheduler lock is involved.

	Usually the thread waits for some condition to change and this condition is
	something reflected in the caller's data structures which should be
	protected by a client lock the caller knows about. E.g. in the semaphore
	code that lock is a per-semaphore spinlock that protects the semaphore data,
	including the semaphore count and the queue of waiting threads. For certain
	low-level locking primitives (e.g. mutexes) that client lock is the
	scheduler lock itself, which simplifies things a bit.

	If a client lock other than the scheduler lock is used, this function must
	be called with that lock being held. Afterwards that lock should be dropped
	and the function that actually blocks the thread shall be invoked
	(thread_block[_locked]() or thread_block_with_timeout()). In between these
	two steps no functionality that uses the thread blocking API for this thread
	shall be used.

	When the caller determines that the condition for unblocking the thread
	occurred, it calls thread_unblock_locked() to unblock the thread. At that
	time one of the locks that were held when calling thread_prepare_to_block()
	must be held. Usually that would be the client lock. In two cases it
	generally isn't, however, since the unblocking code doesn't know about the
	client lock: 1. When thread_block_with_timeout() had been used and the
	timeout occurs. 2. When thread_prepare_to_block() had been called with one
	or both of the \c B_CAN_INTERRUPT or \c B_KILL_CAN_INTERRUPT flags specified
	and someone calls thread_interrupt() that is supposed to wake up the thread.
	In either of these two cases only the scheduler lock is held by the
	unblocking code. A timeout can only happen after
	thread_block_with_timeout() has been called, but an interruption is
	possible at any time. The client code must deal with those situations.

	Generally blocking and unblocking threads proceed in the following manner
	(see also the sketch following this function):

	Blocking thread:
	- Acquire client lock.
	- Check client condition and decide whether blocking is necessary.
	- Modify some client data structure to indicate that this thread is now
		waiting.
	- Release client lock (unless client lock is the scheduler lock).
	- Block.
	- Acquire client lock (unless client lock is the scheduler lock).
	- Check client condition and compare with block result. E.g. if the wait was
		interrupted or timed out, but the client condition indicates success, it
		may be considered a success after all, since usually that happens when
		another thread concurrently changed the client condition and also tried
		to unblock the waiting thread. It is even necessary when that other
		thread changed the client data structures in a way that associates some
		resource with the unblocked thread, since otherwise the unblocked thread
		would have to reverse that here.
	- If still necessary -- i.e. not already taken care of by an unblocking
		thread -- modify some client structure to indicate that the thread is no
		longer waiting, so it isn't erroneously unblocked later.

	Unblocking thread:
	- Acquire client lock.
	- Check client condition and decide whether a blocked thread can be woken
		up.
	- Check the client data structure that indicates whether one or more threads
		are waiting and which thread(s) need(s) to be woken up.
	- Unblock respective thread(s).
	- Possibly change some client structure, so that an unblocked thread can
		decide whether a concurrent timeout/interruption can be ignored, or
		simply so that it doesn't have to do any more cleanup.

	Note that in the blocking thread the steps after blocking are strictly
	required only if timeouts or interruptions are possible. If they are not,
	the blocking thread can only be woken up explicitly by an unblocking thread,
	which could already take care of all the necessary client data structure
	modifications, so that the blocking thread wouldn't have to do that.

	Note that the client lock can but does not have to be a spinlock.
	A mutex, a semaphore, or anything that doesn't try to use the thread
	blocking API for the calling thread when releasing the lock is fine.
	In particular that means in principle thread_prepare_to_block() can be
	called with interrupts enabled.

	Care must be taken when the wait can be interrupted or can time out,
	especially with a client lock that uses the thread blocking API. After a
	blocked thread has been interrupted or the timeout occurred, it cannot
	acquire the client lock (or any other lock using the thread blocking API)
	without first making sure that the thread doesn't still appear to be
	waiting to other client code. Otherwise another thread could try to unblock
	it, which could erroneously unblock the thread while it is already waiting
	on the client lock. So usually when interruptions or timeouts are possible
	a spinlock needs to be involved.

	\param thread The current thread.
	\param flags The blocking flags. Relevant are:
		- \c B_CAN_INTERRUPT: The thread can be interrupted by any non-blocked
			signal. Implies \c B_KILL_CAN_INTERRUPT (specified or not).
		- \c B_KILL_CAN_INTERRUPT: The thread can be interrupted by a kill
			signal.
	\param type The type of object the thread will be blocked at. Informative/
		for debugging purposes. Must be one of the \c THREAD_BLOCK_TYPE_*
		constants. \c THREAD_BLOCK_TYPE_OTHER implies that \a object is a
		string.
	\param object The object the thread will be blocked at. Informative/for
		debugging purposes.
*/
static inline void
thread_prepare_to_block(Thread* thread, uint32 flags, uint32 type,
	const void* object)
{
	thread->wait.flags = flags;
	thread->wait.type = type;
	thread->wait.object = object;
	atomic_set(&thread->wait.status, 1);
		// Set status last to guarantee that the other fields are initialized
		// when a thread is waiting.
}
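
/*	Illustrative sketch of the protocol described above. The client spinlock,
	condition flag, and waiter pointer below are hypothetical, not actual
	kernel objects; real clients (e.g. the semaphore code) keep equivalent
	state in their own structures. Since the client lock here is a spinlock,
	interrupts are disabled around it, following the pattern used by
	thread_suspend() below.

	\code
	// blocking side (current thread)
	cpu_status state = disable_interrupts();
	acquire_spinlock(&sClientLock);
	while (!sConditionMet) {
		Thread* thread = thread_get_current_thread();
		sWaitingThread = thread;
		thread_prepare_to_block(thread, 0, THREAD_BLOCK_TYPE_OTHER,
			"example condition");
		release_spinlock(&sClientLock);
		restore_interrupts(state);

		thread_block();
			// returns the status passed to thread_unblock[_locked]()

		state = disable_interrupts();
		acquire_spinlock(&sClientLock);
	}
	sWaitingThread = NULL;
	release_spinlock(&sClientLock);
	restore_interrupts(state);

	// unblocking side; thread_unblock_locked() requires the scheduler lock
	// to be held, thread_unblock() does not
	cpu_status state = disable_interrupts();
	acquire_spinlock(&sClientLock);
	sConditionMet = true;
	if (sWaitingThread != NULL)
		thread_unblock(sWaitingThread, B_OK);
	release_spinlock(&sClientLock);
	restore_interrupts(state);
	\endcode
*/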


/*!	Unblocks the specified blocked thread.

	If the thread is no longer waiting (e.g. because thread_unblock_locked() has
	already been called in the meantime), this function does not have any
	effect.

	The caller must hold the scheduler lock and the client lock (might be the
	same).

	\param thread The thread to be unblocked.
	\param status The unblocking status. That's what the unblocked thread's
		call to thread_block_locked() will return.
*/
static inline void
thread_unblock_locked(Thread* thread, status_t status)
{
	if (atomic_test_and_set(&thread->wait.status, status, 1) != 1)
		return;

	// wake up the thread, if it is sleeping
	if (thread->state == B_THREAD_WAITING)
		scheduler_enqueue_in_run_queue(thread);
}


/*!	Interrupts the specified blocked thread, if possible.

	The function checks whether the thread can be interrupted and, if so, calls
	\code thread_unblock_locked(thread, B_INTERRUPTED) \endcode. Otherwise the
	function is a no-op.

	The caller must hold the scheduler lock. Normally thread_unblock_locked()
	also requires the client lock to be held, but in this case the caller
	usually doesn't know it. This implies that the client code needs to take
	special care, if waits are interruptible. See thread_prepare_to_block() for
	more information.

	\param thread The thread to be interrupted.
	\param kill If \c false, the blocked thread is interrupted only if the flag
		\c B_CAN_INTERRUPT was specified for the blocked thread. If \c true,
		it is interrupted if at least one of the flags \c B_CAN_INTERRUPT or
		\c B_KILL_CAN_INTERRUPT was specified for the blocked thread.
	\return \c B_OK, if the thread is interruptible and thread_unblock_locked()
		was called, \c B_NOT_ALLOWED otherwise. \c B_OK doesn't imply that the
		thread actually has been interrupted -- it could already have been
		unblocked before.
*/
static inline status_t
thread_interrupt(Thread* thread, bool kill)
{
	if (thread_is_blocked(thread)) {
		if ((thread->wait.flags & B_CAN_INTERRUPT) != 0
			|| (kill && (thread->wait.flags & B_KILL_CAN_INTERRUPT) != 0)) {
			thread_unblock_locked(thread, B_INTERRUPTED);
			return B_OK;
		}
	}

	return B_NOT_ALLOWED;
}


static inline void
thread_pin_to_current_cpu(Thread* thread)
{
	thread->pinned_to_cpu++;
}


static inline void
thread_unpin_from_current_cpu(Thread* thread)
{
	thread->pinned_to_cpu--;
}
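
/*	Pinning is a nesting counter, so pin/unpin calls must be balanced.
	A hypothetical use, keeping the current thread on its CPU while it
	touches per-CPU state:

	\code
	Thread* thread = thread_get_current_thread();
	thread_pin_to_current_cpu(thread);
	// ... access per-CPU data without the scheduler migrating us ...
	thread_unpin_from_current_cpu(thread);
	\endcode
*/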


static inline void
thread_prepare_suspend()
{
	Thread* thread = thread_get_current_thread();
	thread->going_to_suspend = true;
}


static inline void
thread_suspend(bool alreadyPrepared = false)
{
	Thread* thread = thread_get_current_thread();
	if (!alreadyPrepared)
		thread_prepare_suspend();

	cpu_status state = disable_interrupts();
	acquire_spinlock(&thread->scheduler_lock);

	if (thread->going_to_suspend)
		scheduler_reschedule(B_THREAD_SUSPENDED);

	release_spinlock(&thread->scheduler_lock);
	restore_interrupts(state);
}


static inline void
thread_continue(Thread* thread)
{
	thread->going_to_suspend = false;

	cpu_status state = disable_interrupts();
	acquire_spinlock(&thread->scheduler_lock);

	if (thread->state == B_THREAD_SUSPENDED)
		scheduler_enqueue_in_run_queue(thread);

	release_spinlock(&thread->scheduler_lock);
	restore_interrupts(state);
}
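
/*	A sketch of how these functions pair up (hypothetical call sites): the
	thread to be stopped suspends itself, and some other thread later resumes
	it. thread_prepare_suspend() lets the intent to suspend be recorded early;
	a thread_continue() arriving in between clears \c going_to_suspend, so a
	subsequent thread_suspend(true) will not block.

	\code
	// in the thread that should stop
	thread_suspend();

	// in another thread, holding a pointer to the suspended Thread
	thread_continue(thread);
	\endcode
*/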


#endif	/* _THREAD_H */