xref: /haiku/headers/private/kernel/thread.h (revision b6b0567fbd186f8ce8a0c90bdc7a7b5b4c649678)
/*
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _THREAD_H
#define _THREAD_H


#include <OS.h>
#include <thread_types.h>
#include <arch/thread.h>

// For the thread blocking inline functions only.
#include <kscheduler.h>
#include <ksignal.h>


struct kernel_args;
struct select_info;
struct thread_creation_attributes;


// thread notifications
#define THREAD_MONITOR	'_tm_'
#define THREAD_ADDED	0x01
#define THREAD_REMOVED	0x02


#ifdef __cplusplus
extern "C" {
#endif

void thread_enqueue(struct thread *t, struct thread_queue *q);
struct thread *thread_lookat_queue(struct thread_queue *q);
struct thread *thread_dequeue(struct thread_queue *q);
struct thread *thread_dequeue_id(struct thread_queue *q, thread_id id);

void thread_at_kernel_entry(bigtime_t now);
	// called when the kernel is entered on behalf of this thread,
	// e.g. for a syscall
void thread_at_kernel_exit(void);
void thread_at_kernel_exit_no_signals(void);
void thread_reset_for_exec(void);

status_t thread_init(struct kernel_args *args);
status_t thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum);
void thread_yield(bool force);
void thread_exit(void);

int32 thread_max_threads(void);
int32 thread_used_threads(void);

const char* thread_state_to_text(struct thread* thread, int32 state);

int32 thread_get_io_priority(thread_id id);
void thread_set_io_priority(int32 priority);

#define thread_get_current_thread arch_thread_get_current_thread

struct thread *thread_get_thread_struct(thread_id id);
struct thread *thread_get_thread_struct_locked(thread_id id);

static thread_id thread_get_current_thread_id(void);
static inline thread_id
thread_get_current_thread_id(void)
{
	struct thread *thread = thread_get_current_thread();
	return thread ? thread->id : 0;
}

static inline bool
thread_is_idle_thread(struct thread *thread)
{
	return thread->entry == NULL;
}

typedef bool (*thread_iterator_callback)(struct thread* thread, void* cookie);
struct thread* thread_iterate_through_threads(thread_iterator_callback callback,
	void* cookie);

thread_id allocate_thread_id(void);
thread_id peek_next_thread_id(void);

thread_id spawn_kernel_thread_etc(thread_func, const char *name, int32 priority,
	void *args, team_id team, thread_id threadID);
status_t wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
	status_t *_returnCode);

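/* Not part of the original header -- a minimal usage sketch for the two
   declarations above. It assumes the usual <OS.h> kernel API (thread_func,
   resume_thread(), B_NORMAL_PRIORITY) plus team_get_kernel_team_id() from the
   team interface, and that passing -1 as the last argument lets the kernel
   pick the thread ID; my_worker() and run_worker() are hypothetical.

	static int32
	my_worker(void *data)
	{
		// ... do the actual work ...
		return B_OK;
	}

	static status_t
	run_worker(void)
	{
		status_t returnCode;
		thread_id thread = spawn_kernel_thread_etc(my_worker, "my worker",
			B_NORMAL_PRIORITY, NULL, team_get_kernel_team_id(), -1);
		if (thread < B_OK)
			return thread;

		resume_thread(thread);
		return wait_for_thread_etc(thread, 0, 0, &returnCode);
	}
*/
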
status_t select_thread(int32 object, struct select_info *info, bool kernel);
status_t deselect_thread(int32 object, struct select_info *info, bool kernel);

#define syscall_64_bit_return_value() arch_syscall_64_bit_return_value()

status_t thread_block(void);
status_t thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout);
status_t thread_block_with_timeout_locked(uint32 timeoutFlags,
			bigtime_t timeout);
bool thread_unblock(thread_id threadID, status_t status);

// used in syscalls.c
status_t _user_set_thread_priority(thread_id thread, int32 newPriority);
status_t _user_rename_thread(thread_id thread, const char *name);
status_t _user_suspend_thread(thread_id thread);
status_t _user_resume_thread(thread_id thread);
thread_id _user_spawn_thread(struct thread_creation_attributes* attributes);
status_t _user_wait_for_thread(thread_id id, status_t *_returnCode);
status_t _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags);
status_t _user_kill_thread(thread_id thread);
void _user_thread_yield(void);
void _user_exit_thread(status_t return_value);
bool _user_has_data(thread_id thread);
status_t _user_send_data(thread_id thread, int32 code, const void *buffer, size_t buffer_size);
status_t _user_receive_data(thread_id *_sender, void *buffer, size_t buffer_size);
thread_id _user_find_thread(const char *name);
status_t _user_get_thread_info(thread_id id, thread_info *info);
status_t _user_get_next_thread_info(team_id team, int32 *cookie, thread_info *info);

status_t _user_block_thread(uint32 flags, bigtime_t timeout);
status_t _user_unblock_thread(thread_id thread, status_t status);
status_t _user_unblock_threads(thread_id* threads, uint32 count,
	status_t status);

// ToDo: these don't belong here
struct rlimit;
int _user_getrlimit(int resource, struct rlimit * rlp);
int _user_setrlimit(int resource, const struct rlimit * rlp);

#ifdef __cplusplus
}
#endif


/*!
	\a thread must be the current thread.
	The thread lock may be held, but doesn't need to be.
*/
static inline bool
thread_is_interrupted(struct thread* thread, uint32 flags)
{
	return ((flags & B_CAN_INTERRUPT)
			&& (thread->sig_pending & ~thread->sig_block_mask) != 0)
		|| ((flags & B_KILL_CAN_INTERRUPT)
			&& (thread->sig_pending & KILL_SIGNALS));
}


static inline bool
thread_is_blocked(struct thread* thread)
{
	return thread->wait.status == 1;
}


/*!
	\a thread must be the current thread.
	The thread lock may be held, but doesn't need to be.
*/
static inline void
thread_prepare_to_block(struct thread* thread, uint32 flags, uint32 type,
	const void* object)
{
	thread->wait.flags = flags;
	thread->wait.type = type;
	thread->wait.object = object;
	atomic_set(&thread->wait.status, 1);
		// Set status last to guarantee that the other fields are initialized
		// when a thread is waiting.
}


static inline status_t
thread_block_locked(struct thread* thread)
{
	if (thread->wait.status == 1) {
		// check for signals, if interruptible
		if (thread_is_interrupted(thread, thread->wait.flags)) {
			thread->wait.status = B_INTERRUPTED;
		} else {
			thread->next_state = B_THREAD_WAITING;
			scheduler_reschedule();
		}
	}

	return thread->wait.status;
}


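/* Not part of the original header -- a sketch of the waiting side of the
   blocking protocol above. It assumes the GRAB_THREAD_LOCK()/
   RELEASE_THREAD_LOCK() macros and THREAD_BLOCK_TYPE_OTHER from
   <thread_types.h>; struct my_object and wait_for_my_event() are
   hypothetical. Because thread_prepare_to_block() publishes the wait status
   before the locks are dropped, a wake-up that arrives before thread_block()
   is not lost.

	static status_t
	wait_for_my_event(struct my_object *object)
	{
		struct thread *thread = thread_get_current_thread();

		cpu_status state = disable_interrupts();
		GRAB_THREAD_LOCK();

		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, object);

		RELEASE_THREAD_LOCK();
		restore_interrupts(state);

		// No locks are held here; thread_block() returns the status passed
		// to thread_unblock()/thread_unblock_locked(), or B_INTERRUPTED.
		return thread_block();
	}
*/

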
static inline bool
thread_unblock_locked(struct thread* thread, status_t status)
{
	if (atomic_test_and_set(&thread->wait.status, status, 1) != 1)
		return false;

	// wake up the thread, if it is sleeping
	if (thread->state == B_THREAD_WAITING)
		scheduler_enqueue_in_run_queue(thread);

	return true;
}


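/* Not part of the original header -- the matching waking side for the sketch
   above, again assuming the GRAB_THREAD_LOCK()/RELEASE_THREAD_LOCK() macros;
   notify_my_event() is hypothetical, and the waiter pointer would come from
   wherever the waiting thread registered itself (e.g. a list on the object).

	static void
	notify_my_event(struct thread *waiter)
	{
		cpu_status state = disable_interrupts();
		GRAB_THREAD_LOCK();

		// Wakes the thread only if it is still blocked (wait.status == 1);
		// otherwise thread_unblock_locked() is a no-op and returns false.
		thread_unblock_locked(waiter, B_OK);

		RELEASE_THREAD_LOCK();
		restore_interrupts(state);
	}
*/

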
static inline status_t
thread_interrupt(struct thread* thread, bool kill)
{
	if ((thread->wait.flags & B_CAN_INTERRUPT) != 0
		|| (kill && (thread->wait.flags & B_KILL_CAN_INTERRUPT) != 0)) {
		thread_unblock_locked(thread, B_INTERRUPTED);
		return B_OK;
	}

	return B_NOT_ALLOWED;
}


static inline void
thread_pin_to_current_cpu(struct thread* thread)
{
	thread->pinned_to_cpu++;
}


static inline void
thread_unpin_from_current_cpu(struct thread* thread)
{
	thread->pinned_to_cpu--;
}


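/* Not part of the original header -- an illustrative note on the two helpers
   above: the calls nest via the pinned_to_cpu counter and must stay balanced.
   The assumption here is that the scheduler keeps a pinned thread on its
   current CPU, so the pattern is useful around code that touches per-CPU data:

	struct thread *thread = thread_get_current_thread();

	thread_pin_to_current_cpu(thread);
	// ... safe to work with data belonging to smp_get_current_cpu() ...
	thread_unpin_from_current_cpu(thread);
*/

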
static inline void
thread_disable_scheduling(struct thread* thread)
{
	thread->keep_scheduled++;
}


static inline void
thread_enable_scheduling(struct thread* thread)
{
	thread->keep_scheduled--;
}


#endif /* _THREAD_H */