xref: /haiku/headers/private/kernel/lock.h (revision 1026b0a1a76dc88927bb8175c470f638dc5464ee)
/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_LOCK_H
#define _KERNEL_LOCK_H

#include <OS.h>
#include <debug.h>


struct mutex_waiter;

typedef struct mutex {
	const char*				name;
	struct mutex_waiter*	waiters;
#if KDEBUG
	thread_id				holder;
#else
	int32					count;
	uint16					ignore_unlock_count;
#endif
	uint8					flags;
} mutex;

#define MUTEX_FLAG_CLONE_NAME	0x1


typedef struct recursive_lock {
	mutex		lock;
#if !KDEBUG
	thread_id	holder;
#endif
	int			recursion;
} recursive_lock;


struct rw_lock_waiter;

typedef struct rw_lock {
	const char*				name;
	struct rw_lock_waiter*	waiters;
	thread_id				holder;
	vint32					count;
	int32					owner_count;
	int16					active_readers;
								// Only > 0 while a writer is waiting: number
								// of active readers when the first waiting
								// writer started waiting.
	int16					pending_readers;
								// Number of readers that have already
								// incremented "count", but have not yet started
								// to wait at the time the last writer unlocked.
	uint32					flags;
} rw_lock;

#define RW_LOCK_WRITER_COUNT_BASE	0x10000
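	// Readers account for 1 in "count", writers for RW_LOCK_WRITER_COUNT_BASE
	// each; the inline read lock/unlock functions below use this to detect
	// writer activity and fall back to the slow path.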

#define RW_LOCK_FLAG_CLONE_NAME	0x1


#if KDEBUG
#	define KDEBUG_RW_LOCK_DEBUG 0
		// Define to 1 if you want to use ASSERT_READ_LOCKED_RW_LOCK().
		// The rw_lock will just behave like a recursive locker then.
#	define ASSERT_LOCKED_RECURSIVE(r) \
		{ ASSERT(find_thread(NULL) == (r)->lock.holder); }
#	define ASSERT_LOCKED_MUTEX(m) { ASSERT(find_thread(NULL) == (m)->holder); }
#	define ASSERT_WRITE_LOCKED_RW_LOCK(l) \
		{ ASSERT(find_thread(NULL) == (l)->holder); }
#	if KDEBUG_RW_LOCK_DEBUG
#		define ASSERT_READ_LOCKED_RW_LOCK(l) \
			{ ASSERT(find_thread(NULL) == (l)->holder); }
#	else
#		define ASSERT_READ_LOCKED_RW_LOCK(l) do {} while (false)
#	endif
#else
#	define ASSERT_LOCKED_RECURSIVE(r)		do {} while (false)
#	define ASSERT_LOCKED_MUTEX(m)			do {} while (false)
#	define ASSERT_WRITE_LOCKED_RW_LOCK(m)	do {} while (false)
#	define ASSERT_READ_LOCKED_RW_LOCK(l)	do {} while (false)
#endif


// static initializers
#if KDEBUG
#	define MUTEX_INITIALIZER(name)			{ name, NULL, -1, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), 0 }
#else
#	define MUTEX_INITIALIZER(name)			{ name, NULL, 0, 0, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), -1, 0 }
#endif

#define RW_LOCK_INITIALIZER(name)			{ name, NULL, -1, 0, 0, 0 }
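	// Minimal usage sketch (the variable names are illustrative only):
	//
	//	static mutex sExampleLock = MUTEX_INITIALIZER("example lock");
	//	static recursive_lock sExampleRecursiveLock
	//		= RECURSIVE_LOCK_INITIALIZER("example recursive lock");
	//	static rw_lock sExampleRWLock = RW_LOCK_INITIALIZER("example rw lock");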


#if KDEBUG
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->lock.holder)
#else
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->holder)
#endif


#ifdef __cplusplus
extern "C" {
#endif

extern void recursive_lock_init(recursive_lock *lock, const char *name);
	// "name" is neither cloned nor freed in recursive_lock_destroy()
extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
	uint32 flags);
extern void recursive_lock_destroy(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);
extern status_t recursive_lock_trylock(recursive_lock *lock);
extern void recursive_lock_unlock(recursive_lock *lock);
extern int32 recursive_lock_get_recursion(recursive_lock *lock);
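	// Usage sketch (illustrative only): the holding thread may lock again
	// without deadlocking, it just has to unlock as often as it locked:
	//
	//	recursive_lock_lock(&lock);
	//	recursive_lock_lock(&lock);		// nested acquisition by the holder
	//	recursive_lock_unlock(&lock);
	//	recursive_lock_unlock(&lock);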

extern void rw_lock_init(rw_lock* lock, const char* name);
	// "name" is neither cloned nor freed in rw_lock_destroy()
extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
extern void rw_lock_destroy(rw_lock* lock);
extern status_t rw_lock_write_lock(rw_lock* lock);
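	// Usage sketch (illustrative only); the read lock/unlock and write unlock
	// calls are inline functions defined further below in this header:
	//
	//	rw_lock_read_lock(&lock);
	//	// ... shared, read-only access ...
	//	rw_lock_read_unlock(&lock);
	//
	//	rw_lock_write_lock(&lock);
	//	// ... exclusive access ...
	//	rw_lock_write_unlock(&lock);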

extern void mutex_init(mutex* lock, const char* name);
	// "name" is neither cloned nor freed in mutex_destroy()
extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
extern void mutex_destroy(mutex* lock);
extern status_t mutex_switch_lock(mutex* from, mutex* to);
	// Unlocks "from" and locks "to" such that unlocking and starting to wait
	// for the lock happen atomically. I.e. if "from" guards the object "to"
	// belongs to, the operation is safe as long as "from" is held while
	// destroying "to".
extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to);
	// Like mutex_switch_lock(), but switches from a read-locked rw_lock.
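	// Sketch of the intended pattern (names are illustrative, not kernel API):
	// "sObjectListLock" guards a list of objects, each carrying its own lock.
	//
	//	mutex_lock(&sObjectListLock);
	//	Object* object = lookup_object(id);
	//	status_t status = mutex_switch_lock(&sObjectListLock, &object->lock);
	//		// sObjectListLock is released in any case; on B_OK we now hold
	//		// object->lock without having raced a concurrent destruction of
	//		// the object.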


// implementation private:

extern status_t _rw_lock_read_lock(rw_lock* lock);
extern status_t _rw_lock_read_lock_with_timeout(rw_lock* lock,
	uint32 timeoutFlags, bigtime_t timeout);
extern void _rw_lock_read_unlock(rw_lock* lock, bool schedulerLocked);
extern void _rw_lock_write_unlock(rw_lock* lock, bool schedulerLocked);

extern status_t _mutex_lock(mutex* lock, bool schedulerLocked);
extern void _mutex_unlock(mutex* lock, bool schedulerLocked);
extern status_t _mutex_trylock(mutex* lock);
extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
	bigtime_t timeout);


static inline status_t
rw_lock_read_lock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
	return rw_lock_write_lock(lock);
#else
	int32 oldCount = atomic_add(&lock->count, 1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		return _rw_lock_read_lock(lock);
	return B_OK;
#endif
}


static inline status_t
rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
#if KDEBUG_RW_LOCK_DEBUG
	// In this debug mode the rw_lock degenerates to a write lock; the timeout
	// is not applied here.
	return rw_lock_write_lock(lock);
#else
	int32 oldCount = atomic_add(&lock->count, 1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		return _rw_lock_read_lock_with_timeout(lock, timeoutFlags, timeout);
	return B_OK;
#endif
}


static inline void
rw_lock_read_unlock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_write_unlock(lock, false);
		// equivalent to rw_lock_write_unlock(), which is defined only below
#else
	int32 oldCount = atomic_add(&lock->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock(lock, false);
#endif
}


static inline void
rw_lock_write_unlock(rw_lock* lock)
{
	_rw_lock_write_unlock(lock, false);
}

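// Inline fast paths: in the !KDEBUG case "count" is 0 while a mutex is
// unlocked; the functions below only call the out-of-line _mutex_*() slow
// paths when the atomic update of "count" indicates contention. With KDEBUG
// the fully checked implementation is always used.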
static inline status_t
mutex_lock(mutex* lock)
{
#if KDEBUG
	return _mutex_lock(lock, false);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock(lock, false);
	return B_OK;
#endif
}


static inline status_t
mutex_lock_threads_locked(mutex* lock)
{
#if KDEBUG
	return _mutex_lock(lock, true);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock(lock, true);
	return B_OK;
#endif
}


static inline status_t
mutex_trylock(mutex* lock)
{
#if KDEBUG
	return _mutex_trylock(lock);
#else
	if (atomic_test_and_set(&lock->count, -1, 0) != 0)
		return B_WOULD_BLOCK;
	return B_OK;
#endif
}


static inline status_t
mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
	return B_OK;
#endif
}


static inline void
mutex_unlock(mutex* lock)
{
#if !KDEBUG
	if (atomic_add(&lock->count, 1) < -1)
#endif
		_mutex_unlock(lock, false);
}


static inline void
mutex_transfer_lock(mutex* lock, thread_id thread)
{
#if KDEBUG
	lock->holder = thread;
#endif
}


static inline void
recursive_lock_transfer_lock(recursive_lock* lock, thread_id thread)
{
	if (lock->recursion != 1)
		panic("invalid recursion level for lock transfer!");

#if KDEBUG
	lock->lock.holder = thread;
#else
	lock->holder = thread;
#endif
}


extern void lock_debug_init();

#ifdef __cplusplus
}
#endif

#endif	/* _KERNEL_LOCK_H */