xref: /haiku/headers/private/kernel/lock.h (revision 899e0ef82b5624ace2ccfa5f5a58c8ebee54aaef)
/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_LOCK_H
#define _KERNEL_LOCK_H


#include <OS.h>

#include <arch/atomic.h>
#include <debug.h>


struct mutex_waiter;

typedef struct mutex {
	const char*				name;
	struct mutex_waiter*	waiters;
	spinlock				lock;
#if KDEBUG
	thread_id				holder;
#else
	int32					count;
#endif
	uint8					flags;
} mutex;

#define MUTEX_FLAG_CLONE_NAME	0x1
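	// Usage sketch (illustrative only): by default a lock merely stores the
	// name pointer passed to mutex_init(), so the string must stay valid for
	// the lock's lifetime. Passing MUTEX_FLAG_CLONE_NAME to mutex_init_etc()
	// (declared below) makes the lock keep its own copy, e.g. when the name
	// is built in a temporary buffer ("area" and "areaID" are hypothetical):
	//
	//	char name[32];
	//	snprintf(name, sizeof(name), "area %" B_PRId32, areaID);
	//	mutex_init_etc(&area->lock, name, MUTEX_FLAG_CLONE_NAME);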


typedef struct recursive_lock {
	mutex		lock;
#if !KDEBUG
	thread_id	holder;
#else
	int32		_unused;
#endif
	int			recursion;
} recursive_lock;


struct rw_lock_waiter;

typedef struct rw_lock {
	const char*				name;
	struct rw_lock_waiter*	waiters;
	spinlock				lock;
	thread_id				holder;
	int32					count;
	int32					owner_count;
	int16					active_readers;
								// Only > 0 while a writer is waiting: number
								// of active readers when the first waiting
								// writer started waiting.
	int16					pending_readers;
								// Number of readers that have already
								// incremented "count", but have not yet started
								// to wait at the time the last writer unlocked.
	uint32					flags;
} rw_lock;

#define RW_LOCK_WRITER_COUNT_BASE	0x10000
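	// Each reader adds 1 to "count"; once the value reaches
	// RW_LOCK_WRITER_COUNT_BASE a writer holds or is waiting for the lock,
	// and the inline read fast paths below fall back to the slow path.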

#define RW_LOCK_FLAG_CLONE_NAME	0x1


#if KDEBUG
#	define KDEBUG_RW_LOCK_DEBUG 0
		// Define to 1 if you want to use ASSERT_READ_LOCKED_RW_LOCK()
		// and ASSERT_UNLOCKED_RW_LOCK().
#	define ASSERT_LOCKED_RECURSIVE(r) \
		{ ASSERT(find_thread(NULL) == (r)->lock.holder); }
#	define ASSERT_LOCKED_MUTEX(m) { ASSERT(find_thread(NULL) == (m)->holder); }
#	define ASSERT_WRITE_LOCKED_RW_LOCK(l) \
		{ ASSERT(find_thread(NULL) == (l)->holder); }
#	if KDEBUG_RW_LOCK_DEBUG
		extern bool _rw_lock_is_read_locked(rw_lock* lock);
#		define ASSERT_READ_LOCKED_RW_LOCK(l) \
			{ ASSERT_PRINT(_rw_lock_is_read_locked(l), "rwlock %p", l); }
#		define ASSERT_UNLOCKED_RW_LOCK(l) \
			{ ASSERT_PRINT(!_rw_lock_is_read_locked(l), "rwlock %p", l); }
#	else
#		define ASSERT_READ_LOCKED_RW_LOCK(l) do {} while (false)
#		define ASSERT_UNLOCKED_RW_LOCK(l)	 do {} while (false)
#	endif
#else
#	define ASSERT_LOCKED_RECURSIVE(r)		do {} while (false)
#	define ASSERT_LOCKED_MUTEX(m)			do {} while (false)
#	define ASSERT_WRITE_LOCKED_RW_LOCK(m)	do {} while (false)
#	define ASSERT_READ_LOCKED_RW_LOCK(l)	do {} while (false)
#	define ASSERT_UNLOCKED_RW_LOCK(l)		do {} while (false)
#endif
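
// Typical use of the assert macros (hypothetical function and types):
// document and, in KDEBUG builds, verify a caller's locking precondition.
//
//	static void
//	remove_item(struct item_list* list, struct item* item)
//	{
//		ASSERT_LOCKED_MUTEX(&list->lock);
//		// ... caller must hold list->lock ...
//	}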


// static initializers
#if KDEBUG
#	define MUTEX_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, -1, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), 0 }
#else
#	define MUTEX_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, 0, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), -1, 0 }
#endif

#define RW_LOCK_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, -1, 0, 0, 0, 0, 0 }
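
// Example (sketch with hypothetical names): the initializers allow locks with
// static storage duration to be used without an explicit *_init() call.
//
//	static mutex sCacheLock = MUTEX_INITIALIZER("example cache lock");
//	static rw_lock sTableLock = RW_LOCK_INITIALIZER("example table lock");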


#if KDEBUG
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->lock.holder)
#else
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->holder)
#endif


#ifdef __cplusplus
extern "C" {
#endif

extern void	recursive_lock_init(recursive_lock *lock, const char *name);
	// The name is *not* cloned, nor is it freed in recursive_lock_destroy().
extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
	uint32 flags);
extern void recursive_lock_destroy(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);
extern status_t recursive_lock_trylock(recursive_lock *lock);
extern void recursive_lock_unlock(recursive_lock *lock);
extern status_t recursive_lock_switch_lock(recursive_lock* from,
	recursive_lock* to);
	// Unlocks "from" and locks "to" such that unlocking and starting to wait
	// for the lock is atomic. I.e. if "from" guards the object "to" belongs
	// to, the operation is safe as long as "from" is held while destroying
	// "to".
extern status_t recursive_lock_switch_from_mutex(mutex* from,
	recursive_lock* to);
	// Like recursive_lock_switch_lock(), just for switching from a mutex.
extern status_t recursive_lock_switch_from_read_lock(rw_lock* from,
	recursive_lock* to);
	// Like recursive_lock_switch_lock(), just for switching from a read-locked
	// rw_lock.
extern int32 recursive_lock_get_recursion(recursive_lock *lock);
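
// Usage sketch (hypothetical function): a recursive_lock may be re-acquired
// by the thread that already holds it; every recursive_lock_lock() must be
// balanced by a recursive_lock_unlock().
//
//	static recursive_lock sLock = RECURSIVE_LOCK_INITIALIZER("example lock");
//
//	static void
//	do_work_locked()
//	{
//		recursive_lock_lock(&sLock);
//		// ... may call helpers that lock sLock again ...
//		recursive_lock_unlock(&sLock);
//	}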

extern void rw_lock_init(rw_lock* lock, const char* name);
	// The name is *not* cloned, nor is it freed in rw_lock_destroy().
extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
extern void rw_lock_destroy(rw_lock* lock);
extern status_t rw_lock_write_lock(rw_lock* lock);
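
// Usage sketch (hypothetical names): multiple readers may hold the lock at
// the same time, while rw_lock_write_lock() grants exclusive access. The
// read side uses the inline fast paths defined further below.
//
//	rw_lock_read_lock(&sTableLock);
//	// ... look up an entry ...
//	rw_lock_read_unlock(&sTableLock);
//
//	rw_lock_write_lock(&sTableLock);
//	// ... modify the table ...
//	rw_lock_write_unlock(&sTableLock);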

extern void mutex_init(mutex* lock, const char* name);
	// The name is *not* cloned, nor is it freed in mutex_destroy().
extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
extern void mutex_destroy(mutex* lock);
extern void mutex_transfer_lock(mutex* lock, thread_id thread);
extern status_t mutex_switch_lock(mutex* from, mutex* to);
	// Unlocks "from" and locks "to" such that unlocking and starting to wait
	// for the lock is atomic. I.e. if "from" guards the object "to" belongs
	// to, the operation is safe as long as "from" is held while destroying
	// "to".
extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to);
	// Like mutex_switch_lock(), just for switching from a read-locked rw_lock.
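
// Sketch of the switch-lock pattern described above (hypothetical names;
// return codes ignored for brevity): "sListLock" guards a list of objects
// that each carry their own mutex. Because the switch releases the list lock
// and starts waiting for the object lock without a gap in between, the object
// cannot be destroyed in the meantime, provided destroyers hold "sListLock"
// while destroying an object.
//
//	mutex_lock(&sListLock);
//	struct object* object = find_object(id);
//	if (object == NULL) {
//		mutex_unlock(&sListLock);
//		return;
//	}
//	mutex_switch_lock(&sListLock, &object->lock);
//	// ... work with the object ...
//	mutex_unlock(&object->lock);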


// implementation private:

extern status_t _rw_lock_read_lock(rw_lock* lock);
extern status_t _rw_lock_read_lock_with_timeout(rw_lock* lock,
	uint32 timeoutFlags, bigtime_t timeout);
extern void _rw_lock_read_unlock(rw_lock* lock);
extern void _rw_lock_write_unlock(rw_lock* lock);

extern status_t _mutex_lock(mutex* lock, void* locker);
extern void _mutex_unlock(mutex* lock);
extern status_t _mutex_trylock(mutex* lock);
extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
	bigtime_t timeout);


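// The inline functions below implement the uncontended fast paths: they try
// to take or release the lock with a single atomic operation and only call
// the implementation-private slow-path functions above when there is (or may
// be) contention, or when KDEBUG/KDEBUG_RW_LOCK_DEBUG is enabled.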
static inline status_t
rw_lock_read_lock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
	return _rw_lock_read_lock(lock);
#else
	int32 oldCount = atomic_add(&lock->count, 1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		return _rw_lock_read_lock(lock);
	return B_OK;
#endif
}


static inline status_t
rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
#if KDEBUG_RW_LOCK_DEBUG
	return _rw_lock_read_lock_with_timeout(lock, timeoutFlags, timeout);
#else
	int32 oldCount = atomic_add(&lock->count, 1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		return _rw_lock_read_lock_with_timeout(lock, timeoutFlags, timeout);
	return B_OK;
#endif
}


static inline void
rw_lock_read_unlock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_read_unlock(lock);
#else
	int32 oldCount = atomic_add(&lock->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock(lock);
#endif
}


static inline void
rw_lock_write_unlock(rw_lock* lock)
{
	_rw_lock_write_unlock(lock);
}


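// In non-KDEBUG builds "count" encodes the mutex state: 0 means unlocked, -1
// locked without waiters, and values below -1 locked with waiters, so only
// the contended cases go through the slow path. In KDEBUG builds the slow
// path is always taken so that the holder can be tracked.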
static inline status_t
mutex_lock(mutex* lock)
{
#if KDEBUG
	return _mutex_lock(lock, NULL);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock(lock, NULL);
	return B_OK;
#endif
}


static inline status_t
mutex_trylock(mutex* lock)
{
#if KDEBUG
	return _mutex_trylock(lock);
#else
	if (atomic_test_and_set(&lock->count, -1, 0) != 0)
		return B_WOULD_BLOCK;
	return B_OK;
#endif
}


static inline status_t
mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
	return B_OK;
#endif
}


static inline void
mutex_unlock(mutex* lock)
{
#if !KDEBUG
	if (atomic_add(&lock->count, 1) < -1)
#endif
		_mutex_unlock(lock);
}


static inline void
recursive_lock_transfer_lock(recursive_lock* lock, thread_id thread)
{
	if (lock->recursion != 1)
		panic("invalid recursion level for lock transfer!");

#if KDEBUG
	mutex_transfer_lock(&lock->lock, thread);
#else
	lock->holder = thread;
#endif
}


extern void lock_debug_init();

#ifdef __cplusplus
}
#endif

#endif	/* _KERNEL_LOCK_H */