/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_LOCK_H
#define _KERNEL_LOCK_H


#include <OS.h>

#include <arch/atomic.h>
#include <debug.h>


struct mutex_waiter;

typedef struct mutex {
	const char*				name;
	struct mutex_waiter*	waiters;
	spinlock				lock;
#if KDEBUG
	thread_id				holder;
#else
	int32					count;
	uint16					ignore_unlock_count;
#endif
	uint8					flags;
} mutex;

#define MUTEX_FLAG_CLONE_NAME	0x1
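
// A minimal usage sketch (illustrative only; "sCounterLock" and "sCounter"
// are hypothetical names): initialize a mutex statically and pair every
// mutex_lock() with a mutex_unlock().
//
//	static mutex sCounterLock = MUTEX_INITIALIZER("counter lock");
//	static int32 sCounter;
//
//	static void
//	increment_counter()
//	{
//		mutex_lock(&sCounterLock);
//		sCounter++;
//		mutex_unlock(&sCounterLock);
//	}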


typedef struct recursive_lock {
	mutex		lock;
#if !KDEBUG
	thread_id	holder;
#endif
	int			recursion;
} recursive_lock;
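
// A recursive_lock can be re-acquired by the thread that already holds it;
// "recursion" counts the nesting depth. The lock is only released once
// recursive_lock_unlock() has been called as many times as
// recursive_lock_lock().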


struct rw_lock_waiter;

typedef struct rw_lock {
	const char*				name;
	struct rw_lock_waiter*	waiters;
	spinlock				lock;
	thread_id				holder;
	int32					count;
	int32					owner_count;
	int16					active_readers;
								// Only > 0 while a writer is waiting: number
								// of active readers when the first waiting
								// writer started waiting.
	int16					pending_readers;
								// Number of readers that have already
								// incremented "count", but have not yet started
								// to wait at the time the last writer unlocked.
	uint32					flags;
} rw_lock;

#define RW_LOCK_WRITER_COUNT_BASE	0x10000
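	// "count" is incremented by 1 for each reader and (in the out-of-line
	// implementation) by RW_LOCK_WRITER_COUNT_BASE for each writer. A reader
	// whose atomic_add() returns a value >= RW_LOCK_WRITER_COUNT_BASE thus
	// knows a writer is involved and must take the slow path (see
	// rw_lock_read_lock() below).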

#define RW_LOCK_FLAG_CLONE_NAME	0x1


#if KDEBUG
#	define KDEBUG_RW_LOCK_DEBUG 0
		// Define to 1 if you want to use ASSERT_READ_LOCKED_RW_LOCK().
		// The rw_lock will then simply behave like a recursive lock.
#	define ASSERT_LOCKED_RECURSIVE(r) \
		{ ASSERT(find_thread(NULL) == (r)->lock.holder); }
#	define ASSERT_LOCKED_MUTEX(m) { ASSERT(find_thread(NULL) == (m)->holder); }
#	define ASSERT_WRITE_LOCKED_RW_LOCK(l) \
		{ ASSERT(find_thread(NULL) == (l)->holder); }
#	if KDEBUG_RW_LOCK_DEBUG
#		define ASSERT_READ_LOCKED_RW_LOCK(l) \
			{ ASSERT(find_thread(NULL) == (l)->holder); }
#	else
#		define ASSERT_READ_LOCKED_RW_LOCK(l) do {} while (false)
#	endif
#else
#	define ASSERT_LOCKED_RECURSIVE(r)		do {} while (false)
#	define ASSERT_LOCKED_MUTEX(m)			do {} while (false)
#	define ASSERT_WRITE_LOCKED_RW_LOCK(m)	do {} while (false)
#	define ASSERT_READ_LOCKED_RW_LOCK(l)	do {} while (false)
#endif
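
// A typical use of the assertion macros (sketch; "remove_item" and
// "sItemLock" are hypothetical): document and check the locking precondition
// of a function that requires its caller to hold a lock. In non-KDEBUG builds
// these macros compile to nothing.
//
//	static void
//	remove_item(struct item* item)
//	{
//		ASSERT_LOCKED_MUTEX(&sItemLock);
//		...
//	}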


// static initializers
#if KDEBUG
#	define MUTEX_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, -1, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), 0 }
#else
#	define MUTEX_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, 0, 0, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), -1, 0 }
#endif

#define RW_LOCK_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, -1, 0, 0, 0 }


#if KDEBUG
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->lock.holder)
#else
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->holder)
#endif


#ifdef __cplusplus
extern "C" {
#endif

extern void	recursive_lock_init(recursive_lock *lock, const char *name);
	// name is *neither* cloned nor freed in recursive_lock_destroy()
extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
	uint32 flags);
extern void recursive_lock_destroy(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);
extern status_t recursive_lock_trylock(recursive_lock *lock);
extern void recursive_lock_unlock(recursive_lock *lock);
extern int32 recursive_lock_get_recursion(recursive_lock *lock);

extern void rw_lock_init(rw_lock* lock, const char* name);
	// name is *neither* cloned nor freed in rw_lock_destroy()
extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
extern void rw_lock_destroy(rw_lock* lock);
extern status_t rw_lock_write_lock(rw_lock* lock);

extern void mutex_init(mutex* lock, const char* name);
	// name is *neither* cloned nor freed in mutex_destroy()
extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
extern void mutex_destroy(mutex* lock);
extern status_t mutex_switch_lock(mutex* from, mutex* to);
	// Unlocks "from" and locks "to" such that unlocking and starting to wait
	// for the lock happen atomically. I.e. if "from" guards the object "to"
	// belongs to, the operation is safe as long as "from" is held while
	// destroying "to".
extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to);
	// Like mutex_switch_lock(), just for switching from a read-locked
	// rw_lock.
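
// Typical mutex_switch_lock() pattern (sketch; "sObjectListLock",
// "lookup_object()", and the object's "lock" field are hypothetical): look up
// an object under a list lock, then atomically trade the list lock for the
// object's own lock, so the object cannot be destroyed in between.
//
//	mutex_lock(&sObjectListLock);
//	Object* object = lookup_object(id);
//	status_t status = mutex_switch_lock(&sObjectListLock, &object->lock);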


// implementation private:

extern status_t _rw_lock_read_lock(rw_lock* lock);
extern status_t _rw_lock_read_lock_with_timeout(rw_lock* lock,
	uint32 timeoutFlags, bigtime_t timeout);
extern void _rw_lock_read_unlock(rw_lock* lock);
extern void _rw_lock_write_unlock(rw_lock* lock);

extern status_t _mutex_lock(mutex* lock, void* locker);
extern void _mutex_unlock(mutex* lock);
extern status_t _mutex_trylock(mutex* lock);
extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
	bigtime_t timeout);


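// Fast path for readers: atomically increment "count"; if the old value is
// below RW_LOCK_WRITER_COUNT_BASE, no writer is involved and the read lock
// has been acquired without taking the spinlock. Otherwise fall back to the
// out-of-line slow path, which may have to wait.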
static inline status_t
rw_lock_read_lock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
	return rw_lock_write_lock(lock);
#else
	int32 oldCount = atomic_add(&lock->count, 1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		return _rw_lock_read_lock(lock);
	return B_OK;
#endif
}


static inline status_t
rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
#if KDEBUG_RW_LOCK_DEBUG
	// This debug mode treats read locks as write locks; there is no timed
	// write lock here, so the timeout cannot be honored and we fall back to
	// a blocking write lock.
	return rw_lock_write_lock(lock);
#else
	int32 oldCount = atomic_add(&lock->count, 1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		return _rw_lock_read_lock_with_timeout(lock, timeoutFlags, timeout);
	return B_OK;
#endif
}


static inline void
rw_lock_read_unlock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
	// rw_lock_write_unlock() is only defined further below, so call the
	// underlying implementation directly.
	_rw_lock_write_unlock(lock);
#else
	int32 oldCount = atomic_add(&lock->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock(lock);
#endif
}


static inline void
rw_lock_write_unlock(rw_lock* lock)
{
	_rw_lock_write_unlock(lock);
}


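// In non-KDEBUG builds "count" is 0 while the mutex is free and negative
// while it is held (one less per waiter). The fast path decrements it
// atomically; only if the mutex was already held is the out-of-line slow path
// needed. In KDEBUG builds every operation takes the slow path so the holder
// can be tracked.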
static inline status_t
mutex_lock(mutex* lock)
{
#if KDEBUG
	return _mutex_lock(lock, NULL);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock(lock, NULL);
	return B_OK;
#endif
}


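// Try to acquire the mutex without blocking: succeeds only if "count" can be
// switched atomically from 0 (free) to -1 (held); otherwise B_WOULD_BLOCK is
// returned.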
static inline status_t
mutex_trylock(mutex* lock)
{
#if KDEBUG
	return _mutex_trylock(lock);
#else
	if (atomic_test_and_set(&lock->count, -1, 0) != 0)
		return B_WOULD_BLOCK;
	return B_OK;
#endif
}


static inline status_t
mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
	return B_OK;
#endif
}


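// Unlock fast path: increment "count"; if the old value was below -1, other
// threads are waiting and the slow path must wake one of them up.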
static inline void
mutex_unlock(mutex* lock)
{
#if !KDEBUG
	if (atomic_add(&lock->count, 1) < -1)
#endif
		_mutex_unlock(lock);
}


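// Hand over a held mutex to another thread. Only KDEBUG builds track the
// holder, so in non-KDEBUG builds this is a no-op.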
static inline void
mutex_transfer_lock(mutex* lock, thread_id thread)
{
#if KDEBUG
	lock->holder = thread;
#endif
}


static inline void
recursive_lock_transfer_lock(recursive_lock* lock, thread_id thread)
{
	if (lock->recursion != 1)
		panic("invalid recursion level for lock transfer!");

#if KDEBUG
	lock->lock.holder = thread;
#else
	lock->holder = thread;
#endif
}


extern void lock_debug_init();

#ifdef __cplusplus
}
#endif

#endif	/* _KERNEL_LOCK_H */