xref: /haiku/headers/private/kernel/lock.h (revision cb29eafe2586fdb2d7685afa69fdab5d88a8b576)
/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_LOCK_H
#define _KERNEL_LOCK_H


#include <OS.h>

#include <arch/atomic.h>
#include <debug.h>


struct mutex_waiter;

typedef struct mutex {
	const char*				name;
	struct mutex_waiter*	waiters;
	spinlock				lock;
#if KDEBUG
	thread_id				holder;
	uint16					_unused;
#else
	int32					count;
	uint16					ignore_unlock_count;
#endif
	uint8					flags;
} mutex;

#define MUTEX_FLAG_CLONE_NAME	0x1
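
// Usage sketch (illustrative only; "sExampleLock", example_init() and
// example_use() are hypothetical, the mutex API is declared below):
//
//	static mutex sExampleLock;
//
//	void example_init(void)
//	{
//		mutex_init(&sExampleLock, "example lock");
//	}
//
//	void example_use(void)
//	{
//		mutex_lock(&sExampleLock);
//		// ... critical section ...
//		mutex_unlock(&sExampleLock);
//	}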


typedef struct recursive_lock {
	mutex		lock;
#if !KDEBUG
	thread_id	holder;
#else
	int32		_unused;
#endif
	int			recursion;
} recursive_lock;
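
// A recursive_lock may be re-acquired by the thread already holding it; each
// recursive_lock_lock() must be balanced by one recursive_lock_unlock().
// Sketch ("sGraphLock", update_node() and update_children() are
// hypothetical):
//
//	static recursive_lock sGraphLock = RECURSIVE_LOCK_INITIALIZER("graph");
//
//	void update_node(void)
//	{
//		recursive_lock_lock(&sGraphLock);		// recursion: 0 -> 1
//		update_children();						// may lock/unlock recursively
//		recursive_lock_unlock(&sGraphLock);		// recursion: 1 -> 0
//	}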


struct rw_lock_waiter;

typedef struct rw_lock {
	const char*				name;
	struct rw_lock_waiter*	waiters;
	spinlock				lock;
	thread_id				holder;
	int32					count;
	int32					owner_count;
	int16					active_readers;
								// Only > 0 while a writer is waiting: number
								// of active readers when the first waiting
								// writer started waiting.
	int16					pending_readers;
								// Number of readers that have already
								// incremented "count", but have not yet started
								// to wait at the time the last writer unlocked.
	uint32					flags;
} rw_lock;

#define RW_LOCK_WRITER_COUNT_BASE	0x10000

#define RW_LOCK_FLAG_CLONE_NAME	0x1
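
// "count" encodes readers and writers in a single atomically updated value:
// each reader adds 1, each writer adds RW_LOCK_WRITER_COUNT_BASE. E.g. two
// active readers plus one waiting writer yield count == 0x10002, so a reader
// seeing count >= RW_LOCK_WRITER_COUNT_BASE knows it must take the slow path.
//
// Usage sketch ("sCacheLock" is a hypothetical name):
//
//	static rw_lock sCacheLock = RW_LOCK_INITIALIZER("cache lock");
//
//	rw_lock_read_lock(&sCacheLock);
//	// ... multiple readers may execute here concurrently ...
//	rw_lock_read_unlock(&sCacheLock);
//
//	rw_lock_write_lock(&sCacheLock);
//	// ... exclusive access ...
//	rw_lock_write_unlock(&sCacheLock);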


#if KDEBUG
#	define KDEBUG_RW_LOCK_DEBUG 0
		// Define to 1 if you want to use ASSERT_READ_LOCKED_RW_LOCK().
		// The rw_lock will then just behave like a recursive lock.
#	define ASSERT_LOCKED_RECURSIVE(r) \
		{ ASSERT(find_thread(NULL) == (r)->lock.holder); }
#	define ASSERT_LOCKED_MUTEX(m) { ASSERT(find_thread(NULL) == (m)->holder); }
#	define ASSERT_WRITE_LOCKED_RW_LOCK(l) \
		{ ASSERT(find_thread(NULL) == (l)->holder); }
#	if KDEBUG_RW_LOCK_DEBUG
#		define ASSERT_READ_LOCKED_RW_LOCK(l) \
			{ ASSERT(find_thread(NULL) == (l)->holder); }
#	else
#		define ASSERT_READ_LOCKED_RW_LOCK(l) do {} while (false)
#	endif
#else
#	define ASSERT_LOCKED_RECURSIVE(r)		do {} while (false)
#	define ASSERT_LOCKED_MUTEX(m)			do {} while (false)
#	define ASSERT_WRITE_LOCKED_RW_LOCK(l)	do {} while (false)
#	define ASSERT_READ_LOCKED_RW_LOCK(l)	do {} while (false)
#endif
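
// Sketch: functions that require the caller to hold a lock can check that
// precondition with these macros (they compile away when KDEBUG is 0).
// "sExampleLock", struct item and remove_item() are hypothetical:
//
//	void remove_item(struct item* item)
//	{
//		ASSERT_LOCKED_MUTEX(&sExampleLock);
//		// ... safe to modify the structure guarded by sExampleLock ...
//	}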


// static initializers
#if KDEBUG
#	define MUTEX_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, -1, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), 0 }
#else
#	define MUTEX_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, 0, 0, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), -1, 0 }
#endif

#define RW_LOCK_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, -1, 0, 0, 0, 0, 0 }
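
// Sketch: the initializers allow file-scope locks without a runtime init
// call ("sLogLock" is a hypothetical name):
//
//	static mutex sLogLock = MUTEX_INITIALIZER("log lock");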


#if KDEBUG
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->lock.holder)
#else
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->holder)
#endif
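
// RECURSIVE_LOCK_HOLDER() yields the holding thread's id independently of
// the KDEBUG configuration, e.g. (hypothetical lock name):
//
//	if (RECURSIVE_LOCK_HOLDER(&sGraphLock) == find_thread(NULL))
//		;	// the current thread already holds the lock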


#ifdef __cplusplus
extern "C" {
#endif

extern void recursive_lock_init(recursive_lock *lock, const char *name);
	// name is neither cloned nor freed in recursive_lock_destroy()
extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
	uint32 flags);
extern void recursive_lock_destroy(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);
extern status_t recursive_lock_trylock(recursive_lock *lock);
extern void recursive_lock_unlock(recursive_lock *lock);
extern int32 recursive_lock_get_recursion(recursive_lock *lock);

extern void rw_lock_init(rw_lock* lock, const char* name);
	// name is neither cloned nor freed in rw_lock_destroy()
extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
extern void rw_lock_destroy(rw_lock* lock);
extern status_t rw_lock_write_lock(rw_lock* lock);

extern void mutex_init(mutex* lock, const char* name);
	// name is neither cloned nor freed in mutex_destroy()
extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
extern void mutex_destroy(mutex* lock);
extern void mutex_transfer_lock(mutex* lock, thread_id thread);
extern status_t mutex_switch_lock(mutex* from, mutex* to);
	// Unlocks "from" and locks "to" such that unlocking and starting to wait
	// for the lock happen atomically. I.e. if "from" guards the object "to"
	// belongs to, the operation is safe as long as "from" is held while
	// destroying "to".
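
// Sketch of the mutex_switch_lock() pattern: "from" protects a container,
// "to" lives in one of its elements. Everything here except
// mutex_switch_lock() itself is hypothetical:
//
//	mutex_lock(&sTableLock);					// "from"
//	struct entry* entry = lookup_entry(key);
//	// Atomically drop the table lock and wait for the entry's lock; the
//	// entry cannot be destroyed in between, since destroying it requires
//	// sTableLock.
//	status_t status = mutex_switch_lock(&sTableLock, &entry->lock);
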
extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to);
	// Like mutex_switch_lock(), just for switching from a read-locked
	// rw_lock.


// implementation private:

extern status_t _rw_lock_read_lock(rw_lock* lock);
extern status_t _rw_lock_read_lock_with_timeout(rw_lock* lock,
	uint32 timeoutFlags, bigtime_t timeout);
extern void _rw_lock_read_unlock(rw_lock* lock);
extern void _rw_lock_write_unlock(rw_lock* lock);

extern status_t _mutex_lock(mutex* lock, void* locker);
extern void _mutex_unlock(mutex* lock);
extern status_t _mutex_trylock(mutex* lock);
extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
	bigtime_t timeout);


static inline status_t
rw_lock_read_lock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
	return rw_lock_write_lock(lock);
#else
	// Fast path: as long as "count" stays below RW_LOCK_WRITER_COUNT_BASE,
	// no writer is involved; otherwise fall back to the slow path.
	int32 oldCount = atomic_add(&lock->count, 1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		return _rw_lock_read_lock(lock);
	return B_OK;
#endif
}


static inline status_t
rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
#if KDEBUG_RW_LOCK_DEBUG
	// No timeout-aware write-lock variant is declared in this header, so
	// take the generic slow path (this branch is compiled out by default,
	// as KDEBUG_RW_LOCK_DEBUG is 0).
	return _rw_lock_read_lock_with_timeout(lock, timeoutFlags, timeout);
#else
	int32 oldCount = atomic_add(&lock->count, 1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		return _rw_lock_read_lock_with_timeout(lock, timeoutFlags, timeout);
	return B_OK;
#endif
}


static inline void
rw_lock_read_unlock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
	// rw_lock_write_unlock() is only defined further below; call the
	// underlying function directly.
	_rw_lock_write_unlock(lock);
#else
	// Fast path: if no writer was involved, decrementing "count" suffices.
	int32 oldCount = atomic_add(&lock->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock(lock);
#endif
}


static inline void
rw_lock_write_unlock(rw_lock* lock)
{
	_rw_lock_write_unlock(lock);
}


static inline status_t
mutex_lock(mutex* lock)
{
#if KDEBUG
	return _mutex_lock(lock, NULL);
#else
	// Fast path: a negative old count means the lock was already held;
	// block in the slow path.
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock(lock, NULL);
	return B_OK;
#endif
}


static inline status_t
mutex_trylock(mutex* lock)
{
#if KDEBUG
	return _mutex_trylock(lock);
#else
	// Only succeed if the lock was free (count == 0); never block.
	if (atomic_test_and_set(&lock->count, -1, 0) != 0)
		return B_WOULD_BLOCK;
	return B_OK;
#endif
}


static inline status_t
mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
	return B_OK;
#endif
}


static inline void
mutex_unlock(mutex* lock)
{
#if !KDEBUG
	// An old count below -1 means other threads are waiting; wake one in
	// the slow path.
	if (atomic_add(&lock->count, 1) < -1)
#endif
		_mutex_unlock(lock);
}


static inline void
recursive_lock_transfer_lock(recursive_lock* lock, thread_id thread)
{
	if (lock->recursion != 1)
		panic("invalid recursion level for lock transfer!");

#if KDEBUG
	mutex_transfer_lock(&lock->lock, thread);
#else
	lock->holder = thread;
#endif
}
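
// Sketch: a lock transfer hands a held lock over to another thread, e.g. a
// freshly spawned worker that finishes the operation. worker(),
// "workerThread" and "sGraphLock" are hypothetical; the lock must be held
// exactly once (recursion == 1):
//
//	recursive_lock_lock(&sGraphLock);
//	thread_id workerThread = spawn_kernel_thread(worker, "graph worker",
//		B_NORMAL_PRIORITY, NULL);
//	recursive_lock_transfer_lock(&sGraphLock, workerThread);
//	resume_thread(workerThread);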


extern void lock_debug_init();

#ifdef __cplusplus
}
#endif

#endif	/* _KERNEL_LOCK_H */