xref: /haiku/headers/private/kernel/lock.h (revision b671e9bbdbd10268a042b4f4cc4317ccd03d105e)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 #ifndef _KERNEL_LOCK_H
10 #define _KERNEL_LOCK_H
11 
12 #include <OS.h>
13 #include <debug.h>
14 
15 
struct mutex_waiter;

// Lightweight kernel mutex. The layout differs with KDEBUG: debug builds
// track the owning thread, release builds use an atomic count for a
// lock-free fast path (see mutex_lock()/mutex_unlock() below).
typedef struct mutex {
	const char*				name;	// for debugging; cloned only with MUTEX_FLAG_CLONE_NAME
	struct mutex_waiter*	waiters;	// queue of blocked threads (managed in the slow path)
#if KDEBUG
	thread_id				holder;	// owning thread, -1 when unlocked (see MUTEX_INITIALIZER)
#else
	int32					count;	// 0 = unlocked; decremented by lockers (see mutex_lock())
#endif
	uint8					flags;	// MUTEX_FLAG_* below
} mutex;

// mutex_init_etc() flag: clone (and later free) the name string.
#define MUTEX_FLAG_CLONE_NAME	0x1
30 
31 
// Mutex that may be re-acquired by the thread already holding it.
// In KDEBUG builds the embedded mutex already records the holder, so a
// separate field is only needed otherwise (see RECURSIVE_LOCK_HOLDER).
typedef struct recursive_lock {
	mutex		lock;		// underlying mutex
#if !KDEBUG
	thread_id	holder;		// current owner, -1 when unlocked
#endif
	int			recursion;	// nesting depth of the current holder
} recursive_lock;
39 
40 
struct rw_lock_waiter;

// Reader/writer lock: multiple concurrent readers or one writer.
typedef struct rw_lock {
	const char*				name;	// for debugging; cloned only with RW_LOCK_FLAG_CLONE_NAME
	struct rw_lock_waiter*	waiters;	// queue of blocked threads
	thread_id				holder;	// write holder, -1 when none (see RW_LOCK_INITIALIZER)
	int32					reader_count;
	int32					writer_count;
	int32					owner_count;
		// NOTE(review): presumably the holder's combined read/write
		// nesting count — confirm against the lock implementation
	uint32					flags;	// RW_LOCK_FLAG_* below
} rw_lock;

// rw_lock_init_etc() flag: clone (and later free) the name string.
#define RW_LOCK_FLAG_CLONE_NAME	0x1
54 
55 
56 #if KDEBUG
57 #	define KDEBUG_RW_LOCK_DEBUG 0
58 		// Define to 1 if you want to use ASSERT_READ_LOCKED_RW_LOCK().
59 		// The rw_lock will just behave like a recursive locker then.
60 #	define ASSERT_LOCKED_RECURSIVE(r) \
61 		{ ASSERT(find_thread(NULL) == (r)->lock.holder); }
62 #	define ASSERT_LOCKED_MUTEX(m) { ASSERT(find_thread(NULL) == (m)->holder); }
63 #	define ASSERT_WRITE_LOCKED_RW_LOCK(l) \
64 		{ ASSERT(find_thread(NULL) == (l)->holder); }
65 #	if KDEBUG_RW_LOCK_DEBUG
66 #		define ASSERT_READ_LOCKED_RW_LOCK(l) \
67 			{ ASSERT(find_thread(NULL) == (l)->holder); }
68 #	else
69 #		define ASSERT_READ_LOCKED_RW_LOCK(l) do {} while (false)
70 #	endif
71 #else
72 #	define ASSERT_LOCKED_RECURSIVE(r)		do {} while (false)
73 #	define ASSERT_LOCKED_MUTEX(m)			do {} while (false)
74 #	define ASSERT_WRITE_LOCKED_RW_LOCK(m)	do {} while (false)
75 #	define ASSERT_READ_LOCKED_RW_LOCK(l)	do {} while (false)
76 #endif
77 
78 
// static initializers
// The aggregate layouts must match the KDEBUG-conditional struct
// definitions above: in debug builds the third mutex field is the holder
// (-1 = no thread), otherwise it is the atomic count (0 = unlocked).
#if KDEBUG
#	define MUTEX_INITIALIZER(name)			{ name, NULL, -1, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), 0 }
#else
#	define MUTEX_INITIALIZER(name)			{ name, NULL, 0, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), -1, 0 }
#endif

// name, waiters, holder (-1 = none), reader/writer/owner counts, flags
#define RW_LOCK_INITIALIZER(name)			{ name, NULL, -1, 0, 0, 0 }
89 
90 
// Thread currently holding the recursive lock (-1 if unlocked). In KDEBUG
// builds the embedded mutex tracks the holder; otherwise the recursive_lock
// keeps its own holder field.
#if KDEBUG
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->lock.holder)
#else
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->holder)
#endif
96 
97 
98 #ifdef __cplusplus
99 extern "C" {
100 #endif
101 
extern void	recursive_lock_init(recursive_lock *lock, const char *name);
	// name is *not* cloned nor freed in recursive_lock_destroy()
extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
	uint32 flags);
	// flags: MUTEX_FLAG_CLONE_NAME
extern void recursive_lock_destroy(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);
extern status_t recursive_lock_trylock(recursive_lock *lock);
extern void recursive_lock_unlock(recursive_lock *lock);
extern int32 recursive_lock_get_recursion(recursive_lock *lock);
	// nesting depth of the current holder

extern void rw_lock_init(rw_lock* lock, const char* name);
	// name is *not* cloned nor freed in rw_lock_destroy()
extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
	// flags: RW_LOCK_FLAG_CLONE_NAME
extern void rw_lock_destroy(rw_lock* lock);
extern status_t rw_lock_read_lock(rw_lock* lock);
extern status_t rw_lock_read_unlock(rw_lock* lock);
extern status_t rw_lock_write_lock(rw_lock* lock);
extern status_t rw_lock_write_unlock(rw_lock* lock);

extern void mutex_init(mutex* lock, const char* name);
	// name is *not* cloned nor freed in mutex_destroy()
extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
	// flags: MUTEX_FLAG_CLONE_NAME
extern void mutex_destroy(mutex* lock);
extern status_t mutex_switch_lock(mutex* from, mutex* to);
	// Unlocks "from" and locks "to" such that unlocking and starting to
	// wait for the lock happen atomically. I.e. if "from" guards the
	// object "to" belongs to, the operation is safe as long as "from" is
	// held while destroying "to".

// implementation private:
extern status_t _mutex_lock(mutex* lock, bool threadsLocked);
extern void _mutex_unlock(mutex* lock, bool threadsLocked);
extern status_t _mutex_trylock(mutex* lock);
135 
136 
137 static inline status_t
138 mutex_lock(mutex* lock)
139 {
140 #if KDEBUG
141 	return _mutex_lock(lock, false);
142 #else
143 	if (atomic_add(&lock->count, -1) < 0)
144 		return _mutex_lock(lock, false);
145 	return B_OK;
146 #endif
147 }
148 
149 
150 static inline status_t
151 mutex_lock_threads_locked(mutex* lock)
152 {
153 #if KDEBUG
154 	return _mutex_lock(lock, true);
155 #else
156 	if (atomic_add(&lock->count, -1) < 0)
157 		return _mutex_lock(lock, true);
158 	return B_OK;
159 #endif
160 }
161 
162 
163 static inline status_t
164 mutex_trylock(mutex* lock)
165 {
166 #if KDEBUG
167 	return _mutex_trylock(lock);
168 #else
169 	if (atomic_test_and_set(&lock->count, -1, 0) != 0)
170 		return B_WOULD_BLOCK;
171 	return B_OK;
172 #endif
173 }
174 
175 
176 static inline void
177 mutex_unlock(mutex* lock)
178 {
179 #if !KDEBUG
180 	if (atomic_add(&lock->count, 1) < -1)
181 #endif
182 		_mutex_unlock(lock, false);
183 }
184 
185 
// Hand ownership of a held mutex over to another thread without
// unlocking it. Only the debug holder bookkeeping is affected; in
// non-KDEBUG builds this is a no-op since the count already reflects
// the locked state regardless of which thread holds it.
static inline void
mutex_transfer_lock(mutex* lock, thread_id thread)
{
#if KDEBUG
	lock->holder = thread;
#endif
}
193 
194 
// NOTE(review): presumably one-time setup of the lock debugging
// facilities during kernel initialization — confirm against the
// implementation.
extern void lock_debug_init();
196 
197 #ifdef __cplusplus
198 }
199 #endif
200 
201 #endif	/* _KERNEL_LOCK_H */
202