xref: /haiku/headers/private/kernel/lock.h (revision c9060eb991e10e477ece52478d6743fc7691c143)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 #ifndef _KERNEL_LOCK_H
10 #define _KERNEL_LOCK_H
11 
12 #include <OS.h>
13 #include <debug.h>
14 
15 
16 struct mutex_waiter;
17 
// Mutual exclusion lock. KDEBUG builds record the holding thread so
// ownership can be asserted; release builds instead keep an atomic count
// driven by the inline fast paths below (0 = unlocked, < 0 = held, with
// values below -1 indicating waiting threads).
typedef struct mutex {
	const char*				name;		// for debugging; see MUTEX_FLAG_CLONE_NAME
	struct mutex_waiter*	waiters;	// list of threads blocked on this lock
#ifdef KDEBUG
	thread_id				holder;		// owning thread, -1 when unlocked
#else
	int32					count;		// atomic lock state (see mutex_lock() etc.)
#endif
	uint8					flags;
} mutex;

// NOTE(review): presumably makes mutex_init_etc() clone the name so that
// mutex_destroy() frees it (cf. the "name is *not* cloned" notes below) —
// confirm against the implementation in lock.cpp.
#define MUTEX_FLAG_CLONE_NAME	0x1
30 
31 
// A mutex the owning thread may lock repeatedly; recursion tracks the
// nesting depth. The separate holder field exists only in non-KDEBUG
// builds — in KDEBUG builds the owner is presumably read from
// lock.holder instead (confirm in lock.cpp).
typedef struct recursive_lock {
	mutex		lock;
#ifndef KDEBUG
	thread_id	holder;		// owning thread, -1 when unlocked (see initializer)
#endif
	int			recursion;	// lock nesting depth of the current owner
} recursive_lock;
39 
40 
41 struct rw_lock_waiter;
42 
// Multiple-reader/single-writer lock.
// NOTE(review): the exact semantics of the three counters are not visible
// in this header — see the implementation before relying on them.
typedef struct rw_lock {
	const char*				name;		// for debugging; see RW_LOCK_FLAG_CLONE_NAME
	struct rw_lock_waiter*	waiters;	// list of threads blocked on this lock
	thread_id				holder;		// -1 when unowned (see RW_LOCK_INITIALIZER)
	int32					reader_count;
	int32					writer_count;
	int32					owner_count;
	uint32					flags;
} rw_lock;

// NOTE(review): presumably makes rw_lock_init_etc() clone the name so that
// rw_lock_destroy() frees it — confirm against the implementation.
#define RW_LOCK_FLAG_CLONE_NAME	0x1
54 
55 
// Held-lock assertions. The real checks are disabled ("#if 0 &&") because
// including <thread.h> from this header causes problems, so both macros
// currently expand to nothing in every build.
// NOTE(review): the disabled ASSERT_LOCKED_RECURSIVE reads (r)->holder,
// but recursive_lock only has a holder field in non-KDEBUG builds (see
// above) — this would need fixing before the checks can be re-enabled.
#if 0 && KDEBUG // XXX disable this for now, it causes problems when including thread.h here
#	include <thread.h>
#define ASSERT_LOCKED_RECURSIVE(r) { ASSERT(thread_get_current_thread_id() == (r)->holder); }
#define ASSERT_LOCKED_MUTEX(m) { ASSERT(thread_get_current_thread_id() == (m)->holder); }
#else
#define ASSERT_LOCKED_RECURSIVE(r)
#define ASSERT_LOCKED_MUTEX(m)
#endif
64 
65 
// static initializers
//
// The value lists must match the field order of the structs above;
// trailing members omitted from an initializer are implicitly zeroed.
#ifdef KDEBUG
	// mutex: { name, waiters, holder = -1 (unlocked), flags = 0 }
#	define MUTEX_INITIALIZER(name)			{ name, NULL, -1, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), 0 }
#else
	// mutex: { name, waiters, count = 0 (unlocked), flags = 0 }
#	define MUTEX_INITIALIZER(name)			{ name, NULL, 0, 0 }
	// recursive_lock: { lock, holder = -1, recursion = 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), -1, 0 }
#endif

// rw_lock: { name, waiters, holder = -1, reader/writer/owner counts = 0 };
// the trailing flags member is zero-initialized implicitly.
#define RW_LOCK_INITIALIZER(name)			{ name, NULL, -1, 0, 0, 0 }
76 
77 
78 #ifdef __cplusplus
79 extern "C" {
80 #endif
81 
// Recursive locks — a mutex the holder may re-acquire (see recursive_lock).
extern void	recursive_lock_init(recursive_lock *lock, const char *name);
	// name is *not* cloned nor freed in recursive_lock_destroy()
extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
	uint32 flags);
extern void recursive_lock_destroy(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);
extern status_t recursive_lock_trylock(recursive_lock *lock);
extern void recursive_lock_unlock(recursive_lock *lock);
extern int32 recursive_lock_get_recursion(recursive_lock *lock);

// Reader/writer locks.
extern void rw_lock_init(rw_lock* lock, const char* name);
	// name is *not* cloned nor freed in rw_lock_destroy()
extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
extern void rw_lock_destroy(rw_lock* lock);
extern status_t rw_lock_read_lock(rw_lock* lock);
extern status_t rw_lock_read_unlock(rw_lock* lock);
extern status_t rw_lock_write_lock(rw_lock* lock);
extern status_t rw_lock_write_unlock(rw_lock* lock);

// Mutexes — locking/unlocking goes through the inline functions below.
extern void mutex_init(mutex* lock, const char* name);
	// name is *not* cloned nor freed in mutex_destroy()
extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
extern void mutex_destroy(mutex* lock);

// implementation private: slow paths invoked by the inline wrappers below
extern status_t _mutex_lock(mutex* lock, bool threadsLocked);
extern void _mutex_unlock(mutex* lock);
extern status_t _mutex_trylock(mutex* lock);
110 
111 
112 static inline status_t
113 mutex_lock(mutex* lock)
114 {
115 #ifdef KDEBUG
116 	return _mutex_lock(lock, false);
117 #else
118 	if (atomic_add(&lock->count, -1) < 0)
119 		return _mutex_lock(lock, false);
120 	return B_OK;
121 #endif
122 }
123 
124 
125 static inline status_t
126 mutex_lock_threads_locked(mutex* lock)
127 {
128 #ifdef KDEBUG
129 	return _mutex_lock(lock, true);
130 #else
131 	if (atomic_add(&lock->count, -1) < 0)
132 		return _mutex_lock(lock, true);
133 	return B_OK;
134 #endif
135 }
136 
137 
138 static inline status_t
139 mutex_trylock(mutex* lock)
140 {
141 #ifdef KDEBUG
142 	return _mutex_trylock(lock);
143 #else
144 	if (atomic_test_and_set(&lock->count, -1, 0) != 0)
145 		return B_WOULD_BLOCK;
146 	return B_OK;
147 #endif
148 }
149 
150 
151 static inline void
152 mutex_unlock(mutex* lock)
153 {
154 #ifdef KDEBUG
155 	_mutex_unlock(lock);
156 #else
157 	if (atomic_add(&lock->count, 1) < -1)
158 		_mutex_unlock(lock);
159 #endif
160 }
161 
162 
// Transfer ownership of the mutex to another thread without unlocking it.
// NOTE(review): presumably the caller must currently hold the lock —
// confirm against callers. Only KDEBUG builds track the holder, so in
// release builds this is intentionally a no-op.
static inline void
mutex_transfer_lock(mutex* lock, thread_id thread)
{
#ifdef KDEBUG
	lock->holder = thread;
#endif
}
170 
171 
// Initialize the lock debugging facilities.
// (void) makes this a real prototype when compiled as C — an empty
// parameter list would leave the arguments unchecked (pre-C23 semantics);
// in C++ the meaning is unchanged.
extern void lock_debug_init(void);
173 
174 #ifdef __cplusplus
175 }
176 #endif
177 
178 #endif	/* _KERNEL_LOCK_H */
179