xref: /haiku/headers/private/kernel/lock.h (revision b2c7de82305294ddf7dd438eecf63f281ef33eba)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 #ifndef _KERNEL_LOCK_H
10 #define _KERNEL_LOCK_H
11 
12 
13 #include <OS.h>
14 #include <debug.h>
15 
// A lock that may be re-acquired by the thread that already holds it;
// each nested lock must be balanced by an unlock.
typedef struct recursive_lock {
	sem_id		sem;		// underlying semaphore threads block on
	thread_id	holder;		// thread currently holding the lock
	int			recursion;	// nesting depth of the current holder
} recursive_lock;
21 
// Benaphore: a semaphore fronted by an atomic counter so that the
// (comparatively expensive) semaphore is only touched under contention
// (see benaphore_lock()/benaphore_unlock() below).
typedef struct benaphore {
	sem_id	sem;	// semaphore used only when the fast path fails
	int32	count;	// atomically adjusted availability counter
} benaphore;
26 
// Note: this is currently a trivial r/w lock implementation
//	it will be replaced with something better later - this
//	or a similar API will be made publicly available at this point.
typedef struct rw_lock {
	sem_id		sem;		// semaphore readers/writers block on
	int32		count;		// reader accounting -- NOTE(review): exact
							// semantics live in the .c implementation; confirm there
	benaphore	writeLock;	// serializes writers against each other
} rw_lock;

// Upper bound on concurrent readers; presumably used by the writer path
// to reserve the whole reader range -- confirm against the implementation.
#define RW_MAX_READERS 1000000
37 
struct mutex_waiter;
	// opaque; waiter list nodes are managed by the implementation

// Lightweight mutex with an inline fast path (see mutex_lock() below).
// With KDEBUG every operation goes through the slow path so the holder
// can be tracked; otherwise an atomic counter keeps the uncontended
// case out of the kernel's slow path entirely.
typedef struct mutex {
	const char*				name;		// for debugging; cloned only with MUTEX_FLAG_CLONE_NAME
	struct mutex_waiter*	waiters;	// blocked threads, maintained by _mutex_lock()/_mutex_unlock()
#ifdef KDEBUG
	thread_id				holder;		// current owner, for debug assertions
#else
	int32					count;		// fast-path state: 0 = unlocked, -1 = locked,
										// < -1 = locked with waiters (see inline functions below)
#endif
	uint8					flags;		// MUTEX_FLAG_* bits
} mutex;

#define MUTEX_FLAG_CLONE_NAME	0x1
52 
53 
// Debug-only ownership assertions; currently compiled out (see XXX below),
// so both macros expand to nothing in all builds.
#if 0 && KDEBUG // XXX disable this for now, it causes problems when including thread.h here
#	include <thread.h>
#define ASSERT_LOCKED_RECURSIVE(r) { ASSERT(thread_get_current_thread_id() == (r)->holder); }
#define ASSERT_LOCKED_MUTEX(m) { ASSERT(thread_get_current_thread_id() == (m)->holder); }
#else
#define ASSERT_LOCKED_RECURSIVE(r)
#define ASSERT_LOCKED_MUTEX(m)
#endif
62 
63 
64 #ifdef __cplusplus
65 extern "C" {
66 #endif
67 
// recursive_lock: init before use, destroy when done; lock/unlock nest.
extern status_t	recursive_lock_init(recursive_lock *lock, const char *name);
extern void recursive_lock_destroy(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);
extern void recursive_lock_unlock(recursive_lock *lock);
extern int32 recursive_lock_get_recursion(recursive_lock *lock);
	// returns the current nesting depth

// benaphore setup/teardown; locking is done via the inline functions below.
extern status_t benaphore_init(benaphore *ben, const char *name);
extern void benaphore_destroy(benaphore *ben);
76 
77 
78 static inline status_t
79 benaphore_lock(benaphore *ben)
80 {
81 #ifdef KDEBUG
82 	return acquire_sem(ben->sem);
83 #else
84 	if (atomic_add(&ben->count, -1) <= 0)
85 		return acquire_sem(ben->sem);
86 
87 	return B_OK;
88 #endif
89 }
90 
91 
92 static inline status_t
93 benaphore_unlock(benaphore *ben)
94 {
95 #ifdef KDEBUG
96 	return release_sem(ben->sem);
97 #else
98 	if (atomic_add(&ben->count, 1) < 0)
99 		return release_sem(ben->sem);
100 
101 	return B_OK;
102 #endif
103 }
104 
// r/w lock: multiple concurrent readers or one writer.
extern status_t rw_lock_init(rw_lock *lock, const char *name);
extern void rw_lock_destroy(rw_lock *lock);
extern status_t rw_lock_read_lock(rw_lock *lock);
extern status_t rw_lock_read_unlock(rw_lock *lock);
extern status_t rw_lock_write_lock(rw_lock *lock);
extern status_t rw_lock_write_unlock(rw_lock *lock);

extern void mutex_init(mutex* lock, const char *name);
	// name is *not* cloned nor freed in mutex_destroy()
extern void mutex_init_etc(mutex* lock, const char *name, uint32 flags);
	// pass MUTEX_FLAG_CLONE_NAME to have the name copied (and freed on destroy)
extern void mutex_destroy(mutex* lock);

// implementation private: slow paths used by the inline functions below
extern status_t _mutex_lock(mutex* lock, bool threadsLocked);
extern void _mutex_unlock(mutex* lock);
extern status_t _mutex_trylock(mutex* lock);
121 
122 
123 static inline status_t
124 mutex_lock(mutex* lock)
125 {
126 #ifdef KDEBUG
127 	return _mutex_lock(lock, false);
128 #else
129 	if (atomic_add(&lock->count, -1) < 0)
130 		return _mutex_lock(lock, false);
131 	return B_OK;
132 #endif
133 }
134 
135 
136 static inline status_t
137 mutex_lock_threads_locked(mutex* lock)
138 {
139 #ifdef KDEBUG
140 	return _mutex_lock(lock, true);
141 #else
142 	if (atomic_add(&lock->count, -1) < 0)
143 		return _mutex_lock(lock, true);
144 	return B_OK;
145 #endif
146 }
147 
148 
149 static inline status_t
150 mutex_trylock(mutex* lock)
151 {
152 #ifdef KDEBUG
153 	return _mutex_trylock(lock);
154 #else
155 	if (atomic_test_and_set(&lock->count, -1, 0) != 0)
156 		return B_WOULD_BLOCK;
157 	return B_OK;
158 #endif
159 }
160 
161 
162 static inline void
163 mutex_unlock(mutex* lock)
164 {
165 #ifdef KDEBUG
166 	_mutex_unlock(lock);
167 #else
168 	if (atomic_add(&lock->count, 1) < -1)
169 		_mutex_unlock(lock);
170 #endif
171 }
172 
173 
// One-time setup of the lock debugging facilities -- presumably called
// once during kernel init; confirm against the implementation.
// Note: declared (void) -- in C an empty parameter list () leaves the
// parameters unspecified rather than declaring a proper prototype.
extern void lock_debug_init(void);
175 
176 #ifdef __cplusplus
177 }
178 #endif
179 
180 #endif	/* _KERNEL_LOCK_H */
181