/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_LOCK_H
#define _KERNEL_LOCK_H


#include <OS.h>

#include <arch/atomic.h>
#include <debug.h>


struct mutex_waiter;

// Kernel mutex. In KDEBUG builds the owning thread is tracked (so ownership
// can be asserted); in non-debug builds only a fast-path counter is kept and
// the slow path is entered when it indicates contention.
typedef struct mutex {
	const char*				name;
	struct mutex_waiter*	waiters;
	spinlock				lock;
#if KDEBUG
	thread_id				holder;
#else
	int32					count;
#endif
	uint8					flags;
} mutex;

#define MUTEX_FLAG_CLONE_NAME	0x1


// Recursive lock built on top of a mutex: the holding thread may re-lock it;
// "recursion" counts the nesting depth. The holder is tracked here only in
// non-KDEBUG builds (in KDEBUG builds the embedded mutex already tracks it).
typedef struct recursive_lock {
	mutex		lock;
#if !KDEBUG
	thread_id	holder;
#else
	int32		_unused;
#endif
	int			recursion;
} recursive_lock;


struct rw_lock_waiter;

// Reader/writer lock. "count" encodes both readers and writers: each reader
// adds 1, each writer adds RW_LOCK_WRITER_COUNT_BASE, so a plain atomic_add
// suffices for the uncontended read path.
typedef struct rw_lock {
	const char*				name;
	struct rw_lock_waiter*	waiters;
	spinlock				lock;
	thread_id				holder;
	int32					count;
	int32					owner_count;
	int16					active_readers;
								// Only > 0 while a writer is waiting: number
								// of active readers when the first waiting
								// writer started waiting.
	int16					pending_readers;
								// Number of readers that have already
								// incremented "count", but have not yet started
								// to wait at the time the last writer unlocked.
	uint32					flags;
} rw_lock;

#define RW_LOCK_WRITER_COUNT_BASE	0x10000

#define RW_LOCK_FLAG_CLONE_NAME	0x1


#if KDEBUG
#	define KDEBUG_RW_LOCK_DEBUG 0
		// Define to 1 if you want to use ASSERT_READ_LOCKED_RW_LOCK().
		// The rw_lock will just behave like a recursive locker then.
	// Ownership assertions: only meaningful in KDEBUG builds, where the
	// holder thread is tracked; they compile to no-ops otherwise.
#	define ASSERT_LOCKED_RECURSIVE(r) \
		{ ASSERT(find_thread(NULL) == (r)->lock.holder); }
#	define ASSERT_LOCKED_MUTEX(m) { ASSERT(find_thread(NULL) == (m)->holder); }
#	define ASSERT_WRITE_LOCKED_RW_LOCK(l) \
		{ ASSERT(find_thread(NULL) == (l)->holder); }
#	if KDEBUG_RW_LOCK_DEBUG
#		define ASSERT_READ_LOCKED_RW_LOCK(l) \
			{ ASSERT(find_thread(NULL) == (l)->holder); }
#	else
#		define ASSERT_READ_LOCKED_RW_LOCK(l) do {} while (false)
#	endif
#else
#	define ASSERT_LOCKED_RECURSIVE(r)		do {} while (false)
#	define ASSERT_LOCKED_MUTEX(m)			do {} while (false)
#	define ASSERT_WRITE_LOCKED_RW_LOCK(m)	do {} while (false)
#	define ASSERT_READ_LOCKED_RW_LOCK(l)	do {} while (false)
#endif


// static initializers
// Field order must match the struct definitions above; the fourth mutex
// member is "holder" (-1 = none) in KDEBUG builds and "count" otherwise.
#if KDEBUG
#	define MUTEX_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, -1, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), 0 }
#else
#	define MUTEX_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, 0, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), -1, 0 }
#endif

#define RW_LOCK_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, -1, 0, 0, 0, 0, 0 }


// Where the holder thread lives depends on the build (see recursive_lock).
#if KDEBUG
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->lock.holder)
#else
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->holder)
#endif


#ifdef __cplusplus
extern "C" {
#endif

extern void	recursive_lock_init(recursive_lock *lock, const char *name);
	// name is *not* cloned nor freed in recursive_lock_destroy()
extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
	uint32 flags);
extern void recursive_lock_destroy(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);
extern status_t recursive_lock_trylock(recursive_lock *lock);
extern void recursive_lock_unlock(recursive_lock *lock);
extern status_t recursive_lock_switch_lock(recursive_lock* from,
	recursive_lock* to);
	// Unlocks "from" and locks "to" such that unlocking and starting to wait
	// for the lock is atomic. I.e. if "from" guards the object "to" belongs
	// to, the operation is safe as long as "from" is held while destroying
	// "to".
extern status_t recursive_lock_switch_from_mutex(mutex* from,
	recursive_lock* to);
	// Like recursive_lock_switch_lock(), just for switching from a mutex.
extern status_t recursive_lock_switch_from_read_lock(rw_lock* from,
	recursive_lock* to);
	// Like recursive_lock_switch_lock(), just for switching from a read-locked
	// rw_lock.
extern int32 recursive_lock_get_recursion(recursive_lock *lock);

extern void rw_lock_init(rw_lock* lock, const char* name);
	// name is *not* cloned nor freed in rw_lock_destroy()
extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
extern void rw_lock_destroy(rw_lock* lock);
extern status_t rw_lock_write_lock(rw_lock* lock);

extern void mutex_init(mutex* lock, const char* name);
	// name is *not* cloned nor freed in mutex_destroy()
extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
extern void mutex_destroy(mutex* lock);
extern void mutex_transfer_lock(mutex* lock, thread_id thread);
extern status_t mutex_switch_lock(mutex* from, mutex* to);
	// Unlocks "from" and locks "to" such that unlocking and starting to wait
	// for the lock is atomic. I.e. if "from" guards the object "to" belongs
	// to, the operation is safe as long as "from" is held while destroying
	// "to".
extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to);
	// Like mutex_switch_lock(), just for switching from a read-locked
	// rw_lock.
163 164 165 // implementation private: 166 167 extern status_t _rw_lock_read_lock(rw_lock* lock); 168 extern status_t _rw_lock_read_lock_with_timeout(rw_lock* lock, 169 uint32 timeoutFlags, bigtime_t timeout); 170 extern void _rw_lock_read_unlock(rw_lock* lock); 171 extern void _rw_lock_write_unlock(rw_lock* lock); 172 173 extern status_t _mutex_lock(mutex* lock, void* locker); 174 extern void _mutex_unlock(mutex* lock); 175 extern status_t _mutex_trylock(mutex* lock); 176 extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, 177 bigtime_t timeout); 178 179 180 static inline status_t 181 rw_lock_read_lock(rw_lock* lock) 182 { 183 #if KDEBUG_RW_LOCK_DEBUG 184 return rw_lock_write_lock(lock); 185 #else 186 int32 oldCount = atomic_add(&lock->count, 1); 187 if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) 188 return _rw_lock_read_lock(lock); 189 return B_OK; 190 #endif 191 } 192 193 194 static inline status_t 195 rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags, 196 bigtime_t timeout) 197 { 198 #if KDEBUG_RW_LOCK_DEBUG 199 return mutex_lock_with_timeout(lock, timeoutFlags, timeout); 200 #else 201 int32 oldCount = atomic_add(&lock->count, 1); 202 if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) 203 return _rw_lock_read_lock_with_timeout(lock, timeoutFlags, timeout); 204 return B_OK; 205 #endif 206 } 207 208 209 static inline void 210 rw_lock_read_unlock(rw_lock* lock) 211 { 212 #if KDEBUG_RW_LOCK_DEBUG 213 rw_lock_write_unlock(lock); 214 #else 215 int32 oldCount = atomic_add(&lock->count, -1); 216 if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) 217 _rw_lock_read_unlock(lock); 218 #endif 219 } 220 221 222 static inline void 223 rw_lock_write_unlock(rw_lock* lock) 224 { 225 _rw_lock_write_unlock(lock); 226 } 227 228 229 static inline status_t 230 mutex_lock(mutex* lock) 231 { 232 #if KDEBUG 233 return _mutex_lock(lock, NULL); 234 #else 235 if (atomic_add(&lock->count, -1) < 0) 236 return _mutex_lock(lock, NULL); 237 return B_OK; 238 #endif 239 } 240 
241 242 static inline status_t 243 mutex_trylock(mutex* lock) 244 { 245 #if KDEBUG 246 return _mutex_trylock(lock); 247 #else 248 if (atomic_test_and_set(&lock->count, -1, 0) != 0) 249 return B_WOULD_BLOCK; 250 return B_OK; 251 #endif 252 } 253 254 255 static inline status_t 256 mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout) 257 { 258 #if KDEBUG 259 return _mutex_lock_with_timeout(lock, timeoutFlags, timeout); 260 #else 261 if (atomic_add(&lock->count, -1) < 0) 262 return _mutex_lock_with_timeout(lock, timeoutFlags, timeout); 263 return B_OK; 264 #endif 265 } 266 267 268 static inline void 269 mutex_unlock(mutex* lock) 270 { 271 #if !KDEBUG 272 if (atomic_add(&lock->count, 1) < -1) 273 #endif 274 _mutex_unlock(lock); 275 } 276 277 278 static inline void 279 recursive_lock_transfer_lock(recursive_lock* lock, thread_id thread) 280 { 281 if (lock->recursion != 1) 282 panic("invalid recursion level for lock transfer!"); 283 284 #if KDEBUG 285 mutex_transfer_lock(&lock->lock, thread); 286 #else 287 lock->holder = thread; 288 #endif 289 } 290 291 292 extern void lock_debug_init(); 293 294 #ifdef __cplusplus 295 } 296 #endif 297 298 #endif /* _KERNEL_LOCK_H */ 299