xref: /haiku/src/system/kernel/locks/lock.cpp (revision 3369e03d5cde9709c8aa70c99bfe6ce24ba65bf9)
/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Mutex and recursive_lock code */


#include <lock.h>

#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <listeners.h>
#include <scheduling_analysis.h>
#include <thread.h>
#include <util/AutoLock.h>


struct mutex_waiter {
	Thread*			thread;
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};

struct rw_lock_waiter {
	Thread*			thread;
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;
};

#define MUTEX_FLAG_RELEASED		0x2


int32
recursive_lock_get_recursion(recursive_lock *lock)
{
	if (RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id())
		return lock->recursion;

	return -1;
}


void
recursive_lock_init(recursive_lock *lock, const char *name)
{
	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_destroy(recursive_lock *lock)
{
	if (lock == NULL)
		return;

	mutex_destroy(&lock->lock);
}


status_t
recursive_lock_lock(recursive_lock *lock)
{
	thread_id thread = thread_get_current_thread_id();

	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_lock: called with interrupts disabled for lock "
			"%p (\"%s\")\n", lock, lock->lock.name);
	}

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		mutex_lock(&lock->lock);
#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = thread_get_current_thread_id();

	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_trylock: called with interrupts disabled for "
			"lock %p (\"%s\")\n", lock, lock->lock.name);
	}

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		status_t status = mutex_trylock(&lock->lock);
		if (status != B_OK)
			return status;

#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


void
recursive_lock_unlock(recursive_lock *lock)
{
	if (thread_get_current_thread_id() != RECURSIVE_LOCK_HOLDER(lock))
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if (--lock->recursion == 0) {
#if !KDEBUG
		lock->holder = -1;
#endif
		mutex_unlock(&lock->lock);
	}
}
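
/*
 * Minimal usage sketch of the recursive_lock API above (illustrative only;
 * sSomeLock and the surrounding code are hypothetical):
 *
 *	static recursive_lock sSomeLock;
 *
 *	recursive_lock_init(&sSomeLock, "some lock");
 *	recursive_lock_lock(&sSomeLock);
 *	recursive_lock_lock(&sSomeLock);		// the same thread may nest
 *	recursive_lock_unlock(&sSomeLock);
 *	recursive_lock_unlock(&sSomeLock);		// last unlock releases the mutex
 *	recursive_lock_destroy(&sSomeLock);
 */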


//	#pragma mark -


static status_t
rw_lock_wait(rw_lock* lock, bool writer, InterruptsSpinLocker& locker)
{
	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
	locker.Unlock();

	status_t result = thread_block();

	locker.Lock();
	return result;
}


static int32
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there are any waiting threads at all and whether anyone
	// has the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder >= 0)
		return 0;

	// writer at head of queue?
	if (waiter->writer) {
		if (lock->active_readers > 0 || lock->pending_readers > 0)
			return 0;

		// dequeue writer
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->holder = waiter->thread->id;

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

		waiter->thread = NULL;
		return RW_LOCK_WRITER_COUNT_BASE;
	}

	// wake up one or more readers
	uint32 readerCount = 0;
	do {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		readerCount++;

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

		waiter->thread = NULL;
	} while ((waiter = lock->waiters) != NULL && !waiter->writer);

	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers += readerCount;

	return readerCount;
}
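
/*
 * Note on the rw_lock bookkeeping used above and in the slow paths below
 * (a summary, not an exhaustive specification): lock->count is a single
 * atomically maintained counter -- each reader adds 1 (in the inline fast
 * path before falling back to the slow paths in this file), each writer adds
 * RW_LOCK_WRITER_COUNT_BASE (in rw_lock_write_lock()). A writer that sees a
 * non-zero old count therefore knows the lock is held and has to wait; a
 * reader that sees an old count of at least RW_LOCK_WRITER_COUNT_BASE knows
 * a writer is involved. active_readers and pending_readers are only
 * meaningful while a writer holds or waits for the lock; rw_lock_unblock()
 * uses them to decide when a queued writer may be allowed to run.
 */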


void
rw_lock_init(rw_lock* lock, const char* name)
{
	lock->name = name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = 0;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}


void
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}


void
rw_lock_destroy(rw_lock* lock)
{
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (lock->waiters != NULL && thread_get_current_thread_id()
			!= lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		thread_unblock(waiter->thread, B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}
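
/*
 * Minimal usage sketch of the public rw_lock API (illustrative only; the
 * read/write wrappers shown are declared in lock.h rather than defined in
 * this file, and sMapLock is hypothetical):
 *
 *	static rw_lock sMapLock;
 *
 *	rw_lock_init(&sMapLock, "map lock");
 *
 *	rw_lock_read_lock(&sMapLock);
 *	// ... readers may run concurrently ...
 *	rw_lock_read_unlock(&sMapLock);
 *
 *	rw_lock_write_lock(&sMapLock);
 *	// ... exclusive access ...
 *	rw_lock_write_unlock(&sMapLock);
 *
 *	rw_lock_destroy(&sMapLock);
 */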


#if !KDEBUG_RW_LOCK_DEBUG

status_t
_rw_lock_read_lock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	// We might be the writer ourselves.
	if (lock->holder == thread_get_current_thread_id()) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait
	return rw_lock_wait(lock, false, locker);
}


status_t
_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
	InterruptsSpinLocker locker(lock->lock);

	// We might be the writer ourselves.
	if (lock->holder == thread_get_current_thread_id()) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait

	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;
	waiter.writer = false;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
	locker.Unlock();

	status_t error = thread_block_with_timeout(timeoutFlags, timeout);
	if (error == B_OK || waiter.thread == NULL) {
		// We were unblocked successfully -- potentially our unblocker overtook
		// us after we already failed. In either case, we've got the lock, now.
		return B_OK;
	}

	locker.Lock();
	// We failed to get the lock -- dequeue from waiter list.
	rw_lock_waiter* previous = NULL;
	rw_lock_waiter* other = lock->waiters;
	while (other != &waiter) {
		previous = other;
		other = other->next;
	}

	if (previous == NULL) {
		// we are the first in line
		lock->waiters = waiter.next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter.last;
	} else {
		// one or more other waiters are before us in the queue
		previous->next = waiter.next;
		if (lock->waiters->last == &waiter)
			lock->waiters->last = previous;
	}

	// Decrement the count. ATM this is all we have to do. There's at least
	// one writer ahead of us -- otherwise the last writer would have unblocked
	// us (writers only manipulate the lock data with the lock's spinlock being
	// held) -- so our leaving doesn't make a difference to the ones behind us
	// in the queue.
	atomic_add(&lock->count, -1);

	return error;
}


void
_rw_lock_read_unlock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	// If we're still holding the write lock or if there are other readers,
	// no-one can be woken up.
	if (lock->holder == thread_get_current_thread_id()) {
		ASSERT(lock->owner_count % RW_LOCK_WRITER_COUNT_BASE > 0);
		lock->owner_count--;
		return;
	}

	if (--lock->active_readers > 0)
		return;

	if (lock->active_readers < 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		lock->active_readers = 0;
		return;
	}

	rw_lock_unblock(lock);
}

#endif	// !KDEBUG_RW_LOCK_DEBUG


status_t
rw_lock_write_lock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	// If we're already the lock holder, we just need to increment the owner
	// count.
	thread_id thread = thread_get_current_thread_id();
	if (lock->holder == thread) {
		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// announce our claim
	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);

	if (oldCount == 0) {
		// No-one else held a read or write lock, so it's ours now.
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// We have to wait. If we're the first writer, note the current reader
	// count.
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers = oldCount - lock->pending_readers;

	status_t status = rw_lock_wait(lock, true, locker);
	if (status == B_OK) {
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
	}

	return status;
}


void
_rw_lock_write_unlock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	if (thread_get_current_thread_id() != lock->holder) {
		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
			lock);
		return;
	}

	ASSERT(lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE);

	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
		return;

	// We gave up our last write lock -- clean up and unblock waiters.
	int32 readerCount = lock->owner_count;
	lock->holder = -1;
	lock->owner_count = 0;

	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
	oldCount -= RW_LOCK_WRITER_COUNT_BASE;

	if (oldCount != 0) {
		// If writers are waiting, take over our reader count.
		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
			lock->active_readers = readerCount;
			rw_lock_unblock(lock);
		} else {
			// No waiting writer, but there are one or more readers. We will
			// unblock all waiting readers -- that's the easy part -- and must
			// also make sure that all readers that haven't entered the critical
			// section yet won't start to wait. Otherwise a writer overtaking
			// such a reader will correctly start to wait, but the reader,
			// seeing the writer count > 0, would also start to wait. We set
			// pending_readers to the number of readers that are still expected
			// to enter the critical section.
			lock->pending_readers = oldCount - readerCount
				- rw_lock_unblock(lock);
		}
	}
}
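
/*
 * A worked example of the pending_readers arithmetic above (the numbers are
 * purely illustrative and assume the usual reader fast path adds 1 to
 * lock->count before entering the slow path): the releasing writer still
 * holds one nested read lock (readerCount == 1), two more readers are
 * already blocked in the waiter queue, and one further reader has
 * incremented count but not yet reached _rw_lock_read_lock(). Then oldCount
 * is 4 after removing our writer base, rw_lock_unblock() wakes the two
 * queued readers and returns 2, and pending_readers becomes 4 - 1 - 2 == 1
 * -- exactly the one reader that is still on its way and must not block
 * when it arrives.
 */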


static int
dump_rw_lock_info(int argc, char** argv)
{
	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	rw_lock* lock = (rw_lock*)parse_expression(argv[1]);

	if (!IS_KERNEL_ADDRESS(lock)) {
		kprintf("invalid address: %p\n", lock);
		return 0;
	}

	kprintf("rw lock %p:\n", lock);
	kprintf("  name:            %s\n", lock->name);
	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
	kprintf("  count:           %#" B_PRIx32 "\n", lock->count);
	kprintf("  active readers:  %d\n", lock->active_readers);
	kprintf("  pending readers: %d\n", lock->pending_readers);
	kprintf("  owner count:     %#" B_PRIx32 "\n", lock->owner_count);
	kprintf("  flags:           %#" B_PRIx32 "\n", lock->flags);

	kprintf("  waiting threads:");
	rw_lock_waiter* waiter = lock->waiters;
	while (waiter != NULL) {
		kprintf(" %" B_PRId32 "/%c", waiter->thread->id,
			waiter->writer ? 'w' : 'r');
		waiter = waiter->next;
	}
	kputs("\n");

	return 0;
}


// #pragma mark -


void
mutex_init(mutex* lock, const char *name)
{
	mutex_init_etc(lock, name, 0);
}


void
mutex_init_etc(mutex* lock, const char *name, uint32 flags)
{
	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
	lock->ignore_unlock_count = 0;
#endif
	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
}
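
/*
 * Minimal usage sketch of the mutex API (illustrative only; sCacheLock and
 * nameBuffer are hypothetical). MUTEX_FLAG_CLONE_NAME makes mutex_init_etc()
 * strdup() the name, and mutex_destroy() free()s the copy:
 *
 *	static mutex sCacheLock;
 *
 *	mutex_init_etc(&sCacheLock, nameBuffer, MUTEX_FLAG_CLONE_NAME);
 *
 *	mutex_lock(&sCacheLock);
 *	// ... critical section ...
 *	mutex_unlock(&sCacheLock);
 *
 *	mutex_destroy(&sCacheLock);
 */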


void
mutex_destroy(mutex* lock)
{
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (lock->waiters != NULL && thread_get_current_thread_id()
			!= lock->holder) {
		panic("mutex_destroy(): there are blocking threads, but caller doesn't "
			"hold the lock (%p)", lock);
		if (_mutex_lock(lock, &locker) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		thread_unblock(waiter->thread, B_ERROR);
	}

	lock->name = NULL;
	lock->flags = 0;
#if KDEBUG
	lock->holder = 0;
#else
	lock->count = INT16_MIN;
#endif

	locker.Unlock();

	free(name);
}


static inline status_t
mutex_lock_threads_locked(mutex* lock, InterruptsSpinLocker* locker)
{
#if KDEBUG
	return _mutex_lock(lock, locker);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock(lock, locker);
	return B_OK;
#endif
}
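
/*
 * Note: in the non-KDEBUG build, mutex_lock_threads_locked() above mirrors
 * the inline mutex_lock() fast path declared in lock.h -- lock->count is
 * decremented atomically and _mutex_lock() is only entered when the mutex
 * was already held (the old count was negative). In KDEBUG builds every
 * acquisition goes through _mutex_lock(), so holder tracking and double-lock
 * checks always apply.
 */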


status_t
mutex_switch_lock(mutex* from, mutex* to)
{
	InterruptsSpinLocker locker(to->lock);

#if !KDEBUG
	if (atomic_add(&from->count, 1) < -1)
#endif
		_mutex_unlock(from);

	return mutex_lock_threads_locked(to, &locker);
}
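
/*
 * mutex_switch_lock() releases "from" and acquires "to" as one step with
 * respect to "to": to's spinlock is taken before "from" is unlocked, and the
 * caller is enqueued on "to" (if necessary) before that spinlock is dropped
 * again. A sketch of typical use (illustrative; sListLock, object->lock and
 * lookup_object() are hypothetical):
 *
 *	mutex_lock(&sListLock);
 *	Object* object = lookup_object();
 *	mutex_switch_lock(&sListLock, &object->lock);
 *	// only object->lock is held from here on
 *	mutex_unlock(&object->lock);
 */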


status_t
mutex_switch_from_read_lock(rw_lock* from, mutex* to)
{
	InterruptsSpinLocker locker(to->lock);

#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_write_unlock(from);
#else
	int32 oldCount = atomic_add(&from->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock(from);
#endif

	return mutex_lock_threads_locked(to, &locker);
}


status_t
_mutex_lock(mutex* lock, void* _locker)
{
#if KDEBUG
	if (!gKernelStartup && _locker == NULL && !are_interrupts_enabled()) {
		panic("_mutex_lock(): called with interrupts disabled for lock %p",
			lock);
	}
#endif

	// If the caller hasn't already locked the mutex's spinlock, do it now.
	InterruptsSpinLocker* locker
		= reinterpret_cast<InterruptsSpinLocker*>(_locker);

	InterruptsSpinLocker lockLocker;
	if (locker == NULL) {
		lockLocker.SetTo(lock->lock, false);
		locker = &lockLocker;
	}

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	locker->Unlock();

	status_t error = thread_block();
#if KDEBUG
	if (error == B_OK)
		atomic_set(&lock->holder, waiter.thread->id);
#endif
	return error;
}


void
_mutex_unlock(mutex* lock)
{
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (thread_get_current_thread_id() != lock->holder) {
		panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to "
			"release mutex %p (current holder %" B_PRId32 ")\n",
			thread_get_current_thread_id(), lock, lock->holder);
		return;
	}
#else
	if (lock->ignore_unlock_count > 0) {
		lock->ignore_unlock_count--;
		return;
	}
#endif

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;
#if KDEBUG
		thread_id unblockedThread = waiter->thread->id;
#endif

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

#if KDEBUG
		// Set the holder to the unblocked thread right away. Besides that
		// this reflects the actual situation, setting it to -1 would cause
		// a race condition, since another locker could think the lock is
		// not held by anyone.
		lock->holder = unblockedThread;
#endif
	} else {
		// We've acquired the spinlock before the locker that is going to wait.
		// Just mark the lock as released.
#if KDEBUG
		lock->holder = -1;
#else
		lock->flags |= MUTEX_FLAG_RELEASED;
#endif
	}
}


status_t
_mutex_trylock(mutex* lock)
{
#if KDEBUG
	InterruptsSpinLocker _(lock->lock);

	if (lock->holder <= 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	}
#endif
	return B_WOULD_BLOCK;
}


status_t
_mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_mutex_lock_with_timeout(): called with interrupts disabled "
			"for lock %p", lock);
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock_with_timeout(): double lock of %p by thread %"
			B_PRId32, lock, lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock_with_timeout(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	locker.Unlock();

	status_t error = thread_block_with_timeout(timeoutFlags, timeout);

	if (error == B_OK) {
#if KDEBUG
		lock->holder = waiter.thread->id;
#endif
	} else {
		locker.Lock();

		// If the timeout occurred, we must remove our waiter structure from
		// the queue.
		mutex_waiter* previousWaiter = NULL;
		mutex_waiter* otherWaiter = lock->waiters;
		while (otherWaiter != NULL && otherWaiter != &waiter) {
			previousWaiter = otherWaiter;
			otherWaiter = otherWaiter->next;
		}
		if (otherWaiter == &waiter) {
			// the structure is still in the list -- dequeue
			if (&waiter == lock->waiters) {
				if (waiter.next != NULL)
					waiter.next->last = waiter.last;
				lock->waiters = waiter.next;
			} else {
				if (waiter.next == NULL)
					lock->waiters->last = previousWaiter;
				previousWaiter->next = waiter.next;
			}

#if !KDEBUG
			// we need to fix the lock count
			if (atomic_add(&lock->count, 1) == -1) {
				// This means we were the only thread waiting for the lock and
				// the lock owner has already called atomic_add() in
				// mutex_unlock(). That is, we would probably get the lock very
				// soon (if the lock holder has a low priority, that might
				// actually take rather long, though), but the timeout already
				// occurred, so we don't try to wait. Just increment the ignore
				// unlock count.
				lock->ignore_unlock_count++;
			}
#endif
		}
	}

	return error;
}


static int
dump_mutex_info(int argc, char** argv)
{
	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	mutex* lock = (mutex*)parse_expression(argv[1]);

	if (!IS_KERNEL_ADDRESS(lock)) {
		kprintf("invalid address: %p\n", lock);
		return 0;
	}

	kprintf("mutex %p:\n", lock);
	kprintf("  name:            %s\n", lock->name);
	kprintf("  flags:           0x%x\n", lock->flags);
#if KDEBUG
	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
#else
	kprintf("  count:           %" B_PRId32 "\n", lock->count);
#endif

	kprintf("  waiting threads:");
	mutex_waiter* waiter = lock->waiters;
	while (waiter != NULL) {
		kprintf(" %" B_PRId32, waiter->thread->id);
		waiter = waiter->next;
	}
	kputs("\n");

	return 0;
}


// #pragma mark -


void
lock_debug_init()
{
	add_debugger_command_etc("mutex", &dump_mutex_info,
		"Dump info about a mutex",
		"<mutex>\n"
		"Prints info about the specified mutex.\n"
		"  <mutex>  - pointer to the mutex to print the info for.\n", 0);
	add_debugger_command_etc("rwlock", &dump_rw_lock_info,
		"Dump info about an rw lock",
		"<lock>\n"
		"Prints info about the specified rw lock.\n"
		"  <lock>  - pointer to the rw lock to print the info for.\n", 0);
}
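
/*
 * Example of invoking the commands registered above from the kernel
 * debugger (illustrative; the prompt and the address are placeholders):
 *
 *	kdebug> mutex 0x<address>
 *	kdebug> rwlock 0x<address>
 */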
958