xref: /haiku/src/system/kernel/locks/lock.cpp (revision 3b07762c548ec4016dea480d1061577cd15ec614)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*! Mutex, recursive_lock, and rw_lock code */
12 
13 
14 #include <lock.h>
15 
16 #include <stdlib.h>
17 #include <string.h>
18 
19 #include <OS.h>
20 
21 #include <debug.h>
22 #include <int.h>
23 #include <kernel.h>
24 #include <listeners.h>
25 #include <scheduling_analysis.h>
26 #include <thread.h>
27 #include <util/AutoLock.h>
28 
29 
30 struct mutex_waiter {
31 	Thread*			thread;
32 	mutex_waiter*	next;		// next in queue
33 	mutex_waiter*	last;		// last in queue (valid for the first in queue)
34 };
35 
36 struct rw_lock_waiter {
37 	Thread*			thread;
38 	rw_lock_waiter*	next;		// next in queue
39 	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
40 	bool			writer;
41 };
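
// Editor's note: both waiter queues above are intrusive, stack-allocated FIFO
// lists. Only the element currently at the head keeps a meaningful `last`
// pointer, which caches the tail so appends stay O(1) without a tail field in
// the lock itself. The sketch below is illustrative only (kept out of the
// build); it merely restates the enqueue pattern that rw_lock_wait() and
// _mutex_lock() implement inline, with `Waiter` standing for either type.
#if 0
template<typename Waiter>
static inline void
enqueue_waiter(Waiter*& head, Waiter* waiter)
{
	waiter->next = NULL;
	if (head != NULL)
		head->last->next = waiter;	// append after the cached tail
	else
		head = waiter;				// queue was empty
	head->last = waiter;			// the head always caches the new tail
}
#endif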
42 
43 #define MUTEX_FLAG_OWNS_NAME	MUTEX_FLAG_CLONE_NAME
44 #define MUTEX_FLAG_RELEASED		0x2
45 
46 #define RW_LOCK_FLAG_OWNS_NAME	RW_LOCK_FLAG_CLONE_NAME
47 
48 
49 int32
50 recursive_lock_get_recursion(recursive_lock *lock)
51 {
52 	if (RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id())
53 		return lock->recursion;
54 
55 	return -1;
56 }
57 
58 
59 void
60 recursive_lock_init(recursive_lock *lock, const char *name)
61 {
62 	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
63 	RECURSIVE_LOCK_HOLDER(lock) = -1;
64 	lock->recursion = 0;
65 }
66 
67 
68 void
69 recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
70 {
71 	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
72 	RECURSIVE_LOCK_HOLDER(lock) = -1;
73 	lock->recursion = 0;
74 }
75 
76 
77 void
78 recursive_lock_destroy(recursive_lock *lock)
79 {
80 	if (lock == NULL)
81 		return;
82 
83 	mutex_destroy(&lock->lock);
84 }
85 
86 
87 status_t
88 recursive_lock_lock(recursive_lock *lock)
89 {
90 	thread_id thread = thread_get_current_thread_id();
91 
92 	if (!gKernelStartup && !are_interrupts_enabled()) {
93 		panic("recursive_lock_lock: called with interrupts disabled for lock "
94 			"%p (\"%s\")\n", lock, lock->lock.name);
95 	}
96 
97 	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
98 		mutex_lock(&lock->lock);
99 #if !KDEBUG
100 		lock->holder = thread;
101 #endif
102 	}
103 
104 	lock->recursion++;
105 	return B_OK;
106 }
107 
108 
109 status_t
110 recursive_lock_trylock(recursive_lock *lock)
111 {
112 	thread_id thread = thread_get_current_thread_id();
113 
114 	if (!gKernelStartup && !are_interrupts_enabled())
115 		panic("recursive_lock_trylock: called with interrupts disabled for "
116 			"lock %p (\"%s\")\n", lock, lock->lock.name);
117 
118 	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
119 		status_t status = mutex_trylock(&lock->lock);
120 		if (status != B_OK)
121 			return status;
122 
123 #if !KDEBUG
124 		lock->holder = thread;
125 #endif
126 	}
127 
128 	lock->recursion++;
129 	return B_OK;
130 }
131 
132 
133 void
134 recursive_lock_unlock(recursive_lock *lock)
135 {
136 	if (thread_get_current_thread_id() != RECURSIVE_LOCK_HOLDER(lock))
137 		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);
138 
139 	if (--lock->recursion == 0) {
140 #if !KDEBUG
141 		lock->holder = -1;
142 #endif
143 		mutex_unlock(&lock->lock);
144 	}
145 }
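

// Editor's note: an illustrative sketch (kept out of the build) of how the
// recursive_lock API defined above is meant to be used -- the holder may
// re-enter the lock, and every recursive_lock_lock() must be balanced by a
// recursive_lock_unlock(). The function name is hypothetical.
#if 0
static void
example_recursive_lock_usage(recursive_lock* lock)
{
	recursive_lock_lock(lock);		// first acquisition takes the mutex
	recursive_lock_lock(lock);		// re-entry only bumps lock->recursion
	// ... critical section ...
	recursive_lock_unlock(lock);	// recursion drops back to 1
	recursive_lock_unlock(lock);	// recursion hits 0, mutex is released
}
#endif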
146 
147 
148 //	#pragma mark -
149 
150 
151 static status_t
152 rw_lock_wait(rw_lock* lock, bool writer, InterruptsSpinLocker& locker)
153 {
154 	// enqueue in waiter list
155 	rw_lock_waiter waiter;
156 	waiter.thread = thread_get_current_thread();
157 	waiter.next = NULL;
158 	waiter.writer = writer;
159 
160 	if (lock->waiters != NULL)
161 		lock->waiters->last->next = &waiter;
162 	else
163 		lock->waiters = &waiter;
164 
165 	lock->waiters->last = &waiter;
166 
167 	// block
168 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
169 	locker.Unlock();
170 
171 	status_t result = thread_block();
172 
173 	locker.Lock();
174 	return result;
175 }
176 
177 
178 static int32
179 rw_lock_unblock(rw_lock* lock)
180 {
181 	// Check whether there are any waiting threads at all and whether anyone
182 	// has the write lock.
183 	rw_lock_waiter* waiter = lock->waiters;
184 	if (waiter == NULL || lock->holder >= 0)
185 		return 0;
186 
187 	// writer at head of queue?
188 	if (waiter->writer) {
189 		if (lock->active_readers > 0 || lock->pending_readers > 0)
190 			return 0;
191 
192 		// dequeue writer
193 		lock->waiters = waiter->next;
194 		if (lock->waiters != NULL)
195 			lock->waiters->last = waiter->last;
196 
197 		lock->holder = waiter->thread->id;
198 
199 		// unblock thread
200 		thread_unblock(waiter->thread, B_OK);
201 
202 		waiter->thread = NULL;
203 		return RW_LOCK_WRITER_COUNT_BASE;
204 	}
205 
206 	// wake up one or more readers
207 	uint32 readerCount = 0;
208 	do {
209 		// dequeue reader
210 		lock->waiters = waiter->next;
211 		if (lock->waiters != NULL)
212 			lock->waiters->last = waiter->last;
213 
214 		readerCount++;
215 
216 		// unblock thread
217 		thread_unblock(waiter->thread, B_OK);
218 
219 		waiter->thread = NULL;
220 	} while ((waiter = lock->waiters) != NULL && !waiter->writer);
221 
222 	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
223 		lock->active_readers += readerCount;
224 
225 	return readerCount;
226 }
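

// Editor's note (illustrative): lock->count packs readers and writers into a
// single atomically updated value -- each reader adds 1, each writer adds
// RW_LOCK_WRITER_COUNT_BASE (0x10000 in the lock.h of this era; treat the
// exact value as an assumption here). For example:
//
//	count == 0x00003	three readers, no writer involved
//	count == 0x10000	one writer holds or waits for the lock
//	count == 0x20002	two writers and two readers have announced themselves
//
// rw_lock_unblock() mirrors this convention in its return value: it returns
// RW_LOCK_WRITER_COUNT_BASE when it hands the lock to a writer and the plain
// number of woken readers otherwise.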
227 
228 
229 void
230 rw_lock_init(rw_lock* lock, const char* name)
231 {
232 	lock->name = name;
233 	lock->waiters = NULL;
234 	B_INITIALIZE_SPINLOCK(&lock->lock);
235 	lock->holder = -1;
236 	lock->count = 0;
237 	lock->owner_count = 0;
238 	lock->active_readers = 0;
239 	lock->pending_readers = 0;
240 	lock->flags = 0;
241 
242 	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
243 	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
244 }
245 
246 
247 void
248 rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
249 {
250 	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
251 	lock->waiters = NULL;
252 	B_INITIALIZE_SPINLOCK(&lock->lock);
253 	lock->holder = -1;
254 	lock->count = 0;
255 	lock->owner_count = 0;
256 	lock->active_readers = 0;
257 	lock->pending_readers = 0;
258 	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;
259 
260 	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
261 	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
262 }
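

// Editor's note: an illustrative sketch (kept out of the build) of typical
// rw_lock use. The read/write entry points invoked here are the public
// wrappers from <lock.h>, which only drop into the slow-path functions in
// this file on contention; the function name and the use of
// RW_LOCK_INITIALIZER are assumptions based on that header.
#if 0
static void
example_rw_lock_usage()
{
	static rw_lock sExampleLock = RW_LOCK_INITIALIZER("example rw lock");

	rw_lock_read_lock(&sExampleLock);	// shared: may block behind writers
	// ... read shared state ...
	rw_lock_read_unlock(&sExampleLock);

	rw_lock_write_lock(&sExampleLock);	// exclusive: waits for all readers
	// ... modify shared state ...
	rw_lock_write_unlock(&sExampleLock);
}
#endif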
263 
264 
265 void
266 rw_lock_destroy(rw_lock* lock)
267 {
268 	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
269 		? (char*)lock->name : NULL;
270 
271 	// unblock all waiters
272 	InterruptsSpinLocker locker(lock->lock);
273 
274 #if KDEBUG
275 	if (lock->waiters != NULL && thread_get_current_thread_id()
276 			!= lock->holder) {
277 		panic("rw_lock_destroy(): there are blocking threads, but the caller "
278 			"doesn't hold the write lock (%p)", lock);
279 
280 		locker.Unlock();
281 		if (rw_lock_write_lock(lock) != B_OK)
282 			return;
283 		locker.Lock();
284 	}
285 #endif
286 
287 	while (rw_lock_waiter* waiter = lock->waiters) {
288 		// dequeue
289 		lock->waiters = waiter->next;
290 
291 		// unblock thread
292 		thread_unblock(waiter->thread, B_ERROR);
293 	}
294 
295 	lock->name = NULL;
296 
297 	locker.Unlock();
298 
299 	free(name);
300 }
301 
302 
303 #if !KDEBUG_RW_LOCK_DEBUG
304 
305 status_t
306 _rw_lock_read_lock(rw_lock* lock)
307 {
308 	InterruptsSpinLocker locker(lock->lock);
309 
310 	// We might be the writer ourselves.
311 	if (lock->holder == thread_get_current_thread_id()) {
312 		lock->owner_count++;
313 		return B_OK;
314 	}
315 
316 	// The writer that originally had the lock when we called atomic_add() might
317 	// already have gone and another writer could have overtaken us. In this
318 	// case the original writer set pending_readers, so we know that we don't
319 	// have to wait.
320 	if (lock->pending_readers > 0) {
321 		lock->pending_readers--;
322 
323 		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
324 			lock->active_readers++;
325 
326 		return B_OK;
327 	}
328 
329 	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);
330 
331 	// we need to wait
332 	return rw_lock_wait(lock, false, locker);
333 }
334 
335 
336 status_t
337 _rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
338 	bigtime_t timeout)
339 {
340 	InterruptsSpinLocker locker(lock->lock);
341 
342 	// We might be the writer ourselves.
343 	if (lock->holder == thread_get_current_thread_id()) {
344 		lock->owner_count++;
345 		return B_OK;
346 	}
347 
348 	// The writer that originally had the lock when we called atomic_add() might
349 	// already have gone and another writer could have overtaken us. In this
350 	// case the original writer set pending_readers, so we know that we don't
351 	// have to wait.
352 	if (lock->pending_readers > 0) {
353 		lock->pending_readers--;
354 
355 		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
356 			lock->active_readers++;
357 
358 		return B_OK;
359 	}
360 
361 	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);
362 
363 	// we need to wait
364 
365 	// enqueue in waiter list
366 	rw_lock_waiter waiter;
367 	waiter.thread = thread_get_current_thread();
368 	waiter.next = NULL;
369 	waiter.writer = false;
370 
371 	if (lock->waiters != NULL)
372 		lock->waiters->last->next = &waiter;
373 	else
374 		lock->waiters = &waiter;
375 
376 	lock->waiters->last = &waiter;
377 
378 	// block
379 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
380 	locker.Unlock();
381 
382 	status_t error = thread_block_with_timeout(timeoutFlags, timeout);
383 	if (error == B_OK || waiter.thread == NULL) {
384 		// We were unblocked successfully -- potentially our unblocker overtook
385 		// us after we already failed. In either case, we've got the lock now.
386 		return B_OK;
387 	}
388 
389 	locker.Lock();
390 	// We failed to get the lock -- dequeue from waiter list.
391 	rw_lock_waiter* previous = NULL;
392 	rw_lock_waiter* other = lock->waiters;
393 	while (other != &waiter) {
394 		previous = other;
395 		other = other->next;
396 	}
397 
398 	if (previous == NULL) {
399 		// we are the first in line
400 		lock->waiters = waiter.next;
401 		if (lock->waiters != NULL)
402 			lock->waiters->last = waiter.last;
403 	} else {
404 		// one or more other waiters are before us in the queue
405 		previous->next = waiter.next;
406 		if (lock->waiters->last == &waiter)
407 			lock->waiters->last = previous;
408 	}
409 
410 	// Decrement the count. ATM this is all we have to do. There's at least
411 	// one writer ahead of us -- otherwise the last writer would have unblocked
412 	// us (writers only manipulate the lock data with the lock's spinlock
413 	// held) -- so our leaving doesn't make a difference to the ones behind us
414 	// in the queue.
415 	atomic_add(&lock->count, -1);
416 
417 	return error;
418 }
419 
420 
421 void
422 _rw_lock_read_unlock(rw_lock* lock)
423 {
424 	InterruptsSpinLocker locker(lock->lock);
425 
426 	// If we're still holding the write lock or if there are other readers,
427 	// no-one can be woken up.
428 	if (lock->holder == thread_get_current_thread_id()) {
429 		ASSERT(lock->owner_count % RW_LOCK_WRITER_COUNT_BASE > 0);
430 		lock->owner_count--;
431 		return;
432 	}
433 
434 	if (--lock->active_readers > 0)
435 		return;
436 
437 	if (lock->active_readers < 0) {
438 		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
439 		lock->active_readers = 0;
440 		return;
441 	}
442 
443 	rw_lock_unblock(lock);
444 }
445 
446 #endif	// !KDEBUG_RW_LOCK_DEBUG
447 
448 
449 status_t
450 rw_lock_write_lock(rw_lock* lock)
451 {
452 	InterruptsSpinLocker locker(lock->lock);
453 
454 	// If we're already the lock holder, we just need to increment the owner
455 	// count.
456 	thread_id thread = thread_get_current_thread_id();
457 	if (lock->holder == thread) {
458 		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
459 		return B_OK;
460 	}
461 
462 	// announce our claim
463 	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);
464 
465 	if (oldCount == 0) {
466 		// No-one else held a read or write lock, so it's ours now.
467 		lock->holder = thread;
468 		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
469 		return B_OK;
470 	}
471 
472 	// We have to wait. If we're the first writer, note the current reader
473 	// count.
474 	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
475 		lock->active_readers = oldCount - lock->pending_readers;
476 
477 	status_t status = rw_lock_wait(lock, true, locker);
478 	if (status == B_OK) {
479 		lock->holder = thread;
480 		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
481 	}
482 
483 	return status;
484 }
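

// Editor's note (illustrative): a worked example for rw_lock_write_lock(),
// assuming RW_LOCK_WRITER_COUNT_BASE == 0x10000. With two readers holding the
// lock, count is 2; the writer's atomic_add() turns it into 0x10002 and
// returns oldCount == 2. Because oldCount is not 0 the lock isn't free, and
// because oldCount < RW_LOCK_WRITER_COUNT_BASE this thread is the first
// writer, so it records active_readers = 2 - pending_readers and blocks in
// rw_lock_wait() until _rw_lock_read_unlock() has counted both readers out
// and rw_lock_unblock() hands it the lock.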
485 
486 
487 void
488 _rw_lock_write_unlock(rw_lock* lock)
489 {
490 	InterruptsSpinLocker locker(lock->lock);
491 
492 	if (thread_get_current_thread_id() != lock->holder) {
493 		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
494 			lock);
495 		return;
496 	}
497 
498 	ASSERT(lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE);
499 
500 	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
501 	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
502 		return;
503 
504 	// We gave up our last write lock -- clean up and unblock waiters.
505 	int32 readerCount = lock->owner_count;
506 	lock->holder = -1;
507 	lock->owner_count = 0;
508 
509 	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
510 	oldCount -= RW_LOCK_WRITER_COUNT_BASE;
511 
512 	if (oldCount != 0) {
513 		// If writers are waiting, take over our reader count.
514 		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
515 			lock->active_readers = readerCount;
516 			rw_lock_unblock(lock);
517 		} else {
518 			// No waiting writer, but there are one or more readers. We will
519 			// unblock all waiting readers -- that's the easy part -- and must
520 			// also make sure that all readers that haven't entered the critical
521 			// section yet won't start to wait. Otherwise a writer overtaking
522 			// such a reader will correctly start to wait, but the reader,
523 			// seeing the writer count > 0, would also start to wait. We set
524 			// pending_readers to the number of readers that are still expected
525 			// to enter the critical section.
526 			lock->pending_readers = oldCount - readerCount
527 				- rw_lock_unblock(lock);
528 		}
529 	}
530 }
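

// Editor's note (illustrative): a numeric walk-through of the pending_readers
// bookkeeping in _rw_lock_write_unlock(), assuming
// RW_LOCK_WRITER_COUNT_BASE == 0x10000. Say the writer releases while three
// readers have already incremented count, one of which has reached the waiter
// queue, and the writer held no nested read locks (readerCount == 0). After
// subtracting the writer's share, oldCount is 3; rw_lock_unblock() wakes the
// one queued reader and returns 1, so pending_readers becomes 3 - 0 - 1 = 2.
// The two readers that bumped count but haven't reached the spinlock yet will
// find pending_readers > 0 in _rw_lock_read_lock() and proceed without
// blocking, which is exactly what the comment above describes.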
531 
532 
533 static int
534 dump_rw_lock_info(int argc, char** argv)
535 {
536 	if (argc < 2) {
537 		print_debugger_command_usage(argv[0]);
538 		return 0;
539 	}
540 
541 	rw_lock* lock = (rw_lock*)parse_expression(argv[1]);
542 
543 	if (!IS_KERNEL_ADDRESS(lock)) {
544 		kprintf("invalid address: %p\n", lock);
545 		return 0;
546 	}
547 
548 	kprintf("rw lock %p:\n", lock);
549 	kprintf("  name:            %s\n", lock->name);
550 	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
551 	kprintf("  count:           %#" B_PRIx32 "\n", lock->count);
552 	kprintf("  active readers:  %d\n", lock->active_readers);
553 	kprintf("  pending readers: %d\n", lock->pending_readers);
554 	kprintf("  owner count:     %#" B_PRIx32 "\n", lock->owner_count);
555 	kprintf("  flags:           %#" B_PRIx32 "\n", lock->flags);
556 
557 	kprintf("  waiting threads:");
558 	rw_lock_waiter* waiter = lock->waiters;
559 	while (waiter != NULL) {
560 		kprintf(" %" B_PRId32 "/%c", waiter->thread->id, waiter->writer ? 'w' : 'r');
561 		waiter = waiter->next;
562 	}
563 	kputs("\n");
564 
565 	return 0;
566 }
567 
568 
569 // #pragma mark -
570 
571 
572 void
573 mutex_init(mutex* lock, const char *name)
574 {
575 	lock->name = name;
576 	lock->waiters = NULL;
577 	B_INITIALIZE_SPINLOCK(&lock->lock);
578 #if KDEBUG
579 	lock->holder = -1;
580 #else
581 	lock->count = 0;
582 	lock->ignore_unlock_count = 0;
583 #endif
584 	lock->flags = 0;
585 
586 	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
587 	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
588 }
589 
590 
591 void
592 mutex_init_etc(mutex* lock, const char *name, uint32 flags)
593 {
594 	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
595 	lock->waiters = NULL;
596 	B_INITIALIZE_SPINLOCK(&lock->lock);
597 #if KDEBUG
598 	lock->holder = -1;
599 #else
600 	lock->count = 0;
601 	lock->ignore_unlock_count = 0;
602 #endif
603 	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
604 
605 	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
606 	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
607 }
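

// Editor's note: an illustrative sketch (kept out of the build) of basic mutex
// use. mutex_lock()/mutex_unlock()/mutex_trylock() are the public wrappers
// from <lock.h>; they reach the functions in this file only on contention or
// in KDEBUG builds. The function name and the use of MUTEX_INITIALIZER are
// assumptions based on that header.
#if 0
static void
example_mutex_usage()
{
	static mutex sExampleMutex = MUTEX_INITIALIZER("example mutex");

	mutex_lock(&sExampleMutex);
	// ... critical section ...
	mutex_unlock(&sExampleMutex);

	if (mutex_trylock(&sExampleMutex) == B_OK) {
		// got the lock without blocking
		mutex_unlock(&sExampleMutex);
	}
}
#endif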
608 
609 
610 void
611 mutex_destroy(mutex* lock)
612 {
613 	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
614 		? (char*)lock->name : NULL;
615 
616 	// unblock all waiters
617 	InterruptsSpinLocker locker(lock->lock);
618 
619 #if KDEBUG
620 	if (lock->waiters != NULL && thread_get_current_thread_id()
621 		!= lock->holder) {
622 		panic("mutex_destroy(): there are blocking threads, but caller doesn't "
623 			"hold the lock (%p)", lock);
624 		if (_mutex_lock(lock, &locker) != B_OK)
625 			return;
626 		locker.Lock();
627 	}
628 #endif
629 
630 	while (mutex_waiter* waiter = lock->waiters) {
631 		// dequeue
632 		lock->waiters = waiter->next;
633 
634 		// unblock thread
635 		thread_unblock(waiter->thread, B_ERROR);
636 	}
637 
638 	lock->name = NULL;
639 
640 	locker.Unlock();
641 
642 	free(name);
643 }
644 
645 
646 static inline status_t
647 mutex_lock_threads_locked(mutex* lock, InterruptsSpinLocker* locker)
648 {
649 #if KDEBUG
650 	return _mutex_lock(lock, locker);
651 #else
652 	if (atomic_add(&lock->count, -1) < 0)
653 		return _mutex_lock(lock, locker);
654 	return B_OK;
655 #endif
656 }
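

// Editor's note (illustrative): in !KDEBUG builds lock->count is the whole
// fast path -- 0 means free, and every locker subtracts 1. In the helper
// above, atomic_add(&lock->count, -1) returning 0 means the mutex was free
// and the caller now owns it without ever taking the spinlock; a negative
// return means it was already held, so the slow path _mutex_lock() enqueues
// the caller. The release side mirrors this, as mutex_switch_lock() below
// shows: it adds 1 back and calls _mutex_unlock() only when the old value was
// below -1, i.e. when someone is actually waiting.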
657 
658 
659 status_t
660 mutex_switch_lock(mutex* from, mutex* to)
661 {
662 	InterruptsSpinLocker locker(to->lock);
663 
664 #if !KDEBUG
665 	if (atomic_add(&from->count, 1) < -1)
666 #endif
667 		_mutex_unlock(from);
668 
669 	return mutex_lock_threads_locked(to, &locker);
670 }
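

// Editor's note: an illustrative sketch (kept out of the build) of what
// mutex_switch_lock() is for -- handing over from one mutex to another without
// a window in which neither is held, e.g. when narrowing from a coarse lock to
// a finer one. All names below are hypothetical.
#if 0
static status_t
example_switch(mutex* coarseLock, mutex* fineLock)
{
	mutex_lock(coarseLock);
	// ... discover that only fineLock is needed from here on ...

	// Releases coarseLock and acquires fineLock; on B_OK we hold fineLock.
	status_t error = mutex_switch_lock(coarseLock, fineLock);
	if (error != B_OK)
		return error;

	// ... work under fineLock ...
	mutex_unlock(fineLock);
	return B_OK;
}
#endif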
671 
672 
673 status_t
674 mutex_switch_from_read_lock(rw_lock* from, mutex* to)
675 {
676 	InterruptsSpinLocker locker(to->lock);
677 
678 #if KDEBUG_RW_LOCK_DEBUG
679 	_rw_lock_write_unlock(from);
680 #else
681 	int32 oldCount = atomic_add(&from->count, -1);
682 	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
683 		_rw_lock_read_unlock(from);
684 #endif
685 
686 	return mutex_lock_threads_locked(to, &locker);
687 }
688 
689 
690 status_t
691 _mutex_lock(mutex* lock, void* _locker)
692 {
693 #if KDEBUG
694 	if (!gKernelStartup && _locker == NULL && !are_interrupts_enabled()) {
695 		panic("_mutex_lock(): called with interrupts disabled for lock %p",
696 			lock);
697 	}
698 #endif
699 
700 	// Lock the mutex's spinlock only if the caller didn't pass us a locker.
701 	InterruptsSpinLocker* locker
702 		= reinterpret_cast<InterruptsSpinLocker*>(_locker);
703 
704 	InterruptsSpinLocker lockLocker;
705 	if (locker == NULL) {
706 		lockLocker.SetTo(lock->lock, false);
707 		locker = &lockLocker;
708 	}
709 
710 	// Might have been released after we decremented the count, but before
711 	// we acquired the spinlock.
712 #if KDEBUG
713 	if (lock->holder < 0) {
714 		lock->holder = thread_get_current_thread_id();
715 		return B_OK;
716 	} else if (lock->holder == thread_get_current_thread_id()) {
717 		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
718 			lock->holder);
719 	} else if (lock->holder == 0)
720 		panic("_mutex_lock(): using uninitialized lock %p", lock);
721 #else
722 	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
723 		lock->flags &= ~MUTEX_FLAG_RELEASED;
724 		return B_OK;
725 	}
726 #endif
727 
728 	// enqueue in waiter list
729 	mutex_waiter waiter;
730 	waiter.thread = thread_get_current_thread();
731 	waiter.next = NULL;
732 
733 	if (lock->waiters != NULL)
734 		lock->waiters->last->next = &waiter;
735 	else
736 		lock->waiters = &waiter;
737 
738 	lock->waiters->last = &waiter;
739 
740 	// block
741 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
742 	locker->Unlock();
743 
744 	status_t error = thread_block();
745 #if KDEBUG
746 	if (error == B_OK)
747 		atomic_set(&lock->holder, waiter.thread->id);
748 #endif
749 	return error;
750 }
751 
752 
753 void
754 _mutex_unlock(mutex* lock)
755 {
756 	InterruptsSpinLocker locker(lock->lock);
757 
758 #if KDEBUG
759 	if (thread_get_current_thread_id() != lock->holder) {
760 		panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to "
761 			"release mutex %p (current holder %" B_PRId32 ")\n",
762 			thread_get_current_thread_id(), lock, lock->holder);
763 		return;
764 	}
765 #else
766 	if (lock->ignore_unlock_count > 0) {
767 		lock->ignore_unlock_count--;
768 		return;
769 	}
770 #endif
771 
772 	mutex_waiter* waiter = lock->waiters;
773 	if (waiter != NULL) {
774 		// dequeue the first waiter
775 		lock->waiters = waiter->next;
776 		if (lock->waiters != NULL)
777 			lock->waiters->last = waiter->last;
778 #if KDEBUG
779 		thread_id unblockedThread = waiter->thread->id;
780 #endif
781 
782 		// unblock thread
783 		thread_unblock(waiter->thread, B_OK);
784 
785 #if KDEBUG
786 		// Set the holder to the unblocked thread right away. Besides
787 		// reflecting the actual situation, this avoids a race condition:
788 		// setting it to -1 here could make another locker think the lock
789 		// is not held by anyone.
790 		lock->holder = unblockedThread;
791 #endif
792 	} else {
793 		// We've acquired the spinlock before the locker that is going to wait.
794 		// Just mark the lock as released.
795 #if KDEBUG
796 		lock->holder = -1;
797 #else
798 		lock->flags |= MUTEX_FLAG_RELEASED;
799 #endif
800 	}
801 }
802 
803 
804 status_t
805 _mutex_trylock(mutex* lock)
806 {
807 #if KDEBUG
808 	InterruptsSpinLocker _(lock->lock);
809 
810 	if (lock->holder <= 0) {
811 		lock->holder = thread_get_current_thread_id();
812 		return B_OK;
813 	}
814 #endif
815 	return B_WOULD_BLOCK;
816 }
817 
818 
819 status_t
820 _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
821 {
822 #if KDEBUG
823 	if (!gKernelStartup && !are_interrupts_enabled()) {
824 		panic("_mutex_lock_with_timeout(): called with interrupts disabled "
825 			"for lock %p", lock);
826 	}
827 #endif
828 
829 	InterruptsSpinLocker locker(lock->lock);
830 
831 	// Might have been released after we decremented the count, but before
832 	// we acquired the spinlock.
833 #if KDEBUG
834 	if (lock->holder < 0) {
835 		lock->holder = thread_get_current_thread_id();
836 		return B_OK;
837 	} else if (lock->holder == thread_get_current_thread_id()) {
838 		panic("_mutex_lock_with_timeout(): double lock of %p by thread %"
839 			B_PRId32, lock, lock->holder);
840 	} else if (lock->holder == 0)
841 		panic("_mutex_lock_with_timeout(): using uninitialized lock %p", lock);
842 #else
843 	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
844 		lock->flags &= ~MUTEX_FLAG_RELEASED;
845 		return B_OK;
846 	}
847 #endif
848 
849 	// enqueue in waiter list
850 	mutex_waiter waiter;
851 	waiter.thread = thread_get_current_thread();
852 	waiter.next = NULL;
853 
854 	if (lock->waiters != NULL)
855 		lock->waiters->last->next = &waiter;
856 	else
857 		lock->waiters = &waiter;
858 
859 	lock->waiters->last = &waiter;
860 
861 	// block
862 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
863 	locker.Unlock();
864 
865 	status_t error = thread_block_with_timeout(timeoutFlags, timeout);
866 
867 	if (error == B_OK) {
868 #if KDEBUG
869 		lock->holder = waiter.thread->id;
870 #endif
871 	} else {
872 		locker.Lock();
873 
874 		// If the timeout occurred, we must remove our waiter structure from
875 		// the queue.
876 		mutex_waiter* previousWaiter = NULL;
877 		mutex_waiter* otherWaiter = lock->waiters;
878 		while (otherWaiter != NULL && otherWaiter != &waiter) {
879 			previousWaiter = otherWaiter;
880 			otherWaiter = otherWaiter->next;
881 		}
882 		if (otherWaiter == &waiter) {
883 			// the structure is still in the list -- dequeue
884 			if (&waiter == lock->waiters) {
885 				if (waiter.next != NULL)
886 					waiter.next->last = waiter.last;
887 				lock->waiters = waiter.next;
888 			} else {
889 				if (waiter.next == NULL)
890 					lock->waiters->last = previousWaiter;
891 				previousWaiter->next = waiter.next;
892 			}
893 
894 #if !KDEBUG
895 			// we need to fix the lock count
896 			if (atomic_add(&lock->count, 1) == -1) {
897 				// This means we were the only thread waiting for the lock and
898 				// the lock owner has already called atomic_add() in
899 				// mutex_unlock(). That is, we would probably get the lock very
900 				// soon (if the lock holder has a low priority, that might
901 				// actually take rather long, though), but the timeout already
902 				// occurred, so we don't try to wait. Just increment the ignore
903 				// unlock count.
904 				lock->ignore_unlock_count++;
905 			}
906 #endif
907 		}
908 	}
909 
910 	return error;
911 }
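

// Editor's note (illustrative): the role of ignore_unlock_count in the timeout
// path above (non-KDEBUG builds only). Example: thread A holds the mutex
// (count == -1) and thread B blocks with a timeout (count == -2). B times out,
// but A's mutex_unlock() has already added 1 back (count == -1) and, having
// seen a waiter, is about to call _mutex_unlock(). B's cleanup then restores
// the count itself -- its atomic_add() returns -1 -- so A's pending call has
// nothing left to do; bumping ignore_unlock_count makes _mutex_unlock()
// swallow exactly that one surplus release instead of waking another waiter
// or setting MUTEX_FLAG_RELEASED.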
912 
913 
914 static int
915 dump_mutex_info(int argc, char** argv)
916 {
917 	if (argc < 2) {
918 		print_debugger_command_usage(argv[0]);
919 		return 0;
920 	}
921 
922 	mutex* lock = (mutex*)parse_expression(argv[1]);
923 
924 	if (!IS_KERNEL_ADDRESS(lock)) {
925 		kprintf("invalid address: %p\n", lock);
926 		return 0;
927 	}
928 
929 	kprintf("mutex %p:\n", lock);
930 	kprintf("  name:            %s\n", lock->name);
931 	kprintf("  flags:           0x%x\n", lock->flags);
932 #if KDEBUG
933 	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
934 #else
935 	kprintf("  count:           %" B_PRId32 "\n", lock->count);
936 #endif
937 
938 	kprintf("  waiting threads:");
939 	mutex_waiter* waiter = lock->waiters;
940 	while (waiter != NULL) {
941 		kprintf(" %" B_PRId32, waiter->thread->id);
942 		waiter = waiter->next;
943 	}
944 	kputs("\n");
945 
946 	return 0;
947 }
948 
949 
950 // #pragma mark -
951 
952 
953 void
954 lock_debug_init()
955 {
956 	add_debugger_command_etc("mutex", &dump_mutex_info,
957 		"Dump info about a mutex",
958 		"<mutex>\n"
959 		"Prints info about the specified mutex.\n"
960 		"  <mutex>  - pointer to the mutex to print the info for.\n", 0);
961 	add_debugger_command_etc("rwlock", &dump_rw_lock_info,
962 		"Dump info about an rw lock",
963 		"<lock>\n"
964 		"Prints info about the specified rw lock.\n"
965 		"  <lock>  - pointer to the rw lock to print the info for.\n", 0);
966 }
967