xref: /haiku/src/system/kernel/locks/lock.cpp (revision db6fcb750a1afb5fdc752322972adf6044d3b4c4)
/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Mutex, recursive_lock, and rw_lock code */


#include <lock.h>

#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <listeners.h>
#include <scheduling_analysis.h>
#include <thread.h>
#include <util/AutoLock.h>


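// A mutex_waiter/rw_lock_waiter lives on the kernel stack of the thread that
// is about to block, so no allocation is needed. The waiters of a lock form a
// singly linked FIFO queue; the head element's "last" pointer caches the tail
// of the queue, so new waiters can be appended without walking the list.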
struct mutex_waiter {
	Thread*			thread;
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};

struct rw_lock_waiter {
	Thread*			thread;
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;
};

#define MUTEX_FLAG_RELEASED		0x2


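/*!	Returns the current recursion depth of \a lock if the calling thread
	holds it, or -1 otherwise.
*/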
int32
recursive_lock_get_recursion(recursive_lock *lock)
{
	if (RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id())
		return lock->recursion;

	return -1;
}


void
recursive_lock_init(recursive_lock *lock, const char *name)
{
	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_destroy(recursive_lock *lock)
{
	if (lock == NULL)
		return;

	mutex_destroy(&lock->lock);
}


status_t
recursive_lock_lock(recursive_lock *lock)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_lock: called with interrupts disabled for lock "
			"%p (\"%s\")\n", lock, lock->lock.name);
	}
#endif

	thread_id thread = thread_get_current_thread_id();

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		mutex_lock(&lock->lock);
#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = thread_get_current_thread_id();

#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_trylock: called with interrupts disabled for "
			"lock %p (\"%s\")\n", lock, lock->lock.name);
	}
#endif

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		status_t status = mutex_trylock(&lock->lock);
		if (status != B_OK)
			return status;

#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


void
recursive_lock_unlock(recursive_lock *lock)
{
	if (thread_get_current_thread_id() != RECURSIVE_LOCK_HOLDER(lock))
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if (--lock->recursion == 0) {
#if !KDEBUG
		lock->holder = -1;
#endif
		mutex_unlock(&lock->lock);
	}
}
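
// Illustrative usage sketch (not taken from an actual caller; "myLock" is a
// made-up name):
//
//	recursive_lock myLock;
//	recursive_lock_init(&myLock, "my lock");
//
//	recursive_lock_lock(&myLock);
//	recursive_lock_lock(&myLock);		// the same thread may lock again
//	recursive_lock_unlock(&myLock);
//	recursive_lock_unlock(&myLock);		// released by the last unlock
//
//	recursive_lock_destroy(&myLock);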


//	#pragma mark -


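/*!	Enqueues the calling thread as a reader or \a writer waiter of \a lock and
	blocks it. The lock's spinlock, held via \a locker, is released while the
	thread is blocked and reacquired before returning.
*/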
static status_t
rw_lock_wait(rw_lock* lock, bool writer, InterruptsSpinLocker& locker)
{
	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
	locker.Unlock();

	status_t result = thread_block();

	locker.Lock();
	return result;
}


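/*!	Unblocks waiters that may run now: either the first waiting writer, if
	there are no active or pending readers left, or all readers at the head of
	the queue up to (but not including) the next waiting writer.
	Returns RW_LOCK_WRITER_COUNT_BASE if a writer was unblocked, otherwise the
	number of unblocked readers. Called with the lock's spinlock held.
*/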
static int32
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there are any waiting threads at all and whether anyone
	// has the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder >= 0)
		return 0;

	// writer at head of queue?
	if (waiter->writer) {
		if (lock->active_readers > 0 || lock->pending_readers > 0)
			return 0;

		// dequeue writer
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->holder = waiter->thread->id;

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

		waiter->thread = NULL;
		return RW_LOCK_WRITER_COUNT_BASE;
	}

	// wake up one or more readers
	uint32 readerCount = 0;
	do {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		readerCount++;

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

		waiter->thread = NULL;
	} while ((waiter = lock->waiters) != NULL && !waiter->writer);

	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers += readerCount;

	return readerCount;
}


void
rw_lock_init(rw_lock* lock, const char* name)
{
	lock->name = name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = 0;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}


void
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}


void
rw_lock_destroy(rw_lock* lock)
{
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (lock->waiters != NULL && thread_get_current_thread_id()
			!= lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		thread_unblock(waiter->thread, B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}
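
// Illustrative usage sketch (not taken from an actual caller; "myLock" is a
// made-up name; the read/write lock/unlock calls are the public wrappers
// declared in lock.h):
//
//	rw_lock myLock;
//	rw_lock_init(&myLock, "my lock");
//
//	rw_lock_read_lock(&myLock);		// shared access
//	rw_lock_read_unlock(&myLock);
//
//	rw_lock_write_lock(&myLock);	// exclusive access
//	rw_lock_write_unlock(&myLock);
//
//	rw_lock_destroy(&myLock);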


#if !KDEBUG_RW_LOCK_DEBUG

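/*!	Slow path of the read lock: the caller has already incremented
	\c lock->count (via atomic_add()) and found a writer active or waiting,
	so the reader either consumes a pending-reader slot left behind by the
	last writer or has to block.
*/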
status_t
_rw_lock_read_lock(rw_lock* lock)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_rw_lock_read_lock(): called with interrupts disabled for lock %p",
			lock);
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// We might be the writer ourselves.
	if (lock->holder == thread_get_current_thread_id()) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait
	return rw_lock_wait(lock, false, locker);
}


status_t
_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_rw_lock_read_lock_with_timeout(): called with interrupts "
			"disabled for lock %p", lock);
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// We might be the writer ourselves.
	if (lock->holder == thread_get_current_thread_id()) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait

	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;
	waiter.writer = false;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
	locker.Unlock();

	status_t error = thread_block_with_timeout(timeoutFlags, timeout);
	if (error == B_OK || waiter.thread == NULL) {
		// We were unblocked successfully -- potentially our unblocker overtook
		// us after we already failed. In either case, we've got the lock, now.
		return B_OK;
	}

	locker.Lock();
	// We failed to get the lock -- dequeue from waiter list.
	rw_lock_waiter* previous = NULL;
	rw_lock_waiter* other = lock->waiters;
	while (other != &waiter) {
		previous = other;
		other = other->next;
	}

	if (previous == NULL) {
		// we are the first in line
		lock->waiters = waiter.next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter.last;
	} else {
		// one or more other waiters are before us in the queue
		previous->next = waiter.next;
		if (lock->waiters->last == &waiter)
			lock->waiters->last = previous;
	}

	// Decrement the count. ATM this is all we have to do. There's at least
	// one writer ahead of us -- otherwise the last writer would have unblocked
	// us (writers only manipulate the lock data with the lock's spinlock
	// held) -- so our leaving doesn't make a difference to the ones behind us
	// in the queue.
	atomic_add(&lock->count, -1);

	return error;
}


void
_rw_lock_read_unlock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	// If we're still holding the write lock or if there are other readers,
	// no-one can be woken up.
	if (lock->holder == thread_get_current_thread_id()) {
		ASSERT(lock->owner_count % RW_LOCK_WRITER_COUNT_BASE > 0);
		lock->owner_count--;
		return;
	}

	if (--lock->active_readers > 0)
		return;

	if (lock->active_readers < 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		lock->active_readers = 0;
		return;
	}

	rw_lock_unblock(lock);
}

#endif	// !KDEBUG_RW_LOCK_DEBUG


status_t
rw_lock_write_lock(rw_lock* lock)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("rw_lock_write_lock(): called with interrupts disabled for lock %p",
			lock);
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// If we're already the lock holder, we just need to increment the owner
	// count.
	thread_id thread = thread_get_current_thread_id();
	if (lock->holder == thread) {
		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// announce our claim
	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);

	if (oldCount == 0) {
		// No-one else held a read or write lock, so it's ours now.
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// We have to wait. If we're the first writer, note the current reader
	// count.
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers = oldCount - lock->pending_readers;

	status_t status = rw_lock_wait(lock, true, locker);
	if (status == B_OK) {
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
	}

	return status;
}


void
_rw_lock_write_unlock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	if (thread_get_current_thread_id() != lock->holder) {
		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
			lock);
		return;
	}

	ASSERT(lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE);

	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
		return;

	// We gave up our last write lock -- clean up and unblock waiters.
	int32 readerCount = lock->owner_count;
	lock->holder = -1;
	lock->owner_count = 0;

	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
	oldCount -= RW_LOCK_WRITER_COUNT_BASE;

	if (oldCount != 0) {
		// If writers are waiting, take over our reader count.
		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
			lock->active_readers = readerCount;
			rw_lock_unblock(lock);
		} else {
			// No waiting writer, but there are one or more readers. We will
			// unblock all waiting readers -- that's the easy part -- and must
			// also make sure that all readers that haven't entered the critical
			// section yet, won't start to wait. Otherwise a writer overtaking
			// such a reader will correctly start to wait, but the reader,
			// seeing the writer count > 0, would also start to wait. We set
			// pending_readers to the number of readers that are still expected
			// to enter the critical section.
			lock->pending_readers = oldCount - readerCount
				- rw_lock_unblock(lock);
		}
	}
}


static int
dump_rw_lock_info(int argc, char** argv)
{
	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	rw_lock* lock = (rw_lock*)parse_expression(argv[1]);

	if (!IS_KERNEL_ADDRESS(lock)) {
		kprintf("invalid address: %p\n", lock);
		return 0;
	}

	kprintf("rw lock %p:\n", lock);
	kprintf("  name:            %s\n", lock->name);
	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
	kprintf("  count:           %#" B_PRIx32 "\n", lock->count);
	kprintf("  active readers:  %d\n", lock->active_readers);
	kprintf("  pending readers: %d\n", lock->pending_readers);
	kprintf("  owner count:     %#" B_PRIx32 "\n", lock->owner_count);
	kprintf("  flags:           %#" B_PRIx32 "\n", lock->flags);

	kprintf("  waiting threads:");
	rw_lock_waiter* waiter = lock->waiters;
	while (waiter != NULL) {
		kprintf(" %" B_PRId32 "/%c", waiter->thread->id, waiter->writer ? 'w' : 'r');
		waiter = waiter->next;
	}
	kputs("\n");

	return 0;
}


// #pragma mark -


void
mutex_init(mutex* lock, const char *name)
{
	mutex_init_etc(lock, name, 0);
}


void
mutex_init_etc(mutex* lock, const char *name, uint32 flags)
{
	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
	lock->ignore_unlock_count = 0;
#endif
	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
}


void
mutex_destroy(mutex* lock)
{
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (lock->holder != -1 && thread_get_current_thread_id() != lock->holder) {
		panic("mutex_destroy(): the lock (%p) is held by %" B_PRId32 ", not "
			"by the caller", lock, lock->holder);
		if (_mutex_lock(lock, &locker) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		thread_unblock(waiter->thread, B_ERROR);
	}

	lock->name = NULL;
	lock->flags = 0;
#if KDEBUG
	lock->holder = 0;
#else
	lock->count = INT16_MIN;
#endif

	locker.Unlock();

	free(name);
}
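
// Illustrative usage sketch (not taken from an actual caller; "myLock" is a
// made-up name; mutex_lock()/mutex_unlock() are the public wrappers declared
// in lock.h):
//
//	mutex myLock;
//	mutex_init(&myLock, "my lock");
//
//	mutex_lock(&myLock);
//	// ... critical section ...
//	mutex_unlock(&myLock);
//
//	mutex_destroy(&myLock);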


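/*!	Acquires \a lock while its spinlock is already held via \a locker.
	Used by the mutex_switch_*() functions so that releasing one lock and
	starting to acquire the next one happen under the target lock's spinlock.
*/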
static inline status_t
mutex_lock_threads_locked(mutex* lock, InterruptsSpinLocker* locker)
{
#if KDEBUG
	return _mutex_lock(lock, locker);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock(lock, locker);
	return B_OK;
#endif
}


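/*!	Unlocks \a from and then acquires \a to, such that releasing \a from and
	starting to wait for \a to happen as one step with respect to other
	threads: \a to's spinlock is held across the switch.
*/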
status_t
mutex_switch_lock(mutex* from, mutex* to)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("mutex_switch_lock(): called with interrupts disabled "
			"for locks %p, %p", from, to);
	}
#endif

	InterruptsSpinLocker locker(to->lock);

	mutex_unlock(from);

	return mutex_lock_threads_locked(to, &locker);
}


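/*!	Makes \a thread the holder of \a lock, which the calling thread must
	currently hold. Only has an observable effect in KDEBUG builds, where the
	holder is tracked; the new holder is then expected to unlock the mutex.
*/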
void
mutex_transfer_lock(mutex* lock, thread_id thread)
{
#if KDEBUG
	if (thread_get_current_thread_id() != lock->holder)
		panic("mutex_transfer_lock(): current thread is not the lock holder!");
	lock->holder = thread;
#endif
}


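/*!	Variant of mutex_switch_lock(): gives up a read lock on \a from and then
	acquires \a to, again with \a to's spinlock held across the switch.
*/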
status_t
mutex_switch_from_read_lock(rw_lock* from, mutex* to)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("mutex_switch_from_read_lock(): called with interrupts disabled "
			"for locks %p, %p", from, to);
	}
#endif

	InterruptsSpinLocker locker(to->lock);

#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_write_unlock(from);
#else
	int32 oldCount = atomic_add(&from->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock(from);
#endif

	return mutex_lock_threads_locked(to, &locker);
}


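/*!	Slow path of mutex_lock(): the caller could not take the lock without
	blocking (in non-KDEBUG builds the fast path has already decremented
	\c lock->count). \a _locker may point to an InterruptsSpinLocker that
	already holds the lock's spinlock; otherwise it must be NULL and the
	spinlock is acquired here.
*/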
status_t
_mutex_lock(mutex* lock, void* _locker)
{
#if KDEBUG
	if (!gKernelStartup && _locker == NULL && !are_interrupts_enabled()) {
		panic("_mutex_lock(): called with interrupts disabled for lock %p",
			lock);
	}
#endif

	// Acquire the spinlock ourselves only if the caller didn't pass one in.
	InterruptsSpinLocker* locker
		= reinterpret_cast<InterruptsSpinLocker*>(_locker);

	InterruptsSpinLocker lockLocker;
	if (locker == NULL) {
		lockLocker.SetTo(lock->lock, false);
		locker = &lockLocker;
	}

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	locker->Unlock();

	status_t error = thread_block();
#if KDEBUG
	if (error == B_OK) {
		ASSERT(lock->holder == waiter.thread->id);
	}
#endif
	return error;
}


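/*!	Slow path of mutex_unlock(). If there is a waiter, it is dequeued and
	unblocked, which hands the mutex over to it directly; otherwise the mutex
	is simply marked as released.
*/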
void
_mutex_unlock(mutex* lock)
{
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (thread_get_current_thread_id() != lock->holder) {
		panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to "
			"release mutex %p (current holder %" B_PRId32 ")\n",
			thread_get_current_thread_id(), lock, lock->holder);
		return;
	}
#else
	if (lock->ignore_unlock_count > 0) {
		lock->ignore_unlock_count--;
		return;
	}
#endif

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

#if KDEBUG
		// Already set the holder to the unblocked thread. Besides that this
		// actually reflects the current situation, setting it to -1 would
		// cause a race condition, since another locker could think the lock
		// is not held by anyone.
		lock->holder = waiter->thread->id;
#endif

		// unblock thread
		thread_unblock(waiter->thread, B_OK);
	} else {
		// Nobody is waiting to acquire this lock. Just mark it as released.
#if KDEBUG
		lock->holder = -1;
#else
		lock->flags |= MUTEX_FLAG_RELEASED;
#endif
	}
}


status_t
_mutex_trylock(mutex* lock)
{
#if KDEBUG
	InterruptsSpinLocker _(lock->lock);

	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == 0)
		panic("_mutex_trylock(): using uninitialized lock %p", lock);
	return B_WOULD_BLOCK;
#else
	return mutex_trylock(lock);
#endif
}


status_t
_mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_mutex_lock_with_timeout(): called with interrupts disabled "
			"for lock %p", lock);
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock_with_timeout(): double lock of %p by thread %"
			B_PRId32, lock, lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock_with_timeout(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	locker.Unlock();

	status_t error = thread_block_with_timeout(timeoutFlags, timeout);

	if (error == B_OK) {
#if KDEBUG
		ASSERT(lock->holder == waiter.thread->id);
#endif
	} else {
		locker.Lock();

		// If the timeout occurred, we must remove our waiter structure from
		// the queue.
		mutex_waiter* previousWaiter = NULL;
		mutex_waiter* otherWaiter = lock->waiters;
		while (otherWaiter != NULL && otherWaiter != &waiter) {
			previousWaiter = otherWaiter;
			otherWaiter = otherWaiter->next;
		}
		if (otherWaiter == &waiter) {
			// the structure is still in the list -- dequeue
			if (&waiter == lock->waiters) {
				if (waiter.next != NULL)
					waiter.next->last = waiter.last;
				lock->waiters = waiter.next;
			} else {
				if (waiter.next == NULL)
					lock->waiters->last = previousWaiter;
				previousWaiter->next = waiter.next;
			}

#if !KDEBUG
			// we need to fix the lock count
			if (atomic_add(&lock->count, 1) == -1) {
				// This means we were the only thread waiting for the lock and
				// the lock owner has already called atomic_add() in
				// mutex_unlock(). That is we probably would get the lock very
				// soon (if the lock holder has a low priority, that might
				// actually take rather long, though), but the timeout already
				// occurred, so we don't try to wait. Just increment the ignore
				// unlock count.
				lock->ignore_unlock_count++;
			}
#endif
		}
	}

	return error;
}


static int
dump_mutex_info(int argc, char** argv)
{
	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	mutex* lock = (mutex*)parse_expression(argv[1]);

	if (!IS_KERNEL_ADDRESS(lock)) {
		kprintf("invalid address: %p\n", lock);
		return 0;
	}

	kprintf("mutex %p:\n", lock);
	kprintf("  name:            %s\n", lock->name);
	kprintf("  flags:           0x%x\n", lock->flags);
#if KDEBUG
	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
#else
	kprintf("  count:           %" B_PRId32 "\n", lock->count);
#endif

	kprintf("  waiting threads:");
	mutex_waiter* waiter = lock->waiters;
	while (waiter != NULL) {
		kprintf(" %" B_PRId32, waiter->thread->id);
		waiter = waiter->next;
	}
	kputs("\n");

	return 0;
}


// #pragma mark -


void
lock_debug_init()
{
	add_debugger_command_etc("mutex", &dump_mutex_info,
		"Dump info about a mutex",
		"<mutex>\n"
		"Prints info about the specified mutex.\n"
		"  <mutex>  - pointer to the mutex to print the info for.\n", 0);
	add_debugger_command_etc("rwlock", &dump_rw_lock_info,
		"Dump info about an rw lock",
		"<lock>\n"
		"Prints info about the specified rw lock.\n"
		"  <lock>  - pointer to the rw lock to print the info for.\n", 0);
}
1005