xref: /haiku/src/system/kernel/locks/lock.cpp (revision 830f67ef991407f287dbc1238aa5f5906d90c991)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*! Mutex, recursive_lock, and rw_lock code */
12 
13 
14 #include <lock.h>
15 
16 #include <stdlib.h>
17 #include <string.h>
18 
19 #include <OS.h>
20 
21 #include <debug.h>
22 #include <int.h>
23 #include <kernel.h>
24 #include <listeners.h>
25 #include <scheduling_analysis.h>
26 #include <thread.h>
27 #include <util/AutoLock.h>
28 
29 
30 struct mutex_waiter {
31 	Thread*			thread;
32 	mutex_waiter*	next;		// next in queue
33 	mutex_waiter*	last;		// last in queue (valid for the first in queue)
34 };
35 
36 struct rw_lock_waiter {
37 	Thread*			thread;
38 	rw_lock_waiter*	next;		// next in queue
39 	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
40 	bool			writer;
41 };
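
// Both waiter queues above are singly linked lists of waiter structures that
// live on the stack of the blocked thread. Only the head element's "last"
// pointer is kept up to date, which still allows O(1) appends; a minimal
// sketch of the enqueue step used throughout this file:
//
//	if (lock->waiters != NULL)
//		lock->waiters->last->next = &waiter;	// append behind the current tail
//	else
//		lock->waiters = &waiter;				// the queue was empty
//	lock->waiters->last = &waiter;				// the head tracks the new tail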
42 
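// MUTEX_FLAG_RELEASED is only used in non-KDEBUG builds: _mutex_unlock() sets
// it when the mutex is released while no thread is queued yet, so that a
// contender that has already decremented the count in the inline fast path,
// but has not enqueued itself, can consume the flag in _mutex_lock() and take
// over the lock without blocking.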
43 #define MUTEX_FLAG_RELEASED		0x2
44 
45 
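// A minimal usage sketch of the recursive_lock API defined below; the nested
// lock/unlock pair is what distinguishes it from a plain mutex:
//
//	recursive_lock lock;
//	recursive_lock_init(&lock, "example lock");
//	recursive_lock_lock(&lock);
//	recursive_lock_lock(&lock);		// the holder may re-enter
//	recursive_lock_unlock(&lock);
//	recursive_lock_unlock(&lock);	// the last unlock releases the mutex
//	recursive_lock_destroy(&lock);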
46 int32
47 recursive_lock_get_recursion(recursive_lock *lock)
48 {
49 	if (RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id())
50 		return lock->recursion;
51 
52 	return -1;
53 }
54 
55 
56 void
57 recursive_lock_init(recursive_lock *lock, const char *name)
58 {
59 	recursive_lock_init_etc(lock, name, 0);
60 }
61 
62 
63 void
64 recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
65 {
66 	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
67 #if !KDEBUG
68 	lock->holder = -1;
69 #endif
70 	lock->recursion = 0;
71 }
72 
73 
74 void
75 recursive_lock_destroy(recursive_lock *lock)
76 {
77 	if (lock == NULL)
78 		return;
79 
80 	mutex_destroy(&lock->lock);
81 }
82 
83 
84 status_t
85 recursive_lock_lock(recursive_lock *lock)
86 {
87 #if KDEBUG
88 	if (!gKernelStartup && !are_interrupts_enabled()) {
89 		panic("recursive_lock_lock: called with interrupts disabled for lock "
90 			"%p (\"%s\")\n", lock, lock->lock.name);
91 	}
92 #endif
93 
94 	thread_id thread = thread_get_current_thread_id();
95 
96 	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
97 		mutex_lock(&lock->lock);
98 #if !KDEBUG
99 		lock->holder = thread;
100 #endif
101 	}
102 
103 	lock->recursion++;
104 	return B_OK;
105 }
106 
107 
108 status_t
109 recursive_lock_trylock(recursive_lock *lock)
110 {
111 	thread_id thread = thread_get_current_thread_id();
112 
113 #if KDEBUG
114 	if (!gKernelStartup && !are_interrupts_enabled()) {
115 		panic("recursive_lock_trylock: called with interrupts disabled for "
116 			"lock %p (\"%s\")\n", lock, lock->lock.name);
117 	}
118 #endif
119 
120 	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
121 		status_t status = mutex_trylock(&lock->lock);
122 		if (status != B_OK)
123 			return status;
124 
125 #if !KDEBUG
126 		lock->holder = thread;
127 #endif
128 	}
129 
130 	lock->recursion++;
131 	return B_OK;
132 }
133 
134 
135 void
136 recursive_lock_unlock(recursive_lock *lock)
137 {
138 	if (thread_get_current_thread_id() != RECURSIVE_LOCK_HOLDER(lock))
139 		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);
140 
141 	if (--lock->recursion == 0) {
142 #if !KDEBUG
143 		lock->holder = -1;
144 #endif
145 		mutex_unlock(&lock->lock);
146 	}
147 }
148 
149 
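/*!	Unlocks \a from by one recursion level and locks \a to. If this releases
	\a from completely, the underlying mutexes are handed over via
	mutex_switch_lock(); otherwise \a to is simply locked.
*/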
150 status_t
151 recursive_lock_switch_lock(recursive_lock* from, recursive_lock* to)
152 {
153 #if KDEBUG
154 	if (!gKernelStartup && !are_interrupts_enabled()) {
155 		panic("recursive_lock_switch_lock(): called with interrupts "
156 			"disabled for locks %p, %p", from, to);
157 	}
158 #endif
159 
160 	if (--from->recursion > 0)
161 		return recursive_lock_lock(to);
162 
163 #if !KDEBUG
164 	from->holder = -1;
165 #endif
166 
167 	thread_id thread = thread_get_current_thread_id();
168 
169 	if (thread == RECURSIVE_LOCK_HOLDER(to)) {
170 		to->recursion++;
171 		mutex_unlock(&from->lock);
172 		return B_OK;
173 	}
174 
175 	status_t status = mutex_switch_lock(&from->lock, &to->lock);
176 	if (status != B_OK) {
177 		from->recursion++;
178 #if !KDEBUG
179 		from->holder = thread;
180 #endif
181 		return status;
182 	}
183 
184 #if !KDEBUG
185 	to->holder = thread;
186 #endif
187 	to->recursion++;
188 	return B_OK;
189 }
190 
191 
192 status_t
193 recursive_lock_switch_from_mutex(mutex* from, recursive_lock* to)
194 {
195 #if KDEBUG
196 	if (!gKernelStartup && !are_interrupts_enabled()) {
197 		panic("recursive_lock_switch_from_mutex(): called with interrupts "
198 			"disabled for locks %p, %p", from, to);
199 	}
200 #endif
201 
202 	thread_id thread = thread_get_current_thread_id();
203 
204 	if (thread == RECURSIVE_LOCK_HOLDER(to)) {
205 		to->recursion++;
206 		mutex_unlock(from);
207 		return B_OK;
208 	}
209 
210 	status_t status = mutex_switch_lock(from, &to->lock);
211 	if (status != B_OK)
212 		return status;
213 
214 #if !KDEBUG
215 	to->holder = thread;
216 #endif
217 	to->recursion++;
218 	return B_OK;
219 }
220 
221 
222 status_t
223 recursive_lock_switch_from_read_lock(rw_lock* from, recursive_lock* to)
224 {
225 #if KDEBUG
226 	if (!gKernelStartup && !are_interrupts_enabled()) {
227 		panic("recursive_lock_switch_from_read_lock(): called with interrupts "
228 			"disabled for locks %p, %p", from, to);
229 	}
230 #endif
231 
232 	thread_id thread = thread_get_current_thread_id();
233 
234 	if (thread != RECURSIVE_LOCK_HOLDER(to)) {
235 		status_t status = mutex_switch_from_read_lock(from, &to->lock);
236 		if (status != B_OK)
237 			return status;
238 
239 #if !KDEBUG
240 		to->holder = thread;
241 #endif
242 	} else {
243 #if KDEBUG_RW_LOCK_DEBUG
244 		_rw_lock_write_unlock(from);
245 #else
246 		int32 oldCount = atomic_add(&from->count, -1);
247 		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
248 			_rw_lock_read_unlock(from);
249 #endif
250 	}
251 
252 	to->recursion++;
253 	return B_OK;
254 }
255 
256 
257 static int
258 dump_recursive_lock_info(int argc, char** argv)
259 {
260 	if (argc < 2) {
261 		print_debugger_command_usage(argv[0]);
262 		return 0;
263 	}
264 
265 	recursive_lock* lock = (recursive_lock*)parse_expression(argv[1]);
266 
267 	if (!IS_KERNEL_ADDRESS(lock)) {
268 		kprintf("invalid address: %p\n", lock);
269 		return 0;
270 	}
271 
272 	kprintf("recrusive_lock %p:\n", lock);
273 	kprintf("  mutex:           %p\n", &lock->lock);
274 	kprintf("  name:            %s\n", lock->lock.name);
275 	kprintf("  flags:           0x%x\n", lock->lock.flags);
276 #if KDEBUG
277 	kprintf("  holder:          %" B_PRId32 "\n", lock->lock.holder);
278 #else
279 	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
280 #endif
281 	kprintf("  recursion:       %d\n", lock->recursion);
282 
283 	kprintf("  waiting threads:");
284 	mutex_waiter* waiter = lock->lock.waiters;
285 	while (waiter != NULL) {
286 		kprintf(" %" B_PRId32, waiter->thread->id);
287 		waiter = waiter->next;
288 	}
289 	kputs("\n");
290 
291 	return 0;
292 }
293 
294 
295 //	#pragma mark -
296 
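// The rw_lock "count" field encodes readers and writers in a single atomic
// value: every reader contributes 1 (the increment happens in the read-lock
// fast path in <lock.h>, before the slow-path functions below are entered),
// while every writer adds RW_LOCK_WRITER_COUNT_BASE (see rw_lock_write_lock()).
// Hence count >= RW_LOCK_WRITER_COUNT_BASE means that a writer holds or waits
// for the lock. The waiter queue and the bookkeeping fields (holder,
// active_readers, pending_readers, owner_count) are protected by the lock's
// spinlock.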
297 
298 static status_t
299 rw_lock_wait(rw_lock* lock, bool writer, InterruptsSpinLocker& locker)
300 {
301 	// enqueue in waiter list
302 	rw_lock_waiter waiter;
303 	waiter.thread = thread_get_current_thread();
304 	waiter.next = NULL;
305 	waiter.writer = writer;
306 
307 	if (lock->waiters != NULL)
308 		lock->waiters->last->next = &waiter;
309 	else
310 		lock->waiters = &waiter;
311 
312 	lock->waiters->last = &waiter;
313 
314 	// block
315 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
316 	locker.Unlock();
317 
318 	status_t result = thread_block();
319 
320 	locker.Lock();
321 	return result;
322 }
323 
324 
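/*!	Wakes up queued waiters after a read or write lock has been released.
	If a writer is first in the queue and no readers are active or pending,
	it is woken and becomes the holder; otherwise all readers up to the next
	queued writer are woken. Returns RW_LOCK_WRITER_COUNT_BASE if a writer was
	woken, the number of woken readers otherwise, and 0 if nothing could be
	done. The caller must hold the lock's spinlock.
*/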
325 static int32
326 rw_lock_unblock(rw_lock* lock)
327 {
328 	// Check whether there are any waiting threads at all and whether anyone
329 	// has the write lock.
330 	rw_lock_waiter* waiter = lock->waiters;
331 	if (waiter == NULL || lock->holder >= 0)
332 		return 0;
333 
334 	// writer at head of queue?
335 	if (waiter->writer) {
336 		if (lock->active_readers > 0 || lock->pending_readers > 0)
337 			return 0;
338 
339 		// dequeue writer
340 		lock->waiters = waiter->next;
341 		if (lock->waiters != NULL)
342 			lock->waiters->last = waiter->last;
343 
344 		lock->holder = waiter->thread->id;
345 
346 		// unblock thread
347 		thread_unblock(waiter->thread, B_OK);
348 
349 		waiter->thread = NULL;
350 		return RW_LOCK_WRITER_COUNT_BASE;
351 	}
352 
353 	// wake up one or more readers
354 	uint32 readerCount = 0;
355 	do {
356 		// dequeue reader
357 		lock->waiters = waiter->next;
358 		if (lock->waiters != NULL)
359 			lock->waiters->last = waiter->last;
360 
361 		readerCount++;
362 
363 		// unblock thread
364 		thread_unblock(waiter->thread, B_OK);
365 
366 		waiter->thread = NULL;
367 	} while ((waiter = lock->waiters) != NULL && !waiter->writer);
368 
369 	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
370 		lock->active_readers += readerCount;
371 
372 	return readerCount;
373 }
374 
375 
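// A minimal rw_lock usage sketch; rw_lock_read_lock(), rw_lock_read_unlock()
// and rw_lock_write_unlock() are the wrappers declared in <lock.h>, which end
// up in the slow-path functions of this file when necessary:
//
//	rw_lock lock;
//	rw_lock_init(&lock, "example rw lock");
//	rw_lock_read_lock(&lock);		// shared access
//	rw_lock_read_unlock(&lock);
//	rw_lock_write_lock(&lock);		// exclusive access
//	rw_lock_write_unlock(&lock);
//	rw_lock_destroy(&lock);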
376 void
377 rw_lock_init(rw_lock* lock, const char* name)
378 {
379 	lock->name = name;
380 	lock->waiters = NULL;
381 	B_INITIALIZE_SPINLOCK(&lock->lock);
382 	lock->holder = -1;
383 	lock->count = 0;
384 	lock->owner_count = 0;
385 	lock->active_readers = 0;
386 	lock->pending_readers = 0;
387 	lock->flags = 0;
388 
389 	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
390 	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
391 }
392 
393 
394 void
395 rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
396 {
397 	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
398 	lock->waiters = NULL;
399 	B_INITIALIZE_SPINLOCK(&lock->lock);
400 	lock->holder = -1;
401 	lock->count = 0;
402 	lock->owner_count = 0;
403 	lock->active_readers = 0;
404 	lock->pending_readers = 0;
405 	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;
406 
407 	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
408 	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
409 }
410 
411 
412 void
413 rw_lock_destroy(rw_lock* lock)
414 {
415 	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
416 		? (char*)lock->name : NULL;
417 
418 	// unblock all waiters
419 	InterruptsSpinLocker locker(lock->lock);
420 
421 #if KDEBUG
422 	if (lock->waiters != NULL && thread_get_current_thread_id()
423 			!= lock->holder) {
424 		panic("rw_lock_destroy(): there are blocking threads, but the caller "
425 			"doesn't hold the write lock (%p)", lock);
426 
427 		locker.Unlock();
428 		if (rw_lock_write_lock(lock) != B_OK)
429 			return;
430 		locker.Lock();
431 	}
432 #endif
433 
434 	while (rw_lock_waiter* waiter = lock->waiters) {
435 		// dequeue
436 		lock->waiters = waiter->next;
437 
438 		// unblock thread
439 		thread_unblock(waiter->thread, B_ERROR);
440 	}
441 
442 	lock->name = NULL;
443 
444 	locker.Unlock();
445 
446 	free(name);
447 }
448 
449 
450 #if !KDEBUG_RW_LOCK_DEBUG
451 
452 status_t
453 _rw_lock_read_lock(rw_lock* lock)
454 {
455 #if KDEBUG
456 	if (!gKernelStartup && !are_interrupts_enabled()) {
457 		panic("_rw_lock_read_lock(): called with interrupts disabled for lock %p",
458 			lock);
459 	}
460 #endif
461 
462 	InterruptsSpinLocker locker(lock->lock);
463 
464 	// We might be the writer ourselves.
465 	if (lock->holder == thread_get_current_thread_id()) {
466 		lock->owner_count++;
467 		return B_OK;
468 	}
469 
470 	// The writer that originally had the lock when we called atomic_add() might
471 	// already have gone and another writer could have overtaken us. In this
472 	// case the original writer set pending_readers, so we know that we don't
473 	// have to wait.
474 	if (lock->pending_readers > 0) {
475 		lock->pending_readers--;
476 
477 		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
478 			lock->active_readers++;
479 
480 		return B_OK;
481 	}
482 
483 	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);
484 
485 	// we need to wait
486 	return rw_lock_wait(lock, false, locker);
487 }
488 
489 
490 status_t
491 _rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
492 	bigtime_t timeout)
493 {
494 #if KDEBUG
495 	if (!gKernelStartup && !are_interrupts_enabled()) {
496 		panic("_rw_lock_read_lock_with_timeout(): called with interrupts "
497 			"disabled for lock %p", lock);
498 	}
499 #endif
500 
501 	InterruptsSpinLocker locker(lock->lock);
502 
503 	// We might be the writer ourselves.
504 	if (lock->holder == thread_get_current_thread_id()) {
505 		lock->owner_count++;
506 		return B_OK;
507 	}
508 
509 	// The writer that originally had the lock when we called atomic_add() might
510 	// already have gone and another writer could have overtaken us. In this
511 	// case the original writer set pending_readers, so we know that we don't
512 	// have to wait.
513 	if (lock->pending_readers > 0) {
514 		lock->pending_readers--;
515 
516 		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
517 			lock->active_readers++;
518 
519 		return B_OK;
520 	}
521 
522 	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);
523 
524 	// we need to wait
525 
526 	// enqueue in waiter list
527 	rw_lock_waiter waiter;
528 	waiter.thread = thread_get_current_thread();
529 	waiter.next = NULL;
530 	waiter.writer = false;
531 
532 	if (lock->waiters != NULL)
533 		lock->waiters->last->next = &waiter;
534 	else
535 		lock->waiters = &waiter;
536 
537 	lock->waiters->last = &waiter;
538 
539 	// block
540 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
541 	locker.Unlock();
542 
543 	status_t error = thread_block_with_timeout(timeoutFlags, timeout);
544 	if (error == B_OK || waiter.thread == NULL) {
545 		// We were unblocked successfully -- potentially our unblocker overtook
546 		// us after we already failed. In either case, we've got the lock, now.
547 		return B_OK;
548 	}
549 
550 	locker.Lock();
551 	// We failed to get the lock -- dequeue from waiter list.
552 	rw_lock_waiter* previous = NULL;
553 	rw_lock_waiter* other = lock->waiters;
554 	while (other != &waiter) {
555 		previous = other;
556 		other = other->next;
557 	}
558 
559 	if (previous == NULL) {
560 		// we are the first in line
561 		lock->waiters = waiter.next;
562 		if (lock->waiters != NULL)
563 			lock->waiters->last = waiter.last;
564 	} else {
565 		// one or more other waiters are before us in the queue
566 		previous->next = waiter.next;
567 		if (lock->waiters->last == &waiter)
568 			lock->waiters->last = previous;
569 	}
570 
571 	// Decrement the count. ATM this is all we have to do. There's at least
572 	// one writer ahead of us -- otherwise the last writer would have unblocked
573 	// us (writers only manipulate the lock data with the lock's spinlock being
574 	// held) -- so our leaving doesn't make a difference to the ones behind us
575 	// in the queue.
576 	atomic_add(&lock->count, -1);
577 
578 	return error;
579 }
580 
581 
582 void
583 _rw_lock_read_unlock(rw_lock* lock)
584 {
585 	InterruptsSpinLocker locker(lock->lock);
586 
587 	// If we're still holding the write lock or if there are other readers,
588 	// no-one can be woken up.
589 	if (lock->holder == thread_get_current_thread_id()) {
590 		ASSERT(lock->owner_count % RW_LOCK_WRITER_COUNT_BASE > 0);
591 		lock->owner_count--;
592 		return;
593 	}
594 
595 	if (--lock->active_readers > 0)
596 		return;
597 
598 	if (lock->active_readers < 0) {
599 		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
600 		lock->active_readers = 0;
601 		return;
602 	}
603 
604 	rw_lock_unblock(lock);
605 }
606 
607 #endif	// !KDEBUG_RW_LOCK_DEBUG
608 
609 
610 status_t
611 rw_lock_write_lock(rw_lock* lock)
612 {
613 #if KDEBUG
614 	if (!gKernelStartup && !are_interrupts_enabled()) {
615 		panic("rw_lock_write_lock(): called with interrupts disabled for lock %p",
616 			lock);
617 	}
618 #endif
619 
620 	InterruptsSpinLocker locker(lock->lock);
621 
622 	// If we're already the lock holder, we just need to increment the owner
623 	// count.
624 	thread_id thread = thread_get_current_thread_id();
625 	if (lock->holder == thread) {
626 		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
627 		return B_OK;
628 	}
629 
630 	// announce our claim
631 	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);
632 
633 	if (oldCount == 0) {
634 		// No-one else held a read or write lock, so it's ours now.
635 		lock->holder = thread;
636 		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
637 		return B_OK;
638 	}
639 
640 	// We have to wait. If we're the first writer, note the current reader
641 	// count.
642 	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
643 		lock->active_readers = oldCount - lock->pending_readers;
644 
645 	status_t status = rw_lock_wait(lock, true, locker);
646 	if (status == B_OK) {
647 		lock->holder = thread;
648 		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
649 	}
650 
651 	return status;
652 }
653 
654 
655 void
656 _rw_lock_write_unlock(rw_lock* lock)
657 {
658 	InterruptsSpinLocker locker(lock->lock);
659 
660 	if (thread_get_current_thread_id() != lock->holder) {
661 		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
662 			lock);
663 		return;
664 	}
665 
666 	ASSERT(lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE);
667 
668 	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
669 	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
670 		return;
671 
672 	// We gave up our last write lock -- clean up and unblock waiters.
673 	int32 readerCount = lock->owner_count;
674 	lock->holder = -1;
675 	lock->owner_count = 0;
676 
677 	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
678 	oldCount -= RW_LOCK_WRITER_COUNT_BASE;
679 
680 	if (oldCount != 0) {
681 		// If writers are waiting, take over our reader count.
682 		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
683 			lock->active_readers = readerCount;
684 			rw_lock_unblock(lock);
685 		} else {
686 			// No waiting writer, but there are one or more readers. We will
687 			// unblock all waiting readers -- that's the easy part -- and must
688 			// also make sure that all readers that haven't entered the critical
689 			// section yet won't start to wait. Otherwise a writer overtaking
690 			// such a reader will correctly start to wait, but the reader,
691 			// seeing the writer count > 0, would also start to wait. We set
692 			// pending_readers to the number of readers that are still expected
693 			// to enter the critical section.
694 			lock->pending_readers = oldCount - readerCount
695 				- rw_lock_unblock(lock);
696 		}
697 	}
698 }
699 
700 
701 static int
702 dump_rw_lock_info(int argc, char** argv)
703 {
704 	if (argc < 2) {
705 		print_debugger_command_usage(argv[0]);
706 		return 0;
707 	}
708 
709 	rw_lock* lock = (rw_lock*)parse_expression(argv[1]);
710 
711 	if (!IS_KERNEL_ADDRESS(lock)) {
712 		kprintf("invalid address: %p\n", lock);
713 		return 0;
714 	}
715 
716 	kprintf("rw lock %p:\n", lock);
717 	kprintf("  name:            %s\n", lock->name);
718 	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
719 	kprintf("  count:           %#" B_PRIx32 "\n", lock->count);
720 	kprintf("  active readers   %d\n", lock->active_readers);
721 	kprintf("  pending readers  %d\n", lock->pending_readers);
722 	kprintf("  owner count:     %#" B_PRIx32 "\n", lock->owner_count);
723 	kprintf("  flags:           %#" B_PRIx32 "\n", lock->flags);
724 
725 	kprintf("  waiting threads:");
726 	rw_lock_waiter* waiter = lock->waiters;
727 	while (waiter != NULL) {
728 		kprintf(" %" B_PRId32 "/%c", waiter->thread->id, waiter->writer ? 'w' : 'r');
729 		waiter = waiter->next;
730 	}
731 	kputs("\n");
732 
733 	return 0;
734 }
735 
736 
737 // #pragma mark -
738 
739 
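// A minimal mutex usage sketch; mutex_lock() and mutex_unlock() come from
// <lock.h> and, in non-KDEBUG builds, only enter the slow paths below when
// the atomically maintained count indicates contention:
//
//	mutex lock;
//	mutex_init(&lock, "example mutex");
//	mutex_lock(&lock);
//	// ... critical section ...
//	mutex_unlock(&lock);
//	mutex_destroy(&lock);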
740 void
741 mutex_init(mutex* lock, const char *name)
742 {
743 	mutex_init_etc(lock, name, 0);
744 }
745 
746 
747 void
748 mutex_init_etc(mutex* lock, const char *name, uint32 flags)
749 {
750 	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
751 	lock->waiters = NULL;
752 	B_INITIALIZE_SPINLOCK(&lock->lock);
753 #if KDEBUG
754 	lock->holder = -1;
755 #else
756 	lock->count = 0;
757 #endif
758 	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
759 
760 	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
761 	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
762 }
763 
764 
765 void
766 mutex_destroy(mutex* lock)
767 {
768 	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
769 		? (char*)lock->name : NULL;
770 
771 	// unblock all waiters
772 	InterruptsSpinLocker locker(lock->lock);
773 
774 #if KDEBUG
775 	if (lock->holder != -1 && thread_get_current_thread_id() != lock->holder) {
776 		panic("mutex_destroy(): the lock (%p) is held by %" B_PRId32 ", not "
777 			"by the caller", lock, lock->holder);
778 		if (_mutex_lock(lock, &locker) != B_OK)
779 			return;
780 		locker.Lock();
781 	}
782 #endif
783 
784 	while (mutex_waiter* waiter = lock->waiters) {
785 		// dequeue
786 		lock->waiters = waiter->next;
787 
788 		// unblock thread
789 		Thread* thread = waiter->thread;
790 		waiter->thread = NULL;
791 		thread_unblock(thread, B_ERROR);
792 	}
793 
794 	lock->name = NULL;
795 	lock->flags = 0;
796 #if KDEBUG
797 	lock->holder = 0;
798 #else
799 	lock->count = INT16_MIN;
800 #endif
801 
802 	locker.Unlock();
803 
804 	free(name);
805 }
806 
807 
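// Acquires \a lock for callers that already hold its spinlock (via \a locker).
// In non-KDEBUG builds this mirrors the inline fast path: _mutex_lock() is
// only entered when the atomic count indicates contention.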
808 static inline status_t
809 mutex_lock_threads_locked(mutex* lock, InterruptsSpinLocker* locker)
810 {
811 #if KDEBUG
812 	return _mutex_lock(lock, locker);
813 #else
814 	if (atomic_add(&lock->count, -1) < 0)
815 		return _mutex_lock(lock, locker);
816 	return B_OK;
817 #endif
818 }
819 
820 
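/*!	Unlocks \a from and then locks \a to while holding \a to's spinlock across
	both steps, so that releasing \a from and enqueueing on \a to are not
	interleaved with other operations on \a to's waiter queue.
*/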
821 status_t
822 mutex_switch_lock(mutex* from, mutex* to)
823 {
824 #if KDEBUG
825 	if (!gKernelStartup && !are_interrupts_enabled()) {
826 		panic("mutex_switch_lock(): called with interrupts disabled "
827 			"for locks %p, %p", from, to);
828 	}
829 #endif
830 
831 	InterruptsSpinLocker locker(to->lock);
832 
833 	mutex_unlock(from);
834 
835 	return mutex_lock_threads_locked(to, &locker);
836 }
837 
838 
839 void
840 mutex_transfer_lock(mutex* lock, thread_id thread)
841 {
842 #if KDEBUG
843 	if (thread_get_current_thread_id() != lock->holder)
844 		panic("mutex_transfer_lock(): current thread is not the lock holder!");
845 	lock->holder = thread;
846 #endif
847 }
848 
849 
850 status_t
851 mutex_switch_from_read_lock(rw_lock* from, mutex* to)
852 {
853 #if KDEBUG
854 	if (!gKernelStartup && !are_interrupts_enabled()) {
855 		panic("mutex_switch_from_read_lock(): called with interrupts disabled "
856 			"for locks %p, %p", from, to);
857 	}
858 #endif
859 
860 	InterruptsSpinLocker locker(to->lock);
861 
862 #if KDEBUG_RW_LOCK_DEBUG
863 	_rw_lock_write_unlock(from);
864 #else
865 	int32 oldCount = atomic_add(&from->count, -1);
866 	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
867 		_rw_lock_read_unlock(from);
868 #endif
869 
870 	return mutex_lock_threads_locked(to, &locker);
871 }
872 
873 
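/*!	Slow path of the mutex lock operation: takes over the lock if it was
	released in the meantime, otherwise queues the calling thread on the
	waiter list and blocks until _mutex_unlock() hands the lock over.
	\a _locker may point to an InterruptsSpinLocker that already holds the
	lock's spinlock; if it is NULL, the spinlock is acquired here.
*/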
874 status_t
875 _mutex_lock(mutex* lock, void* _locker)
876 {
877 #if KDEBUG
878 	if (!gKernelStartup && _locker == NULL && !are_interrupts_enabled()) {
879 		panic("_mutex_lock(): called with interrupts disabled for lock %p",
880 			lock);
881 	}
882 #endif
883 
884 	// acquire the lock's spinlock only if the caller did not pass one in
885 	InterruptsSpinLocker* locker
886 		= reinterpret_cast<InterruptsSpinLocker*>(_locker);
887 
888 	InterruptsSpinLocker lockLocker;
889 	if (locker == NULL) {
890 		lockLocker.SetTo(lock->lock, false);
891 		locker = &lockLocker;
892 	}
893 
894 	// Might have been released after we decremented the count, but before
895 	// we acquired the spinlock.
896 #if KDEBUG
897 	if (lock->holder < 0) {
898 		lock->holder = thread_get_current_thread_id();
899 		return B_OK;
900 	} else if (lock->holder == thread_get_current_thread_id()) {
901 		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
902 			lock->holder);
903 	} else if (lock->holder == 0)
904 		panic("_mutex_lock(): using uninitialized lock %p", lock);
905 #else
906 	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
907 		lock->flags &= ~MUTEX_FLAG_RELEASED;
908 		return B_OK;
909 	}
910 #endif
911 
912 	// enqueue in waiter list
913 	mutex_waiter waiter;
914 	waiter.thread = thread_get_current_thread();
915 	waiter.next = NULL;
916 
917 	if (lock->waiters != NULL) {
918 		lock->waiters->last->next = &waiter;
919 	} else
920 		lock->waiters = &waiter;
921 
922 	lock->waiters->last = &waiter;
923 
924 	// block
925 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
926 	locker->Unlock();
927 
928 	status_t error = thread_block();
929 #if KDEBUG
930 	if (error == B_OK) {
931 		ASSERT(lock->holder == waiter.thread->id);
932 	}
933 #endif
934 	return error;
935 }
936 
937 
938 void
939 _mutex_unlock(mutex* lock)
940 {
941 	InterruptsSpinLocker locker(lock->lock);
942 
943 #if KDEBUG
944 	if (thread_get_current_thread_id() != lock->holder) {
945 		panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to "
946 			"release mutex %p (current holder %" B_PRId32 ")\n",
947 			thread_get_current_thread_id(), lock, lock->holder);
948 		return;
949 	}
950 #endif
951 
952 	mutex_waiter* waiter = lock->waiters;
953 	if (waiter != NULL) {
954 		// dequeue the first waiter
955 		lock->waiters = waiter->next;
956 		if (lock->waiters != NULL)
957 			lock->waiters->last = waiter->last;
958 
959 #if KDEBUG
960 		// Set the holder to the unblocked thread right away. Besides that this
961 		// actually reflects the current situation, setting it to -1 would
962 		// cause a race condition, since another locker could think the lock
963 		// is not held by anyone.
964 		lock->holder = waiter->thread->id;
965 #endif
966 
967 		// unblock thread
968 		thread_unblock(waiter->thread, B_OK);
969 	} else {
970 		// There are no waiters, so mark the lock as released.
971 #if KDEBUG
972 		lock->holder = -1;
973 #else
974 		lock->flags |= MUTEX_FLAG_RELEASED;
975 #endif
976 	}
977 }
978 
979 
980 status_t
981 _mutex_trylock(mutex* lock)
982 {
983 #if KDEBUG
984 	InterruptsSpinLocker _(lock->lock);
985 
986 	if (lock->holder < 0) {
987 		lock->holder = thread_get_current_thread_id();
988 		return B_OK;
989 	} else if (lock->holder == 0)
990 		panic("_mutex_trylock(): using uninitialized lock %p", lock);
991 	return B_WOULD_BLOCK;
992 #else
993 	return mutex_trylock(lock);
994 #endif
995 }
996 
997 
998 status_t
999 _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
1000 {
1001 #if KDEBUG
1002 	if (!gKernelStartup && !are_interrupts_enabled()) {
1003 		panic("_mutex_lock_with_timeout(): called with interrupts disabled "
1004 			"for lock %p", lock);
1005 	}
1006 #endif
1007 
1008 	InterruptsSpinLocker locker(lock->lock);
1009 
1010 	// Might have been released after we decremented the count, but before
1011 	// we acquired the spinlock.
1012 #if KDEBUG
1013 	if (lock->holder < 0) {
1014 		lock->holder = thread_get_current_thread_id();
1015 		return B_OK;
1016 	} else if (lock->holder == thread_get_current_thread_id()) {
1017 		panic("_mutex_lock_with_timeout(): double lock of %p by thread "
1018 			"%" B_PRId32, lock, lock->holder);
1019 	} else if (lock->holder == 0)
1020 		panic("_mutex_lock_with_timeout(): using uninitialized lock %p", lock);
1021 #else
1022 	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
1023 		lock->flags &= ~MUTEX_FLAG_RELEASED;
1024 		return B_OK;
1025 	}
1026 #endif
1027 
1028 	// enqueue in waiter list
1029 	mutex_waiter waiter;
1030 	waiter.thread = thread_get_current_thread();
1031 	waiter.next = NULL;
1032 
1033 	if (lock->waiters != NULL) {
1034 		lock->waiters->last->next = &waiter;
1035 	} else
1036 		lock->waiters = &waiter;
1037 
1038 	lock->waiters->last = &waiter;
1039 
1040 	// block
1041 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
1042 	locker.Unlock();
1043 
1044 	status_t error = thread_block_with_timeout(timeoutFlags, timeout);
1045 
1046 	if (error == B_OK) {
1047 #if KDEBUG
1048 		ASSERT(lock->holder == waiter.thread->id);
1049 #endif
1050 	} else {
1051 		// If the lock was destroyed, our "thread" entry will be NULL.
1052 		if (waiter.thread == NULL)
1053 			return B_ERROR;
1054 
1055 		// TODO: There is still a race condition during mutex destruction,
1056 		// if we resume due to a timeout before our thread is set to NULL.
1057 
1058 		locker.Lock();
1059 
1060 		// If the timeout occurred, we must remove our waiter structure from
1061 		// the queue.
1062 		mutex_waiter* previousWaiter = NULL;
1063 		mutex_waiter* otherWaiter = lock->waiters;
1064 		while (otherWaiter != NULL && otherWaiter != &waiter) {
1065 			previousWaiter = otherWaiter;
1066 			otherWaiter = otherWaiter->next;
1067 		}
1068 		if (otherWaiter == &waiter) {
1069 			// the structure is still in the list -- dequeue
1070 			if (&waiter == lock->waiters) {
1071 				if (waiter.next != NULL)
1072 					waiter.next->last = waiter.last;
1073 				lock->waiters = waiter.next;
1074 			} else {
1075 				if (waiter.next == NULL)
1076 					lock->waiters->last = previousWaiter;
1077 				previousWaiter->next = waiter.next;
1078 			}
1079 
1080 #if !KDEBUG
1081 			// we need to fix the lock count
1082 			atomic_add(&lock->count, 1);
1083 #endif
1084 		} else {
1085 			// the structure is not in the list -- even though the timeout
1086 			// occurred, this means we own the lock now
1087 #if KDEBUG
1088 			ASSERT(lock->holder == waiter.thread->id);
1089 #endif
1090 			return B_OK;
1091 		}
1092 	}
1093 
1094 	return error;
1095 }
1096 
1097 
1098 static int
1099 dump_mutex_info(int argc, char** argv)
1100 {
1101 	if (argc < 2) {
1102 		print_debugger_command_usage(argv[0]);
1103 		return 0;
1104 	}
1105 
1106 	mutex* lock = (mutex*)parse_expression(argv[1]);
1107 
1108 	if (!IS_KERNEL_ADDRESS(lock)) {
1109 		kprintf("invalid address: %p\n", lock);
1110 		return 0;
1111 	}
1112 
1113 	kprintf("mutex %p:\n", lock);
1114 	kprintf("  name:            %s\n", lock->name);
1115 	kprintf("  flags:           0x%x\n", lock->flags);
1116 #if KDEBUG
1117 	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
1118 #else
1119 	kprintf("  count:           %" B_PRId32 "\n", lock->count);
1120 #endif
1121 
1122 	kprintf("  waiting threads:");
1123 	mutex_waiter* waiter = lock->waiters;
1124 	while (waiter != NULL) {
1125 		kprintf(" %" B_PRId32, waiter->thread->id);
1126 		waiter = waiter->next;
1127 	}
1128 	kputs("\n");
1129 
1130 	return 0;
1131 }
1132 
1133 
1134 // #pragma mark -
1135 
1136 
1137 void
1138 lock_debug_init()
1139 {
1140 	add_debugger_command_etc("mutex", &dump_mutex_info,
1141 		"Dump info about a mutex",
1142 		"<mutex>\n"
1143 		"Prints info about the specified mutex.\n"
1144 		"  <mutex>  - pointer to the mutex to print the info for.\n", 0);
1145 	add_debugger_command_etc("rwlock", &dump_rw_lock_info,
1146 		"Dump info about an rw lock",
1147 		"<lock>\n"
1148 		"Prints info about the specified rw lock.\n"
1149 		"  <lock>  - pointer to the rw lock to print the info for.\n", 0);
1150 	add_debugger_command_etc("recursivelock", &dump_recursive_lock_info,
1151 		"Dump info about a recursive lock",
1152 		"<lock>\n"
1153 		"Prints info about the specified recursive lock.\n"
1154 		"  <lock>  - pointer to the recursive lock to print the info for.\n",
1155 		0);
1156 }
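
// In the kernel debugger these commands take the lock's address, e.g.
// "mutex <address>", and print its name, holder or count, and the IDs of the
// waiting threads (see the dump_*_info() functions above).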
1157