xref: /haiku/src/system/kernel/locks/lock.cpp (revision 445d4fd926c569e7b9ae28017da86280aaecbae2)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 /*! Mutex, rw_lock, and recursive_lock code */
12 
13 
14 #include <lock.h>
15 
16 #include <stdlib.h>
17 #include <string.h>
18 
19 #include <OS.h>
20 
21 #include <debug.h>
22 #include <int.h>
23 #include <kernel.h>
24 #include <listeners.h>
25 #include <scheduling_analysis.h>
26 #include <thread.h>
27 #include <util/AutoLock.h>
28 
29 
30 struct mutex_waiter {
31 	Thread*			thread;
32 	mutex_waiter*	next;		// next in queue
33 	mutex_waiter*	last;		// last in queue (valid for the first in queue)
34 };
35 
36 struct rw_lock_waiter {
37 	Thread*			thread;
38 	rw_lock_waiter*	next;		// next in queue
39 	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
40 	bool			writer;
41 };
42 
43 #define MUTEX_FLAG_RELEASED		0x2
44 
45 
46 int32
47 recursive_lock_get_recursion(recursive_lock *lock)
48 {
49 	if (RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id())
50 		return lock->recursion;
51 
52 	return -1;
53 }
54 
55 
56 void
57 recursive_lock_init(recursive_lock *lock, const char *name)
58 {
59 	recursive_lock_init_etc(lock, name, 0);
60 }
61 
62 
63 void
64 recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
65 {
66 	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
67 #if !KDEBUG
68 	lock->holder = -1;
69 #endif
70 	lock->recursion = 0;
71 }
72 
73 
74 void
75 recursive_lock_destroy(recursive_lock *lock)
76 {
77 	if (lock == NULL)
78 		return;
79 
80 	mutex_destroy(&lock->lock);
81 }
82 
83 
84 status_t
85 recursive_lock_lock(recursive_lock *lock)
86 {
87 #if KDEBUG
88 	if (!gKernelStartup && !are_interrupts_enabled()) {
89 		panic("recursive_lock_lock: called with interrupts disabled for lock "
90 			"%p (\"%s\")\n", lock, lock->lock.name);
91 	}
92 #endif
93 
94 	thread_id thread = thread_get_current_thread_id();
95 
96 	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
97 		mutex_lock(&lock->lock);
98 #if !KDEBUG
99 		lock->holder = thread;
100 #endif
101 	}
102 
103 	lock->recursion++;
104 	return B_OK;
105 }
106 
107 
108 status_t
109 recursive_lock_trylock(recursive_lock *lock)
110 {
111 	thread_id thread = thread_get_current_thread_id();
112 
113 #if KDEBUG
114 	if (!gKernelStartup && !are_interrupts_enabled()) {
115 		panic("recursive_lock_trylock: called with interrupts disabled for lock "
116 			"%p (\"%s\")\n", lock, lock->lock.name);
117 	}
118 #endif
119 
120 	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
121 		status_t status = mutex_trylock(&lock->lock);
122 		if (status != B_OK)
123 			return status;
124 
125 #if !KDEBUG
126 		lock->holder = thread;
127 #endif
128 	}
129 
130 	lock->recursion++;
131 	return B_OK;
132 }
133 
134 
135 void
136 recursive_lock_unlock(recursive_lock *lock)
137 {
138 	if (thread_get_current_thread_id() != RECURSIVE_LOCK_HOLDER(lock))
139 		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);
140 
141 	if (--lock->recursion == 0) {
142 #if !KDEBUG
143 		lock->holder = -1;
144 #endif
145 		mutex_unlock(&lock->lock);
146 	}
147 }
148 
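/* Usage sketch (illustrative; the lock name and call sites are assumptions,
 * not taken from a real caller): a recursive_lock may be re-acquired by the
 * thread that already holds it, and every lock call has to be balanced by an
 * unlock before the underlying mutex is released.
 *
 *	static recursive_lock sCacheLock;
 *
 *	recursive_lock_init(&sCacheLock, "cache lock");
 *
 *	recursive_lock_lock(&sCacheLock);
 *	recursive_lock_lock(&sCacheLock);
 *		// recursive_lock_get_recursion(&sCacheLock) now returns 2
 *	recursive_lock_unlock(&sCacheLock);
 *	recursive_lock_unlock(&sCacheLock);
 *		// only this second unlock releases lock->lock
 *
 *	recursive_lock_destroy(&sCacheLock);
 */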
149 
150 status_t
151 recursive_lock_switch_lock(recursive_lock* from, recursive_lock* to)
152 {
153 #if KDEBUG
154 	if (!gKernelStartup && !are_interrupts_enabled()) {
155 		panic("recursive_lock_switch_lock(): called with interrupts "
156 			"disabled for locks %p, %p", from, to);
157 	}
158 #endif
159 
160 	if (--from->recursion > 0)
161 		return recursive_lock_lock(to);
162 
163 #if !KDEBUG
164 	from->holder = -1;
165 #endif
166 
167 	thread_id thread = thread_get_current_thread_id();
168 
169 	if (thread == RECURSIVE_LOCK_HOLDER(to)) {
170 		to->recursion++;
171 		mutex_unlock(&from->lock);
172 		return B_OK;
173 	}
174 
175 	status_t status = mutex_switch_lock(&from->lock, &to->lock);
176 	if (status != B_OK) {
177 		from->recursion++;
178 #if !KDEBUG
179 		from->holder = thread;
180 #endif
181 		return status;
182 	}
183 
184 #if !KDEBUG
185 	to->holder = thread;
186 #endif
187 	to->recursion++;
188 	return B_OK;
189 }
190 
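/* Sketch of the hand-over pattern this function supports (the object and
 * field names are hypothetical): releasing "from" and starting to wait for
 * "to" happen atomically with respect to other threads, so a thread that
 * acquires "from" afterwards and then releases "to" cannot miss us.
 *
 *	recursive_lock_lock(&sListLock);
 *	Entry* entry = find_entry(id);		// hypothetical lookup
 *	recursive_lock_switch_lock(&sListLock, &entry->lock);
 *	// sListLock released (or its recursion decremented); entry->lock held
 *	recursive_lock_unlock(&entry->lock);
 */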
191 
192 status_t
193 recursive_lock_switch_from_mutex(mutex* from, recursive_lock* to)
194 {
195 #if KDEBUG
196 	if (!gKernelStartup && !are_interrupts_enabled()) {
197 		panic("recursive_lock_switch_from_mutex(): called with interrupts "
198 			"disabled for locks %p, %p", from, to);
199 	}
200 #endif
201 
202 	thread_id thread = thread_get_current_thread_id();
203 
204 	if (thread == RECURSIVE_LOCK_HOLDER(to)) {
205 		to->recursion++;
206 		mutex_unlock(from);
207 		return B_OK;
208 	}
209 
210 	status_t status = mutex_switch_lock(from, &to->lock);
211 	if (status != B_OK)
212 		return status;
213 
214 #if !KDEBUG
215 	to->holder = thread;
216 #endif
217 	to->recursion++;
218 	return B_OK;
219 }
220 
221 
222 status_t
223 recursive_lock_switch_from_read_lock(rw_lock* from, recursive_lock* to)
224 {
225 #if KDEBUG
226 	if (!gKernelStartup && !are_interrupts_enabled()) {
227 		panic("recursive_lock_switch_from_read_lock(): called with interrupts "
228 			"disabled for locks %p, %p", from, to);
229 	}
230 #endif
231 
232 	thread_id thread = thread_get_current_thread_id();
233 
234 	if (thread != RECURSIVE_LOCK_HOLDER(to)) {
235 		status_t status = mutex_switch_from_read_lock(from, &to->lock);
236 		if (status != B_OK)
237 			return status;
238 
239 #if !KDEBUG
240 		to->holder = thread;
241 #endif
242 	} else {
243 		rw_lock_read_unlock(from);
244 	}
245 
246 	to->recursion++;
247 	return B_OK;
248 }
249 
250 
251 static int
252 dump_recursive_lock_info(int argc, char** argv)
253 {
254 	if (argc < 2) {
255 		print_debugger_command_usage(argv[0]);
256 		return 0;
257 	}
258 
259 	recursive_lock* lock = (recursive_lock*)parse_expression(argv[1]);
260 
261 	if (!IS_KERNEL_ADDRESS(lock)) {
262 		kprintf("invalid address: %p\n", lock);
263 		return 0;
264 	}
265 
266 	kprintf("recursive_lock %p:\n", lock);
267 	kprintf("  mutex:           %p\n", &lock->lock);
268 	kprintf("  name:            %s\n", lock->lock.name);
269 	kprintf("  flags:           0x%x\n", lock->lock.flags);
270 #if KDEBUG
271 	kprintf("  holder:          %" B_PRId32 "\n", lock->lock.holder);
272 #else
273 	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
274 #endif
275 	kprintf("  recursion:       %d\n", lock->recursion);
276 
277 	kprintf("  waiting threads:");
278 	mutex_waiter* waiter = lock->lock.waiters;
279 	while (waiter != NULL) {
280 		kprintf(" %" B_PRId32, waiter->thread->id);
281 		waiter = waiter->next;
282 	}
283 	kputs("\n");
284 
285 	return 0;
286 }
287 
288 
289 //	#pragma mark -
290 
291 
292 static status_t
293 rw_lock_wait(rw_lock* lock, bool writer, InterruptsSpinLocker& locker)
294 {
295 	// enqueue in waiter list
296 	rw_lock_waiter waiter;
297 	waiter.thread = thread_get_current_thread();
298 	waiter.next = NULL;
299 	waiter.writer = writer;
300 
301 	if (lock->waiters != NULL)
302 		lock->waiters->last->next = &waiter;
303 	else
304 		lock->waiters = &waiter;
305 
306 	lock->waiters->last = &waiter;
307 
308 	// block
309 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
310 	locker.Unlock();
311 
312 	status_t result = thread_block();
313 
314 	locker.Lock();
315 	return result;
316 }
317 
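/* Note on the waiter entry above: the rw_lock_waiter lives on the waiting
 * thread's stack. That is safe because the thread does not return from
 * rw_lock_wait() until it has been unblocked, and both rw_lock_unblock() and
 * rw_lock_destroy() dequeue the entry (with lock->lock held) before calling
 * thread_unblock().
 */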
318 
319 static int32
320 rw_lock_unblock(rw_lock* lock)
321 {
322 	// Check whether there are any waiting threads at all and whether anyone
323 	// has the write lock.
324 	rw_lock_waiter* waiter = lock->waiters;
325 	if (waiter == NULL || lock->holder >= 0)
326 		return 0;
327 
328 	// writer at head of queue?
329 	if (waiter->writer) {
330 		if (lock->active_readers > 0 || lock->pending_readers > 0)
331 			return 0;
332 
333 		// dequeue writer
334 		lock->waiters = waiter->next;
335 		if (lock->waiters != NULL)
336 			lock->waiters->last = waiter->last;
337 
338 		lock->holder = waiter->thread->id;
339 
340 		// unblock thread
341 		thread_unblock(waiter->thread, B_OK);
342 
343 		waiter->thread = NULL;
344 		return RW_LOCK_WRITER_COUNT_BASE;
345 	}
346 
347 	// wake up one or more readers
348 	uint32 readerCount = 0;
349 	do {
350 		// dequeue reader
351 		lock->waiters = waiter->next;
352 		if (lock->waiters != NULL)
353 			lock->waiters->last = waiter->last;
354 
355 		readerCount++;
356 
357 		// unblock thread
358 		thread_unblock(waiter->thread, B_OK);
359 
360 		waiter->thread = NULL;
361 	} while ((waiter = lock->waiters) != NULL && !waiter->writer);
362 
363 	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
364 		lock->active_readers += readerCount;
365 
366 	return readerCount;
367 }
368 
369 
370 void
371 rw_lock_init(rw_lock* lock, const char* name)
372 {
373 	lock->name = name;
374 	lock->waiters = NULL;
375 	B_INITIALIZE_SPINLOCK(&lock->lock);
376 	lock->holder = -1;
377 	lock->count = 0;
378 	lock->owner_count = 0;
379 	lock->active_readers = 0;
380 	lock->pending_readers = 0;
381 	lock->flags = 0;
382 
383 	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
384 	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
385 }
386 
387 
388 void
389 rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
390 {
391 	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
392 	lock->waiters = NULL;
393 	B_INITIALIZE_SPINLOCK(&lock->lock);
394 	lock->holder = -1;
395 	lock->count = 0;
396 	lock->owner_count = 0;
397 	lock->active_readers = 0;
398 	lock->pending_readers = 0;
399 	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;
400 
401 	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
402 	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
403 }
404 
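/* Usage sketch (the name buffer and the containing structure are made up for
 * illustration): RW_LOCK_FLAG_CLONE_NAME makes the lock keep its own copy of
 * the name, which is needed whenever the passed-in string does not outlive
 * the lock.
 *
 *	char name[32];
 *	snprintf(name, sizeof(name), "team %" B_PRId32 " areas", teamID);
 *	rw_lock_init_etc(&team->areaLock, name, RW_LOCK_FLAG_CLONE_NAME);
 *		// the lock now owns a strdup()'d copy; rw_lock_destroy() free()s it
 */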
405 
406 void
407 rw_lock_destroy(rw_lock* lock)
408 {
409 	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
410 		? (char*)lock->name : NULL;
411 
412 	// unblock all waiters
413 	InterruptsSpinLocker locker(lock->lock);
414 
415 #if KDEBUG
416 	if (lock->waiters != NULL && thread_get_current_thread_id()
417 			!= lock->holder) {
418 		panic("rw_lock_destroy(): there are blocking threads, but the caller "
419 			"doesn't hold the write lock (%p)", lock);
420 
421 		locker.Unlock();
422 		if (rw_lock_write_lock(lock) != B_OK)
423 			return;
424 		locker.Lock();
425 	}
426 #endif
427 
428 	while (rw_lock_waiter* waiter = lock->waiters) {
429 		// dequeue
430 		lock->waiters = waiter->next;
431 
432 		// unblock thread
433 		thread_unblock(waiter->thread, B_ERROR);
434 	}
435 
436 	lock->name = NULL;
437 
438 	locker.Unlock();
439 
440 	free(name);
441 }
442 
443 
444 #if !KDEBUG_RW_LOCK_DEBUG
445 
446 status_t
447 _rw_lock_read_lock(rw_lock* lock)
448 {
449 #if KDEBUG
450 	if (!gKernelStartup && !are_interrupts_enabled()) {
451 		panic("_rw_lock_read_lock(): called with interrupts disabled for lock %p",
452 			lock);
453 	}
454 #endif
455 
456 	InterruptsSpinLocker locker(lock->lock);
457 
458 	// We might be the writer ourselves.
459 	if (lock->holder == thread_get_current_thread_id()) {
460 		lock->owner_count++;
461 		return B_OK;
462 	}
463 
464 	// The writer that originally had the lock when we called atomic_add() might
465 	// already have gone and another writer could have overtaken us. In this
466 	// case the original writer set pending_readers, so we know that we don't
467 	// have to wait.
468 	if (lock->pending_readers > 0) {
469 		lock->pending_readers--;
470 
471 		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
472 			lock->active_readers++;
473 
474 		return B_OK;
475 	}
476 
477 	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);
478 
479 	// we need to wait
480 	return rw_lock_wait(lock, false, locker);
481 }
482 
483 
484 status_t
485 _rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
486 	bigtime_t timeout)
487 {
488 #if KDEBUG
489 	if (!gKernelStartup && !are_interrupts_enabled()) {
490 		panic("_rw_lock_read_lock_with_timeout(): called with interrupts "
491 			"disabled for lock %p", lock);
492 	}
493 #endif
494 
495 	InterruptsSpinLocker locker(lock->lock);
496 
497 	// We might be the writer ourselves.
498 	if (lock->holder == thread_get_current_thread_id()) {
499 		lock->owner_count++;
500 		return B_OK;
501 	}
502 
503 	// The writer that originally had the lock when we called atomic_add() might
504 	// already have gone and another writer could have overtaken us. In this
505 	// case the original writer set pending_readers, so we know that we don't
506 	// have to wait.
507 	if (lock->pending_readers > 0) {
508 		lock->pending_readers--;
509 
510 		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
511 			lock->active_readers++;
512 
513 		return B_OK;
514 	}
515 
516 	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);
517 
518 	// we need to wait
519 
520 	// enqueue in waiter list
521 	rw_lock_waiter waiter;
522 	waiter.thread = thread_get_current_thread();
523 	waiter.next = NULL;
524 	waiter.writer = false;
525 
526 	if (lock->waiters != NULL)
527 		lock->waiters->last->next = &waiter;
528 	else
529 		lock->waiters = &waiter;
530 
531 	lock->waiters->last = &waiter;
532 
533 	// block
534 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
535 	locker.Unlock();
536 
537 	status_t error = thread_block_with_timeout(timeoutFlags, timeout);
538 	if (error == B_OK || waiter.thread == NULL) {
539 		// We were unblocked successfully -- potentially our unblocker overtook
540 		// us after we already failed. In either case, we've got the lock now.
541 		return B_OK;
542 	}
543 
544 	locker.Lock();
545 	// We failed to get the lock -- dequeue from waiter list.
546 	rw_lock_waiter* previous = NULL;
547 	rw_lock_waiter* other = lock->waiters;
548 	while (other != &waiter) {
549 		previous = other;
550 		other = other->next;
551 	}
552 
553 	if (previous == NULL) {
554 		// we are the first in line
555 		lock->waiters = waiter.next;
556 		if (lock->waiters != NULL)
557 			lock->waiters->last = waiter.last;
558 	} else {
559 		// one or more other waiters are before us in the queue
560 		previous->next = waiter.next;
561 		if (lock->waiters->last == &waiter)
562 			lock->waiters->last = previous;
563 	}
564 
565 	// Decrement the count. ATM this is all we have to do. There's at least
566 	// one writer ahead of us -- otherwise the last writer would have unblocked
567 	// us (writers only manipulate the lock data with thread spinlock being
568 	// held) -- so our leaving doesn't make a difference to the ones behind us
569 	// in the queue.
570 	atomic_add(&lock->count, -1);
571 
572 	return error;
573 }
574 
575 
576 void
577 _rw_lock_read_unlock(rw_lock* lock)
578 {
579 	InterruptsSpinLocker locker(lock->lock);
580 
581 	// If we're still holding the write lock or if there are other readers,
582 	// no-one can be woken up.
583 	if (lock->holder == thread_get_current_thread_id()) {
584 		ASSERT(lock->owner_count % RW_LOCK_WRITER_COUNT_BASE > 0);
585 		lock->owner_count--;
586 		return;
587 	}
588 
589 	if (--lock->active_readers > 0)
590 		return;
591 
592 	if (lock->active_readers < 0) {
593 		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
594 		lock->active_readers = 0;
595 		return;
596 	}
597 
598 	rw_lock_unblock(lock);
599 }
600 
601 #endif	// !KDEBUG_RW_LOCK_DEBUG
602 
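/* Reader-side sketch (the protected structure is hypothetical). In release
 * builds the rw_lock_read_lock()/rw_lock_read_unlock() inlines in <lock.h>
 * typically just adjust lock->count atomically and only enter the slow paths
 * above when a writer holds or has claimed the lock.
 *
 *	rw_lock_read_lock(&sAreaLock);
 *	Area* area = lookup_area(id);		// read-only access to shared state
 *	rw_lock_read_unlock(&sAreaLock);
 *
 * A thread that already holds the write lock may additionally take read
 * locks; those are tracked in owner_count and released without waking anyone.
 */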
603 
604 status_t
605 rw_lock_write_lock(rw_lock* lock)
606 {
607 #if KDEBUG
608 	if (!gKernelStartup && !are_interrupts_enabled()) {
609 		panic("rw_lock_write_lock(): called with interrupts disabled for lock %p",
610 			lock);
611 	}
612 #endif
613 
614 	InterruptsSpinLocker locker(lock->lock);
615 
616 	// If we're already the lock holder, we just need to increment the owner
617 	// count.
618 	thread_id thread = thread_get_current_thread_id();
619 	if (lock->holder == thread) {
620 		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
621 		return B_OK;
622 	}
623 
624 	// announce our claim
625 	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);
626 
627 	if (oldCount == 0) {
628 		// No-one else held a read or write lock, so it's ours now.
629 		lock->holder = thread;
630 		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
631 		return B_OK;
632 	}
633 
634 	// We have to wait. If we're the first writer, note the current reader
635 	// count.
636 	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
637 		lock->active_readers = oldCount - lock->pending_readers;
638 
639 	status_t status = rw_lock_wait(lock, true, locker);
640 	if (status == B_OK) {
641 		lock->holder = thread;
642 		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
643 	}
644 
645 	return status;
646 }
647 
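/* How lock->count encodes the state (as used above): each reader adds 1 and
 * each writer adds RW_LOCK_WRITER_COUNT_BASE, so "oldCount <
 * RW_LOCK_WRITER_COUNT_BASE" means "no writer had claimed the lock yet".
 * Worked example: with two readers holding the lock, the first writer sees
 * oldCount == 2, records active_readers = 2 and waits; a second writer sees
 * oldCount == RW_LOCK_WRITER_COUNT_BASE + 2 and simply queues behind it.
 */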
648 
649 void
650 _rw_lock_write_unlock(rw_lock* lock)
651 {
652 	InterruptsSpinLocker locker(lock->lock);
653 
654 	if (thread_get_current_thread_id() != lock->holder) {
655 		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
656 			lock);
657 		return;
658 	}
659 
660 	ASSERT(lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE);
661 
662 	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
663 	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
664 		return;
665 
666 	// We gave up our last write lock -- clean up and unblock waiters.
667 	int32 readerCount = lock->owner_count;
668 	lock->holder = -1;
669 	lock->owner_count = 0;
670 
671 	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
672 	oldCount -= RW_LOCK_WRITER_COUNT_BASE;
673 
674 	if (oldCount != 0) {
675 		// If writers are waiting, take over our reader count.
676 		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
677 			lock->active_readers = readerCount;
678 			rw_lock_unblock(lock);
679 		} else {
680 			// No waiting writer, but there are one or more readers. We will
681 			// unblock all waiting readers -- that's the easy part -- and must
682 			// also make sure that all readers that haven't entered the critical
683 			// section yet, won't start to wait. Otherwise a writer overtaking
684 			// such a reader will correctly start to wait, but the reader,
685 			// seeing the writer count > 0, would also start to wait. We set
686 			// pending_readers to the number of readers that are still expected
687 			// to enter the critical section.
688 			lock->pending_readers = oldCount - readerCount
689 				- rw_lock_unblock(lock);
690 		}
691 	}
692 }
693 
694 
695 static int
696 dump_rw_lock_info(int argc, char** argv)
697 {
698 	if (argc < 2) {
699 		print_debugger_command_usage(argv[0]);
700 		return 0;
701 	}
702 
703 	rw_lock* lock = (rw_lock*)parse_expression(argv[1]);
704 
705 	if (!IS_KERNEL_ADDRESS(lock)) {
706 		kprintf("invalid address: %p\n", lock);
707 		return 0;
708 	}
709 
710 	kprintf("rw lock %p:\n", lock);
711 	kprintf("  name:            %s\n", lock->name);
712 	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
713 	kprintf("  count:           %#" B_PRIx32 "\n", lock->count);
714 	kprintf("  active readers:  %d\n", lock->active_readers);
715 	kprintf("  pending readers: %d\n", lock->pending_readers);
716 	kprintf("  owner count:     %#" B_PRIx32 "\n", lock->owner_count);
717 	kprintf("  flags:           %#" B_PRIx32 "\n", lock->flags);
718 
719 	kprintf("  waiting threads:");
720 	rw_lock_waiter* waiter = lock->waiters;
721 	while (waiter != NULL) {
722 		kprintf(" %" B_PRId32 "/%c", waiter->thread->id, waiter->writer ? 'w' : 'r');
723 		waiter = waiter->next;
724 	}
725 	kputs("\n");
726 
727 	return 0;
728 }
729 
730 
731 // #pragma mark -
732 
733 
734 void
735 mutex_init(mutex* lock, const char *name)
736 {
737 	mutex_init_etc(lock, name, 0);
738 }
739 
740 
741 void
742 mutex_init_etc(mutex* lock, const char *name, uint32 flags)
743 {
744 	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
745 	lock->waiters = NULL;
746 	B_INITIALIZE_SPINLOCK(&lock->lock);
747 #if KDEBUG
748 	lock->holder = -1;
749 #else
750 	lock->count = 0;
751 #endif
752 	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
753 
754 	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
755 	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
756 }
757 
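/* Usage sketch (the lock and function are illustrative): plain mutex_lock()/
 * mutex_unlock() pairs work, but much kernel code uses the MutexLocker RAII
 * helper from <util/AutoLock.h> (included above) so the mutex is released on
 * every return path.
 *
 *	static mutex sModuleLock = MUTEX_INITIALIZER("module lock");
 *
 *	status_t
 *	lookup_module(const char* name)
 *	{
 *		MutexLocker locker(sModuleLock);
 *		// ... walk the module list while holding the lock ...
 *		return B_OK;
 *	}
 */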
758 
759 void
760 mutex_destroy(mutex* lock)
761 {
762 	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
763 		? (char*)lock->name : NULL;
764 
765 	// unblock all waiters
766 	InterruptsSpinLocker locker(lock->lock);
767 
768 #if KDEBUG
769 	if (lock->holder != -1 && thread_get_current_thread_id() != lock->holder) {
770 		panic("mutex_destroy(): the lock (%p) is held by %" B_PRId32 ", not "
771 			"by the caller", lock, lock->holder);
772 		if (_mutex_lock(lock, &locker) != B_OK)
773 			return;
774 		locker.Lock();
775 	}
776 #endif
777 
778 	while (mutex_waiter* waiter = lock->waiters) {
779 		// dequeue
780 		lock->waiters = waiter->next;
781 
782 		// unblock thread
783 		Thread* thread = waiter->thread;
784 		waiter->thread = NULL;
785 		thread_unblock(thread, B_ERROR);
786 	}
787 
788 	lock->name = NULL;
789 	lock->flags = 0;
790 #if KDEBUG
791 	lock->holder = 0;
792 #else
793 	lock->count = INT16_MIN;
794 #endif
795 
796 	locker.Unlock();
797 
798 	free(name);
799 }
800 
801 
802 static inline status_t
803 mutex_lock_threads_locked(mutex* lock, InterruptsSpinLocker* locker)
804 {
805 #if KDEBUG
806 	return _mutex_lock(lock, locker);
807 #else
808 	if (atomic_add(&lock->count, -1) < 0)
809 		return _mutex_lock(lock, locker);
810 	return B_OK;
811 #endif
812 }
813 
814 
815 status_t
816 mutex_switch_lock(mutex* from, mutex* to)
817 {
818 #if KDEBUG
819 	if (!gKernelStartup && !are_interrupts_enabled()) {
820 		panic("mutex_switch_lock(): called with interrupts disabled "
821 			"for locks %p, %p", from, to);
822 	}
823 #endif
824 
825 	InterruptsSpinLocker locker(to->lock);
826 
827 	mutex_unlock(from);
828 
829 	return mutex_lock_threads_locked(to, &locker);
830 }
831 
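/* Sketch of the intended use (lock names are hypothetical): unlocking "from"
 * and starting to wait for "to" happen atomically with respect to other
 * threads, so a thread that acquires "from" afterwards and then releases "to"
 * cannot miss us.
 *
 *	mutex_lock(&objectLock);
 *	// decide we need the container's lock instead of the object's
 *	mutex_switch_lock(&objectLock, &containerLock);
 *	// objectLock is released; containerLock is now held
 *	mutex_unlock(&containerLock);
 */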
832 
833 void
834 mutex_transfer_lock(mutex* lock, thread_id thread)
835 {
836 #if KDEBUG
837 	if (thread_get_current_thread_id() != lock->holder)
838 		panic("mutex_transfer_lock(): current thread is not the lock holder!");
839 	lock->holder = thread;
840 #endif
841 }
842 
843 
844 status_t
845 mutex_switch_from_read_lock(rw_lock* from, mutex* to)
846 {
847 #if KDEBUG
848 	if (!gKernelStartup && !are_interrupts_enabled()) {
849 		panic("mutex_switch_from_read_lock(): called with interrupts disabled "
850 			"for locks %p, %p", from, to);
851 	}
852 #endif
853 
854 	InterruptsSpinLocker locker(to->lock);
855 
856 	rw_lock_read_unlock(from);
857 
858 	return mutex_lock_threads_locked(to, &locker);
859 }
860 
861 
862 status_t
863 _mutex_lock(mutex* lock, void* _locker)
864 {
865 #if KDEBUG
866 	if (!gKernelStartup && _locker == NULL && !are_interrupts_enabled()) {
867 		panic("_mutex_lock(): called with interrupts disabled for lock %p",
868 			lock);
869 	}
870 #endif
871 
872 	// Acquire the spinlock ourselves only if the caller didn't pass one in.
873 	InterruptsSpinLocker* locker
874 		= reinterpret_cast<InterruptsSpinLocker*>(_locker);
875 
876 	InterruptsSpinLocker lockLocker;
877 	if (locker == NULL) {
878 		lockLocker.SetTo(lock->lock, false);
879 		locker = &lockLocker;
880 	}
881 
882 	// Might have been released after we decremented the count, but before
883 	// we acquired the spinlock.
884 #if KDEBUG
885 	if (lock->holder < 0) {
886 		lock->holder = thread_get_current_thread_id();
887 		return B_OK;
888 	} else if (lock->holder == thread_get_current_thread_id()) {
889 		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
890 			lock->holder);
891 	} else if (lock->holder == 0)
892 		panic("_mutex_lock(): using uninitialized lock %p", lock);
893 #else
894 	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
895 		lock->flags &= ~MUTEX_FLAG_RELEASED;
896 		return B_OK;
897 	}
898 #endif
899 
900 	// enqueue in waiter list
901 	mutex_waiter waiter;
902 	waiter.thread = thread_get_current_thread();
903 	waiter.next = NULL;
904 
905 	if (lock->waiters != NULL) {
906 		lock->waiters->last->next = &waiter;
907 	} else
908 		lock->waiters = &waiter;
909 
910 	lock->waiters->last = &waiter;
911 
912 	// block
913 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
914 	locker->Unlock();
915 
916 	status_t error = thread_block();
917 #if KDEBUG
918 	if (error == B_OK) {
919 		ASSERT(lock->holder == waiter.thread->id);
920 	}
921 #endif
922 	return error;
923 }
924 
925 
926 void
927 _mutex_unlock(mutex* lock)
928 {
929 	InterruptsSpinLocker locker(lock->lock);
930 
931 #if KDEBUG
932 	if (thread_get_current_thread_id() != lock->holder) {
933 		panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to "
934 			"release mutex %p (current holder %" B_PRId32 ")\n",
935 			thread_get_current_thread_id(), lock, lock->holder);
936 		return;
937 	}
938 #endif
939 
940 	mutex_waiter* waiter = lock->waiters;
941 	if (waiter != NULL) {
942 		// dequeue the first waiter
943 		lock->waiters = waiter->next;
944 		if (lock->waiters != NULL)
945 			lock->waiters->last = waiter->last;
946 
947 #if KDEBUG
948 		// Set the holder to the unblocked thread right away. Besides
949 		// reflecting the actual situation, this avoids a race condition:
950 		// setting it to -1 would let another locker think the lock is not
951 		// held by anyone.
952 		lock->holder = waiter->thread->id;
953 #endif
954 
955 		// unblock thread
956 		thread_unblock(waiter->thread, B_OK);
957 	} else {
958 		// There are no waiters, so mark the lock as released.
959 #if KDEBUG
960 		lock->holder = -1;
961 #else
962 		lock->flags |= MUTEX_FLAG_RELEASED;
963 #endif
964 	}
965 }
966 
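/* Note on the unlock path above: when there are waiters, ownership is handed
 * directly to the first one instead of the mutex becoming free and being
 * re-contended. With KDEBUG that is what assigning lock->holder before
 * thread_unblock() expresses; without KDEBUG, MUTEX_FLAG_RELEASED covers the
 * race where the lock is released after a new locker decremented the count
 * but before it acquired the spinlock (see _mutex_lock()).
 */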
967 
968 status_t
969 _mutex_trylock(mutex* lock)
970 {
971 #if KDEBUG
972 	InterruptsSpinLocker _(lock->lock);
973 
974 	if (lock->holder < 0) {
975 		lock->holder = thread_get_current_thread_id();
976 		return B_OK;
977 	} else if (lock->holder == 0)
978 		panic("_mutex_trylock(): using uninitialized lock %p", lock);
979 	return B_WOULD_BLOCK;
980 #else
981 	return mutex_trylock(lock);
982 #endif
983 }
984 
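/* Usage sketch (the lock and helper are illustrative): mutex_trylock() never
 * blocks; it returns B_WOULD_BLOCK while someone else holds the lock, which
 * makes it suitable for opportunistic work.
 *
 *	if (mutex_trylock(&sFlushLock) == B_OK) {
 *		flush_pending_entries();	// hypothetical helper
 *		mutex_unlock(&sFlushLock);
 *	} else {
 *		// someone else is already flushing; skip this round
 *	}
 */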
985 
986 status_t
987 _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
988 {
989 #if KDEBUG
990 	if (!gKernelStartup && !are_interrupts_enabled()) {
991 		panic("_mutex_lock_with_timeout(): called with interrupts disabled "
992 			"for lock %p", lock);
993 	}
994 #endif
995 
996 	InterruptsSpinLocker locker(lock->lock);
997 
998 	// Might have been released after we decremented the count, but before
999 	// we acquired the spinlock.
1000 #if KDEBUG
1001 	if (lock->holder < 0) {
1002 		lock->holder = thread_get_current_thread_id();
1003 		return B_OK;
1004 	} else if (lock->holder == thread_get_current_thread_id()) {
1005 		panic("_mutex_lock_with_timeout(): double lock of %p by thread %" B_PRId32,
1006 			lock, lock->holder);
1007 	} else if (lock->holder == 0)
1008 		panic("_mutex_lock_with_timeout(): using uninitialized lock %p", lock);
1009 #else
1010 	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
1011 		lock->flags &= ~MUTEX_FLAG_RELEASED;
1012 		return B_OK;
1013 	}
1014 #endif
1015 
1016 	// enqueue in waiter list
1017 	mutex_waiter waiter;
1018 	waiter.thread = thread_get_current_thread();
1019 	waiter.next = NULL;
1020 
1021 	if (lock->waiters != NULL) {
1022 		lock->waiters->last->next = &waiter;
1023 	} else
1024 		lock->waiters = &waiter;
1025 
1026 	lock->waiters->last = &waiter;
1027 
1028 	// block
1029 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
1030 	locker.Unlock();
1031 
1032 	status_t error = thread_block_with_timeout(timeoutFlags, timeout);
1033 
1034 	if (error == B_OK) {
1035 #if KDEBUG
1036 		ASSERT(lock->holder == waiter.thread->id);
1037 #endif
1038 	} else {
1039 		// If the lock was destroyed, our "thread" entry will be NULL.
1040 		if (waiter.thread == NULL)
1041 			return B_ERROR;
1042 
1043 		// TODO: There is still a race condition during mutex destruction,
1044 		// if we resume due to a timeout before our thread is set to NULL.
1045 
1046 		locker.Lock();
1047 
1048 		// If the timeout occurred, we must remove our waiter structure from
1049 		// the queue.
1050 		mutex_waiter* previousWaiter = NULL;
1051 		mutex_waiter* otherWaiter = lock->waiters;
1052 		while (otherWaiter != NULL && otherWaiter != &waiter) {
1053 			previousWaiter = otherWaiter;
1054 			otherWaiter = otherWaiter->next;
1055 		}
1056 		if (otherWaiter == &waiter) {
1057 			// the structure is still in the list -- dequeue
1058 			if (&waiter == lock->waiters) {
1059 				if (waiter.next != NULL)
1060 					waiter.next->last = waiter.last;
1061 				lock->waiters = waiter.next;
1062 			} else {
1063 				if (waiter.next == NULL)
1064 					lock->waiters->last = previousWaiter;
1065 				previousWaiter->next = waiter.next;
1066 			}
1067 
1068 #if !KDEBUG
1069 			// we need to fix the lock count
1070 			atomic_add(&lock->count, 1);
1071 #endif
1072 		} else {
1073 			// the structure is not in the list -- even though the timeout
1074 			// occurred, this means we own the lock now
1075 #if KDEBUG
1076 			ASSERT(lock->holder == waiter.thread->id);
1077 #endif
1078 			return B_OK;
1079 		}
1080 	}
1081 
1082 	return error;
1083 }
1084 
1085 
1086 static int
1087 dump_mutex_info(int argc, char** argv)
1088 {
1089 	if (argc < 2) {
1090 		print_debugger_command_usage(argv[0]);
1091 		return 0;
1092 	}
1093 
1094 	mutex* lock = (mutex*)parse_expression(argv[1]);
1095 
1096 	if (!IS_KERNEL_ADDRESS(lock)) {
1097 		kprintf("invalid address: %p\n", lock);
1098 		return 0;
1099 	}
1100 
1101 	kprintf("mutex %p:\n", lock);
1102 	kprintf("  name:            %s\n", lock->name);
1103 	kprintf("  flags:           0x%x\n", lock->flags);
1104 #if KDEBUG
1105 	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
1106 #else
1107 	kprintf("  count:           %" B_PRId32 "\n", lock->count);
1108 #endif
1109 
1110 	kprintf("  waiting threads:");
1111 	mutex_waiter* waiter = lock->waiters;
1112 	while (waiter != NULL) {
1113 		kprintf(" %" B_PRId32, waiter->thread->id);
1114 		waiter = waiter->next;
1115 	}
1116 	kputs("\n");
1117 
1118 	return 0;
1119 }
1120 
1121 
1122 // #pragma mark -
1123 
1124 
1125 void
1126 lock_debug_init()
1127 {
1128 	add_debugger_command_etc("mutex", &dump_mutex_info,
1129 		"Dump info about a mutex",
1130 		"<mutex>\n"
1131 		"Prints info about the specified mutex.\n"
1132 		"  <mutex>  - pointer to the mutex to print the info for.\n", 0);
1133 	add_debugger_command_etc("rwlock", &dump_rw_lock_info,
1134 		"Dump info about an rw lock",
1135 		"<lock>\n"
1136 		"Prints info about the specified rw lock.\n"
1137 		"  <lock>  - pointer to the rw lock to print the info for.\n", 0);
1138 	add_debugger_command_etc("recursivelock", &dump_recursive_lock_info,
1139 		"Dump info about a recursive lock",
1140 		"<lock>\n"
1141 		"Prints info about the specified recursive lock.\n"
1142 		"  <lock>  - pointer to the recursive lock to print the info for.\n",
1143 		0);
1144 }
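/* The commands registered above are for use in the kernel debugger (KDL).
 * Typical invocations, with a placeholder address:
 *
 *	mutex 0x82f4b2c0
 *	rwlock 0x82f4b2c0
 *	recursivelock 0x82f4b2c0
 */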
1145