xref: /haiku/src/system/kernel/locks/lock.cpp (revision 8bacd281ba5156920f4b8cfa1da7520122beab78)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 
11 #include <debug.h>
12 
13 #if KDEBUG
14 #define KDEBUG_STATIC static
15 static status_t _mutex_lock(struct mutex* lock, void* locker);
16 static void _mutex_unlock(struct mutex* lock);
17 #else
18 #define KDEBUG_STATIC
19 #define mutex_lock		mutex_lock_inline
20 #define mutex_unlock	mutex_unlock_inline
21 #define mutex_trylock	mutex_trylock_inline
22 #define mutex_lock_with_timeout	mutex_lock_with_timeout_inline
23 #endif
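// In non-KDEBUG builds the mutex calls made from this file (e.g. by the
// recursive_lock functions below) are redirected to the inline fast-path
// variants, presumably provided by <lock.h>; the matching #undef directives
// further down restore the names so that out-of-line mutex_lock() etc. are
// still defined here. In KDEBUG builds _mutex_lock()/_mutex_unlock() are
// file-local (static) and get forward declarations instead.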
24 
25 #include <lock.h>
26 
27 #include <stdlib.h>
28 #include <string.h>
29 
30 #include <int.h>
31 #include <kernel.h>
32 #include <listeners.h>
33 #include <scheduling_analysis.h>
34 #include <thread.h>
35 #include <util/AutoLock.h>
36 
37 
38 struct mutex_waiter {
39 	Thread*			thread;
40 	mutex_waiter*	next;		// next in queue
41 	mutex_waiter*	last;		// last in queue (valid for the first in queue)
42 };
43 
44 struct rw_lock_waiter {
45 	Thread*			thread;
46 	rw_lock_waiter*	next;		// next in queue
47 	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
48 	bool			writer;
49 };
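
// The waiter queues above are intrusive singly linked lists whose elements
// live on the waiting threads' stacks. Only the head element's "last" pointer
// is kept up to date, which makes appending at the tail and removing at the
// head O(1) without a separate list head structure (cf. rw_lock_wait() and
// _mutex_lock()).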
50 
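// Set in non-KDEBUG builds by _mutex_unlock() when the mutex is released
// while no waiter is enqueued yet (i.e. a late locker has decremented the
// count but not reached the spinlock); _mutex_lock() consumes the flag and
// returns immediately, owning the mutex without blocking.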
51 #define MUTEX_FLAG_RELEASED		0x2
52 
53 
54 int32
55 recursive_lock_get_recursion(recursive_lock *lock)
56 {
57 	if (RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id())
58 		return lock->recursion;
59 
60 	return -1;
61 }
62 
63 
64 void
65 recursive_lock_init(recursive_lock *lock, const char *name)
66 {
67 	recursive_lock_init_etc(lock, name, 0);
68 }
69 
70 
71 void
72 recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
73 {
74 	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
75 #if !KDEBUG
76 	lock->holder = -1;
77 #endif
78 	lock->recursion = 0;
79 }
80 
81 
82 void
83 recursive_lock_destroy(recursive_lock *lock)
84 {
85 	if (lock == NULL)
86 		return;
87 
88 	mutex_destroy(&lock->lock);
89 }
90 
91 
92 status_t
93 recursive_lock_lock(recursive_lock *lock)
94 {
95 #if KDEBUG
96 	if (!gKernelStartup && !are_interrupts_enabled()) {
97 		panic("recursive_lock_lock: called with interrupts disabled for lock "
98 			"%p (\"%s\")\n", lock, lock->lock.name);
99 	}
100 #endif
101 
102 	thread_id thread = thread_get_current_thread_id();
103 
104 	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
105 		mutex_lock(&lock->lock);
106 #if !KDEBUG
107 		lock->holder = thread;
108 #endif
109 	}
110 
111 	lock->recursion++;
112 	return B_OK;
113 }
114 
115 
116 status_t
117 recursive_lock_trylock(recursive_lock *lock)
118 {
119 	thread_id thread = thread_get_current_thread_id();
120 
121 #if KDEBUG
122 	if (!gKernelStartup && !are_interrupts_enabled()) {
123 		panic("recursive_lock_trylock: called with interrupts disabled for "
124 			"lock %p (\"%s\")\n", lock, lock->lock.name);
125 	}
126 #endif
127 
128 	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
129 		status_t status = mutex_trylock(&lock->lock);
130 		if (status != B_OK)
131 			return status;
132 
133 #if !KDEBUG
134 		lock->holder = thread;
135 #endif
136 	}
137 
138 	lock->recursion++;
139 	return B_OK;
140 }
141 
142 
143 void
144 recursive_lock_unlock(recursive_lock *lock)
145 {
146 	if (thread_get_current_thread_id() != RECURSIVE_LOCK_HOLDER(lock))
147 		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);
148 
149 	if (--lock->recursion == 0) {
150 #if !KDEBUG
151 		lock->holder = -1;
152 #endif
153 		mutex_unlock(&lock->lock);
154 	}
155 }
156 
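// Typical usage (illustrative only): the same thread may nest acquisitions;
// only the outermost recursive_lock_unlock() releases the underlying mutex.
//
//	recursive_lock lock;
//	recursive_lock_init(&lock, "my data");
//	recursive_lock_lock(&lock);
//	recursive_lock_lock(&lock);		// recursion == 2, does not block
//	recursive_lock_unlock(&lock);	// still held
//	recursive_lock_unlock(&lock);	// released, other threads may acquire
//	recursive_lock_destroy(&lock);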
157 
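// Gives up one recursion level of "from" and acquires "to". If that was the
// last level, the underlying mutexes are swapped via mutex_switch_lock(), so
// releasing "from" and blocking on "to" happen without a lost-wakeup window;
// on failure the recursion level (and holder) of "from" is restored.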
158 status_t
159 recursive_lock_switch_lock(recursive_lock* from, recursive_lock* to)
160 {
161 #if KDEBUG
162 	if (!gKernelStartup && !are_interrupts_enabled()) {
163 		panic("recursive_lock_switch_lock(): called with interrupts "
164 			"disabled for locks %p, %p", from, to);
165 	}
166 #endif
167 
168 	if (--from->recursion > 0)
169 		return recursive_lock_lock(to);
170 
171 #if !KDEBUG
172 	from->holder = -1;
173 #endif
174 
175 	thread_id thread = thread_get_current_thread_id();
176 
177 	if (thread == RECURSIVE_LOCK_HOLDER(to)) {
178 		to->recursion++;
179 		mutex_unlock(&from->lock);
180 		return B_OK;
181 	}
182 
183 	status_t status = mutex_switch_lock(&from->lock, &to->lock);
184 	if (status != B_OK) {
185 		from->recursion++;
186 #if !KDEBUG
187 		from->holder = thread;
188 #endif
189 		return status;
190 	}
191 
192 #if !KDEBUG
193 	to->holder = thread;
194 #endif
195 	to->recursion++;
196 	return B_OK;
197 }
198 
199 
200 status_t
201 recursive_lock_switch_from_mutex(mutex* from, recursive_lock* to)
202 {
203 #if KDEBUG
204 	if (!gKernelStartup && !are_interrupts_enabled()) {
205 		panic("recursive_lock_switch_from_mutex(): called with interrupts "
206 			"disabled for locks %p, %p", from, to);
207 	}
208 #endif
209 
210 	thread_id thread = thread_get_current_thread_id();
211 
212 	if (thread == RECURSIVE_LOCK_HOLDER(to)) {
213 		to->recursion++;
214 		mutex_unlock(from);
215 		return B_OK;
216 	}
217 
218 	status_t status = mutex_switch_lock(from, &to->lock);
219 	if (status != B_OK)
220 		return status;
221 
222 #if !KDEBUG
223 	to->holder = thread;
224 #endif
225 	to->recursion++;
226 	return B_OK;
227 }
228 
229 
230 status_t
231 recursive_lock_switch_from_read_lock(rw_lock* from, recursive_lock* to)
232 {
233 #if KDEBUG
234 	if (!gKernelStartup && !are_interrupts_enabled()) {
235 		panic("recursive_lock_switch_from_read_lock(): called with interrupts "
236 			"disabled for locks %p, %p", from, to);
237 	}
238 #endif
239 
240 	thread_id thread = thread_get_current_thread_id();
241 
242 	if (thread != RECURSIVE_LOCK_HOLDER(to)) {
243 		status_t status = mutex_switch_from_read_lock(from, &to->lock);
244 		if (status != B_OK)
245 			return status;
246 
247 #if !KDEBUG
248 		to->holder = thread;
249 #endif
250 	} else {
251 		rw_lock_read_unlock(from);
252 	}
253 
254 	to->recursion++;
255 	return B_OK;
256 }
257 
258 
259 static int
260 dump_recursive_lock_info(int argc, char** argv)
261 {
262 	if (argc < 2) {
263 		print_debugger_command_usage(argv[0]);
264 		return 0;
265 	}
266 
267 	recursive_lock* lock = (recursive_lock*)parse_expression(argv[1]);
268 
269 	if (!IS_KERNEL_ADDRESS(lock)) {
270 		kprintf("invalid address: %p\n", lock);
271 		return 0;
272 	}
273 
274 	kprintf("recursive_lock %p:\n", lock);
275 	kprintf("  mutex:           %p\n", &lock->lock);
276 	kprintf("  name:            %s\n", lock->lock.name);
277 	kprintf("  flags:           0x%x\n", lock->lock.flags);
278 #if KDEBUG
279 	kprintf("  holder:          %" B_PRId32 "\n", lock->lock.holder);
280 #else
281 	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
282 #endif
283 	kprintf("  recursion:       %d\n", lock->recursion);
284 
285 	kprintf("  waiting threads:");
286 	mutex_waiter* waiter = lock->lock.waiters;
287 	while (waiter != NULL) {
288 		kprintf(" %" B_PRId32, waiter->thread->id);
289 		waiter = waiter->next;
290 	}
291 	kputs("\n");
292 
293 	return 0;
294 }
295 
296 
297 //	#pragma mark -
298 
299 
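// The rw_lock state lives primarily in lock->count: each reader accounts for
// 1 and each writer for RW_LOCK_WRITER_COUNT_BASE, so a value below
// RW_LOCK_WRITER_COUNT_BASE means "readers only". The slow paths below run
// under the per-lock spinlock and additionally maintain holder,
// active_readers and pending_readers to decide whom rw_lock_unblock() wakes
// next; the uncontended cases are presumably handled by inline fast paths in
// <lock.h>.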
300 static status_t
301 rw_lock_wait(rw_lock* lock, bool writer, InterruptsSpinLocker& locker)
302 {
303 	// enqueue in waiter list
304 	rw_lock_waiter waiter;
305 	waiter.thread = thread_get_current_thread();
306 	waiter.next = NULL;
307 	waiter.writer = writer;
308 
309 	if (lock->waiters != NULL)
310 		lock->waiters->last->next = &waiter;
311 	else
312 		lock->waiters = &waiter;
313 
314 	lock->waiters->last = &waiter;
315 
316 	// block
317 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
318 	locker.Unlock();
319 
320 	status_t result = thread_block();
321 
322 	locker.Lock();
323 	return result;
324 }
325 
326 
327 static int32
328 rw_lock_unblock(rw_lock* lock)
329 {
330 	// Check whether there are any waiting threads at all and whether anyone
331 	// has the write lock.
332 	rw_lock_waiter* waiter = lock->waiters;
333 	if (waiter == NULL || lock->holder >= 0)
334 		return 0;
335 
336 	// writer at head of queue?
337 	if (waiter->writer) {
338 		if (lock->active_readers > 0 || lock->pending_readers > 0)
339 			return 0;
340 
341 		// dequeue writer
342 		lock->waiters = waiter->next;
343 		if (lock->waiters != NULL)
344 			lock->waiters->last = waiter->last;
345 
346 		lock->holder = waiter->thread->id;
347 
348 		// unblock thread
349 		thread_unblock(waiter->thread, B_OK);
350 
351 		waiter->thread = NULL;
352 		return RW_LOCK_WRITER_COUNT_BASE;
353 	}
354 
355 	// wake up one or more readers
356 	uint32 readerCount = 0;
357 	do {
358 		// dequeue reader
359 		lock->waiters = waiter->next;
360 		if (lock->waiters != NULL)
361 			lock->waiters->last = waiter->last;
362 
363 		readerCount++;
364 
365 		// unblock thread
366 		thread_unblock(waiter->thread, B_OK);
367 
368 		waiter->thread = NULL;
369 	} while ((waiter = lock->waiters) != NULL && !waiter->writer);
370 
371 	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
372 		lock->active_readers += readerCount;
373 
374 	return readerCount;
375 }
376 
377 
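// Typical usage (illustrative only); C++ callers usually prefer the RAII
// wrappers from <util/AutoLock.h> over locking manually:
//
//	rw_lock lock;
//	rw_lock_init(&lock, "my data");
//	rw_lock_read_lock(&lock);
//	// ... read the protected data ...
//	rw_lock_read_unlock(&lock);
//	rw_lock_write_lock(&lock);
//	// ... modify the protected data ...
//	rw_lock_write_unlock(&lock);
//	rw_lock_destroy(&lock);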
378 void
379 rw_lock_init(rw_lock* lock, const char* name)
380 {
381 	lock->name = name;
382 	lock->waiters = NULL;
383 	B_INITIALIZE_SPINLOCK(&lock->lock);
384 	lock->holder = -1;
385 	lock->count = 0;
386 	lock->owner_count = 0;
387 	lock->active_readers = 0;
388 	lock->pending_readers = 0;
389 	lock->flags = 0;
390 
391 	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
392 	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
393 }
394 
395 
396 void
397 rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
398 {
399 	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
400 	lock->waiters = NULL;
401 	B_INITIALIZE_SPINLOCK(&lock->lock);
402 	lock->holder = -1;
403 	lock->count = 0;
404 	lock->owner_count = 0;
405 	lock->active_readers = 0;
406 	lock->pending_readers = 0;
407 	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;
408 
409 	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
410 	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
411 }
412 
413 
414 void
415 rw_lock_destroy(rw_lock* lock)
416 {
417 	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
418 		? (char*)lock->name : NULL;
419 
420 	// unblock all waiters
421 	InterruptsSpinLocker locker(lock->lock);
422 
423 #if KDEBUG
424 	if ((atomic_get(&lock->count) != 0 || lock->waiters != NULL)
425 			&& thread_get_current_thread_id() != lock->holder) {
426 		panic("rw_lock_destroy(): lock is in use and the caller "
427 			"doesn't hold the write lock (%p)", lock);
428 
429 		locker.Unlock();
430 		if (rw_lock_write_lock(lock) != B_OK)
431 			return;
432 		locker.Lock();
433 	}
434 #endif
435 
436 	while (rw_lock_waiter* waiter = lock->waiters) {
437 		// dequeue
438 		lock->waiters = waiter->next;
439 
440 		// unblock thread
441 		thread_unblock(waiter->thread, B_ERROR);
442 	}
443 
444 	lock->name = NULL;
445 
446 	locker.Unlock();
447 
448 	free(name);
449 }
450 
451 
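// With KDEBUG_RW_LOCK_DEBUG enabled, each read lock is additionally recorded
// in the acquiring thread's fixed-size Thread::held_read_locks array, because
// lock->count only says how many read locks exist, not which threads hold
// them. This per-thread bookkeeping is what _rw_lock_is_read_locked() checks
// and presumably what ASSERT_UNLOCKED_RW_LOCK() builds on.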
452 #if KDEBUG_RW_LOCK_DEBUG
453 
454 bool
455 _rw_lock_is_read_locked(rw_lock* lock)
456 {
457 	if (lock->holder == thread_get_current_thread_id())
458 		return true;
459 
460 	Thread* thread = thread_get_current_thread();
461 	for (size_t i = 0; i < B_COUNT_OF(Thread::held_read_locks); i++) {
462 		if (thread->held_read_locks[i] == lock)
463 			return true;
464 	}
465 	return false;
466 }
467 
468 
469 static void
470 _rw_lock_set_read_locked(rw_lock* lock)
471 {
472 	Thread* thread = thread_get_current_thread();
473 	for (size_t i = 0; i < B_COUNT_OF(Thread::held_read_locks); i++) {
474 		if (thread->held_read_locks[i] != NULL)
475 			continue;
476 
477 		thread->held_read_locks[i] = lock;
478 		return;
479 	}
480 
481 	panic("too many read locks!");
482 }
483 
484 
485 static void
486 _rw_lock_unset_read_locked(rw_lock* lock)
487 {
488 	Thread* thread = thread_get_current_thread();
489 	for (size_t i = 0; i < B_COUNT_OF(Thread::held_read_locks); i++) {
490 		if (thread->held_read_locks[i] != lock)
491 			continue;
492 
493 		thread->held_read_locks[i] = NULL;
494 		return;
495 	}
496 
497 	panic("_rw_lock_unset_read_locked(): lock %p not read-locked by current thread", lock);
498 }
499 
500 #endif
501 
502 
503 status_t
504 _rw_lock_read_lock(rw_lock* lock)
505 {
506 #if KDEBUG
507 	if (!gKernelStartup && !are_interrupts_enabled()) {
508 		panic("_rw_lock_read_lock(): called with interrupts disabled for lock %p",
509 			lock);
510 	}
511 #endif
512 #if KDEBUG_RW_LOCK_DEBUG
513 	int32 oldCount = atomic_add(&lock->count, 1);
514 	if (oldCount < RW_LOCK_WRITER_COUNT_BASE) {
515 		ASSERT_UNLOCKED_RW_LOCK(lock);
516 		_rw_lock_set_read_locked(lock);
517 		return B_OK;
518 	}
519 #endif
520 
521 	InterruptsSpinLocker locker(lock->lock);
522 
523 	// We might be the writer ourselves.
524 	if (lock->holder == thread_get_current_thread_id()) {
525 		lock->owner_count++;
526 		return B_OK;
527 	}
528 
529 	// If we hold a read lock already, but some other thread is waiting
530 	// for a write lock, then trying to read-lock again will deadlock.
531 	ASSERT_UNLOCKED_RW_LOCK(lock);
532 
533 	// The writer that originally had the lock when we called atomic_add() might
534 	// already have gone and another writer could have overtaken us. In this
535 	// case the original writer set pending_readers, so we know that we don't
536 	// have to wait.
537 	if (lock->pending_readers > 0) {
538 		lock->pending_readers--;
539 
540 		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
541 			lock->active_readers++;
542 
543 #if KDEBUG_RW_LOCK_DEBUG
544 		_rw_lock_set_read_locked(lock);
545 #endif
546 		return B_OK;
547 	}
548 
549 	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);
550 
551 	// we need to wait
552 	status_t status = rw_lock_wait(lock, false, locker);
553 
554 #if KDEBUG_RW_LOCK_DEBUG
555 	if (status == B_OK)
556 		_rw_lock_set_read_locked(lock);
557 #endif
558 
559 	return status;
560 }
561 
562 
563 status_t
564 _rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
565 	bigtime_t timeout)
566 {
567 #if KDEBUG
568 	if (!gKernelStartup && !are_interrupts_enabled()) {
569 		panic("_rw_lock_read_lock_with_timeout(): called with interrupts "
570 			"disabled for lock %p", lock);
571 	}
572 #endif
573 #if KDEBUG_RW_LOCK_DEBUG
574 	int32 oldCount = atomic_add(&lock->count, 1);
575 	if (oldCount < RW_LOCK_WRITER_COUNT_BASE) {
576 		ASSERT_UNLOCKED_RW_LOCK(lock);
577 		_rw_lock_set_read_locked(lock);
578 		return B_OK;
579 	}
580 #endif
581 
582 	InterruptsSpinLocker locker(lock->lock);
583 
584 	// We might be the writer ourselves.
585 	if (lock->holder == thread_get_current_thread_id()) {
586 		lock->owner_count++;
587 		return B_OK;
588 	}
589 
590 	ASSERT_UNLOCKED_RW_LOCK(lock);
591 
592 	// The writer that originally had the lock when we called atomic_add() might
593 	// already have gone and another writer could have overtaken us. In this
594 	// case the original writer set pending_readers, so we know that we don't
595 	// have to wait.
596 	if (lock->pending_readers > 0) {
597 		lock->pending_readers--;
598 
599 		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
600 			lock->active_readers++;
601 
602 #if KDEBUG_RW_LOCK_DEBUG
603 		_rw_lock_set_read_locked(lock);
604 #endif
605 		return B_OK;
606 	}
607 
608 	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);
609 
610 	// we need to wait
611 
612 	// enqueue in waiter list
613 	rw_lock_waiter waiter;
614 	waiter.thread = thread_get_current_thread();
615 	waiter.next = NULL;
616 	waiter.writer = false;
617 
618 	if (lock->waiters != NULL)
619 		lock->waiters->last->next = &waiter;
620 	else
621 		lock->waiters = &waiter;
622 
623 	lock->waiters->last = &waiter;
624 
625 	// block
626 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
627 	locker.Unlock();
628 
629 	status_t error = thread_block_with_timeout(timeoutFlags, timeout);
630 	if (error == B_OK || waiter.thread == NULL) {
631 		// We were unblocked successfully -- potentially our unblocker overtook
632 		// us after we already failed. In either case, we've got the lock now.
633 #if KDEBUG_RW_LOCK_DEBUG
634 		_rw_lock_set_read_locked(lock);
635 #endif
636 		return B_OK;
637 	}
638 
639 	locker.Lock();
640 	// We failed to get the lock -- dequeue from waiter list.
641 	rw_lock_waiter* previous = NULL;
642 	rw_lock_waiter* other = lock->waiters;
643 	while (other != &waiter) {
644 		previous = other;
645 		other = other->next;
646 	}
647 
648 	if (previous == NULL) {
649 		// we are the first in line
650 		lock->waiters = waiter.next;
651 		if (lock->waiters != NULL)
652 			lock->waiters->last = waiter.last;
653 	} else {
654 		// one or more other waiters are before us in the queue
655 		previous->next = waiter.next;
656 		if (lock->waiters->last == &waiter)
657 			lock->waiters->last = previous;
658 	}
659 
660 	// Decrement the count. ATM this is all we have to do. There's at least
661 	// one writer ahead of us -- otherwise the last writer would have unblocked
662 	// us (writers only manipulate the lock data with thread spinlock being
663 	// held) -- so our leaving doesn't make a difference to the ones behind us
664 	// in the queue.
665 	atomic_add(&lock->count, -1);
666 
667 	return error;
668 }
669 
670 
671 void
672 _rw_lock_read_unlock(rw_lock* lock)
673 {
674 #if KDEBUG_RW_LOCK_DEBUG
675 	int32 oldCount = atomic_add(&lock->count, -1);
676 	if (oldCount < RW_LOCK_WRITER_COUNT_BASE) {
677 		_rw_lock_unset_read_locked(lock);
678 		return;
679 	}
680 #endif
681 
682 	InterruptsSpinLocker locker(lock->lock);
683 
684 	// If we're still holding the write lock or if there are other readers,
685 	// no-one can be woken up.
686 	if (lock->holder == thread_get_current_thread_id()) {
687 		ASSERT((lock->owner_count % RW_LOCK_WRITER_COUNT_BASE) > 0);
688 		lock->owner_count--;
689 		return;
690 	}
691 
692 #if KDEBUG_RW_LOCK_DEBUG
693 	_rw_lock_unset_read_locked(lock);
694 #endif
695 
696 	if (--lock->active_readers > 0)
697 		return;
698 
699 	if (lock->active_readers < 0) {
700 		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
701 		lock->active_readers = 0;
702 		return;
703 	}
704 
705 	rw_lock_unblock(lock);
706 }
707 
708 
709 status_t
710 rw_lock_write_lock(rw_lock* lock)
711 {
712 #if KDEBUG
713 	if (!gKernelStartup && !are_interrupts_enabled()) {
714 		panic("rw_lock_write_lock(): called with interrupts disabled for lock %p",
715 			lock);
716 	}
717 #endif
718 
719 	InterruptsSpinLocker locker(lock->lock);
720 
721 	// If we're already the lock holder, we just need to increment the owner
722 	// count.
723 	thread_id thread = thread_get_current_thread_id();
724 	if (lock->holder == thread) {
725 		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
726 		return B_OK;
727 	}
728 
729 	ASSERT_UNLOCKED_RW_LOCK(lock);
730 
731 	// announce our claim
732 	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);
733 
734 	if (oldCount == 0) {
735 		// No-one else held a read or write lock, so it's ours now.
736 		lock->holder = thread;
737 		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
738 		return B_OK;
739 	}
740 
741 	// We have to wait. If we're the first writer, note the current reader
742 	// count.
743 	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
744 		lock->active_readers = oldCount - lock->pending_readers;
745 
746 	status_t status = rw_lock_wait(lock, true, locker);
747 	if (status == B_OK) {
748 		lock->holder = thread;
749 		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
750 	}
751 
752 	return status;
753 }
754 
755 
756 void
757 _rw_lock_write_unlock(rw_lock* lock)
758 {
759 	InterruptsSpinLocker locker(lock->lock);
760 
761 	if (thread_get_current_thread_id() != lock->holder) {
762 		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
763 			lock);
764 		return;
765 	}
766 
767 	ASSERT(lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE);
768 
769 	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
770 	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
771 		return;
772 
773 	// We gave up our last write lock -- clean up and unblock waiters.
774 	int32 readerCount = lock->owner_count;
775 	lock->holder = -1;
776 	lock->owner_count = 0;
777 
778 #if KDEBUG_RW_LOCK_DEBUG
779 	if (readerCount != 0)
780 		_rw_lock_set_read_locked(lock);
781 #endif
782 
783 	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
784 	oldCount -= RW_LOCK_WRITER_COUNT_BASE;
785 
786 	if (oldCount != 0) {
787 		// If writers are waiting, take over our reader count.
788 		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
789 			lock->active_readers = readerCount;
790 			rw_lock_unblock(lock);
791 		} else {
792 			// No waiting writer, but there are one or more readers. We will
793 			// unblock all waiting readers -- that's the easy part -- and must
794 			// also make sure that all readers that haven't entered the critical
795 			// section yet won't start to wait. Otherwise a writer overtaking
796 			// such a reader will correctly start to wait, but the reader,
797 			// seeing the writer count > 0, would also start to wait. We set
798 			// pending_readers to the number of readers that are still expected
799 			// to enter the critical section.
800 			lock->pending_readers = oldCount - readerCount
801 				- rw_lock_unblock(lock);
802 		}
803 	}
804 }
805 
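// Worked example for the pending_readers hand-off in _rw_lock_write_unlock():
// a writer holds the lock (count == RW_LOCK_WRITER_COUNT_BASE, readerCount ==
// 0) and two readers have each already added 1 to count on their way into
// _rw_lock_read_lock(). When the writer unlocks, oldCount ends up as 2 with
// no waiting writer, so the else branch runs: rw_lock_unblock() wakes every
// reader that managed to enqueue, and pending_readers is set to the rest, so
// that a reader which bumped count but has not taken the spinlock yet falls
// through without waiting instead of blocking on a lock no writer holds
// anymore.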
806 
807 static int
808 dump_rw_lock_info(int argc, char** argv)
809 {
810 	if (argc < 2) {
811 		print_debugger_command_usage(argv[0]);
812 		return 0;
813 	}
814 
815 	rw_lock* lock = (rw_lock*)parse_expression(argv[1]);
816 
817 	if (!IS_KERNEL_ADDRESS(lock)) {
818 		kprintf("invalid address: %p\n", lock);
819 		return 0;
820 	}
821 
822 	kprintf("rw lock %p:\n", lock);
823 	kprintf("  name:            %s\n", lock->name);
824 	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
825 	kprintf("  count:           %#" B_PRIx32 "\n", lock->count);
826 	kprintf("  active readers:  %d\n", lock->active_readers);
827 	kprintf("  pending readers: %d\n", lock->pending_readers);
828 	kprintf("  owner count:     %#" B_PRIx32 "\n", lock->owner_count);
829 	kprintf("  flags:           %#" B_PRIx32 "\n", lock->flags);
830 
831 	kprintf("  waiting threads:");
832 	rw_lock_waiter* waiter = lock->waiters;
833 	while (waiter != NULL) {
834 		kprintf(" %" B_PRId32 "/%c", waiter->thread->id, waiter->writer ? 'w' : 'r');
835 		waiter = waiter->next;
836 	}
837 	kputs("\n");
838 
839 	return 0;
840 }
841 
842 
843 // #pragma mark -
844 
845 
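// Typical usage (illustrative only); C++ callers usually go through the RAII
// wrappers in <util/AutoLock.h> (e.g. MutexLocker) instead of locking
// manually:
//
//	mutex lock;
//	mutex_init(&lock, "my data");
//	mutex_lock(&lock);
//	// ... critical section ...
//	mutex_unlock(&lock);
//	mutex_destroy(&lock);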
846 void
847 mutex_init(mutex* lock, const char *name)
848 {
849 	mutex_init_etc(lock, name, 0);
850 }
851 
852 
853 void
854 mutex_init_etc(mutex* lock, const char *name, uint32 flags)
855 {
856 	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
857 	lock->waiters = NULL;
858 	B_INITIALIZE_SPINLOCK(&lock->lock);
859 #if KDEBUG
860 	lock->holder = -1;
861 #else
862 	lock->count = 0;
863 #endif
864 	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
865 
866 	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
867 	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
868 }
869 
870 
871 void
872 mutex_destroy(mutex* lock)
873 {
874 	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
875 		? (char*)lock->name : NULL;
876 
877 	// unblock all waiters
878 	InterruptsSpinLocker locker(lock->lock);
879 
880 #if KDEBUG
881 	if (lock->holder != -1 && thread_get_current_thread_id() != lock->holder) {
882 		panic("mutex_destroy(): the lock (%p) is held by %" B_PRId32 ", not "
883 			"by the caller", lock, lock->holder);
884 		if (_mutex_lock(lock, &locker) != B_OK)
885 			return;
886 		locker.Lock();
887 	}
888 #endif
889 
890 	while (mutex_waiter* waiter = lock->waiters) {
891 		// dequeue
892 		lock->waiters = waiter->next;
893 
894 		// unblock thread
895 		Thread* thread = waiter->thread;
896 		waiter->thread = NULL;
897 		thread_unblock(thread, B_ERROR);
898 	}
899 
900 	lock->name = NULL;
901 	lock->flags = 0;
902 #if KDEBUG
903 	lock->holder = 0;
904 #else
905 	lock->count = INT16_MIN;
906 #endif
907 
908 	locker.Unlock();
909 
910 	free(name);
911 }
912 
913 
914 static inline status_t
915 mutex_lock_threads_locked(mutex* lock, InterruptsSpinLocker* locker)
916 {
917 #if KDEBUG
918 	return _mutex_lock(lock, locker);
919 #else
920 	if (atomic_add(&lock->count, -1) < 0)
921 		return _mutex_lock(lock, locker);
922 	return B_OK;
923 #endif
924 }
925 
926 
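// Unlocks "from" and then locks "to". "to"'s spinlock is acquired before
// "from" is released, so releasing "from" and enqueueing on "to" cannot race
// against "to"'s slow-path unlock (which needs the same spinlock); a wakeup
// on "to" can therefore not be lost in between.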
927 status_t
928 mutex_switch_lock(mutex* from, mutex* to)
929 {
930 #if KDEBUG
931 	if (!gKernelStartup && !are_interrupts_enabled()) {
932 		panic("mutex_switch_lock(): called with interrupts disabled "
933 			"for locks %p, %p", from, to);
934 	}
935 #endif
936 
937 	InterruptsSpinLocker locker(to->lock);
938 
939 	mutex_unlock(from);
940 
941 	return mutex_lock_threads_locked(to, &locker);
942 }
943 
944 
945 void
946 mutex_transfer_lock(mutex* lock, thread_id thread)
947 {
948 #if KDEBUG
949 	if (thread_get_current_thread_id() != lock->holder)
950 		panic("mutex_transfer_lock(): current thread is not the lock holder!");
951 	lock->holder = thread;
952 #endif
953 }
954 
955 
956 status_t
957 mutex_switch_from_read_lock(rw_lock* from, mutex* to)
958 {
959 #if KDEBUG
960 	if (!gKernelStartup && !are_interrupts_enabled()) {
961 		panic("mutex_switch_from_read_lock(): called with interrupts disabled "
962 			"for locks %p, %p", from, to);
963 	}
964 #endif
965 
966 	InterruptsSpinLocker locker(to->lock);
967 
968 	rw_lock_read_unlock(from);
969 
970 	return mutex_lock_threads_locked(to, &locker);
971 }
972 
973 
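// Slow path of mutex_lock(): in non-KDEBUG builds the fast path has already
// decremented lock->count and only ends up here on contention (cf.
// mutex_lock_threads_locked() above); in KDEBUG builds every acquisition goes
// through this function. If "_locker" is non-NULL the caller already holds
// lock->lock and passes its InterruptsSpinLocker in, otherwise the spinlock
// is acquired here.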
974 KDEBUG_STATIC status_t
975 _mutex_lock(mutex* lock, void* _locker)
976 {
977 #if KDEBUG
978 	if (!gKernelStartup && _locker == NULL && !are_interrupts_enabled()) {
979 		panic("_mutex_lock(): called with interrupts disabled for lock %p",
980 			lock);
981 	}
982 #endif
983 	// only acquire the spinlock if the caller didn't pass a locker in
984 	// lock only, if !lockLocked
985 	InterruptsSpinLocker* locker
986 		= reinterpret_cast<InterruptsSpinLocker*>(_locker);
987 
988 	InterruptsSpinLocker lockLocker;
989 	if (locker == NULL) {
990 		lockLocker.SetTo(lock->lock, false);
991 		locker = &lockLocker;
992 	}
993 
994 	// Might have been released after we decremented the count, but before
995 	// we acquired the spinlock.
996 #if KDEBUG
997 	if (lock->holder < 0) {
998 		lock->holder = thread_get_current_thread_id();
999 		return B_OK;
1000 	} else if (lock->holder == thread_get_current_thread_id()) {
1001 		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
1002 			lock->holder);
1003 	} else if (lock->holder == 0) {
1004 		panic("_mutex_lock(): using uninitialized lock %p", lock);
1005 	}
1006 #else
1007 	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
1008 		lock->flags &= ~MUTEX_FLAG_RELEASED;
1009 		return B_OK;
1010 	}
1011 #endif
1012 
1013 	// enqueue in waiter list
1014 	mutex_waiter waiter;
1015 	waiter.thread = thread_get_current_thread();
1016 	waiter.next = NULL;
1017 
1018 	if (lock->waiters != NULL)
1019 		lock->waiters->last->next = &waiter;
1020 	else
1021 		lock->waiters = &waiter;
1022 
1023 	lock->waiters->last = &waiter;
1024 
1025 	// block
1026 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
1027 	locker->Unlock();
1028 
1029 	status_t error = thread_block();
1030 #if KDEBUG
1031 	if (error == B_OK) {
1032 		ASSERT(lock->holder == waiter.thread->id);
1033 	} else {
1034 		// This should only happen when the mutex was destroyed.
1035 		ASSERT(waiter.thread == NULL);
1036 	}
1037 #endif
1038 	return error;
1039 }
1040 
1041 
1042 KDEBUG_STATIC void
1043 _mutex_unlock(mutex* lock)
1044 {
1045 	InterruptsSpinLocker locker(lock->lock);
1046 
1047 #if KDEBUG
1048 	if (thread_get_current_thread_id() != lock->holder) {
1049 		panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to "
1050 			"release mutex %p (current holder %" B_PRId32 ")\n",
1051 			thread_get_current_thread_id(), lock, lock->holder);
1052 		return;
1053 	}
1054 #endif
1055 
1056 	mutex_waiter* waiter = lock->waiters;
1057 	if (waiter != NULL) {
1058 		// dequeue the first waiter
1059 		lock->waiters = waiter->next;
1060 		if (lock->waiters != NULL)
1061 			lock->waiters->last = waiter->last;
1062 
1063 #if KDEBUG
1064 		// Already set the holder to the unblocked thread. Besides that this
1065 		// actually reflects the current situation, setting it to -1 would
1066 		// cause a race condition, since another locker could think the lock
1067 		// is not held by anyone.
1068 		lock->holder = waiter->thread->id;
1069 #endif
1070 
1071 		// unblock thread
1072 		thread_unblock(waiter->thread, B_OK);
1073 	} else {
1074 		// There are no waiters, so mark the lock as released.
1075 #if KDEBUG
1076 		lock->holder = -1;
1077 #else
1078 		lock->flags |= MUTEX_FLAG_RELEASED;
1079 #endif
1080 	}
1081 }
1082 
1083 
1084 KDEBUG_STATIC status_t
1085 _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
1086 {
1087 #if KDEBUG
1088 	if (!gKernelStartup && !are_interrupts_enabled()) {
1089 		panic("_mutex_lock_with_timeout(): called with interrupts disabled "
1090 			"for lock %p", lock);
1091 	}
1092 #endif
1093 
1094 	InterruptsSpinLocker locker(lock->lock);
1095 
1096 	// Might have been released after we decremented the count, but before
1097 	// we acquired the spinlock.
1098 #if KDEBUG
1099 	if (lock->holder < 0) {
1100 		lock->holder = thread_get_current_thread_id();
1101 		return B_OK;
1102 	} else if (lock->holder == thread_get_current_thread_id()) {
1103 		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
1104 			lock->holder);
1105 	} else if (lock->holder == 0) {
1106 		panic("_mutex_lock(): using uninitialized lock %p", lock);
1107 	}
1108 #else
1109 	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
1110 		lock->flags &= ~MUTEX_FLAG_RELEASED;
1111 		return B_OK;
1112 	}
1113 #endif
1114 
1115 	// enqueue in waiter list
1116 	mutex_waiter waiter;
1117 	waiter.thread = thread_get_current_thread();
1118 	waiter.next = NULL;
1119 
1120 	if (lock->waiters != NULL)
1121 		lock->waiters->last->next = &waiter;
1122 	else
1123 		lock->waiters = &waiter;
1124 
1125 	lock->waiters->last = &waiter;
1126 
1127 	// block
1128 	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
1129 	locker.Unlock();
1130 
1131 	status_t error = thread_block_with_timeout(timeoutFlags, timeout);
1132 
1133 	if (error == B_OK) {
1134 #if KDEBUG
1135 		ASSERT(lock->holder == waiter.thread->id);
1136 #endif
1137 	} else {
1138 		// If the lock was destroyed, our "thread" entry will be NULL.
1139 		if (waiter.thread == NULL)
1140 			return B_ERROR;
1141 
1142 		// TODO: There is still a race condition during mutex destruction,
1143 		// if we resume due to a timeout before our thread is set to NULL.
1144 
1145 		locker.Lock();
1146 
1147 		// If the timeout occurred, we must remove our waiter structure from
1148 		// the queue.
1149 		mutex_waiter* previousWaiter = NULL;
1150 		mutex_waiter* otherWaiter = lock->waiters;
1151 		while (otherWaiter != NULL && otherWaiter != &waiter) {
1152 			previousWaiter = otherWaiter;
1153 			otherWaiter = otherWaiter->next;
1154 		}
1155 		if (otherWaiter == &waiter) {
1156 			// the structure is still in the list -- dequeue
1157 			if (&waiter == lock->waiters) {
1158 				if (waiter.next != NULL)
1159 					waiter.next->last = waiter.last;
1160 				lock->waiters = waiter.next;
1161 			} else {
1162 				if (waiter.next == NULL)
1163 					lock->waiters->last = previousWaiter;
1164 				previousWaiter->next = waiter.next;
1165 			}
1166 
1167 #if !KDEBUG
1168 			// we need to fix the lock count
1169 			atomic_add(&lock->count, 1);
1170 #endif
1171 		} else {
1172 			// the structure is not in the list -- even though the timeout
1173 			// occurred, this means we own the lock now
1174 #if KDEBUG
1175 			ASSERT(lock->holder == waiter.thread->id);
1176 #endif
1177 			return B_OK;
1178 		}
1179 	}
1180 
1181 	return error;
1182 }
1183 
1184 
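// Drop the inline aliases installed at the top of the file (non-KDEBUG
// builds) so that genuine out-of-line mutex_trylock(), mutex_lock(),
// mutex_unlock() and mutex_lock_with_timeout() symbols are emitted below.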
1185 #undef mutex_trylock
1186 status_t
1187 mutex_trylock(mutex* lock)
1188 {
1189 #if KDEBUG
1190 	InterruptsSpinLocker _(lock->lock);
1191 
1192 	if (lock->holder < 0) {
1193 		lock->holder = thread_get_current_thread_id();
1194 		return B_OK;
1195 	} else if (lock->holder == 0) {
1196 		panic("mutex_trylock(): using uninitialized lock %p", lock);
1197 	}
1198 	return B_WOULD_BLOCK;
1199 #else
1200 	return mutex_trylock_inline(lock);
1201 #endif
1202 }
1203 
1204 
1205 #undef mutex_lock
1206 status_t
1207 mutex_lock(mutex* lock)
1208 {
1209 #if KDEBUG
1210 	return _mutex_lock(lock, NULL);
1211 #else
1212 	return mutex_lock_inline(lock);
1213 #endif
1214 }
1215 
1216 
1217 #undef mutex_unlock
1218 void
1219 mutex_unlock(mutex* lock)
1220 {
1221 #if KDEBUG
1222 	_mutex_unlock(lock);
1223 #else
1224 	mutex_unlock_inline(lock);
1225 #endif
1226 }
1227 
1228 
1229 #undef mutex_lock_with_timeout
1230 status_t
1231 mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
1232 {
1233 #if KDEBUG
1234 	return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
1235 #else
1236 	return mutex_lock_with_timeout_inline(lock, timeoutFlags, timeout);
1237 #endif
1238 }
1239 
1240 
1241 static int
1242 dump_mutex_info(int argc, char** argv)
1243 {
1244 	if (argc < 2) {
1245 		print_debugger_command_usage(argv[0]);
1246 		return 0;
1247 	}
1248 
1249 	mutex* lock = (mutex*)parse_expression(argv[1]);
1250 
1251 	if (!IS_KERNEL_ADDRESS(lock)) {
1252 		kprintf("invalid address: %p\n", lock);
1253 		return 0;
1254 	}
1255 
1256 	kprintf("mutex %p:\n", lock);
1257 	kprintf("  name:            %s\n", lock->name);
1258 	kprintf("  flags:           0x%x\n", lock->flags);
1259 #if KDEBUG
1260 	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
1261 #else
1262 	kprintf("  count:           %" B_PRId32 "\n", lock->count);
1263 #endif
1264 
1265 	kprintf("  waiting threads:");
1266 	mutex_waiter* waiter = lock->waiters;
1267 	while (waiter != NULL) {
1268 		kprintf(" %" B_PRId32, waiter->thread->id);
1269 		waiter = waiter->next;
1270 	}
1271 	kputs("\n");
1272 
1273 	return 0;
1274 }
1275 
1276 
1277 // #pragma mark -
1278 
1279 
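// Registers the "mutex", "rwlock" and "recursivelock" KDL commands defined
// above; they take the address of the lock to inspect, e.g. (prompt and
// address purely illustrative):
//
//	kdebug> mutex 0xffffffff82345678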
1280 void
1281 lock_debug_init()
1282 {
1283 	add_debugger_command_etc("mutex", &dump_mutex_info,
1284 		"Dump info about a mutex",
1285 		"<mutex>\n"
1286 		"Prints info about the specified mutex.\n"
1287 		"  <mutex>  - pointer to the mutex to print the info for.\n", 0);
1288 	add_debugger_command_etc("rwlock", &dump_rw_lock_info,
1289 		"Dump info about an rw lock",
1290 		"<lock>\n"
1291 		"Prints info about the specified rw lock.\n"
1292 		"  <lock>  - pointer to the rw lock to print the info for.\n", 0);
1293 	add_debugger_command_etc("recursivelock", &dump_recursive_lock_info,
1294 		"Dump info about a recursive lock",
1295 		"<lock>\n"
1296 		"Prints info about the specified recursive lock.\n"
1297 		"  <lock>  - pointer to the recursive lock to print the info for.\n",
1298 		0);
1299 }
1300