xref: /haiku/src/system/kernel/locks/lock.cpp (revision 088cebb96f8acf912cb13f1d92ce45a1729c25d6)
/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Mutex, recursive_lock, and rw_lock code */


#include <lock.h>

#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <listeners.h>
#include <scheduling_analysis.h>
#include <thread.h>
#include <util/AutoLock.h>


struct mutex_waiter {
	Thread*			thread;
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};

struct rw_lock_waiter {
	Thread*			thread;
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;
};

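// Note on the waiter structures above: both form an intrusive, singly linked
// FIFO queue that lives entirely on the waiting threads' stacks. `next`
// points towards the tail, and the element at the head additionally caches a
// pointer to the tail in `last`, which keeps enqueueing O(1) without a
// doubly linked list. Illustrative sketch only (not part of this file),
// using a hypothetical head pointer `waiters` like the one in the lock
// structures:
//
//	// enqueue `newWaiter` at the tail
//	if (waiters != NULL)
//		waiters->last->next = &newWaiter;
//	else
//		waiters = &newWaiter;
//	waiters->last = &newWaiter;
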
#define MUTEX_FLAG_RELEASED		0x2


int32
recursive_lock_get_recursion(recursive_lock *lock)
{
	if (RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id())
		return lock->recursion;

	return -1;
}


void
recursive_lock_init(recursive_lock *lock, const char *name)
{
	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_destroy(recursive_lock *lock)
{
	if (lock == NULL)
		return;

	mutex_destroy(&lock->lock);
}


status_t
recursive_lock_lock(recursive_lock *lock)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_lock: called with interrupts disabled for lock "
			"%p (\"%s\")\n", lock, lock->lock.name);
	}
#endif

	thread_id thread = thread_get_current_thread_id();

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		mutex_lock(&lock->lock);
#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = thread_get_current_thread_id();

#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_trylock: called with interrupts disabled for "
			"lock %p (\"%s\")\n", lock, lock->lock.name);
	}
#endif

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		status_t status = mutex_trylock(&lock->lock);
		if (status != B_OK)
			return status;

#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


void
recursive_lock_unlock(recursive_lock *lock)
{
	if (thread_get_current_thread_id() != RECURSIVE_LOCK_HOLDER(lock))
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if (--lock->recursion == 0) {
#if !KDEBUG
		lock->holder = -1;
#endif
		mutex_unlock(&lock->lock);
	}
}


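/*	A minimal usage sketch for the recursive_lock API above -- illustrative
	only, not part of this file; the lock and function names are made up:

		static recursive_lock sCacheLock;

		static void
		example_init()
		{
			recursive_lock_init(&sCacheLock, "example cache");
		}

		static void
		example_update()
		{
			recursive_lock_lock(&sCacheLock);
			// Helpers called from here may lock sCacheLock again; only the
			// outermost recursive_lock_unlock() releases the underlying
			// mutex.
			recursive_lock_unlock(&sCacheLock);
		}
*/
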
//	#pragma mark -


static status_t
rw_lock_wait(rw_lock* lock, bool writer, InterruptsSpinLocker& locker)
{
	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
	locker.Unlock();

	status_t result = thread_block();

	locker.Lock();
	return result;
}


static int32
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there are any waiting threads at all and whether anyone
	// has the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder >= 0)
		return 0;

	// writer at head of queue?
	if (waiter->writer) {
		if (lock->active_readers > 0 || lock->pending_readers > 0)
			return 0;

		// dequeue writer
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->holder = waiter->thread->id;

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

		waiter->thread = NULL;
		return RW_LOCK_WRITER_COUNT_BASE;
	}

	// wake up one or more readers
	uint32 readerCount = 0;
	do {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		readerCount++;

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

		waiter->thread = NULL;
	} while ((waiter = lock->waiters) != NULL && !waiter->writer);

	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers += readerCount;

	return readerCount;
}


void
rw_lock_init(rw_lock* lock, const char* name)
{
	lock->name = name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = 0;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}


void
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}


void
rw_lock_destroy(rw_lock* lock)
{
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (lock->waiters != NULL && thread_get_current_thread_id()
			!= lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		thread_unblock(waiter->thread, B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}


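// The functions in this block are only compiled when KDEBUG_RW_LOCK_DEBUG is
// disabled. With that option enabled, <lock.h> is expected to map read locks
// onto write locks so that locking problems surface more readily; the inline
// read-lock fast path (an atomic_add() on `count`) otherwise lives in the
// header and only falls back to the slow paths below on contention.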
#if !KDEBUG_RW_LOCK_DEBUG

status_t
_rw_lock_read_lock(rw_lock* lock)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_rw_lock_read_lock(): called with interrupts disabled for lock %p",
			lock);
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// We might be the writer ourselves.
	if (lock->holder == thread_get_current_thread_id()) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait
	return rw_lock_wait(lock, false, locker);
}


status_t
_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_rw_lock_read_lock_with_timeout(): called with interrupts "
			"disabled for lock %p", lock);
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// We might be the writer ourselves.
	if (lock->holder == thread_get_current_thread_id()) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait

	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;
	waiter.writer = false;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
	locker.Unlock();

	status_t error = thread_block_with_timeout(timeoutFlags, timeout);
	if (error == B_OK || waiter.thread == NULL) {
		// We were unblocked successfully -- potentially our unblocker overtook
		// us after we already failed. In either case, we've got the lock, now.
		return B_OK;
	}

	locker.Lock();
	// We failed to get the lock -- dequeue from waiter list.
	rw_lock_waiter* previous = NULL;
	rw_lock_waiter* other = lock->waiters;
	while (other != &waiter) {
		previous = other;
		other = other->next;
	}

	if (previous == NULL) {
		// we are the first in line
		lock->waiters = waiter.next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter.last;
	} else {
		// one or more other waiters are before us in the queue
		previous->next = waiter.next;
		if (lock->waiters->last == &waiter)
			lock->waiters->last = previous;
	}

	// Decrement the count. ATM this is all we have to do. There's at least
	// one writer ahead of us -- otherwise the last writer would have unblocked
	// us (writers only manipulate the lock data with thread spinlock being
	// held) -- so our leaving doesn't make a difference to the ones behind us
	// in the queue.
	atomic_add(&lock->count, -1);

	return error;
}


void
_rw_lock_read_unlock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	// If we're still holding the write lock or if there are other readers,
	// no-one can be woken up.
	if (lock->holder == thread_get_current_thread_id()) {
		ASSERT(lock->owner_count % RW_LOCK_WRITER_COUNT_BASE > 0);
		lock->owner_count--;
		return;
	}

	if (--lock->active_readers > 0)
		return;

	if (lock->active_readers < 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		lock->active_readers = 0;
		return;
	}

	rw_lock_unblock(lock);
}

#endif	// !KDEBUG_RW_LOCK_DEBUG


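// A worked example of the `count` encoding used below, assuming the
// RW_LOCK_WRITER_COUNT_BASE value of 0x10000 from <lock.h>: each reader adds
// 1 via the inline fast path, each writer adds the base value.
//
//	count == 0x00003  -> three readers, no writer
//	count == 0x10000  -> one writer; it owns the lock exclusively
//	count == 0x20002  -> two writers queued plus two readers that arrived
//	                     around them (tracked via active_readers and
//	                     pending_readers)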
status_t
rw_lock_write_lock(rw_lock* lock)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("rw_lock_write_lock(): called with interrupts disabled for "
			"lock %p", lock);
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// If we're already the lock holder, we just need to increment the owner
	// count.
	thread_id thread = thread_get_current_thread_id();
	if (lock->holder == thread) {
		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// announce our claim
	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);

	if (oldCount == 0) {
		// No-one else held a read or write lock, so it's ours now.
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// We have to wait. If we're the first writer, note the current reader
	// count.
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers = oldCount - lock->pending_readers;

	status_t status = rw_lock_wait(lock, true, locker);
	if (status == B_OK) {
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
	}

	return status;
}


void
_rw_lock_write_unlock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	if (thread_get_current_thread_id() != lock->holder) {
		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
			lock);
		return;
	}

	ASSERT(lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE);

	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
		return;

	// We gave up our last write lock -- clean up and unblock waiters.
	int32 readerCount = lock->owner_count;
	lock->holder = -1;
	lock->owner_count = 0;

	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
	oldCount -= RW_LOCK_WRITER_COUNT_BASE;

	if (oldCount != 0) {
		// If writers are waiting, take over our reader count.
		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
			lock->active_readers = readerCount;
			rw_lock_unblock(lock);
		} else {
			// No waiting writer, but there are one or more readers. We will
			// unblock all waiting readers -- that's the easy part -- and must
			// also make sure that all readers that haven't entered the critical
			// section yet won't start to wait. Otherwise a writer overtaking
			// such a reader will correctly start to wait, but the reader,
			// seeing the writer count > 0, would also start to wait. We set
			// pending_readers to the number of readers that are still expected
			// to enter the critical section.
			lock->pending_readers = oldCount - readerCount
				- rw_lock_unblock(lock);
		}
	}
}


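/*	A minimal usage sketch for the rw_lock API -- illustrative only, not part
	of this file; the names are made up and RW_LOCK_INITIALIZER is assumed to
	come from <lock.h>:

		static rw_lock sTableLock = RW_LOCK_INITIALIZER("example table");

		static void
		example_reader()
		{
			rw_lock_read_lock(&sTableLock);
			// read shared state; any number of readers may be here at once
			rw_lock_read_unlock(&sTableLock);
		}

		static void
		example_writer()
		{
			rw_lock_write_lock(&sTableLock);
			// modify shared state; the holder may take nested read/write
			// locks, which owner_count balances out
			rw_lock_write_unlock(&sTableLock);
		}
*/
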
static int
dump_rw_lock_info(int argc, char** argv)
{
	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	rw_lock* lock = (rw_lock*)parse_expression(argv[1]);

	if (!IS_KERNEL_ADDRESS(lock)) {
		kprintf("invalid address: %p\n", lock);
		return 0;
	}

	kprintf("rw lock %p:\n", lock);
	kprintf("  name:            %s\n", lock->name);
	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
	kprintf("  count:           %#" B_PRIx32 "\n", lock->count);
	kprintf("  active readers   %d\n", lock->active_readers);
	kprintf("  pending readers  %d\n", lock->pending_readers);
	kprintf("  owner count:     %#" B_PRIx32 "\n", lock->owner_count);
	kprintf("  flags:           %#" B_PRIx32 "\n", lock->flags);

	kprintf("  waiting threads:");
	rw_lock_waiter* waiter = lock->waiters;
	while (waiter != NULL) {
		kprintf(" %" B_PRId32 "/%c", waiter->thread->id, waiter->writer ? 'w' : 'r');
		waiter = waiter->next;
	}
	kputs("\n");

	return 0;
}


// #pragma mark -


void
mutex_init(mutex* lock, const char *name)
{
	mutex_init_etc(lock, name, 0);
}


void
mutex_init_etc(mutex* lock, const char *name, uint32 flags)
{
	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
	lock->ignore_unlock_count = 0;
#endif
	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
}

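/*	A minimal usage sketch for the mutex API -- illustrative only, not part of
	this file; the names are made up and MUTEX_INITIALIZER is assumed to come
	from <lock.h>. mutex_lock()/mutex_unlock() come from <lock.h> and end up
	in _mutex_lock()/_mutex_unlock() below when needed:

		static mutex sListLock = MUTEX_INITIALIZER("example list");

		static void
		example_append(void* element)
		{
			mutex_lock(&sListLock);
			// manipulate the protected data; the mutex is not recursive, so
			// nothing called from here may lock sListLock again
			mutex_unlock(&sListLock);
		}
*/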

void
mutex_destroy(mutex* lock)
{
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (lock->waiters != NULL && thread_get_current_thread_id()
			!= lock->holder) {
		panic("mutex_destroy(): there are blocking threads, but caller doesn't "
			"hold the lock (%p)", lock);
		if (_mutex_lock(lock, &locker) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		thread_unblock(waiter->thread, B_ERROR);
	}

	lock->name = NULL;
	lock->flags = 0;
#if KDEBUG
	lock->holder = 0;
#else
	lock->count = INT16_MIN;
#endif

	locker.Unlock();

	free(name);
}


static inline status_t
mutex_lock_threads_locked(mutex* lock, InterruptsSpinLocker* locker)
{
#if KDEBUG
	return _mutex_lock(lock, locker);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock(lock, locker);
	return B_OK;
#endif
}

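// mutex_lock_threads_locked() mirrors what the public mutex_lock() in
// <lock.h> presumably does: in non-KDEBUG builds the count is decremented
// atomically and the slow path _mutex_lock() is entered only if the mutex
// was already held (the count went negative); in KDEBUG builds every
// acquisition goes through _mutex_lock() so the holder can be tracked. The
// difference here is that the caller already holds the mutex's spinlock and
// passes it in, which the two switch functions below rely on.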

status_t
mutex_switch_lock(mutex* from, mutex* to)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("mutex_switch_lock(): called with interrupts disabled "
			"for locks %p, %p", from, to);
	}
#endif

	InterruptsSpinLocker locker(to->lock);

	mutex_unlock(from);

	return mutex_lock_threads_locked(to, &locker);
}

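/*	mutex_switch_lock() releases `from` and acquires `to` while holding `to`'s
	spinlock across the switch, so no other thread can complete an acquisition
	of `to` between the two steps. A sketch of typical use -- illustrative
	only, not part of this file; the lock and function names are made up,
	MUTEX_INITIALIZER as assumed in the sketch further up:

		static mutex sCacheLock = MUTEX_INITIALIZER("example cache");
		static mutex sEntryLock = MUTEX_INITIALIZER("example entry");

		static status_t
		example_narrow_lock()
		{
			mutex_lock(&sCacheLock);
			// ... find the entry under the broad lock ...
			// On success we hold sEntryLock; sCacheLock is released either
			// way.
			return mutex_switch_lock(&sCacheLock, &sEntryLock);
		}
*/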

status_t
mutex_switch_from_read_lock(rw_lock* from, mutex* to)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("mutex_switch_from_read_lock(): called with interrupts disabled "
			"for locks %p, %p", from, to);
	}
#endif

	InterruptsSpinLocker locker(to->lock);

#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_write_unlock(from);
#else
	int32 oldCount = atomic_add(&from->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock(from);
#endif

	return mutex_lock_threads_locked(to, &locker);
}


status_t
_mutex_lock(mutex* lock, void* _locker)
{
#if KDEBUG
	if (!gKernelStartup && _locker == NULL && !are_interrupts_enabled()) {
		panic("_mutex_lock(): called with interrupts disabled for lock %p",
			lock);
	}
#endif

	// Only acquire the spinlock ourselves if the caller didn't pass in a locker.
	InterruptsSpinLocker* locker
		= reinterpret_cast<InterruptsSpinLocker*>(_locker);

	InterruptsSpinLocker lockLocker;
	if (locker == NULL) {
		lockLocker.SetTo(lock->lock, false);
		locker = &lockLocker;
	}

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	locker->Unlock();

	status_t error = thread_block();
#if KDEBUG
	if (error == B_OK)
		atomic_set(&lock->holder, waiter.thread->id);
#endif
	return error;
}


void
_mutex_unlock(mutex* lock)
{
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (thread_get_current_thread_id() != lock->holder) {
		panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to "
			"release mutex %p (current holder %" B_PRId32 ")\n",
			thread_get_current_thread_id(), lock, lock->holder);
		return;
	}
#else
	if (lock->ignore_unlock_count > 0) {
		lock->ignore_unlock_count--;
		return;
	}
#endif

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;
#if KDEBUG
		thread_id unblockedThread = waiter->thread->id;
#endif

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

#if KDEBUG
		// Set the holder to the unblocked thread right away. Besides
		// reflecting the actual situation, this avoids a race condition:
		// with the holder set to -1, another locker could think the lock
		// is not held by anyone.
		lock->holder = unblockedThread;
#endif
	} else {
		// We've acquired the spinlock before the locker that is going to wait.
		// Just mark the lock as released.
#if KDEBUG
		lock->holder = -1;
#else
		lock->flags |= MUTEX_FLAG_RELEASED;
#endif
	}
}


status_t
_mutex_trylock(mutex* lock)
{
#if KDEBUG
	InterruptsSpinLocker _(lock->lock);

	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == 0)
		panic("_mutex_trylock(): using uninitialized lock %p", lock);
	return B_WOULD_BLOCK;
#else
	return mutex_trylock(lock);
#endif
}


status_t
_mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_mutex_lock_with_timeout(): called with interrupts disabled "
			"for lock %p", lock);
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock_with_timeout(): double lock of %p by thread %"
			B_PRId32, lock, lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock_with_timeout(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	locker.Unlock();

	status_t error = thread_block_with_timeout(timeoutFlags, timeout);

	if (error == B_OK) {
#if KDEBUG
		lock->holder = waiter.thread->id;
#endif
	} else {
		locker.Lock();

		// If the timeout occurred, we must remove our waiter structure from
		// the queue.
		mutex_waiter* previousWaiter = NULL;
		mutex_waiter* otherWaiter = lock->waiters;
		while (otherWaiter != NULL && otherWaiter != &waiter) {
			previousWaiter = otherWaiter;
			otherWaiter = otherWaiter->next;
		}
		if (otherWaiter == &waiter) {
			// the structure is still in the list -- dequeue
			if (&waiter == lock->waiters) {
				if (waiter.next != NULL)
					waiter.next->last = waiter.last;
				lock->waiters = waiter.next;
			} else {
				if (waiter.next == NULL)
					lock->waiters->last = previousWaiter;
				previousWaiter->next = waiter.next;
			}

#if !KDEBUG
			// we need to fix the lock count
			if (atomic_add(&lock->count, 1) == -1) {
				// This means we were the only thread waiting for the lock and
				// the lock owner has already called atomic_add() in
				// mutex_unlock(). That is we probably would get the lock very
				// soon (if the lock holder has a low priority, that might
				// actually take rather long, though), but the timeout already
				// occurred, so we don't try to wait. Just increment the ignore
				// unlock count.
				lock->ignore_unlock_count++;
			}
#endif
		}
	}

	return error;
}


static int
dump_mutex_info(int argc, char** argv)
{
	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	mutex* lock = (mutex*)parse_expression(argv[1]);

	if (!IS_KERNEL_ADDRESS(lock)) {
		kprintf("invalid address: %p\n", lock);
		return 0;
	}

	kprintf("mutex %p:\n", lock);
	kprintf("  name:            %s\n", lock->name);
	kprintf("  flags:           0x%x\n", lock->flags);
#if KDEBUG
	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
#else
	kprintf("  count:           %" B_PRId32 "\n", lock->count);
#endif

	kprintf("  waiting threads:");
	mutex_waiter* waiter = lock->waiters;
	while (waiter != NULL) {
		kprintf(" %" B_PRId32, waiter->thread->id);
		waiter = waiter->next;
	}
	kputs("\n");

	return 0;
}


// #pragma mark -


void
lock_debug_init()
{
	add_debugger_command_etc("mutex", &dump_mutex_info,
		"Dump info about a mutex",
		"<mutex>\n"
		"Prints info about the specified mutex.\n"
		"  <mutex>  - pointer to the mutex to print the info for.\n", 0);
	add_debugger_command_etc("rwlock", &dump_rw_lock_info,
		"Dump info about an rw lock",
		"<lock>\n"
		"Prints info about the specified rw lock.\n"
		"  <lock>  - pointer to the rw lock to print the info for.\n", 0);
}