/*
 * Copyright 2002-2010, Haiku Inc. All Rights Reserved.
 * Distributed under the terms of the MIT license.
 *
 * Authors:
 *		Ingo Weinhold, bonefish@cs.tu-berlin.de.
 *		Axel Dörfler, axeld@pinc-software.de.
 */

#include <debug.h>

#if KDEBUG
#define KDEBUG_STATIC static
static status_t _mutex_lock(struct mutex* lock, void* locker);
static void _mutex_unlock(struct mutex* lock);
#else
#define KDEBUG_STATIC
#define mutex_lock		mutex_lock_inline
#define mutex_unlock	mutex_unlock_inline
#define mutex_trylock	mutex_trylock_inline
#define mutex_lock_with_timeout	mutex_lock_with_timeout_inline
#endif

#include <lock.h>

#include <stdlib.h>
#include <string.h>

#include <AutoLocker.h>

// libroot
#include <user_thread.h>

// system
#include <syscalls.h>
#include <user_thread_defs.h>

#include "thread.h"


struct mutex_waiter {
	Thread*			thread;
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};

struct rw_lock_waiter {
	Thread*			thread;
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;
};
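
// Both waiter structures form an intrusive, singly-linked FIFO. Only the
// head element's "last" pointer is kept current, which still gives O(1)
// appends: a new waiter is linked in via lock->waiters->last->next, after
// which "last" on the head is updated.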

#define MUTEX_FLAG_OWNS_NAME	MUTEX_FLAG_CLONE_NAME
#define MUTEX_FLAG_RELEASED		0x2

#define RW_LOCK_FLAG_OWNS_NAME	RW_LOCK_FLAG_CLONE_NAME


static void _rw_lock_read_unlock_threads_locked(rw_lock* lock);
static void _rw_lock_write_unlock_threads_locked(rw_lock* lock);

static status_t _mutex_lock_threads_locked(mutex* lock);
static void _mutex_unlock_threads_locked(mutex* lock);


/*!	Helper class playing the role of the kernel's thread spinlock. We don't
	use a spinlock, as that could be expensive in userland (due to the
	spinlock holder potentially being unscheduled), but a benaphore.
*/
struct ThreadSpinlock {
	ThreadSpinlock()
		:
		fCount(1),
		fSemaphore(create_sem(0, "thread spinlock"))
	{
		if (fSemaphore < 0)
			panic("Failed to create thread spinlock semaphore!");
	}

	~ThreadSpinlock()
	{
		if (fSemaphore >= 0)
			delete_sem(fSemaphore);
	}

	bool Lock()
	{
		if (atomic_add(&fCount, -1) > 0)
			return true;

		status_t error;
		do {
			error = acquire_sem(fSemaphore);
		} while (error == B_INTERRUPTED);

		return error == B_OK;
	}

	void Unlock()
	{
		if (atomic_add(&fCount, 1) < 0)
			release_sem(fSemaphore);
	}

private:
	int32	fCount;
	sem_id	fSemaphore;
};

static ThreadSpinlock sThreadSpinlock;
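
// A minimal usage sketch of the benaphore above (hypothetical caller): an
// uncontended Lock() merely decrements fCount from 1 to 0 and returns; only
// a contended Lock() (fCount going negative) touches the semaphore, so the
// common case costs a single atomic operation.
//
//	static ThreadSpinlock sLock;
//
//	void
//	touch_shared_state()
//	{
//		if (!sLock.Lock())
//			return;
//		// ... critical section ...
//		sLock.Unlock();
//	}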


// #pragma mark -


int32
recursive_lock_get_recursion(recursive_lock *lock)
{
	if (RECURSIVE_LOCK_HOLDER(lock) == find_thread(NULL))
		return lock->recursion;

	return -1;
}


void
recursive_lock_init(recursive_lock *lock, const char *name)
{
	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_destroy(recursive_lock *lock)
{
	if (lock == NULL)
		return;

	mutex_destroy(&lock->lock);
}


status_t
recursive_lock_lock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		mutex_lock(&lock->lock);
#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		status_t status = mutex_trylock(&lock->lock);
		if (status != B_OK)
			return status;

#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


void
recursive_lock_unlock(recursive_lock *lock)
{
	if (find_thread(NULL) != RECURSIVE_LOCK_HOLDER(lock))
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if (--lock->recursion == 0) {
#if !KDEBUG
		lock->holder = -1;
#endif
		mutex_unlock(&lock->lock);
	}
}
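
// Usage sketch (hypothetical caller): a recursive lock may be re-acquired by
// the thread that already holds it; only the outermost unlock releases the
// underlying mutex.
//
//	recursive_lock lock;
//	recursive_lock_init(&lock, "example lock");
//
//	recursive_lock_lock(&lock);		// recursion == 1
//	recursive_lock_lock(&lock);		// same thread: recursion == 2
//	recursive_lock_unlock(&lock);	// recursion == 1, still held
//	recursive_lock_unlock(&lock);	// recursion == 0, mutex released
//	recursive_lock_destroy(&lock);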


//	#pragma mark -


static status_t
rw_lock_wait(rw_lock* lock, bool writer)
{
	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
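	// Setting wait_status before dropping the benaphore ensures that an
	// unblock is not lost: if _kern_unblock_thread() runs before our own
	// _kern_block_thread() call, the latter returns immediately. A
	// B_INTERRUPTED result just means we have to retry blocking.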
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();
	return error;
}


static int32
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there are any waiting threads at all and whether anyone
	// has the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder > 0)
		return 0;

	// writer at head of queue?
	if (waiter->writer) {
		if (lock->active_readers > 0 || lock->pending_readers > 0)
			return 0;

		// dequeue writer
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->holder = get_thread_id(waiter->thread);

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		waiter->thread = NULL;
		return RW_LOCK_WRITER_COUNT_BASE;
	}

	// wake up one or more readers
	uint32 readerCount = 0;
	do {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		readerCount++;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		waiter->thread = NULL;
	} while ((waiter = lock->waiters) != NULL && !waiter->writer);

	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers += readerCount;

	return readerCount;
}


void
rw_lock_init(rw_lock* lock, const char* name)
{
	lock->name = name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = 0;
}


void
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;
}
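
// Usage sketch (hypothetical caller): multiple readers may hold the lock
// concurrently, a writer holds it exclusively, and the write holder may
// additionally take nested read or write locks (tracked via owner_count).
//
//	rw_lock lock;
//	rw_lock_init(&lock, "example rw lock");
//
//	rw_lock_read_lock(&lock);
//	// ... read shared state ...
//	rw_lock_read_unlock(&lock);
//
//	rw_lock_write_lock(&lock);
//	// ... modify shared state ...
//	rw_lock_write_unlock(&lock);
//
//	rw_lock_destroy(&lock);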


void
rw_lock_destroy(rw_lock* lock)
{
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && find_thread(NULL) != lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}


#if !KDEBUG_RW_LOCK_DEBUG

status_t
_rw_lock_read_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// We might be the writer ourselves.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	// we need to wait
	return rw_lock_wait(lock, false);
}


status_t
_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// We might be the writer ourselves.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait

	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = false;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(timeoutFlags, timeout))
			== B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();

	if (error == B_OK || waiter.thread == NULL) {
		// We were unblocked successfully -- potentially our unblocker overtook
		// us after we already failed. In either case, we've got the lock now.
		return B_OK;
	}

	// We failed to get the lock -- dequeue from waiter list.
	rw_lock_waiter* previous = NULL;
	rw_lock_waiter* other = lock->waiters;
	while (other != &waiter) {
		previous = other;
		other = other->next;
	}

	if (previous == NULL) {
		// we are the first in line
		lock->waiters = waiter.next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter.last;
	} else {
		// one or more other waiters are before us in the queue
		previous->next = waiter.next;
		if (lock->waiters->last == &waiter)
			lock->waiters->last = previous;
	}

	// Decrement the count. ATM this is all we have to do. There's at least
	// one writer ahead of us -- otherwise the last writer would have unblocked
	// us (writers only manipulate the lock data with the thread spinlock
	// held) -- so our leaving doesn't make a difference to the ones behind us
	// in the queue.
	atomic_add(&lock->count, -1);

	return error;
}


void
_rw_lock_read_unlock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	_rw_lock_read_unlock_threads_locked(lock);
}


static void
_rw_lock_read_unlock_threads_locked(rw_lock* lock)
{
	// If we're still holding the write lock or if there are other readers,
	// no-one can be woken up.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count--;
		return;
	}

	if (--lock->active_readers > 0)
		return;

	if (lock->active_readers < 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		lock->active_readers = 0;
		return;
	}

	rw_lock_unblock(lock);
}

#endif	// !KDEBUG_RW_LOCK_DEBUG


status_t
rw_lock_write_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// If we're already the lock holder, we just need to increment the owner
	// count.
	thread_id thread = find_thread(NULL);
	if (lock->holder == thread) {
		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// announce our claim
	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);

	if (oldCount == 0) {
		// No-one else held a read or write lock, so it's ours now.
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// We have to wait. If we're the first writer, note the current reader
	// count.
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers = oldCount - lock->pending_readers;

	status_t status = rw_lock_wait(lock, true);
	if (status == B_OK) {
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
	}

	return status;
}


void
_rw_lock_write_unlock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	_rw_lock_write_unlock_threads_locked(lock);
}


static void
_rw_lock_write_unlock_threads_locked(rw_lock* lock)
{
	if (find_thread(NULL) != lock->holder) {
		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
			lock);
		return;
	}

	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
		return;

	// We gave up our last write lock -- clean up and unblock waiters.
	int32 readerCount = lock->owner_count;
	lock->holder = -1;
	lock->owner_count = 0;

	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
	oldCount -= RW_LOCK_WRITER_COUNT_BASE;

	if (oldCount != 0) {
		// If writers are waiting, take over our reader count.
		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
			lock->active_readers = readerCount;
			rw_lock_unblock(lock);
		} else {
			// No waiting writer, but there are one or more readers. We will
			// unblock all waiting readers -- that's the easy part -- and must
			// also make sure that all readers that haven't entered the
			// critical section yet won't start to wait. Otherwise a writer
			// overtaking such a reader will correctly start to wait, but the
			// reader, seeing the writer count > 0, would also start to wait.
			// We set pending_readers to the number of readers that are still
			// expected to enter the critical section.
			lock->pending_readers = oldCount - readerCount
				- rw_lock_unblock(lock);
		}
	}
}
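
// Worked example for the unlock bookkeeping above (illustrative numbers):
// suppose the writer also held one nested read lock (readerCount == 1) and
// three more readers incremented "count" while the write lock was held, so
// oldCount == 4 after subtracting the writer's share. If rw_lock_unblock()
// then wakes, say, two queued readers, pending_readers becomes
// 4 - 1 - 2 == 1: exactly one reader has yet to enter the critical section
// and will do so without waiting.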


// #pragma mark -


void
mutex_init(mutex* lock, const char *name)
{
	lock->name = name;
	lock->waiters = NULL;
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
#endif
	lock->flags = 0;
}


void
mutex_init_etc(mutex* lock, const char *name, uint32 flags)
{
	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
#endif
	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
}
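
// Usage sketch (hypothetical caller): unlike the recursive lock above, a
// mutex must not be re-acquired by the thread already holding it (KDEBUG
// builds panic on such a double lock).
//
//	mutex lock;
//	mutex_init(&lock, "example mutex");
//
//	mutex_lock(&lock);
//	// ... touch shared state ...
//	mutex_unlock(&lock);
//
//	mutex_destroy(&lock);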


void
mutex_destroy(mutex* lock)
{
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && find_thread(NULL) != lock->holder) {
		panic("mutex_destroy(): there are blocking threads, but the caller "
			"doesn't hold the lock (%p)", lock);
		if (_mutex_lock_threads_locked(lock) != B_OK)
			return;
	}
#endif

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}


status_t
mutex_switch_lock(mutex* from, mutex* to)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

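	// In non-KDEBUG builds the inline fast path in the lock header tracks the
	// lock state in "count": 0 means unlocked and every locker decrements it,
	// so an old value below -1 after our increment means other threads are
	// still waiting and the slow-path unlock has to wake one of them. In
	// KDEBUG builds the slow-path unlock is done unconditionally.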
#if !KDEBUG
	if (atomic_add(&from->count, 1) < -1)
#endif
		_mutex_unlock_threads_locked(from);

	return _mutex_lock_threads_locked(to);
}


status_t
mutex_switch_from_read_lock(rw_lock* from, mutex* to)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_write_unlock_threads_locked(from);
#else
	int32 oldCount = atomic_add(&from->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock_threads_locked(from);
#endif

	return _mutex_lock_threads_locked(to);
}


static status_t
_mutex_lock_threads_locked(mutex* lock)
{
	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	} else if (lock->holder == find_thread(NULL)) {
		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();

#if KDEBUG
	if (error == B_OK)
		lock->holder = get_thread_id(waiter.thread);
#endif

	return error;
}


KDEBUG_STATIC status_t
_mutex_lock(mutex* lock, void*)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	return _mutex_lock_threads_locked(lock);
}


static void
_mutex_unlock_threads_locked(mutex* lock)
{
#if KDEBUG
	if (find_thread(NULL) != lock->holder) {
		panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to "
			"release mutex %p (current holder %" B_PRId32 ")\n",
			find_thread(NULL), lock, lock->holder);
		return;
	}
#endif

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);

#if KDEBUG
		// Set the holder to the unblocked thread right away. Besides
		// reflecting the current situation, this avoids a race condition:
		// setting it to -1 would let another locker think the lock is not
		// held by anyone.
		lock->holder = get_thread_id(waiter->thread);
#endif
	} else {
		// We've acquired the spinlock before the locker that is going to wait.
		// Just mark the lock as released.
#if KDEBUG
		lock->holder = -1;
#else
		lock->flags |= MUTEX_FLAG_RELEASED;
#endif
	}
}


KDEBUG_STATIC void
_mutex_unlock(mutex* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	_mutex_unlock_threads_locked(lock);
}


#undef mutex_trylock
status_t
mutex_trylock(mutex* lock)
{
#if KDEBUG
	AutoLocker<ThreadSpinlock> _(sThreadSpinlock);

	if (lock->holder <= 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	}
#endif
	return B_WOULD_BLOCK;
}


#undef mutex_lock
status_t
mutex_lock(mutex* lock)
{
#if KDEBUG
	return _mutex_lock(lock, NULL);
#else
	return mutex_lock_inline(lock);
#endif
}


#undef mutex_unlock
void
mutex_unlock(mutex* lock)
{
#if KDEBUG
	_mutex_unlock(lock);
#else
	mutex_unlock_inline(lock);
#endif
}
842