/*
 * Copyright 2002-2010, Haiku Inc. All Rights Reserved.
 * Distributed under the terms of the MIT license.
 *
 * Authors:
 *		Ingo Weinhold, bonefish@cs.tu-berlin.de.
 *		Axel Dörfler, axeld@pinc-software.de.
 */

#include <lock.h>

#include <stdlib.h>
#include <string.h>

#include <AutoLocker.h>

// libroot
#include <user_thread.h>

// system
#include <syscalls.h>
#include <user_thread_defs.h>

#include "thread.h"


struct mutex_waiter {
	Thread*			thread;
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};

struct rw_lock_waiter {
	Thread*			thread;
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;
};

#define MUTEX_FLAG_OWNS_NAME	MUTEX_FLAG_CLONE_NAME
#define MUTEX_FLAG_RELEASED		0x2

#define RW_LOCK_FLAG_OWNS_NAME	RW_LOCK_FLAG_CLONE_NAME


static void _rw_lock_read_unlock_threads_locked(rw_lock* lock);
static void _rw_lock_write_unlock_threads_locked(rw_lock* lock);

static status_t _mutex_lock_threads_locked(mutex* lock);
static void _mutex_unlock_threads_locked(mutex* lock);


/*!	Helper class playing the role of the kernel's thread spinlock. We don't
	use a spinlock, as that could be expensive in userland (due to the
	spinlock holder potentially being unscheduled), but a benaphore.
*/
struct ThreadSpinlock {
	ThreadSpinlock()
		:
		fCount(1),
		fSemaphore(create_sem(0, "thread spinlock"))
	{
		if (fSemaphore < 0)
			panic("Failed to create thread spinlock semaphore!");
	}

	~ThreadSpinlock()
	{
		if (fSemaphore >= 0)
			delete_sem(fSemaphore);
	}

	bool Lock()
	{
		if (atomic_add(&fCount, -1) > 0)
			return true;

		status_t error;
		do {
			error = acquire_sem(fSemaphore);
		} while (error == B_INTERRUPTED);

		return error == B_OK;
	}

	void Unlock()
	{
		if (atomic_add(&fCount, 1) < 0)
			release_sem(fSemaphore);
	}

private:
	int32	fCount;
	sem_id	fSemaphore;
};

static ThreadSpinlock sThreadSpinlock;
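
// Usage sketch (illustrative only, not part of the original file): the
// benaphore's atomic count makes the semaphore syscall necessary only under
// contention. AutoLocker<ThreadSpinlock> calls Lock() and Unlock() through
// its default locking policy.
//
//	{
//		AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
//			// fCount 1 -> 0: uncontended, no syscall. A second locker would
//			// take fCount 0 -> -1 and block in acquire_sem().
//		// ... critical section ...
//	}	// ~AutoLocker() calls Unlock(); release_sem() runs only if someone
//		// is blocked (fCount was negative).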


// #pragma mark -


int32
recursive_lock_get_recursion(recursive_lock *lock)
{
	if (RECURSIVE_LOCK_HOLDER(lock) == find_thread(NULL))
		return lock->recursion;

	return -1;
}


void
recursive_lock_init(recursive_lock *lock, const char *name)
{
	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_destroy(recursive_lock *lock)
{
	if (lock == NULL)
		return;

	mutex_destroy(&lock->lock);
}


status_t
recursive_lock_lock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		mutex_lock(&lock->lock);
#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		status_t status = mutex_trylock(&lock->lock);
		if (status != B_OK)
			return status;

#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


void
recursive_lock_unlock(recursive_lock *lock)
{
	if (find_thread(NULL) != RECURSIVE_LOCK_HOLDER(lock))
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if (--lock->recursion == 0) {
#if !KDEBUG
		lock->holder = -1;
#endif
		mutex_unlock(&lock->lock);
	}
}
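
// Usage sketch (illustrative only, not part of the original file): a thread
// that already holds a recursive_lock may re-lock it; only the outermost
// unlock releases the underlying mutex.
//
//	recursive_lock lock;
//	recursive_lock_init(&lock, "example lock");
//	recursive_lock_lock(&lock);		// takes the mutex, recursion == 1
//	recursive_lock_lock(&lock);		// same holder, recursion == 2
//	recursive_lock_unlock(&lock);	// recursion == 1, mutex still held
//	recursive_lock_unlock(&lock);	// recursion == 0, mutex released
//	recursive_lock_destroy(&lock);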


//	#pragma mark -


static status_t
rw_lock_wait(rw_lock* lock, bool writer)
{
	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();
	return error;
}

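
/*!	Unblocks the waiters that may take the lock now. The waiter queue is the
	intrusive, on-stack list built by rw_lock_wait(); with waiters A, B, C
	enqueued in that order it looks like
		lock->waiters -> A -> B -> C -> NULL
		A.last == &C	(only the head's "last" pointer is kept up to date)
	so appending is O(1). This function wakes either the writer at the head
	of the queue, or the run of readers up to the next writer. It returns
	what got added to the active count: RW_LOCK_WRITER_COUNT_BASE for a
	writer, the number of woken readers, or 0 if no-one could be unblocked.
	The thread spinlock must be held.
*/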
static int32
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there are any waiting threads at all and whether anyone
	// holds the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder > 0)
		return 0;

	// writer at head of queue?
	if (waiter->writer) {
		if (lock->active_readers > 0 || lock->pending_readers > 0)
			return 0;

		// dequeue writer
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->holder = get_thread_id(waiter->thread);

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		waiter->thread = NULL;
		return RW_LOCK_WRITER_COUNT_BASE;
	}

	// wake up one or more readers
	uint32 readerCount = 0;
	do {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		readerCount++;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		waiter->thread = NULL;
	} while ((waiter = lock->waiters) != NULL && !waiter->writer);

	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers += readerCount;

	return readerCount;
}


void
rw_lock_init(rw_lock* lock, const char* name)
{
	lock->name = name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = 0;
}


void
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;
}


void
rw_lock_destroy(rw_lock* lock)
{
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && find_thread(NULL) != lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}


#if !KDEBUG_RW_LOCK_DEBUG

status_t
_rw_lock_read_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// We might be the writer ourselves.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add()
	// might already have gone and another writer could have overtaken us. In
	// this case the original writer set pending_readers, so we know that we
	// don't have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	// we need to wait
	return rw_lock_wait(lock, false);
}


status_t
_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// We might be the writer ourselves.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add()
	// might already have gone and another writer could have overtaken us. In
	// this case the original writer set pending_readers, so we know that we
	// don't have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait

	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = false;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(timeoutFlags, timeout))
			== B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();

	if (error == B_OK || waiter.thread == NULL) {
		// We were unblocked successfully -- potentially our unblocker
		// overtook us after we had already failed. In either case, we've got
		// the lock now.
		return B_OK;
	}

	// We failed to get the lock -- dequeue from waiter list.
	rw_lock_waiter* previous = NULL;
	rw_lock_waiter* other = lock->waiters;
	while (other != &waiter) {
		previous = other;
		other = other->next;
	}

	if (previous == NULL) {
		// we are the first in line
		lock->waiters = waiter.next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter.last;
	} else {
		// one or more other waiters are before us in the queue
		previous->next = waiter.next;
		if (lock->waiters->last == &waiter)
			lock->waiters->last = previous;
	}

	// Decrement the count. ATM this is all we have to do. There's at least
	// one writer ahead of us -- otherwise the last writer would have unblocked
	// us (writers only manipulate the lock data with thread spinlock being
	// held) -- so our leaving doesn't make a difference to the ones behind us
	// in the queue.
	atomic_add(&lock->count, -1);

	return error;
}
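
// Usage sketch (illustrative only, not part of the original file): callers
// normally reach this through the rw_lock_read_lock_with_timeout() wrapper
// declared in lock.h, using the standard Haiku timeout flags.
//
//	status_t status = rw_lock_read_lock_with_timeout(&someLock,
//		B_RELATIVE_TIMEOUT, 1000000);	// hypothetical lock; give up after 1s
//	if (status == B_TIMED_OUT)
//		;	// a writer still holds someLock; retry or fall back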


void
_rw_lock_read_unlock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	_rw_lock_read_unlock_threads_locked(lock);
}


static void
_rw_lock_read_unlock_threads_locked(rw_lock* lock)
{
	// If we're still holding the write lock or if there are other readers,
	// no-one can be woken up.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count--;
		return;
	}

	if (--lock->active_readers > 0)
		return;

	if (lock->active_readers < 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		lock->active_readers = 0;
		return;
	}

	rw_lock_unblock(lock);
}

#endif	// !KDEBUG_RW_LOCK_DEBUG
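
// Note (added commentary, derived from the code in this file): with
// KDEBUG_RW_LOCK_DEBUG set, the read-lock slow paths above are compiled out;
// read locks are then taken as write locks (see
// mutex_switch_from_read_lock() below, which calls
// _rw_lock_write_unlock_threads_locked() in that configuration), trading
// concurrency for stricter checking.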


status_t
rw_lock_write_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// If we're already the lock holder, we just need to increment the owner
	// count.
	thread_id thread = find_thread(NULL);
	if (lock->holder == thread) {
		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// announce our claim
	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);

	if (oldCount == 0) {
		// No-one else held a read or write lock, so it's ours now.
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// We have to wait. If we're the first writer, note the current reader
	// count.
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers = oldCount - lock->pending_readers;

	status_t status = rw_lock_wait(lock, true);
	if (status == B_OK) {
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
	}

	return status;
}
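
// Count encoding sketch (illustrative only, not part of the original file):
// lock->count packs both roles into one atomically updated value. Each
// reader adds 1 and each writer adds RW_LOCK_WRITER_COUNT_BASE (0x10000 in
// the kernel headers -- an assumption worth verifying there), so, say, two
// writers and three readers yield
//
//	count == 2 * RW_LOCK_WRITER_COUNT_BASE + 3
//
// which is why "count >= RW_LOCK_WRITER_COUNT_BASE" is the test for "some
// writer is active or waiting" throughout this file.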


void
_rw_lock_write_unlock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	_rw_lock_write_unlock_threads_locked(lock);
}


static void
_rw_lock_write_unlock_threads_locked(rw_lock* lock)
{
	if (find_thread(NULL) != lock->holder) {
		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
			lock);
		return;
	}

	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
		return;

	// We gave up our last write lock -- clean up and unblock waiters.
	int32 readerCount = lock->owner_count;
	lock->holder = -1;
	lock->owner_count = 0;

	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
	oldCount -= RW_LOCK_WRITER_COUNT_BASE;

	if (oldCount != 0) {
		// If writers are waiting, take over our reader count.
		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
			lock->active_readers = readerCount;
			rw_lock_unblock(lock);
		} else {
			// No writer is waiting, but there are one or more readers. We
			// will unblock all waiting readers -- that's the easy part -- and
			// must also make sure that all readers that haven't entered the
			// critical section yet won't start to wait. Otherwise a writer
			// overtaking such a reader would correctly start to wait, but the
			// reader, seeing the writer count > 0, would also start to wait.
			// We set pending_readers to the number of readers that are still
			// expected to enter the critical section.
			lock->pending_readers = oldCount - readerCount
				- rw_lock_unblock(lock);
		}
	}
}
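
// Usage sketch (illustrative only, not part of the original file): the
// public entry points are the inline wrappers in lock.h; the functions above
// are their slow paths. A writer may nest read locks inside its write lock.
//
//	rw_lock myLock;					// hypothetical lock
//	rw_lock_init(&myLock, "example rw lock");
//	rw_lock_read_lock(&myLock);		// shared: many readers may enter
//	rw_lock_read_unlock(&myLock);
//	rw_lock_write_lock(&myLock);	// exclusive: waits for all readers
//	rw_lock_write_unlock(&myLock);
//	rw_lock_destroy(&myLock);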


// #pragma mark -


void
mutex_init(mutex* lock, const char *name)
{
	lock->name = name;
	lock->waiters = NULL;
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
#endif
	lock->flags = 0;
}


void
mutex_init_etc(mutex* lock, const char *name, uint32 flags)
{
	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
#endif
	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
}


void
mutex_destroy(mutex* lock)
{
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && find_thread(NULL) != lock->holder) {
		panic("mutex_destroy(): there are blocking threads, but caller doesn't "
			"hold the lock (%p)", lock);
		if (_mutex_lock_threads_locked(lock) != B_OK)
			return;
	}
#endif

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}
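
// Usage sketch (illustrative only, not part of the original file): as with
// the rw_lock, mutex_lock() and mutex_unlock() are inline wrappers in lock.h
// that, in the non-KDEBUG build, fall into _mutex_lock()/_mutex_unlock()
// only on contention.
//
//	mutex myMutex;					// hypothetical lock
//	mutex_init(&myMutex, "example mutex");
//	mutex_lock(&myMutex);
//	// ... critical section ...
//	mutex_unlock(&myMutex);
//	mutex_destroy(&myMutex);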


status_t
mutex_switch_lock(mutex* from, mutex* to)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if !KDEBUG
	if (atomic_add(&from->count, 1) < -1)
#endif
		_mutex_unlock_threads_locked(from);

	return _mutex_lock_threads_locked(to);
}


status_t
mutex_switch_from_read_lock(rw_lock* from, mutex* to)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_write_unlock_threads_locked(from);
#else
	int32 oldCount = atomic_add(&from->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock_threads_locked(from);
#endif

	return _mutex_lock_threads_locked(to);
}


static status_t
_mutex_lock_threads_locked(mutex* lock)
{
	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	} else if (lock->holder == find_thread(NULL)) {
		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();

#if KDEBUG
	if (error == B_OK)
		lock->holder = get_thread_id(waiter.thread);
#endif

	return error;
}


status_t
_mutex_lock(mutex* lock, void*)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	return _mutex_lock_threads_locked(lock);
}
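
// Fast-path sketch (illustrative only, not part of the original file): in
// the non-KDEBUG build the mutex_lock()/mutex_unlock() inlines in lock.h
// keep an atomic count so this slow path runs only under contention,
// mirroring the atomic_add() tests visible in mutex_switch_lock() above.
//
//	// lock:   if (atomic_add(&lock->count, -1) < 0) _mutex_lock(lock, NULL);
//	// unlock: if (atomic_add(&lock->count, 1) < -1) _mutex_unlock(lock);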


static void
_mutex_unlock_threads_locked(mutex* lock)
{
#if KDEBUG
	if (find_thread(NULL) != lock->holder) {
		panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to "
			"release mutex %p (current holder %" B_PRId32 ")\n",
			find_thread(NULL), lock, lock->holder);
		return;
	}
#endif

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);

#if KDEBUG
		// Set the holder to the unblocked thread right away. Besides
		// reflecting the actual situation, this avoids a race condition:
		// with the holder set to -1, another locker could think the lock
		// is not held by anyone.
		lock->holder = get_thread_id(waiter->thread);
#endif
	} else {
		// We've acquired the spinlock before the locker that is going to wait.
		// Just mark the lock as released.
#if KDEBUG
		lock->holder = -1;
#else
		lock->flags |= MUTEX_FLAG_RELEASED;
#endif
	}
}


void
_mutex_unlock(mutex* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	_mutex_unlock_threads_locked(lock);
}


status_t
_mutex_trylock(mutex* lock)
{
#if KDEBUG
	AutoLocker<ThreadSpinlock> _(sThreadSpinlock);

	if (lock->holder <= 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	}
#endif
	return B_WOULD_BLOCK;
}
803