/*
 * Copyright 2002-2010, Haiku Inc. All Rights Reserved.
 * Distributed under the terms of the MIT license.
 *
 * Authors:
 *		Ingo Weinhold, bonefish@cs.tu-berlin.de.
 *		Axel Dörfler, axeld@pinc-software.de.
 */

#include <lock.h>

#include <stdlib.h>
#include <string.h>

#include <AutoLocker.h>

// libroot
#include <user_thread.h>

// system
#include <syscalls.h>
#include <user_thread_defs.h>

#include "thread.h"


struct mutex_waiter {
	Thread*			thread;
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};

struct rw_lock_waiter {
	Thread*			thread;
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;
};

#define MUTEX_FLAG_OWNS_NAME	MUTEX_FLAG_CLONE_NAME
#define MUTEX_FLAG_RELEASED		0x2

#define RW_LOCK_FLAG_OWNS_NAME	RW_LOCK_FLAG_CLONE_NAME


static status_t _mutex_lock_threads_locked(mutex* lock);
static void _mutex_unlock_threads_locked(mutex* lock);


/*!	Helper class playing the role of the kernel's thread spinlock. We don't use
	a spinlock, as that could be expensive in userland (the spinlock holder
	might get unscheduled at any time), but a benaphore.
*/
struct ThreadSpinlock {
	ThreadSpinlock()
		:
		fCount(1),
		fSemaphore(create_sem(0, "thread spinlock"))
	{
		if (fSemaphore < 0)
			panic("Failed to create thread spinlock semaphore!");
	}

	~ThreadSpinlock()
	{
		if (fSemaphore >= 0)
			delete_sem(fSemaphore);
	}

	bool Lock()
	{
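		// fCount starts at 1. If the value was still positive before our
		// decrement, the benaphore was free and we now own it without having
		// to make a syscall.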
		if (atomic_add(&fCount, -1) > 0)
			return true;

		status_t error;
		do {
			error = acquire_sem(fSemaphore);
		} while (error == B_INTERRUPTED);

		return error == B_OK;
	}

	void Unlock()
	{
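		// A negative value before our increment means at least one thread is
		// blocked (or about to block) on the semaphore and has to be woken up.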
		if (atomic_add(&fCount, 1) < 0)
			release_sem(fSemaphore);
	}

private:
	int32	fCount;
	sem_id	fSemaphore;
};

static ThreadSpinlock sThreadSpinlock;
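

// A minimal usage sketch of the mutex API emulated in this file (illustration
// only -- "myLock" is a hypothetical name, not something defined here):
//
//		mutex myLock;
//		mutex_init(&myLock, "example lock");
//		mutex_lock(&myLock);
//		// ... critical section ...
//		mutex_unlock(&myLock);
//		mutex_destroy(&myLock);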


// #pragma mark -


int32
recursive_lock_get_recursion(recursive_lock *lock)
{
	if (RECURSIVE_LOCK_HOLDER(lock) == find_thread(NULL))
		return lock->recursion;

	return -1;
}


void
recursive_lock_init(recursive_lock *lock, const char *name)
{
	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_destroy(recursive_lock *lock)
{
	if (lock == NULL)
		return;

	mutex_destroy(&lock->lock);
}


status_t
recursive_lock_lock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

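	// Acquire the underlying mutex only if we are not the holder already; a
	// nested acquisition just bumps the recursion count below.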
	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		mutex_lock(&lock->lock);
#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		status_t status = mutex_trylock(&lock->lock);
		if (status != B_OK)
			return status;

#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


void
recursive_lock_unlock(recursive_lock *lock)
{
	if (find_thread(NULL) != RECURSIVE_LOCK_HOLDER(lock))
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if (--lock->recursion == 0) {
#if !KDEBUG
		lock->holder = -1;
#endif
		mutex_unlock(&lock->lock);
	}
}


//	#pragma mark -


static status_t
rw_lock_wait(rw_lock* lock, bool writer)
{
	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
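	// The wait status has to be set while we still hold the pseudo thread
	// spinlock, so that an unblock issued between releasing the lock and the
	// _kern_block_thread() call below is not lost.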
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();
	return error;
}


static int32
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there are any waiting threads at all and whether anyone
	// holds the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder > 0)
		return 0;

	// writer at head of queue?
	if (waiter->writer) {
		if (lock->active_readers > 0 || lock->pending_readers > 0)
			return 0;

		// dequeue writer
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->holder = get_thread_id(waiter->thread);

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		waiter->thread = NULL;
		return RW_LOCK_WRITER_COUNT_BASE;
	}

	// wake up one or more readers
	uint32 readerCount = 0;
	do {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		readerCount++;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		waiter->thread = NULL;
	} while ((waiter = lock->waiters) != NULL && !waiter->writer);

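	// If a writer has already registered itself in the count, the readers we
	// just woke up become active readers it will have to wait for.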
	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers += readerCount;

	return readerCount;
}


void
rw_lock_init(rw_lock* lock, const char* name)
{
	lock->name = name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = 0;
}


void
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;
}


void
rw_lock_destroy(rw_lock* lock)
{
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && find_thread(NULL)
			!= lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}


#if !KDEBUG_RW_LOCK_DEBUG

status_t
_rw_lock_read_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// We might be the writer ourselves.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	// we need to wait
	return rw_lock_wait(lock, false);
}


status_t
_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// We might be the writer ourselves.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait

	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = false;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(timeoutFlags, timeout))
			== B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();

	if (error == B_OK || waiter.thread == NULL) {
		// We were unblocked successfully -- potentially our unblocker overtook
		// us after we already failed. In either case, we've got the lock, now.
		return B_OK;
	}

	// We failed to get the lock -- dequeue from waiter list.
	rw_lock_waiter* previous = NULL;
	rw_lock_waiter* other = lock->waiters;
	while (other != &waiter) {
		previous = other;
		other = other->next;
	}

	if (previous == NULL) {
		// we are the first in line
		lock->waiters = waiter.next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter.last;
	} else {
		// one or more other waiters are before us in the queue
		previous->next = waiter.next;
		if (lock->waiters->last == &waiter)
			lock->waiters->last = previous;
	}

	// Decrement the count. ATM this is all we have to do. There's at least
	// one writer ahead of us -- otherwise the last writer would have unblocked
	// us (writers only manipulate the lock data with the thread spinlock
	// held) -- so our leaving doesn't make a difference to the ones behind us
	// in the queue.
	atomic_add(&lock->count, -1);

	return error;
}


void
_rw_lock_read_unlock(rw_lock* lock, bool threadsLocked)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);

	// If we're still holding the write lock or if there are other readers,
	// no-one can be woken up.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count--;
		return;
	}

	if (--lock->active_readers > 0)
		return;

	if (lock->active_readers < 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		lock->active_readers = 0;
		return;
	}

	rw_lock_unblock(lock);
}

#endif	// !KDEBUG_RW_LOCK_DEBUG


status_t
rw_lock_write_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// If we're already the lock holder, we just need to increment the owner
	// count.
	thread_id thread = find_thread(NULL);
	if (lock->holder == thread) {
		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// announce our claim
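	// (the count holds the number of registered readers plus
	// RW_LOCK_WRITER_COUNT_BASE for each writer that has announced itself)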
	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);

	if (oldCount == 0) {
		// No-one else held a read or write lock, so it's ours now.
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// We have to wait. If we're the first writer, note the current reader
	// count.
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers = oldCount - lock->pending_readers;

	status_t status = rw_lock_wait(lock, true);
	if (status == B_OK) {
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
	}

	return status;
}


void
_rw_lock_write_unlock(rw_lock* lock, bool threadsLocked)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);

	if (find_thread(NULL) != lock->holder) {
		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
			lock);
		return;
	}

	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
		return;

	// We gave up our last write lock -- clean up and unblock waiters.
	int32 readerCount = lock->owner_count;
	lock->holder = -1;
	lock->owner_count = 0;

	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
	oldCount -= RW_LOCK_WRITER_COUNT_BASE;

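	// oldCount is now the lock's count after our release: the readers that
	// have announced themselves plus RW_LOCK_WRITER_COUNT_BASE for each
	// waiting writer.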
	if (oldCount != 0) {
		// If writers are waiting, take over our reader count.
		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
			lock->active_readers = readerCount;
			rw_lock_unblock(lock);
		} else {
			// No waiting writer, but there are one or more readers. We will
			// unblock all waiting readers -- that's the easy part -- and must
			// also make sure that readers that haven't entered the critical
			// section yet won't start to wait. Otherwise a writer overtaking
			// such a reader will correctly start to wait, but the reader,
			// seeing the writer count > 0, would also start to wait. We set
			// pending_readers to the number of readers that are still expected
			// to enter the critical section.
			lock->pending_readers = oldCount - readerCount
				- rw_lock_unblock(lock);
		}
	}
}


// #pragma mark -


void
mutex_init(mutex* lock, const char *name)
{
	lock->name = name;
	lock->waiters = NULL;
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
#endif
	lock->flags = 0;
}


void
mutex_init_etc(mutex* lock, const char *name, uint32 flags)
{
	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
#endif
	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
}


void
mutex_destroy(mutex* lock)
{
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && find_thread(NULL)
		!= lock->holder) {
		panic("mutex_destroy(): there are blocking threads, but caller doesn't "
			"hold the lock (%p)", lock);
		if (_mutex_lock_threads_locked(lock) != B_OK)
			return;
	}
#endif

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}


status_t
mutex_switch_lock(mutex* from, mutex* to)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

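	// Release "from" first. Without KDEBUG the count alone tracks the mutex,
	// so the slow unlock path is only needed if other threads are actually
	// waiting on it (the old count was below -1).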
#if !KDEBUG
	if (atomic_add(&from->count, 1) < -1)
#endif
		_mutex_unlock_threads_locked(from);

	return _mutex_lock_threads_locked(to);
}


status_t
mutex_switch_from_read_lock(rw_lock* from, mutex* to)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

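	// Give up the read lock on "from" -- with KDEBUG_RW_LOCK_DEBUG readers
	// actually hold the write lock -- and acquire "to" without dropping the
	// pseudo thread spinlock in between.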
#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_write_unlock(from, true);
#else
	int32 oldCount = atomic_add(&from->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock(from, true);
#endif

	return _mutex_lock_threads_locked(to);
}


static status_t
_mutex_lock_threads_locked(mutex* lock)
{
	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	} else if (lock->holder == find_thread(NULL)) {
		panic("_mutex_lock(): double lock of %p by thread %ld", lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();

#if KDEBUG
	if (error == B_OK)
		lock->holder = get_thread_id(waiter.thread);
#endif

	return error;
}


status_t
_mutex_lock(mutex* lock, void*)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	return _mutex_lock_threads_locked(lock);
}


static void
_mutex_unlock_threads_locked(mutex* lock)
{
#if KDEBUG
	if (find_thread(NULL) != lock->holder) {
		panic("_mutex_unlock() failure: thread %ld is trying to release "
			"mutex %p (current holder %ld)\n", find_thread(NULL),
			lock, lock->holder);
		return;
	}
#endif

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);

#if KDEBUG
		// Set the holder to the unblocked thread right away. Besides that this
		// reflects the actual situation, setting it to -1 would cause a race
		// condition, since another locker could think the lock is not held by
		// anyone.
		lock->holder = get_thread_id(waiter->thread);
#endif
	} else {
		// We've acquired the spinlock before the locker that is going to wait.
		// Just mark the lock as released.
#if KDEBUG
		lock->holder = -1;
#else
		lock->flags |= MUTEX_FLAG_RELEASED;
#endif
	}
}


void
_mutex_unlock(mutex* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
	_mutex_unlock_threads_locked(lock);
}


status_t
_mutex_trylock(mutex* lock)
{
#if KDEBUG
	AutoLocker<ThreadSpinlock> _(sThreadSpinlock);

	if (lock->holder <= 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	}
#endif
	return B_WOULD_BLOCK;
}
788