xref: /haiku/src/tests/add-ons/kernel/kernelland_emu/lock.cpp (revision 922e7ba1f3228e6f28db69b0ded8f86eb32dea17)
/*
 * Copyright 2002-2010, Haiku Inc. All Rights Reserved.
 * Distributed under the terms of the MIT license.
 *
 * Authors:
 *		Ingo Weinhold, bonefish@cs.tu-berlin.de.
 *		Axel Dörfler, axeld@pinc-software.de.
 */

#include <lock.h>

#include <stdlib.h>
#include <string.h>

#include <AutoLocker.h>

// libroot
#include <user_thread.h>

// system
#include <syscalls.h>
#include <user_thread_defs.h>

#include "thread.h"


struct mutex_waiter {
	Thread*			thread;
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};

struct rw_lock_waiter {
	Thread*			thread;
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;
};

#define MUTEX_FLAG_OWNS_NAME	MUTEX_FLAG_CLONE_NAME
#define MUTEX_FLAG_RELEASED		0x2

#define RW_LOCK_FLAG_OWNS_NAME	RW_LOCK_FLAG_CLONE_NAME


/*!	Helper class playing the role of the kernel's thread spinlock. We don't use
	a spinlock, since that could be expensive in userland (the holder might get
	unscheduled while holding it), but a benaphore.
*/
struct ThreadSpinlock {
	ThreadSpinlock()
		:
		fCount(1),
		fSemaphore(create_sem(0, "thread spinlock"))
	{
		if (fSemaphore < 0)
			panic("Failed to create thread spinlock semaphore!");
	}

	~ThreadSpinlock()
	{
		if (fSemaphore >= 0)
			delete_sem(fSemaphore);
	}

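	// Benaphore protocol: fCount starts at 1 (lock free). Lock() atomically
	// decrements it; if the previous value was positive, the lock was free
	// and no syscall is needed, otherwise the caller sleeps on the semaphore.
	// Unlock() increments the count and, if it was negative, wakes one waiter.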
	bool Lock()
	{
		if (atomic_add(&fCount, -1) > 0)
			return true;

		status_t error;
		do {
			error = acquire_sem(fSemaphore);
		} while (error == B_INTERRUPTED);

		return error == B_OK;
	}

	void Unlock()
	{
		if (atomic_add(&fCount, 1) < 0)
			release_sem(fSemaphore);
	}

private:
	vint32	fCount;
	sem_id	fSemaphore;
};

static ThreadSpinlock sThreadSpinlock;


// #pragma mark -


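// A recursive_lock is a mutex plus a recursion counter: only the first
// acquisition by a thread takes the underlying mutex, nested acquisitions
// merely increment lock->recursion, and the mutex is released again once the
// counter drops back to zero. With KDEBUG enabled the underlying mutex tracks
// the holder itself, which is why lock->holder is only maintained in the
// !KDEBUG case.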
int32
recursive_lock_get_recursion(recursive_lock *lock)
{
	if (RECURSIVE_LOCK_HOLDER(lock) == find_thread(NULL))
		return lock->recursion;

	return -1;
}


void
recursive_lock_init(recursive_lock *lock, const char *name)
{
	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_destroy(recursive_lock *lock)
{
	if (lock == NULL)
		return;

	mutex_destroy(&lock->lock);
}


status_t
recursive_lock_lock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		mutex_lock(&lock->lock);
#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		status_t status = mutex_trylock(&lock->lock);
		if (status != B_OK)
			return status;

#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


void
recursive_lock_unlock(recursive_lock *lock)
{
	if (find_thread(NULL) != RECURSIVE_LOCK_HOLDER(lock))
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if (--lock->recursion == 0) {
#if !KDEBUG
		lock->holder = -1;
#endif
		mutex_unlock(&lock->lock);
	}
}


//	#pragma mark -


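// Enqueues the calling thread as a reader or writer and blocks it. Called
// (and returning) with the thread "spinlock" held; the lock is dropped while
// the thread is blocked. Blocking follows the user_thread protocol: set
// wait_status to 1, then call _kern_block_thread(), retrying on B_INTERRUPTED.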
static status_t
rw_lock_wait(rw_lock* lock, bool writer)
{
	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();
	return error;
}


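// Wakes waiters that can take the lock now; must be called with the thread
// "spinlock" held. If a writer is first in the queue (and no readers are
// active or pending), it is woken, made the holder, and
// RW_LOCK_WRITER_COUNT_BASE is returned. Otherwise all readers at the head of
// the queue are woken and their number is returned; 0 means nothing was done.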
static int32
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there are any waiting threads at all and whether anyone
	// holds the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder > 0)
		return 0;

	// writer at head of queue?
	if (waiter->writer) {
		if (lock->active_readers > 0 || lock->pending_readers > 0)
			return 0;

		// dequeue writer
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->holder = get_thread_id(waiter->thread);

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		waiter->thread = NULL;
		return RW_LOCK_WRITER_COUNT_BASE;
	}

	// wake up one or more readers
	uint32 readerCount = 0;
	do {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		readerCount++;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		waiter->thread = NULL;
	} while ((waiter = lock->waiters) != NULL && !waiter->writer);

	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers += readerCount;

	return readerCount;
}


void
rw_lock_init(rw_lock* lock, const char* name)
{
	lock->name = name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = 0;
}


void
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;
}


void
rw_lock_destroy(rw_lock* lock)
{
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && find_thread(NULL)
			!= lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}


#if !KDEBUG_RW_LOCK_DEBUG

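// The read lock slow paths below are only compiled when KDEBUG_RW_LOCK_DEBUG
// is disabled; with that option enabled read locks are taken as write locks
// (cf. mutex_switch_from_read_lock() below), so these functions aren't needed.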
status_t
_rw_lock_read_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// We might be the writer ourselves.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	// we need to wait
	return rw_lock_wait(lock, false);
}


status_t
_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// We might be the writer ourselves.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait

	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = false;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(timeoutFlags, timeout))
			== B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();

	if (error == B_OK || waiter.thread == NULL) {
		// We were unblocked successfully -- potentially our unblocker overtook
		// us after we already failed. In either case, we've got the lock, now.
		return B_OK;
	}

	// We failed to get the lock -- dequeue from waiter list.
	rw_lock_waiter* previous = NULL;
	rw_lock_waiter* other = lock->waiters;
	while (other != &waiter) {
		previous = other;
		other = other->next;
	}

	if (previous == NULL) {
		// we are the first in line
		lock->waiters = waiter.next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter.last;
	} else {
		// one or more other waiters are before us in the queue
		previous->next = waiter.next;
		if (lock->waiters->last == &waiter)
			lock->waiters->last = previous;
	}

	// Decrement the count. ATM this is all we have to do. There's at least
	// one writer ahead of us -- otherwise the last writer would have unblocked
	// us (writers only manipulate the lock data with thread spinlock being
	// held) -- so our leaving doesn't make a difference to the ones behind us
	// in the queue.
	atomic_add(&lock->count, -1);

	return error;
}


void
_rw_lock_read_unlock(rw_lock* lock, bool threadsLocked)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);

	// If we're still holding the write lock or if there are other readers,
	// no-one can be woken up.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count--;
		return;
	}

	if (--lock->active_readers > 0)
		return;

	if (lock->active_readers < 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		lock->active_readers = 0;
		return;
	}

	rw_lock_unblock(lock);
}

#endif	// !KDEBUG_RW_LOCK_DEBUG


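// lock->count encodes the lock state: each reader accounts for 1, each writer
// for RW_LOCK_WRITER_COUNT_BASE. Hence an old value of 0 in the atomic_add()
// below means the lock was completely free, and an old value below
// RW_LOCK_WRITER_COUNT_BASE means only readers were holding it.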
status_t
rw_lock_write_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// If we're already the lock holder, we just need to increment the owner
	// count.
	thread_id thread = find_thread(NULL);
	if (lock->holder == thread) {
		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// announce our claim
	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);

	if (oldCount == 0) {
		// No-one else held a read or write lock, so it's ours now.
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// We have to wait. If we're the first writer, note the current reader
	// count.
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers = oldCount - lock->pending_readers;

	status_t status = rw_lock_wait(lock, true);
	if (status == B_OK) {
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
	}

	return status;
}


void
_rw_lock_write_unlock(rw_lock* lock, bool threadsLocked)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);

	if (find_thread(NULL) != lock->holder) {
		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
			lock);
		return;
	}

	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
		return;

	// We gave up our last write lock -- clean up and unblock waiters.
	int32 readerCount = lock->owner_count;
	lock->holder = -1;
	lock->owner_count = 0;

	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
	oldCount -= RW_LOCK_WRITER_COUNT_BASE;

	if (oldCount != 0) {
		// If writers are waiting, take over our reader count.
		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
			lock->active_readers = readerCount;
			rw_lock_unblock(lock);
		} else {
			// No waiting writer, but there are one or more readers. We will
			// unblock all waiting readers -- that's the easy part -- and must
			// also make sure that all readers that haven't entered the critical
			// section yet won't start to wait. Otherwise a writer overtaking
			// such a reader will correctly start to wait, but the reader,
			// seeing the writer count > 0, would also start to wait. We set
			// pending_readers to the number of readers that are still expected
			// to enter the critical section.
			lock->pending_readers = oldCount - readerCount
				- rw_lock_unblock(lock);
		}
	}
}


// #pragma mark -


void
mutex_init(mutex* lock, const char *name)
{
	lock->name = name;
	lock->waiters = NULL;
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
#endif
	lock->flags = 0;
}


void
mutex_init_etc(mutex* lock, const char *name, uint32 flags)
{
	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
#endif
	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
}


void
mutex_destroy(mutex* lock)
{
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && find_thread(NULL)
		!= lock->holder) {
		panic("mutex_destroy(): there are blocking threads, but the caller "
			"doesn't hold the lock (%p)", lock);
		if (_mutex_lock(lock, true) != B_OK)
			return;
	}
#endif

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}


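// Unlocks the mutex "from" and acquires "to" atomically with respect to the
// thread "spinlock", so no other thread can slip in between the two steps.
// In the !KDEBUG case "from" only needs the full unlock path when it has
// waiters, mirroring the inline mutex_unlock() fast path in lock.h.
// mutex_switch_from_read_lock() does the same, but releases a read lock.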
status_t
mutex_switch_lock(mutex* from, mutex* to)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if !KDEBUG
	if (atomic_add(&from->count, 1) < -1)
#endif
		_mutex_unlock(from, true);

	return mutex_lock_threads_locked(to);
}


status_t
mutex_switch_from_read_lock(rw_lock* from, mutex* to)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_write_unlock(from, true);
#else
	int32 oldCount = atomic_add(&from->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock(from, true);
#endif

	return mutex_lock_threads_locked(to);
}


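// Slow path of mutex_lock(). In the !KDEBUG case it is entered after the
// inline fast path (an atomic decrement of lock->count) failed to get the
// mutex; MUTEX_FLAG_RELEASED covers the race where the holder released the
// mutex between that failed decrement and the acquisition of the thread
// "spinlock" here. With threadsLocked == true the caller already holds the
// "spinlock".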
status_t
_mutex_lock(mutex* lock, bool threadsLocked)
{
	// lock only if !threadsLocked
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	} else if (lock->holder == find_thread(NULL)) {
		panic("_mutex_lock(): double lock of %p by thread %ld", lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	locker.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	locker.Lock();

#if KDEBUG
	if (error == B_OK)
		lock->holder = get_thread_id(waiter.thread);
#endif

	return error;
}


void
_mutex_unlock(mutex* lock, bool threadsLocked)
{
	// lock only if !threadsLocked
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);

#if KDEBUG
	if (find_thread(NULL) != lock->holder) {
		panic("_mutex_unlock() failure: thread %ld is trying to release "
			"mutex %p (current holder %ld)\n", find_thread(NULL),
			lock, lock->holder);
		return;
	}
#endif

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);

#if KDEBUG
		// Set the holder to the unblocked thread right away. Besides
		// reflecting the actual situation, this avoids a race condition:
		// setting it to -1 could make another locker think the lock is not
		// held by anyone.
		lock->holder = get_thread_id(waiter->thread);
#endif
	} else {
		// We've acquired the spinlock before the locker that is going to wait.
		// Just mark the lock as released.
#if KDEBUG
		lock->holder = -1;
#else
		lock->flags |= MUTEX_FLAG_RELEASED;
#endif
	}
}


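// Note: with KDEBUG disabled the inline mutex_trylock() in lock.h presumably
// does the actual work, so this fallback merely reports B_WOULD_BLOCK in that
// configuration.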
status_t
_mutex_trylock(mutex* lock)
{
#if KDEBUG
	AutoLocker<ThreadSpinlock> _(sThreadSpinlock);

	if (lock->holder <= 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	}
#endif
	return B_WOULD_BLOCK;
}