xref: /haiku/src/tests/add-ons/kernel/kernelland_emu/lock.cpp (revision 9760dcae2038d47442f4658c2575844c6cf92c40)
/*
 * Copyright 2002-2009, Haiku Inc. All Rights Reserved.
 * Distributed under the terms of the MIT license.
 *
 * Authors:
 *		Ingo Weinhold, bonefish@cs.tu-berlin.de.
 *		Axel Dörfler, axeld@pinc-software.de.
 */


#include <lock.h>

#include <stdlib.h>
#include <string.h>

#include <AutoLocker.h>

// libroot
#include <user_thread.h>

// system
#include <syscalls.h>
#include <user_thread_defs.h>

#include "thread.h"


struct mutex_waiter {
	struct thread*	thread;
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};

struct rw_lock_waiter {
	struct thread*	thread;
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;
};

#define MUTEX_FLAG_OWNS_NAME	MUTEX_FLAG_CLONE_NAME
#define MUTEX_FLAG_RELEASED		0x2

#define RW_LOCK_FLAG_OWNS_NAME	RW_LOCK_FLAG_CLONE_NAME


/*!	Helper class playing the role of the kernel's thread spinlock. We don't use
	a spinlock, as that could be expensive in userland (the spinlock holder
	might be unscheduled at any time); instead, a benaphore is used.
*/
struct ThreadSpinlock {
	ThreadSpinlock()
		:
		fCount(1),
		fSemaphore(create_sem(0, "thread spinlock"))
	{
		if (fSemaphore < 0)
			panic("Failed to create thread spinlock semaphore!");
	}

	~ThreadSpinlock()
	{
		if (fSemaphore >= 0)
			delete_sem(fSemaphore);
	}

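	// Benaphore protocol: fCount starts at 1 (unlocked). Lock() decrements it
	// and only falls back to the semaphore if the previous value was <= 0;
	// Unlock() increments it and releases the semaphore if someone is waiting.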
	bool Lock()
	{
		if (atomic_add(&fCount, -1) > 0)
			return true;

		status_t error;
		do {
			error = acquire_sem(fSemaphore);
		} while (error == B_INTERRUPTED);

		return error == B_OK;
	}

	void Unlock()
	{
		if (atomic_add(&fCount, 1) < 0)
			release_sem(fSemaphore);
	}

private:
	vint32	fCount;
	sem_id	fSemaphore;
};

static ThreadSpinlock sThreadSpinlock;


// #pragma mark -


int32
recursive_lock_get_recursion(recursive_lock *lock)
{
	if (RECURSIVE_LOCK_HOLDER(lock) == find_thread(NULL))
		return lock->recursion;

	return -1;
}


void
recursive_lock_init(recursive_lock *lock, const char *name)
{
	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_destroy(recursive_lock *lock)
{
	if (lock == NULL)
		return;

	mutex_destroy(&lock->lock);
}


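// Acquires the lock for the calling thread. If the thread holds it already,
// only the recursion count is incremented; otherwise the underlying mutex is
// locked and the calling thread recorded as holder.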
status_t
recursive_lock_lock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		mutex_lock(&lock->lock);
#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		status_t status = mutex_trylock(&lock->lock);
		if (status != B_OK)
			return status;

#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


void
recursive_lock_unlock(recursive_lock *lock)
{
	if (find_thread(NULL) != RECURSIVE_LOCK_HOLDER(lock))
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if (--lock->recursion == 0) {
#if !KDEBUG
		lock->holder = -1;
#endif
		mutex_unlock(&lock->lock);
	}
}


//	#pragma mark -


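// Enqueues the calling thread as a reader or writer waiter on the given
// rw_lock, drops the thread "spinlock" while blocking in the kernel, and
// reacquires it before returning. The caller must hold sThreadSpinlock.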
static status_t
rw_lock_wait(rw_lock* lock, bool writer)
{
	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();
	return error;
}


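// Wakes up waiters that may run now. A writer at the head of the queue gets
// the lock exclusively, provided no readers are active or pending; otherwise
// all readers up to the next queued writer are unblocked. Returns
// RW_LOCK_WRITER_COUNT_BASE if a writer was woken, else the number of readers
// woken.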
static int32
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there are any waiting threads at all and whether anyone
	// has the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder > 0)
		return 0;

	// writer at head of queue?
	if (waiter->writer) {
		if (lock->active_readers > 0 || lock->pending_readers > 0)
			return 0;

		// dequeue writer
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->holder = get_thread_id(waiter->thread);

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		return RW_LOCK_WRITER_COUNT_BASE;
	}

	// wake up one or more readers
	uint32 readerCount = 0;
	do {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		readerCount++;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
	} while ((waiter = lock->waiters) != NULL && !waiter->writer);

	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers += readerCount;

	return readerCount;
}


void
rw_lock_init(rw_lock* lock, const char* name)
{
	lock->name = name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = 0;
}


void
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;
}


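// Destroys the rw_lock. Any threads still waiting on it are unblocked with
// B_ERROR, and a name cloned at init time is freed.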
void
rw_lock_destroy(rw_lock* lock)
{
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && find_thread(NULL)
			!= lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}


#if !KDEBUG_RW_LOCK_DEBUG

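// Contended path of the read lock: the caller has already added itself to
// lock->count and found a writer announced there. We may still get the lock
// without blocking if we are that writer ourselves, or if the previous writer
// left a pending_readers credit for readers it had already accounted for.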
status_t
_rw_lock_read_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// We might be the writer ourselves.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	// we need to wait
	return rw_lock_wait(lock, false);
}


void
_rw_lock_read_unlock(rw_lock* lock, bool threadsLocked)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);

	// If we're still holding the write lock or if there are other readers,
	// no-one can be woken up.
	if (lock->holder == find_thread(NULL)) {
		lock->owner_count--;
		return;
	}

	if (--lock->active_readers > 0)
		return;

	if (lock->active_readers < 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		lock->active_readers = 0;
		return;
	}

	rw_lock_unblock(lock);
}

#endif	// !KDEBUG_RW_LOCK_DEBUG


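// Acquires the write lock. A writer announces itself by adding
// RW_LOCK_WRITER_COUNT_BASE to lock->count; if the previous value was 0 the
// lock was free and is ours right away. Otherwise the first waiting writer
// notes how many readers are still active and the thread blocks until
// rw_lock_unblock() hands the lock over.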
status_t
rw_lock_write_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	// If we're already the lock holder, we just need to increment the owner
	// count.
	thread_id thread = find_thread(NULL);
	if (lock->holder == thread) {
		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// announce our claim
	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);

	if (oldCount == 0) {
		// No-one else held a read or write lock, so it's ours now.
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// We have to wait. If we're the first writer, note the current reader
	// count.
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers = oldCount - lock->pending_readers;

	status_t status = rw_lock_wait(lock, true);
	if (status == B_OK) {
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
	}

	return status;
}


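// Releases the write lock, or just one level of nested ownership if the
// holder acquired it more than once. On the final release the writer's share
// is removed from lock->count and waiting threads are woken up; readers that
// raced with us but have not started to wait yet are credited via
// pending_readers.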
void
_rw_lock_write_unlock(rw_lock* lock, bool threadsLocked)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);

	if (find_thread(NULL) != lock->holder) {
		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
			lock);
		return;
	}

	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
		return;

	// We gave up our last write lock -- clean up and unblock waiters.
	int32 readerCount = lock->owner_count;
	lock->holder = -1;
	lock->owner_count = 0;

	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
	oldCount -= RW_LOCK_WRITER_COUNT_BASE;

	if (oldCount != 0) {
		// If writers are waiting, take over our reader count.
		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
			lock->active_readers = readerCount;
			rw_lock_unblock(lock);
		} else {
			// No waiting writer, but there are one or more readers. We will
			// unblock all waiting readers -- that's the easy part -- and must
			// also make sure that readers that haven't entered the critical
			// section yet won't start to wait. Otherwise a writer overtaking
			// such a reader would correctly start to wait, but the reader,
			// seeing the writer count > 0, would also start to wait. We set
			// pending_readers to the number of readers that are still expected
			// to enter the critical section.
			lock->pending_readers = oldCount - readerCount
				- rw_lock_unblock(lock);
		}
	}
}


// #pragma mark -


void
mutex_init(mutex* lock, const char *name)
{
	lock->name = name;
	lock->waiters = NULL;
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
#endif
	lock->flags = 0;
}


void
mutex_init_etc(mutex* lock, const char *name, uint32 flags)
{
	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
#endif
	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
}


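// Destroys the mutex. Any threads still waiting on it are unblocked with
// B_ERROR, and a name cloned at init time is freed.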
void
mutex_destroy(mutex* lock)
{
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && find_thread(NULL)
		!= lock->holder) {
		panic("mutex_destroy(): there are blocking threads, but caller doesn't "
			"hold the lock (%p)", lock);
		if (_mutex_lock(lock, true) != B_OK)
			return;
	}
#endif

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}


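// Unlocks "from" and acquires "to" without dropping the thread lock in
// between, so that releasing the one mutex and (potentially) starting to
// wait for the other happens atomically.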
status_t
mutex_switch_lock(mutex* from, mutex* to)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if !KDEBUG
	if (atomic_add(&from->count, 1) < -1)
#endif
		_mutex_unlock(from, true);

	return mutex_lock_threads_locked(to);
}


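// Like mutex_switch_lock(), but the lock given up is a read lock on an
// rw_lock rather than a mutex.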
status_t
mutex_switch_from_read_lock(rw_lock* from, mutex* to)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_write_unlock(from, true);
#else
	int32 oldCount = atomic_add(&from->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock(from, true);
#endif

	return mutex_lock_threads_locked(to);
}


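// Contended path of mutex_lock(): in the non-KDEBUG build the caller has
// already decremented lock->count and found the mutex held. Unless the holder
// released it in the meantime, the thread enqueues itself and blocks until
// _mutex_unlock() hands ownership over.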
status_t
_mutex_lock(mutex* lock, bool threadsLocked)
{
	// lock only if !threadsLocked
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	} else if (lock->holder == find_thread(NULL)) {
		panic("_mutex_lock(): double lock of %p by thread %ld", lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	get_user_thread()->wait_status = 1;
	locker.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	locker.Lock();

#if KDEBUG
	if (error == B_OK)
		lock->holder = get_thread_id(waiter.thread);
#endif

	return error;
}


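// Releases the mutex. If there is a waiter, ownership is passed directly to
// the first queued thread; otherwise the lock is merely marked as released,
// because a racing locker may already have decremented the count and be about
// to enqueue itself.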
void
_mutex_unlock(mutex* lock, bool threadsLocked)
{
	// lock only if !threadsLocked
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);

#if KDEBUG
	if (find_thread(NULL) != lock->holder) {
		panic("_mutex_unlock() failure: thread %ld is trying to release "
			"mutex %p (current holder %ld)\n", find_thread(NULL),
			lock, lock->holder);
		return;
	}
#endif

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);

#if KDEBUG
		// Set the holder to the unblocked thread right away. Besides
		// reflecting the actual situation, this avoids a race: if we set it
		// to -1, another locker could think the lock is not held by anyone.
		lock->holder = get_thread_id(waiter->thread);
#endif
	} else {
		// We've acquired the spinlock before the locker that is going to wait.
		// Just mark the lock as released.
#if KDEBUG
		lock->holder = -1;
#else
		lock->flags |= MUTEX_FLAG_RELEASED;
#endif
	}
}


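// Tries to acquire the mutex without blocking. In the KDEBUG build the holder
// field is claimed under the thread lock if the mutex is free; in the
// non-KDEBUG build this function always reports B_WOULD_BLOCK.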
status_t
_mutex_trylock(mutex* lock)
{
#if KDEBUG
	AutoLocker<ThreadSpinlock> _(sThreadSpinlock);

	if (lock->holder <= 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	}
#endif
	return B_WOULD_BLOCK;
}
680