xref: /haiku/src/tests/add-ons/kernel/kernelland_emu/lock.cpp (revision b671e9bbdbd10268a042b4f4cc4317ccd03d105e)
1 /*
2  * Copyright 2002-2009, Haiku Inc. All Rights Reserved.
3  * Distributed under the terms of the MIT license.
4  *
5  * Authors:
6  *		Ingo Weinhold, bonefish@cs.tu-berlin.de.
7  *		Axel Dörfler, axeld@pinc-software.de.
8  */
9 
10 #include <lock.h>
11 
12 #include <stdlib.h>
13 #include <string.h>
14 
15 #include <AutoLocker.h>
16 
17 // libroot
18 #include <user_thread.h>
19 
20 // system
21 #include <syscalls.h>
22 #include <user_thread_defs.h>
23 
24 #include "thread.h"
25 
26 
// Node in a mutex's FIFO waiter queue. Each instance lives on the waiting
// thread's stack (see _mutex_lock()); it is dequeued by the unlocking thread
// before the waiter is unblocked, so the stack storage is safe.
struct mutex_waiter {
	struct thread*	thread;		// the blocked thread
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};
32 
// Node in an rw_lock's FIFO waiter queue. Like mutex_waiter, each instance
// lives on the waiting thread's stack (see rw_lock_wait()) and is dequeued
// before the waiter is unblocked.
struct rw_lock_waiter {
	struct thread*	thread;		// the blocked thread
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;		// true, if waiting for the write lock
};
39 
40 #define MUTEX_FLAG_OWNS_NAME	MUTEX_FLAG_CLONE_NAME
41 #define MUTEX_FLAG_RELEASED		0x2
42 
43 #define RW_LOCK_FLAG_OWNS_NAME	RW_LOCK_FLAG_CLONE_NAME
44 
45 
/*!	Helper class playing the role of the kernel's thread spinlock. We don't
	use an actual spinlock, as that could be expensive in userland (the
	spinlock holder could be unscheduled while spinning); instead we use a
	benaphore.
*/
50 struct ThreadSpinlock {
51 	ThreadSpinlock()
52 		:
53 		fCount(1),
54 		fSemaphore(create_sem(0, "thread spinlock"))
55 	{
56 		if (fSemaphore < 0)
57 			panic("Failed to create thread spinlock semaphore!");
58 	}
59 
60 	~ThreadSpinlock()
61 	{
62 		if (fSemaphore >= 0)
63 			delete_sem(fSemaphore);
64 	}
65 
66 	bool Lock()
67 	{
68 		if (atomic_add(&fCount, -1) > 0)
69 			return true;
70 
71 		status_t error;
72 		do {
73 			error = acquire_sem(fSemaphore);
74 		} while (error == B_INTERRUPTED);
75 
76 		return error == B_OK;
77 	}
78 
79 	void Unlock()
80 	{
81 		if (atomic_add(&fCount, 1) < 0)
82 			release_sem(fSemaphore);
83 	}
84 
85 private:
86 	vint32	fCount;
87 	sem_id	fSemaphore;
88 };
89 
90 static ThreadSpinlock sThreadSpinlock;
91 
92 
93 // #pragma mark -
94 
95 
96 int32
97 recursive_lock_get_recursion(recursive_lock *lock)
98 {
99 	if (RECURSIVE_LOCK_HOLDER(lock) == find_thread(NULL))
100 		return lock->recursion;
101 
102 	return -1;
103 }
104 
105 
106 void
107 recursive_lock_init(recursive_lock *lock, const char *name)
108 {
109 	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
110 	RECURSIVE_LOCK_HOLDER(lock) = -1;
111 	lock->recursion = 0;
112 }
113 
114 
115 void
116 recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
117 {
118 	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
119 	RECURSIVE_LOCK_HOLDER(lock) = -1;
120 	lock->recursion = 0;
121 }
122 
123 
124 void
125 recursive_lock_destroy(recursive_lock *lock)
126 {
127 	if (lock == NULL)
128 		return;
129 
130 	mutex_destroy(&lock->lock);
131 }
132 
133 
134 status_t
135 recursive_lock_lock(recursive_lock *lock)
136 {
137 	thread_id thread = find_thread(NULL);
138 
139 	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
140 		mutex_lock(&lock->lock);
141 #if !KDEBUG
142 		lock->holder = thread;
143 #endif
144 	}
145 
146 	lock->recursion++;
147 	return B_OK;
148 }
149 
150 
151 status_t
152 recursive_lock_trylock(recursive_lock *lock)
153 {
154 	thread_id thread = find_thread(NULL);
155 
156 	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
157 		status_t status = mutex_trylock(&lock->lock);
158 		if (status != B_OK)
159 			return status;
160 
161 #if !KDEBUG
162 		lock->holder = thread;
163 #endif
164 	}
165 
166 	lock->recursion++;
167 	return B_OK;
168 }
169 
170 
171 void
172 recursive_lock_unlock(recursive_lock *lock)
173 {
174 	if (find_thread(NULL) != RECURSIVE_LOCK_HOLDER(lock))
175 		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);
176 
177 	if (--lock->recursion == 0) {
178 #if !KDEBUG
179 		lock->holder = -1;
180 #endif
181 		mutex_unlock(&lock->lock);
182 	}
183 }
184 
185 
186 //	#pragma mark -
187 
188 
/*!	Appends the calling thread to \a lock's waiter queue and blocks it.
	Must be called with sThreadSpinlock held; the spinlock is released while
	blocked and re-acquired before returning.
	\param writer \c true, if the thread waits for the write lock.
	\return \c B_OK when woken regularly by rw_lock_unblock(), otherwise the
		error passed to _kern_unblock_thread() (e.g. \c B_ERROR on destroy).
*/
static status_t
rw_lock_wait(rw_lock* lock, bool writer)
{
	// enqueue in waiter list; the node lives on our stack, which is safe,
	// since the waker dequeues it before unblocking us
	rw_lock_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block; wait_status must be set before dropping the spinlock, so an
	// unblock between Unlock() and _kern_block_thread() isn't lost
	get_user_thread()->wait_status = 1;
	sThreadSpinlock.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	sThreadSpinlock.Lock();
	return error;
}
216 
217 
/*!	Wakes the next eligible waiter(s) of \a lock: either the single writer at
	the head of the queue (once all readers are gone), or every reader up to
	the first queued writer. Grants the lock to the woken thread(s) before
	unblocking them. Must be called with sThreadSpinlock held.
*/
static void
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there any waiting threads at all and whether anyone
	// has the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder > 0)
		return;

	// writer at head of queue?
	if (waiter->writer) {
		// a writer can only take over once the last reader is gone
		if (lock->reader_count == 0) {
			// dequeue writer
			lock->waiters = waiter->next;
			if (lock->waiters != NULL)
				lock->waiters->last = waiter->last;

			// grant the write lock before unblocking, so no one sneaks in
			lock->holder = get_thread_id(waiter->thread);

			// unblock thread
			_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
		}
		return;
	}

	// wake up one or more readers -- all leading readers up to the first
	// queued writer get the lock at once
	while ((waiter = lock->waiters) != NULL && !waiter->writer) {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->reader_count++;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
	}
}
256 
257 
258 void
259 rw_lock_init(rw_lock* lock, const char* name)
260 {
261 	lock->name = name;
262 	lock->waiters = NULL;
263 	lock->holder = -1;
264 	lock->reader_count = 0;
265 	lock->writer_count = 0;
266 	lock->owner_count = 0;
267 	lock->flags = 0;
268 }
269 
270 
271 void
272 rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
273 {
274 	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
275 	lock->waiters = NULL;
276 	lock->holder = -1;
277 	lock->reader_count = 0;
278 	lock->writer_count = 0;
279 	lock->owner_count = 0;
280 	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;
281 }
282 
283 
/*!	Destroys \a lock: unblocks all still-waiting threads with \c B_ERROR and,
	if the name was cloned (RW_LOCK_FLAG_CLONE_NAME), frees it. In KDEBUG
	builds it panics when called with waiters present but without holding the
	write lock.
*/
void
rw_lock_destroy(rw_lock* lock)
{
	// remember the name now -- it is cleared under the spinlock below
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && find_thread(NULL)
		!= lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		// try to recover by acquiring the write lock ourselves (must drop
		// the spinlock for that)
		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	// free() outside the spinlock -- it might be expensive
	free(name);
}
320 
321 
/*!	Acquires \a lock for reading. Succeeds immediately while no writer is
	active or queued; a thread already holding the write lock just increments
	its owner count. Otherwise blocks until woken by rw_lock_unblock().
*/
status_t
rw_lock_read_lock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
	// debug mode degenerates read locks to write locks
	return rw_lock_write_lock(lock);
#else
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	if (lock->writer_count == 0) {
		lock->reader_count++;
		return B_OK;
	}
	if (lock->holder == find_thread(NULL)) {
		// we already hold the write lock -- nested acquisition
		lock->owner_count++;
		return B_OK;
	}

	return rw_lock_wait(lock, false);
#endif
}
342 
343 
/*!	Releases a read lock on \a lock. If the calling thread actually holds the
	write lock (nested read after write), this unwinds one owner-count level
	instead. Returns \c B_BAD_VALUE (after a panic) when the lock isn't
	read-locked at all.
*/
status_t
rw_lock_read_unlock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
	// debug mode degenerates read locks to write locks
	return rw_lock_write_unlock(lock);
#else
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	if (lock->holder == find_thread(NULL)) {
		if (--lock->owner_count > 0)
			return B_OK;

		// this originally has been a write lock
		lock->writer_count--;
		lock->holder = -1;

		rw_lock_unblock(lock);
		return B_OK;
	}

	if (lock->reader_count <= 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		return B_BAD_VALUE;
	}

	lock->reader_count--;

	rw_lock_unblock(lock);
	return B_OK;
#endif
}
375 
376 
/*!	Acquires \a lock for writing, blocking while readers or another writer are
	active. Re-entrant for the holding thread (owner_count). Note that
	writer_count is incremented before waiting, so new readers are kept out
	while we queue up.
*/
status_t
rw_lock_write_lock(rw_lock* lock)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

	if (lock->reader_count == 0 && lock->writer_count == 0) {
		// lock is completely free -- take it
		lock->writer_count++;
		lock->holder = find_thread(NULL);
		lock->owner_count = 1;
		return B_OK;
	}
	if (lock->holder == find_thread(NULL)) {
		// nested write lock by the holder
		lock->owner_count++;
		return B_OK;
	}

	// announce ourselves before blocking, so readers arriving later wait
	lock->writer_count++;

	status_t status = rw_lock_wait(lock, true);
	if (status == B_OK) {
		lock->holder = find_thread(NULL);
		lock->owner_count = 1;
	}
	return status;
}
402 
403 
404 status_t
405 rw_lock_write_unlock(rw_lock* lock)
406 {
407 	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
408 
409 	if (find_thread(NULL) != lock->holder) {
410 		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
411 			lock);
412 		return B_BAD_VALUE;
413 	}
414 	if (--lock->owner_count > 0)
415 		return B_OK;
416 
417 	lock->writer_count--;
418 	lock->holder = -1;
419 
420 	rw_lock_unblock(lock);
421 
422 	return B_OK;
423 }
424 
425 
426 // #pragma mark -
427 
428 
429 void
430 mutex_init(mutex* lock, const char *name)
431 {
432 	lock->name = name;
433 	lock->waiters = NULL;
434 #if KDEBUG
435 	lock->holder = -1;
436 #else
437 	lock->count = 0;
438 #endif
439 	lock->flags = 0;
440 }
441 
442 
443 void
444 mutex_init_etc(mutex* lock, const char *name, uint32 flags)
445 {
446 	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
447 	lock->waiters = NULL;
448 #if KDEBUG
449 	lock->holder = -1;
450 #else
451 	lock->count = 0;
452 #endif
453 	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
454 }
455 
456 
/*!	Destroys \a lock: unblocks all still-waiting threads with \c B_ERROR and,
	if the name was cloned (MUTEX_FLAG_CLONE_NAME), frees it. In KDEBUG
	builds it panics when called with waiters present but without holding the
	lock.
*/
void
mutex_destroy(mutex* lock)
{
	// remember the name now -- it is cleared under the spinlock below
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && find_thread(NULL)
		!= lock->holder) {
		panic("mutex_destroy(): there are blocking threads, but caller doesn't "
			"hold the lock (%p)", lock);
		// try to recover by acquiring the lock ourselves (threadsLocked)
		if (_mutex_lock(lock, true) != B_OK)
			return;
	}
#endif

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	// free() outside the spinlock -- it might be expensive
	free(name);
}
490 
491 
/*!	Atomically (with respect to the thread spinlock) unlocks \a from and
	starts locking \a to, so no other thread can interleave between the two
	operations. Returns the result of locking \a to.
*/
status_t
mutex_switch_lock(mutex* from, mutex* to)
{
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);

#if !KDEBUG
	// Fast-path unlock: the slow path is only needed when the count shows
	// waiters. NB: this `if` guards the _mutex_unlock() call below; in
	// KDEBUG builds the call is made unconditionally.
	if (atomic_add(&from->count, 1) < -1)
#endif
		_mutex_unlock(from, true);

	return mutex_lock_threads_locked(to);
}
504 
505 
/*!	Slow path of mutex_lock(): enqueues the calling thread and blocks until
	the mutex is handed over to it.
	\param threadsLocked \c true, if the caller already holds
		sThreadSpinlock; otherwise it is acquired (and released) here.
	\return \c B_OK on success, otherwise the error the unblocking thread
		passed (e.g. \c B_ERROR from mutex_destroy()).
*/
status_t
_mutex_lock(mutex* lock, bool threadsLocked)
{
	// lock only, if !threadsLocked
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	} else if (lock->holder == find_thread(NULL)) {
		panic("_mutex_lock(): double lock of %p by thread %ld", lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using unitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		// the unlocker beat us here and marked the mutex released for us
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list; the node lives on our stack, which is safe,
	// since the unlocking thread dequeues it before unblocking us
	mutex_waiter waiter;
	waiter.thread = get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block; wait_status must be set before dropping the spinlock, so an
	// unblock between Unlock() and _kern_block_thread() isn't lost
	get_user_thread()->wait_status = 1;
	locker.Unlock();

	status_t error;
	while ((error = _kern_block_thread(0, 0)) == B_INTERRUPTED) {
	}

	locker.Lock();

#if KDEBUG
	if (error == B_OK)
		lock->holder = get_thread_id(waiter.thread);
#endif

	return error;
}
559 
560 
/*!	Slow path of mutex_unlock(): hands the mutex over to the first waiter, or
	marks it released when no waiter is queued yet.
	\param threadsLocked \c true, if the caller already holds
		sThreadSpinlock; otherwise it is acquired (and released) here.
*/
void
_mutex_unlock(mutex* lock, bool threadsLocked)
{
	// lock only, if !threadsLocked
	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);

#if KDEBUG
	if (find_thread(NULL) != lock->holder) {
		panic("_mutex_unlock() failure: thread %ld is trying to release "
			"mutex %p (current holder %ld)\n", find_thread(NULL),
			lock, lock->holder);
		return;
	}
#endif

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		// unblock thread
		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);

#if KDEBUG
		// Already set the holder to the unblocked thread. Besides that this
		// actually reflects the current situation, setting it to -1 would
		// cause a race condition, since another locker could think the lock
		// is not held by anyone.
		lock->holder = get_thread_id(waiter->thread);
#endif
	} else {
		// We've acquired the spinlock before the locker that is going to wait.
		// Just mark the lock as released.
#if KDEBUG
		lock->holder = -1;
#else
		lock->flags |= MUTEX_FLAG_RELEASED;
#endif
	}
}
603 
604 
/*!	Slow path of mutex_trylock(). Only meaningful in KDEBUG builds, where it
	takes the lock iff it is currently unheld. In non-KDEBUG builds it always
	returns \c B_WOULD_BLOCK -- presumably the mutex_trylock() wrapper handles
	the non-debug case itself (defined elsewhere; not visible here).
*/
status_t
_mutex_trylock(mutex* lock)
{
#if KDEBUG
	AutoLocker<ThreadSpinlock> _(sThreadSpinlock);

	// holder <= 0 covers both "unheld" (-1) and "uninitialized" (0)
	if (lock->holder <= 0) {
		lock->holder = find_thread(NULL);
		return B_OK;
	}
#endif
	return B_WOULD_BLOCK;
}
618