xref: /haiku/src/system/libroot/posix/pthread/pthread_rwlock.cpp (revision 52f7c9389475e19fc21487b38064b4390eeb6fea)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 #include <pthread.h>
7 
8 #include <new>
9 
10 #include <Debug.h>
11 
12 #include <AutoLocker.h>
13 #include <syscalls.h>
14 #include <user_mutex_defs.h>
15 #include <user_thread.h>
16 #include <util/DoublyLinkedList.h>
17 
18 #include "pthread_private.h"
19 
20 #define MAX_READER_COUNT	1000000
21 
22 #define RWLOCK_FLAG_SHARED	0x01
23 
24 
25 struct Waiter : DoublyLinkedListLinkImpl<Waiter> {
26 	Waiter(bool writer)
27 		:
28 		userThread(get_user_thread()),
29 		thread(find_thread(NULL)),
30 		writer(writer),
31 		queued(false)
32 	{
33 	}
34 
35 	user_thread*	userThread;
36 	thread_id		thread;
37 	status_t		status;
38 	bool			writer;
39 	bool			queued;
40 };
41 
42 typedef DoublyLinkedList<Waiter> WaiterList;
43 
44 
45 struct SharedRWLock {
46 	uint32_t	flags;
47 	int32_t		owner;
48 	int32_t		sem;
49 
50 	status_t Init()
51 	{
52 		flags = RWLOCK_FLAG_SHARED;
53 		owner = -1;
54 		sem = create_sem(MAX_READER_COUNT, "pthread rwlock");
55 
56 		return sem >= 0 ? B_OK : EAGAIN;
57 	}
58 
59 	status_t Destroy()
60 	{
61 		if (sem < 0)
62 			return B_BAD_VALUE;
63 		return delete_sem(sem) == B_OK ? B_OK : B_BAD_VALUE;
64 	}
65 
66 	status_t ReadLock(uint32 flags, bigtime_t timeout)
67 	{
68 		return acquire_sem_etc(sem, 1, flags, timeout);
69 	}
70 
71 	status_t WriteLock(uint32 flags, bigtime_t timeout)
72 	{
73 		status_t error = acquire_sem_etc(sem, MAX_READER_COUNT,
74 			flags, timeout);
75 		if (error == B_OK)
76 			owner = find_thread(NULL);
77 		return error;
78 	}
79 
80 	status_t Unlock()
81 	{
82 		if (find_thread(NULL) == owner) {
83 			owner = -1;
84 			return release_sem_etc(sem, MAX_READER_COUNT, 0);
85 		} else
86 			return release_sem(sem);
87 	}
88 };
89 
90 
91 struct LocalRWLock {
92 	uint32_t	flags;
93 	int32_t		owner;
94 	int32_t		mutex;
95 	int32_t		unused;
96 	int32_t		reader_count;
97 	int32_t		writer_count;
98 		// Note, that reader_count and writer_count are not used the same way.
99 		// writer_count includes the write lock owner as well as waiting
100 		// writers. reader_count includes read lock owners only.
101 	WaiterList	waiters;
102 
103 	status_t Init()
104 	{
105 		flags = 0;
106 		owner = -1;
107 		mutex = 0;
108 		reader_count = 0;
109 		writer_count = 0;
110 		new(&waiters) WaiterList;
111 
112 		return B_OK;
113 	}
114 
115 	status_t Destroy()
116 	{
117 		Locker locker(this);
118 		if (reader_count > 0 || waiters.Head() != NULL || writer_count > 0)
119 			return EBUSY;
120 		return B_OK;
121 	}
122 
123 	bool StructureLock()
124 	{
125 		// Enter critical region: lock the mutex
126 		int32 status = atomic_or((int32*)&mutex, B_USER_MUTEX_LOCKED);
127 
128 		// If already locked, call the kernel
129 		if ((status & (B_USER_MUTEX_LOCKED | B_USER_MUTEX_WAITING)) != 0) {
130 			do {
131 				status = _kern_mutex_lock((int32*)&mutex, NULL, 0, 0);
132 			} while (status == B_INTERRUPTED);
133 
134 			if (status != B_OK)
135 				return false;
136 		}
137 		return true;
138 	}
139 
140 	void StructureUnlock()
141 	{
142 		// Exit critical region: unlock the mutex
143 		int32 status = atomic_and((int32*)&mutex,
144 			~(int32)B_USER_MUTEX_LOCKED);
145 
146 		if ((status & B_USER_MUTEX_WAITING) != 0)
147 			_kern_mutex_unlock((int32*)&mutex, 0);
148 	}
149 
150 	status_t ReadLock(uint32 flags, bigtime_t timeout)
151 	{
152 		Locker locker(this);
153 
154 		if (writer_count == 0) {
155 			reader_count++;
156 			return B_OK;
157 		}
158 
159 		return _Wait(false, flags, timeout);
160 	}
161 
162 	status_t WriteLock(uint32 flags, bigtime_t timeout)
163 	{
164 		Locker locker(this);
165 
166 		if (reader_count == 0 && writer_count == 0) {
167 			writer_count++;
168 			owner = find_thread(NULL);
169 			return B_OK;
170 		}
171 
172 		return _Wait(true, flags, timeout);
173 	}
174 
175 	status_t Unlock()
176 	{
177 		Locker locker(this);
178 
179 		if (find_thread(NULL) == owner) {
180 			writer_count--;
181 			owner = -1;
182 		} else
183 			reader_count--;
184 
185 		_Unblock();
186 
187 		return B_OK;
188 	}
189 
190 private:
191 	status_t _Wait(bool writer, uint32 flags, bigtime_t timeout)
192 	{
193 		if (timeout == 0)
194 			return B_TIMED_OUT;
195 
196 		Waiter waiter(writer);
197 		waiters.Add(&waiter);
198 		waiter.queued = true;
199 		waiter.userThread->wait_status = 1;
200 
201 		if (writer)
202 			writer_count++;
203 
204 		StructureUnlock();
205 		status_t error = _kern_block_thread(flags, timeout);
206 		StructureLock();
207 
208 		if (!waiter.queued)
209 			return waiter.status;
210 
211 		// we're still queued, which means an error (timeout, interrupt)
212 		// occurred
213 		waiters.Remove(&waiter);
214 
215 		if (writer)
216 			writer_count--;
217 
218 		_Unblock();
219 
220 		return error;
221 	}
222 
223 	void _Unblock()
224 	{
225 		// Check whether there any waiting threads at all and whether anyone
226 		// has the write lock
227 		Waiter* waiter = waiters.Head();
228 		if (waiter == NULL || owner >= 0)
229 			return;
230 
231 		// writer at head of queue?
232 		if (waiter->writer) {
233 			if (reader_count == 0) {
234 				waiter->status = B_OK;
235 				waiter->queued = false;
236 				waiters.Remove(waiter);
237 				owner = waiter->thread;
238 
239 				if (waiter->userThread->wait_status > 0)
240 					_kern_unblock_thread(waiter->thread, B_OK);
241 			}
242 			return;
243 		}
244 
245 		// wake up one or more readers -- we unblock more than one reader at
246 		// a time to save trips to the kernel
247 		while (!waiters.IsEmpty() && !waiters.Head()->writer) {
248 			static const int kMaxReaderUnblockCount = 128;
249 			thread_id readers[kMaxReaderUnblockCount];
250 			int readerCount = 0;
251 
252 			while (readerCount < kMaxReaderUnblockCount
253 					&& (waiter = waiters.Head()) != NULL
254 					&& !waiter->writer) {
255 				waiter->status = B_OK;
256 				waiter->queued = false;
257 				waiters.Remove(waiter);
258 
259 				if (waiter->userThread->wait_status > 0) {
260 					readers[readerCount++] = waiter->thread;
261 					reader_count++;
262 				}
263 			}
264 
265 			if (readerCount > 0)
266 				_kern_unblock_threads(readers, readerCount, B_OK);
267 		}
268 	}
269 
270 
271 	struct Locking {
272 		inline bool Lock(LocalRWLock* lockable)
273 		{
274 			return lockable->StructureLock();
275 		}
276 
277 		inline void Unlock(LocalRWLock* lockable)
278 		{
279 			lockable->StructureUnlock();
280 		}
281 	};
282 	typedef AutoLocker<LocalRWLock, Locking> Locker;
283 };
284 
285 
286 static void inline
287 assert_dummy()
288 {
289 	STATIC_ASSERT(sizeof(pthread_rwlock_t) >= sizeof(SharedRWLock));
290 	STATIC_ASSERT(sizeof(pthread_rwlock_t) >= sizeof(LocalRWLock));
291 }
292 
293 
294 // #pragma mark - public lock functions
295 
296 
297 int
298 pthread_rwlock_init(pthread_rwlock_t* lock, const pthread_rwlockattr_t* _attr)
299 {
300 	pthread_rwlockattr* attr = _attr != NULL ? *_attr : NULL;
301 	bool shared = attr != NULL && (attr->flags & RWLOCK_FLAG_SHARED) != 0;
302 
303 	if (shared)
304 		return ((SharedRWLock*)lock)->Init();
305 	else
306 		return ((LocalRWLock*)lock)->Init();
307 }
308 
309 
310 int
311 pthread_rwlock_destroy(pthread_rwlock_t* lock)
312 {
313 	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
314 		return ((SharedRWLock*)lock)->Destroy();
315 	else
316 		return ((LocalRWLock*)lock)->Destroy();
317 }
318 
319 
320 int
321 pthread_rwlock_rdlock(pthread_rwlock_t* lock)
322 {
323 	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
324 		return ((SharedRWLock*)lock)->ReadLock(0, B_INFINITE_TIMEOUT);
325 	else
326 		return ((LocalRWLock*)lock)->ReadLock(0, B_INFINITE_TIMEOUT);
327 }
328 
329 
330 int
331 pthread_rwlock_tryrdlock(pthread_rwlock_t* lock)
332 {
333 	status_t error;
334 	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
335 		error = ((SharedRWLock*)lock)->ReadLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);
336 	else
337 		error = ((LocalRWLock*)lock)->ReadLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);
338 
339 	return error == B_TIMED_OUT ? EBUSY : error;
340 }
341 
342 
343 int
344 pthread_rwlock_clockrdlock(pthread_rwlock_t* lock, clockid_t clock_id,
345             const struct timespec *abstime)
346 {
347 	bigtime_t timeout = abstime->tv_sec * 1000000LL
348 		+ abstime->tv_nsec / 1000LL;
349 	uint32 flags = 0;
350 	if (timeout >= 0) {
351 		switch (clock_id) {
352 			case CLOCK_REALTIME:
353 				flags = B_ABSOLUTE_REAL_TIME_TIMEOUT;
354 				break;
355 			case CLOCK_MONOTONIC:
356 				flags = B_ABSOLUTE_TIMEOUT;
357 				break;
358 			default:
359 				return EINVAL;
360 		}
361 	}
362 
363 	status_t error;
364 	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
365 		error = ((SharedRWLock*)lock)->ReadLock(flags, timeout);
366 	else
367 		error = ((LocalRWLock*)lock)->ReadLock(flags, timeout);
368 
369 	return error == B_TIMED_OUT ? EBUSY : error;
370 }
371 
372 
int
pthread_rwlock_timedrdlock(pthread_rwlock_t* lock,
	const struct timespec *abstime)
{
	// POSIX defines the timed variant against CLOCK_REALTIME; delegate to
	// the clock-aware implementation.
	return pthread_rwlock_clockrdlock(lock, CLOCK_REALTIME, abstime);
}
379 
380 
381 int
382 pthread_rwlock_wrlock(pthread_rwlock_t* lock)
383 {
384 	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
385 		return ((SharedRWLock*)lock)->WriteLock(0, B_INFINITE_TIMEOUT);
386 	else
387 		return ((LocalRWLock*)lock)->WriteLock(0, B_INFINITE_TIMEOUT);
388 }
389 
390 
391 int
392 pthread_rwlock_trywrlock(pthread_rwlock_t* lock)
393 {
394 	status_t error;
395 	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
396 		error = ((SharedRWLock*)lock)->WriteLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);
397 	else
398 		error = ((LocalRWLock*)lock)->WriteLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);
399 
400 	return error == B_TIMED_OUT ? EBUSY : error;
401 }
402 
403 
404 int
405 pthread_rwlock_clockwrlock (pthread_rwlock_t* lock, clockid_t clock_id,
406 	const struct timespec *abstime)
407 {
408 	bigtime_t timeout = abstime->tv_sec * 1000000LL
409 		+ abstime->tv_nsec / 1000LL;
410 	uint32 flags = 0;
411 	if (timeout >= 0) {
412 		switch (clock_id) {
413 			case CLOCK_REALTIME:
414 				flags = B_ABSOLUTE_REAL_TIME_TIMEOUT;
415 				break;
416 			case CLOCK_MONOTONIC:
417 				flags = B_ABSOLUTE_TIMEOUT;
418 				break;
419 			default:
420 				return EINVAL;
421 		}
422 	}
423 
424 	status_t error;
425 	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
426 		error = ((SharedRWLock*)lock)->WriteLock(flags, timeout);
427 	else
428 		error = ((LocalRWLock*)lock)->WriteLock(flags, timeout);
429 
430 	return error == B_TIMED_OUT ? EBUSY : error;
431 }
432 
433 
int
pthread_rwlock_timedwrlock(pthread_rwlock_t* lock,
	const struct timespec *abstime)
{
	// POSIX defines the timed variant against CLOCK_REALTIME; delegate to
	// the clock-aware implementation.
	return pthread_rwlock_clockwrlock(lock, CLOCK_REALTIME, abstime);
}
440 
441 
442 int
443 pthread_rwlock_unlock(pthread_rwlock_t* lock)
444 {
445 	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
446 		return ((SharedRWLock*)lock)->Unlock();
447 	else
448 		return ((LocalRWLock*)lock)->Unlock();
449 }
450 
451 
452 // #pragma mark - public attribute functions
453 
454 
455 int
456 pthread_rwlockattr_init(pthread_rwlockattr_t* _attr)
457 {
458 	pthread_rwlockattr* attr = (pthread_rwlockattr*)malloc(
459 		sizeof(pthread_rwlockattr));
460 	if (attr == NULL)
461 		return B_NO_MEMORY;
462 
463 	attr->flags = 0;
464 	*_attr = attr;
465 
466 	return 0;
467 }
468 
469 
470 int
471 pthread_rwlockattr_destroy(pthread_rwlockattr_t* _attr)
472 {
473 	pthread_rwlockattr* attr = *_attr;
474 
475 	free(attr);
476 	return 0;
477 }
478 
479 
480 int
481 pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* _attr, int* shared)
482 {
483 	pthread_rwlockattr* attr = *_attr;
484 
485 	*shared = (attr->flags & RWLOCK_FLAG_SHARED) != 0
486 		? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
487 	return 0;
488 }
489 
490 
491 int
492 pthread_rwlockattr_setpshared(pthread_rwlockattr_t* _attr, int shared)
493 {
494 	pthread_rwlockattr* attr = *_attr;
495 
496 	if (shared == PTHREAD_PROCESS_SHARED)
497 		attr->flags |= RWLOCK_FLAG_SHARED;
498 	else
499 		attr->flags &= ~RWLOCK_FLAG_SHARED;
500 
501 	return 0;
502 }
503 
504