/*
 * Copyright 2022, Haiku, Inc. All rights reserved.
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */

#include <pthread.h>

#include <new>

#include <Debug.h>

#include <AutoLocker.h>
#include <syscalls.h>
#include <time_private.h>
#include <user_mutex_defs.h>
#include <user_thread.h>
#include <util/DoublyLinkedList.h>

#include "pthread_private.h"

#define MAX_READER_COUNT	1000000

#define RWLOCK_FLAG_SHARED	0x01

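// A Waiter is one entry in a LocalRWLock's wait queue. It records the waiting
// thread, whether it wants the lock for writing, and whether it is still
// queued, so that _Unblock() can hand the lock over to it directly.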
struct Waiter : DoublyLinkedListLinkImpl<Waiter> {
	Waiter(bool writer)
		:
		userThread(get_user_thread()),
		thread(find_thread(NULL)),
		writer(writer),
		queued(false)
	{
	}

	user_thread*	userThread;
	thread_id		thread;
	status_t		status;
	bool			writer;
	bool			queued;
};

typedef DoublyLinkedList<Waiter> WaiterList;

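// Process-shared variant: all state lives in the pthread_rwlock_t itself and a
// kernel semaphore does the queueing. A reader acquires one unit of the
// semaphore; a writer acquires all MAX_READER_COUNT units, which excludes both
// readers and other writers.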
struct SharedRWLock {
	uint32_t	flags;
	int32_t		owner;
	int32_t		sem;

	status_t Init()
	{
		flags = RWLOCK_FLAG_SHARED;
		owner = -1;
		sem = create_sem(MAX_READER_COUNT, "pthread rwlock");

		return sem >= 0 ? B_OK : EAGAIN;
	}

	status_t Destroy()
	{
		if (sem < 0)
			return B_BAD_VALUE;
		return delete_sem(sem) == B_OK ? B_OK : B_BAD_VALUE;
	}

	status_t ReadLock(uint32 flags, bigtime_t timeout)
	{
		status_t status;
		do {
			status = acquire_sem_etc(sem, 1, flags, timeout);
		} while (status == B_INTERRUPTED);
		return status;
	}

	status_t WriteLock(uint32 flags, bigtime_t timeout)
	{
		status_t status;
		do {
			status = acquire_sem_etc(sem, MAX_READER_COUNT, flags, timeout);
		} while (status == B_INTERRUPTED);
		if (status == B_OK)
			owner = find_thread(NULL);
		return status;
	}

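	// Unlock() tells a writer from a reader via the owner field: only the
	// write-lock holder recorded its thread ID there, so it has to give back
	// all MAX_READER_COUNT units, while a reader releases just the one it
	// took.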
	status_t Unlock()
	{
		if (find_thread(NULL) == owner) {
			owner = -1;
			return release_sem_etc(sem, MAX_READER_COUNT, 0);
		} else
			return release_sem(sem);
	}
};


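// Process-private variant: the lock state is protected by a lightweight user
// mutex (the "structure lock"), and blocked threads are kept in a FIFO waiter
// list and woken through the kernel's thread blocking/unblocking primitives.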
struct LocalRWLock {
	uint32_t	flags;
	int32_t		owner;
	int32_t		mutex;
	int32_t		unused;
	int32_t		reader_count;
	int32_t		writer_count;
		// Note that reader_count and writer_count are not used the same way.
		// writer_count includes the write lock owner as well as waiting
		// writers. reader_count includes read lock owners only.
	WaiterList waiters;

	status_t Init()
	{
		flags = 0;
		owner = -1;
		mutex = 0;
		reader_count = 0;
		writer_count = 0;
		new(&waiters) WaiterList;

		return B_OK;
	}

	status_t Destroy()
	{
		Locker locker(this);
		if (reader_count > 0 || waiters.Head() != NULL || writer_count > 0)
			return EBUSY;
		return B_OK;
	}

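	// Lock the structure: the fast path is a single atomic set of the user
	// mutex; only if it was already held do we fall back to blocking in the
	// kernel.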
	bool StructureLock()
	{
		const int32 oldValue = atomic_test_and_set((int32*)&mutex, B_USER_MUTEX_LOCKED, 0);
		if (oldValue != 0) {
			status_t status;
			do {
				status = _kern_mutex_lock((int32*)&mutex, NULL, 0, 0);
			} while (status == B_INTERRUPTED);

			if (status != B_OK)
				return false;
		}
		return true;
	}

	void StructureUnlock()
	{
		// Exit critical region: unlock the mutex
		int32 status = atomic_and((int32*)&mutex,
			~(int32)B_USER_MUTEX_LOCKED);
		if ((status & B_USER_MUTEX_WAITING) != 0)
			_kern_mutex_unblock((int32*)&mutex, 0);
	}

	status_t ReadLock(uint32 flags, bigtime_t timeout)
	{
		Locker locker(this);

		if (writer_count == 0) {
			reader_count++;
			return B_OK;
		}

		return _Wait(false, flags, timeout);
	}

	status_t WriteLock(uint32 flags, bigtime_t timeout)
	{
		Locker locker(this);

		if (reader_count == 0 && writer_count == 0) {
			writer_count++;
			owner = find_thread(NULL);
			return B_OK;
		}

		return _Wait(true, flags, timeout);
	}

	status_t Unlock()
	{
		Locker locker(this);

		if (find_thread(NULL) == owner) {
			writer_count--;
			owner = -1;
		} else
			reader_count--;

		_Unblock();

		return B_OK;
	}

private:
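	// Enqueue the calling thread and block. The structure lock is dropped
	// while the thread is blocked in the kernel and reacquired afterwards;
	// if _Unblock() dequeued us in the meantime, waiter.status carries the
	// result. Otherwise the block ended with an error (timeout, interrupt)
	// and we have to dequeue ourselves again.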
	status_t _Wait(bool writer, uint32 flags, bigtime_t timeout)
	{
		if (timeout == 0)
			return B_TIMED_OUT;

		if (writer_count == 1 && owner == find_thread(NULL))
			return EDEADLK;

		Waiter waiter(writer);
		waiters.Add(&waiter);
		waiter.queued = true;
		waiter.userThread->wait_status = 1;

		if (writer)
			writer_count++;

		status_t status;
		do {
			StructureUnlock();
			status = _kern_block_thread(flags, timeout);
			StructureLock();

			if (!waiter.queued)
				return waiter.status;
		} while (status == B_INTERRUPTED);

		// we're still queued, which means an error (timeout, interrupt)
		// occurred
		waiters.Remove(&waiter);

		if (writer)
			writer_count--;

		_Unblock();

		return status;
	}

	void _Unblock()
	{
		// Check whether there are any waiting threads at all and whether
		// anyone has the write lock
		Waiter* waiter = waiters.Head();
		if (waiter == NULL || owner >= 0)
			return;

		// writer at head of queue?
		if (waiter->writer) {
			if (reader_count == 0) {
				waiter->status = B_OK;
				waiter->queued = false;
				waiters.Remove(waiter);
				owner = waiter->thread;

				if (waiter->userThread->wait_status > 0)
					_kern_unblock_thread(waiter->thread, B_OK);
			}
			return;
		}

		// wake up one or more readers -- we unblock more than one reader at
		// a time to save trips to the kernel
		while (!waiters.IsEmpty() && !waiters.Head()->writer) {
			static const int kMaxReaderUnblockCount = 128;
			thread_id readers[kMaxReaderUnblockCount];
			int readerCount = 0;

			while (readerCount < kMaxReaderUnblockCount
					&& (waiter = waiters.Head()) != NULL
					&& !waiter->writer) {
				waiter->status = B_OK;
				waiter->queued = false;
				waiters.Remove(waiter);

				if (waiter->userThread->wait_status > 0) {
					readers[readerCount++] = waiter->thread;
					reader_count++;
				}
			}

			if (readerCount > 0)
				_kern_unblock_threads(readers, readerCount, B_OK);
		}
	}


	struct Locking {
		inline bool Lock(LocalRWLock* lockable)
		{
			return lockable->StructureLock();
		}

		inline void Unlock(LocalRWLock* lockable)
		{
			lockable->StructureUnlock();
		}
	};
	typedef AutoLocker<LocalRWLock, Locking> Locker;
};


static void inline
assert_dummy()
{
	STATIC_ASSERT(sizeof(pthread_rwlock_t) >= sizeof(SharedRWLock));
	STATIC_ASSERT(sizeof(pthread_rwlock_t) >= sizeof(LocalRWLock));
}


// #pragma mark - public lock functions

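/*
 * Illustrative sketch of how callers typically use this API (not part of the
 * implementation):
 *
 *	pthread_rwlock_t lock;
 *	pthread_rwlock_init(&lock, NULL);	// process-private by default
 *
 *	pthread_rwlock_rdlock(&lock);		// any number of concurrent readers
 *	// ... read shared data ...
 *	pthread_rwlock_unlock(&lock);
 *
 *	pthread_rwlock_wrlock(&lock);		// exclusive access for one writer
 *	// ... modify shared data ...
 *	pthread_rwlock_unlock(&lock);
 *
 *	pthread_rwlock_destroy(&lock);
 */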

int
pthread_rwlock_init(pthread_rwlock_t* lock, const pthread_rwlockattr_t* _attr)
{
	pthread_rwlockattr* attr = _attr != NULL ? *_attr : NULL;
	bool shared = attr != NULL && (attr->flags & RWLOCK_FLAG_SHARED) != 0;

	if (shared)
		return ((SharedRWLock*)lock)->Init();
	else
		return ((LocalRWLock*)lock)->Init();
}


int
pthread_rwlock_destroy(pthread_rwlock_t* lock)
{
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		return ((SharedRWLock*)lock)->Destroy();
	else
		return ((LocalRWLock*)lock)->Destroy();
}


int
pthread_rwlock_rdlock(pthread_rwlock_t* lock)
{
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		return ((SharedRWLock*)lock)->ReadLock(0, B_INFINITE_TIMEOUT);
	else
		return ((LocalRWLock*)lock)->ReadLock(0, B_INFINITE_TIMEOUT);
}


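// The try variants are implemented as a lock attempt with an absolute timeout
// of 0, i.e. a point in time that has already passed; B_TIMED_OUT is then
// mapped to the EBUSY the caller expects.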
int
pthread_rwlock_tryrdlock(pthread_rwlock_t* lock)
{
	status_t error;
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		error = ((SharedRWLock*)lock)->ReadLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);
	else
		error = ((LocalRWLock*)lock)->ReadLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);

	return error == B_TIMED_OUT ? EBUSY : error;
}


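// The clock variants convert the absolute timespec to a bigtime_t and select
// the kernel timeout flag matching the requested clock. As allowed by POSIX,
// an invalid abstime only yields EINVAL if the lock could not be acquired
// anyway.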
int
pthread_rwlock_clockrdlock(pthread_rwlock_t* lock, clockid_t clock_id,
	const struct timespec *abstime)
{
	bigtime_t timeout = 0;
	bool invalidTime = false;
	if (abstime == NULL || !timespec_to_bigtime(*abstime, timeout))
		invalidTime = true;

	uint32 flags = 0;
	if (timeout >= 0) {
		switch (clock_id) {
			case CLOCK_REALTIME:
				flags = B_ABSOLUTE_REAL_TIME_TIMEOUT;
				break;
			case CLOCK_MONOTONIC:
				flags = B_ABSOLUTE_TIMEOUT;
				break;
			default:
				return EINVAL;
		}
	}

	status_t error;
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		error = ((SharedRWLock*)lock)->ReadLock(flags, timeout);
	else
		error = ((LocalRWLock*)lock)->ReadLock(flags, timeout);

	if (error != B_OK && invalidTime)
		return EINVAL;
	return (error == B_TIMED_OUT) ? EBUSY : error;
}


int
pthread_rwlock_timedrdlock(pthread_rwlock_t* lock,
	const struct timespec *abstime)
{
	return pthread_rwlock_clockrdlock(lock, CLOCK_REALTIME, abstime);
}


int
pthread_rwlock_wrlock(pthread_rwlock_t* lock)
{
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		return ((SharedRWLock*)lock)->WriteLock(0, B_INFINITE_TIMEOUT);
	else
		return ((LocalRWLock*)lock)->WriteLock(0, B_INFINITE_TIMEOUT);
}


int
pthread_rwlock_trywrlock(pthread_rwlock_t* lock)
{
	status_t error;
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		error = ((SharedRWLock*)lock)->WriteLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);
	else
		error = ((LocalRWLock*)lock)->WriteLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);

	return error == B_TIMED_OUT ? EBUSY : error;
}


int
pthread_rwlock_clockwrlock(pthread_rwlock_t* lock, clockid_t clock_id,
	const struct timespec *abstime)
{
	bigtime_t timeout = 0;
	bool invalidTime = false;
	if (abstime == NULL || !timespec_to_bigtime(*abstime, timeout))
		invalidTime = true;

	uint32 flags = 0;
	if (timeout >= 0) {
		switch (clock_id) {
			case CLOCK_REALTIME:
				flags = B_ABSOLUTE_REAL_TIME_TIMEOUT;
				break;
			case CLOCK_MONOTONIC:
				flags = B_ABSOLUTE_TIMEOUT;
				break;
			default:
				return EINVAL;
		}
	}

	status_t error;
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		error = ((SharedRWLock*)lock)->WriteLock(flags, timeout);
	else
		error = ((LocalRWLock*)lock)->WriteLock(flags, timeout);

	if (error != B_OK && invalidTime)
		return EINVAL;
	return (error == B_TIMED_OUT) ? EBUSY : error;
}


int
pthread_rwlock_timedwrlock(pthread_rwlock_t* lock,
	const struct timespec *abstime)
{
	return pthread_rwlock_clockwrlock(lock, CLOCK_REALTIME, abstime);
}


int
pthread_rwlock_unlock(pthread_rwlock_t* lock)
{
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		return ((SharedRWLock*)lock)->Unlock();
	else
		return ((LocalRWLock*)lock)->Unlock();
}


// #pragma mark - public attribute functions


int
pthread_rwlockattr_init(pthread_rwlockattr_t* _attr)
{
	pthread_rwlockattr* attr = (pthread_rwlockattr*)malloc(
		sizeof(pthread_rwlockattr));
	if (attr == NULL)
		return B_NO_MEMORY;

	attr->flags = 0;
	*_attr = attr;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t* _attr)
{
	pthread_rwlockattr* attr = *_attr;

	free(attr);
	return 0;
}


int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* _attr, int* shared)
{
	pthread_rwlockattr* attr = *_attr;

	*shared = (attr->flags & RWLOCK_FLAG_SHARED) != 0
		? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
	return 0;
}


int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t* _attr, int shared)
{
	pthread_rwlockattr* attr = *_attr;

	if (shared == PTHREAD_PROCESS_SHARED)
		attr->flags |= RWLOCK_FLAG_SHARED;
	else
		attr->flags &= ~RWLOCK_FLAG_SHARED;

	return 0;
}