/*
 * Copyright 2022, Haiku, Inc. All rights reserved.
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */

// POSIX read-write locks (pthread_rwlock_*).
//
// Two implementations share the pthread_rwlock_t storage and are selected by
// the RWLOCK_FLAG_SHARED bit, which both layouts keep in their first field:
//  * SharedRWLock — semaphore based, usable across processes (PSHARED).
//  * LocalRWLock  — user-mutex protected state with an explicit waiter
//    queue, process local only (the queue holds pointers into waiters'
//    stacks, which are meaningless in another address space).

#include <pthread.h>

#include <new>

#include <Debug.h>

#include <AutoLocker.h>
#include <syscalls.h>
#include <user_mutex_defs.h>
#include <user_thread.h>
#include <util/DoublyLinkedList.h>

#include "pthread_private.h"

// Number of semaphore units a SharedRWLock is created with: a reader
// acquires one unit, a writer acquires all of them. This therefore also
// caps the number of concurrent readers of a shared lock.
#define MAX_READER_COUNT	1000000

#define RWLOCK_FLAG_SHARED	0x01


// One queued waiter of a LocalRWLock; lives on the waiting thread's stack
// for the duration of LocalRWLock::_Wait().
struct Waiter : DoublyLinkedListLinkImpl<Waiter> {
	Waiter(bool writer)
		:
		userThread(get_user_thread()),
		thread(find_thread(NULL)),
		writer(writer),
		queued(false)
	{
	}

	user_thread*	userThread;	// the waiter's user_thread area (wait_status)
	thread_id		thread;
	status_t		status;		// set by _Unblock() before dequeueing
	bool			writer;		// true: waiting for the write lock
	bool			queued;		// still linked into LocalRWLock::waiters?
};

typedef DoublyLinkedList<Waiter> WaiterList;


// Process-shared implementation: a counting semaphore with MAX_READER_COUNT
// units. Readers take one unit each, a writer takes all of them, so a writer
// excludes everybody and readers exclude only writers.
struct SharedRWLock {
	uint32_t	flags;
	int32_t		owner;	// thread holding the write lock, or -1
	int32_t		sem;

	status_t Init()
	{
		flags = RWLOCK_FLAG_SHARED;
		owner = -1;
		sem = create_sem(MAX_READER_COUNT, "pthread rwlock");

		return sem >= 0 ? B_OK : EAGAIN;
	}

	status_t Destroy()
	{
		if (sem < 0)
			return B_BAD_VALUE;
		return delete_sem(sem) == B_OK ? B_OK : B_BAD_VALUE;
	}

	// Acquire one reader unit; loops to hide B_INTERRUPTED from callers
	// (pthread lock functions don't fail with EINTR).
	status_t ReadLock(uint32 flags, bigtime_t timeout)
	{
		status_t status;
		do {
			status = acquire_sem_etc(sem, 1, flags, timeout);
		} while (status == B_INTERRUPTED);
		return status;
	}

	// Acquire all units, i.e. wait until no reader or writer remains.
	status_t WriteLock(uint32 flags, bigtime_t timeout)
	{
		status_t status;
		do {
			status = acquire_sem_etc(sem, MAX_READER_COUNT, flags, timeout);
		} while (status == B_INTERRUPTED);
		if (status == B_OK)
			owner = find_thread(NULL);
		return status;
	}

	// Release the write lock (all units) if the caller is the recorded
	// owner, otherwise release a single reader unit.
	status_t Unlock()
	{
		if (find_thread(NULL) == owner) {
			owner = -1;
			return release_sem_etc(sem, MAX_READER_COUNT, 0);
		} else
			return release_sem(sem);
	}
};


// Process-local implementation: all state is protected by a lightweight
// user mutex ("structure lock"); blocked threads park themselves via
// _kern_block_thread() and are woken explicitly by _Unblock().
struct LocalRWLock {
	uint32_t	flags;
	int32_t		owner;			// write-lock holder, or -1
	int32_t		mutex;			// user mutex protecting this structure
	int32_t		unused;
	int32_t		reader_count;
	int32_t		writer_count;
		// Note, that reader_count and writer_count are not used the same way.
		// writer_count includes the write lock owner as well as waiting
		// writers. reader_count includes read lock owners only.
	WaiterList	waiters;		// FIFO of blocked Waiter objects

	status_t Init()
	{
		flags = 0;
		owner = -1;
		mutex = 0;
		reader_count = 0;
		writer_count = 0;
		// Placement-construct the list: Init() runs on raw
		// pthread_rwlock_t storage, no constructor was ever called.
		new(&waiters) WaiterList;

		return B_OK;
	}

	status_t Destroy()
	{
		Locker locker(this);
		// POSIX: destroying a lock that is held or waited on is an error.
		if (reader_count > 0 || waiters.Head() != NULL || writer_count > 0)
			return EBUSY;
		return B_OK;
	}

	// Lock the structure mutex. Fast path: uncontended atomic
	// test-and-set; otherwise fall back to the kernel mutex syscall.
	bool StructureLock()
	{
		const int32 oldValue = atomic_test_and_set((int32*)&mutex, B_USER_MUTEX_LOCKED, 0);
		if (oldValue != 0) {
			status_t status;
			do {
				status = _kern_mutex_lock((int32*)&mutex, NULL, 0, 0);
			} while (status == B_INTERRUPTED);

			if (status != B_OK)
				return false;
		}
		return true;
	}

	void StructureUnlock()
	{
		// Exit critical region: unlock the mutex
		int32 status = atomic_and((int32*)&mutex,
			~(int32)B_USER_MUTEX_LOCKED);
		if ((status & B_USER_MUTEX_WAITING) != 0)
			_kern_mutex_unblock((int32*)&mutex, 0);
	}

	// Grant a read lock immediately if no writer holds or waits for the
	// lock (writers have priority via writer_count); otherwise queue up.
	status_t ReadLock(uint32 flags, bigtime_t timeout)
	{
		Locker locker(this);

		if (writer_count == 0) {
			reader_count++;
			return B_OK;
		}

		return _Wait(false, flags, timeout);
	}

	status_t WriteLock(uint32 flags, bigtime_t timeout)
	{
		Locker locker(this);

		if (reader_count == 0 && writer_count == 0) {
			writer_count++;
			owner = find_thread(NULL);
			return B_OK;
		}

		return _Wait(true, flags, timeout);
	}

	// Release the caller's lock (write lock if it is the owner, one read
	// lock otherwise) and wake eligible waiters.
	status_t Unlock()
	{
		Locker locker(this);

		if (find_thread(NULL) == owner) {
			writer_count--;
			owner = -1;
		} else
			reader_count--;

		_Unblock();

		return B_OK;
	}

private:
	// Block until the lock is granted or the timeout expires. Called with
	// the structure lock held; drops it while blocked in the kernel.
	status_t _Wait(bool writer, uint32 flags, bigtime_t timeout)
	{
		// A zero timeout is the trylock case: don't queue at all.
		if (timeout == 0)
			return B_TIMED_OUT;

		// Caller already owns the write lock and is the only writer:
		// blocking would deadlock on ourselves.
		if (writer_count == 1 && owner == find_thread(NULL))
			return EDEADLK;

		Waiter waiter(writer);
		waiters.Add(&waiter);
		waiter.queued = true;
		// Announce that we are about to block; _Unblock() only issues a
		// kernel unblock while wait_status > 0 (presumably the kernel
		// resets it when the thread is woken -- protocol defined by
		// user_thread/_kern_block_thread).
		waiter.userThread->wait_status = 1;

		if (writer)
			writer_count++;

		status_t status;
		do {
			StructureUnlock();
			status = _kern_block_thread(flags, timeout);
			StructureLock();

			// Dequeued by _Unblock(): the lock was granted (or the
			// result was decided) on our behalf.
			if (!waiter.queued)
				return waiter.status;
		} while (status == B_INTERRUPTED);

		// we're still queued, which means an error (timeout, interrupt)
		// occurred
		waiters.Remove(&waiter);

		if (writer)
			writer_count--;

		// Our departure may allow the next waiter to run.
		_Unblock();

		return status;
	}

	// Hand the lock to the next waiter(s), FIFO order: either the single
	// writer at the head of the queue, or a run of readers. Called with
	// the structure lock held.
	void _Unblock()
	{
		// Check whether there any waiting threads at all and whether anyone
		// has the write lock
		Waiter* waiter = waiters.Head();
		if (waiter == NULL || owner >= 0)
			return;

		// writer at head of queue?
		if (waiter->writer) {
			if (reader_count == 0) {
				waiter->status = B_OK;
				waiter->queued = false;
				waiters.Remove(waiter);
				owner = waiter->thread;

				if (waiter->userThread->wait_status > 0)
					_kern_unblock_thread(waiter->thread, B_OK);
			}
			return;
		}

		// wake up one or more readers -- we unblock more than one reader at
		// a time to save trips to the kernel
		while (!waiters.IsEmpty() && !waiters.Head()->writer) {
			static const int kMaxReaderUnblockCount = 128;
			thread_id readers[kMaxReaderUnblockCount];
			int readerCount = 0;

			while (readerCount < kMaxReaderUnblockCount
					&& (waiter = waiters.Head()) != NULL
					&& !waiter->writer) {
				waiter->status = B_OK;
				waiter->queued = false;
				waiters.Remove(waiter);

				// NOTE(review): reader_count is only bumped when
				// wait_status > 0, yet a dequeued waiter with
				// wait_status <= 0 still returns waiter.status == B_OK
				// from _Wait() -- verify this cannot leave a granted
				// read lock uncounted.
				if (waiter->userThread->wait_status > 0) {
					readers[readerCount++] = waiter->thread;
					reader_count++;
				}
			}

			if (readerCount > 0)
				_kern_unblock_threads(readers, readerCount, B_OK);
		}
	}


	// Adapter so AutoLocker drives the structure mutex.
	struct Locking {
		inline bool Lock(LocalRWLock* lockable)
		{
			return lockable->StructureLock();
		}

		inline void Unlock(LocalRWLock* lockable)
		{
			lockable->StructureUnlock();
		}
	};
	typedef AutoLocker<LocalRWLock, Locking> Locker;
};


// Compile-time guarantee that either implementation fits into the storage
// POSIX hands us for a pthread_rwlock_t.
static void inline
assert_dummy()
{
	STATIC_ASSERT(sizeof(pthread_rwlock_t) >= sizeof(SharedRWLock));
	STATIC_ASSERT(sizeof(pthread_rwlock_t) >= sizeof(LocalRWLock));
}


// #pragma mark - public lock functions


int
pthread_rwlock_init(pthread_rwlock_t* lock, const pthread_rwlockattr_t* _attr)
{
	// pthread_rwlockattr_t is a pointer to the real attribute struct.
	pthread_rwlockattr* attr = _attr != NULL ? *_attr : NULL;
	bool shared = attr != NULL && (attr->flags & RWLOCK_FLAG_SHARED) != 0;

	if (shared)
		return ((SharedRWLock*)lock)->Init();
	else
		return ((LocalRWLock*)lock)->Init();
}


int
pthread_rwlock_destroy(pthread_rwlock_t* lock)
{
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		return ((SharedRWLock*)lock)->Destroy();
	else
		return ((LocalRWLock*)lock)->Destroy();
}


int
pthread_rwlock_rdlock(pthread_rwlock_t* lock)
{
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		return ((SharedRWLock*)lock)->ReadLock(0, B_INFINITE_TIMEOUT);
	else
		return ((LocalRWLock*)lock)->ReadLock(0, B_INFINITE_TIMEOUT);
}


int
pthread_rwlock_tryrdlock(pthread_rwlock_t* lock)
{
	// A zero absolute timeout makes the lock attempt non-blocking; map the
	// resulting B_TIMED_OUT to POSIX's EBUSY.
	status_t error;
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		error = ((SharedRWLock*)lock)->ReadLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);
	else
		error = ((LocalRWLock*)lock)->ReadLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);

	return error == B_TIMED_OUT ? EBUSY : error;
}


int
pthread_rwlock_clockrdlock(pthread_rwlock_t* lock, clockid_t clock_id,
	const struct timespec *abstime)
{
	// Convert the absolute timespec to microseconds; the clock selects the
	// matching kernel timeout flag.
	bigtime_t timeout = abstime->tv_sec * 1000000LL
		+ abstime->tv_nsec / 1000LL;
	uint32 flags = 0;
	if (timeout >= 0) {
		switch (clock_id) {
			case CLOCK_REALTIME:
				flags = B_ABSOLUTE_REAL_TIME_TIMEOUT;
				break;
			case CLOCK_MONOTONIC:
				flags = B_ABSOLUTE_TIMEOUT;
				break;
			default:
				return EINVAL;
		}
	}

	status_t error;
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		error = ((SharedRWLock*)lock)->ReadLock(flags, timeout);
	else
		error = ((LocalRWLock*)lock)->ReadLock(flags, timeout);

	return error == B_TIMED_OUT ? EBUSY : error;
}


int
pthread_rwlock_timedrdlock(pthread_rwlock_t* lock,
	const struct timespec *abstime)
{
	return pthread_rwlock_clockrdlock(lock, CLOCK_REALTIME, abstime);
}


int
pthread_rwlock_wrlock(pthread_rwlock_t* lock)
{
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		return ((SharedRWLock*)lock)->WriteLock(0, B_INFINITE_TIMEOUT);
	else
		return ((LocalRWLock*)lock)->WriteLock(0, B_INFINITE_TIMEOUT);
}


int
pthread_rwlock_trywrlock(pthread_rwlock_t* lock)
{
	// Non-blocking attempt, same B_TIMED_OUT -> EBUSY mapping as
	// tryrdlock() above.
	status_t error;
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		error = ((SharedRWLock*)lock)->WriteLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);
	else
		error = ((LocalRWLock*)lock)->WriteLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);

	return error == B_TIMED_OUT ? EBUSY : error;
}


int
pthread_rwlock_clockwrlock (pthread_rwlock_t* lock, clockid_t clock_id,
	const struct timespec *abstime)
{
	bigtime_t timeout = abstime->tv_sec * 1000000LL
		+ abstime->tv_nsec / 1000LL;
	uint32 flags = 0;
	if (timeout >= 0) {
		switch (clock_id) {
			case CLOCK_REALTIME:
				flags = B_ABSOLUTE_REAL_TIME_TIMEOUT;
				break;
			case CLOCK_MONOTONIC:
				flags = B_ABSOLUTE_TIMEOUT;
				break;
			default:
				return EINVAL;
		}
	}

	status_t error;
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		error = ((SharedRWLock*)lock)->WriteLock(flags, timeout);
	else
		error = ((LocalRWLock*)lock)->WriteLock(flags, timeout);

	return error == B_TIMED_OUT ? EBUSY : error;
}


int
pthread_rwlock_timedwrlock(pthread_rwlock_t* lock,
	const struct timespec *abstime)
{
	return pthread_rwlock_clockwrlock(lock, CLOCK_REALTIME, abstime);
}


int
pthread_rwlock_unlock(pthread_rwlock_t* lock)
{
	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
		return ((SharedRWLock*)lock)->Unlock();
	else
		return ((LocalRWLock*)lock)->Unlock();
}


// #pragma mark - public attribute functions


int
pthread_rwlockattr_init(pthread_rwlockattr_t* _attr)
{
	// The attribute object is heap-allocated; *_attr is just the handle.
	pthread_rwlockattr* attr = (pthread_rwlockattr*)malloc(
		sizeof(pthread_rwlockattr));
	if (attr == NULL)
		return B_NO_MEMORY;

	attr->flags = 0;
	*_attr = attr;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t* _attr)
{
	pthread_rwlockattr* attr = *_attr;

	free(attr);
	return 0;
}


int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* _attr, int* shared)
{
	pthread_rwlockattr* attr = *_attr;

	*shared = (attr->flags & RWLOCK_FLAG_SHARED) != 0
		? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
	return 0;
}


int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t* _attr, int shared)
{
	pthread_rwlockattr* attr = *_attr;

	if (shared == PTHREAD_PROCESS_SHARED)
		attr->flags |= RWLOCK_FLAG_SHARED;
	else
		attr->flags &= ~RWLOCK_FLAG_SHARED;

	return 0;
}