/*
 * Copyright 2022, Haiku, Inc. All rights reserved.
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */

#include <pthread.h>

#include <new>

#include <Debug.h>

#include <AutoLocker.h>
#include <syscalls.h>
#include <user_mutex_defs.h>
#include <user_thread.h>
#include <util/DoublyLinkedList.h>

#include "pthread_private.h"

#define MAX_READER_COUNT	1000000

#define RWLOCK_FLAG_SHARED	0x01


struct Waiter : DoublyLinkedListLinkImpl<Waiter> {
    Waiter(bool writer)
        :
        userThread(get_user_thread()),
        thread(find_thread(NULL)),
        writer(writer),
        queued(false)
    {
    }

    user_thread*    userThread;
    thread_id       thread;
    status_t        status;
    bool            writer;
    bool            queued;
};

typedef DoublyLinkedList<Waiter> WaiterList;
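

// Process-shared variant: backed by a kernel semaphore created with
// MAX_READER_COUNT units. A reader acquires a single unit; a writer acquires
// all MAX_READER_COUNT units at once and thereby excludes readers and other
// writers alike.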
struct SharedRWLock {
    uint32_t    flags;
    int32_t     owner;
    int32_t     sem;

    status_t Init()
    {
        flags = RWLOCK_FLAG_SHARED;
        owner = -1;
        sem = create_sem(MAX_READER_COUNT, "pthread rwlock");

        return sem >= 0 ? B_OK : EAGAIN;
    }

    status_t Destroy()
    {
        if (sem < 0)
            return B_BAD_VALUE;
        return delete_sem(sem) == B_OK ? B_OK : B_BAD_VALUE;
    }

    status_t ReadLock(uint32 flags, bigtime_t timeout)
    {
        return acquire_sem_etc(sem, 1, flags, timeout);
    }

    status_t WriteLock(uint32 flags, bigtime_t timeout)
    {
        status_t error = acquire_sem_etc(sem, MAX_READER_COUNT,
            flags, timeout);
        if (error == B_OK)
            owner = find_thread(NULL);
        return error;
    }

    status_t Unlock()
    {
        if (find_thread(NULL) == owner) {
            owner = -1;
            return release_sem_etc(sem, MAX_READER_COUNT, 0);
        } else
            return release_sem(sem);
    }
};
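

// Process-private variant: the structure is protected by a lightweight user
// mutex (StructureLock()/StructureUnlock()); threads that cannot take the
// lock right away enqueue a Waiter and block in the kernel until Unlock()
// hands the lock over via _Unblock().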
struct LocalRWLock {
    uint32_t    flags;
    int32_t     owner;
    int32_t     mutex;
    int32_t     unused;
    int32_t     reader_count;
    int32_t     writer_count;
        // Note that reader_count and writer_count are not used the same way:
        // writer_count includes the write lock owner as well as waiting
        // writers, while reader_count includes read lock owners only.
    WaiterList  waiters;

    status_t Init()
    {
        flags = 0;
        owner = -1;
        mutex = 0;
        reader_count = 0;
        writer_count = 0;
        new(&waiters) WaiterList;

        return B_OK;
    }

    status_t Destroy()
    {
        Locker locker(this);
        if (reader_count > 0 || waiters.Head() != NULL || writer_count > 0)
            return EBUSY;
        return B_OK;
    }

    bool StructureLock()
    {
        // Enter critical region: lock the mutex
        int32 status = atomic_or((int32*)&mutex, B_USER_MUTEX_LOCKED);

        // If already locked, call the kernel
        if ((status & (B_USER_MUTEX_LOCKED | B_USER_MUTEX_WAITING)) != 0) {
            do {
                status = _kern_mutex_lock((int32*)&mutex, NULL, 0, 0);
            } while (status == B_INTERRUPTED);

            if (status != B_OK)
                return false;
        }
        return true;
    }

    void StructureUnlock()
    {
        // Exit critical region: unlock the mutex
        int32 status = atomic_and((int32*)&mutex,
            ~(int32)B_USER_MUTEX_LOCKED);

        if ((status & B_USER_MUTEX_WAITING) != 0)
            _kern_mutex_unlock((int32*)&mutex, 0);
    }

    status_t ReadLock(uint32 flags, bigtime_t timeout)
    {
        Locker locker(this);

        if (writer_count == 0) {
            reader_count++;
            return B_OK;
        }

        return _Wait(false, flags, timeout);
    }

    status_t WriteLock(uint32 flags, bigtime_t timeout)
    {
        Locker locker(this);

        if (reader_count == 0 && writer_count == 0) {
            writer_count++;
            owner = find_thread(NULL);
            return B_OK;
        }

        return _Wait(true, flags, timeout);
    }

    status_t Unlock()
    {
        Locker locker(this);

        if (find_thread(NULL) == owner) {
            writer_count--;
            owner = -1;
        } else
            reader_count--;

        _Unblock();

        return B_OK;
    }

private:
    status_t _Wait(bool writer, uint32 flags, bigtime_t timeout)
    {
        if (timeout == 0)
            return B_TIMED_OUT;

        if (writer_count == 1 && owner == find_thread(NULL))
            return EDEADLK;

        Waiter waiter(writer);
        waiters.Add(&waiter);
        waiter.queued = true;
        waiter.userThread->wait_status = 1;

        if (writer)
            writer_count++;

        StructureUnlock();
        status_t error = _kern_block_thread(flags, timeout);
        StructureLock();

        if (!waiter.queued)
            return waiter.status;

        // we're still queued, which means an error (timeout, interrupt)
        // occurred
        waiters.Remove(&waiter);

        if (writer)
            writer_count--;

        _Unblock();

        return error;
    }

    void _Unblock()
    {
        // Check whether there are any waiting threads at all and whether
        // anyone has the write lock
        Waiter* waiter = waiters.Head();
        if (waiter == NULL || owner >= 0)
            return;

        // writer at head of queue?
        if (waiter->writer) {
            if (reader_count == 0) {
                waiter->status = B_OK;
                waiter->queued = false;
                waiters.Remove(waiter);
                owner = waiter->thread;

                if (waiter->userThread->wait_status > 0)
                    _kern_unblock_thread(waiter->thread, B_OK);
            }
            return;
        }

        // wake up one or more readers -- we unblock more than one reader at
        // a time to save trips to the kernel
        while (!waiters.IsEmpty() && !waiters.Head()->writer) {
            static const int kMaxReaderUnblockCount = 128;
            thread_id readers[kMaxReaderUnblockCount];
            int readerCount = 0;

            while (readerCount < kMaxReaderUnblockCount
                && (waiter = waiters.Head()) != NULL
                && !waiter->writer) {
                waiter->status = B_OK;
                waiter->queued = false;
                waiters.Remove(waiter);

                if (waiter->userThread->wait_status > 0) {
                    readers[readerCount++] = waiter->thread;
                    reader_count++;
                }
            }

            if (readerCount > 0)
                _kern_unblock_threads(readers, readerCount, B_OK);
        }
    }


    struct Locking {
        inline bool Lock(LocalRWLock* lockable)
        {
            return lockable->StructureLock();
        }

        inline void Unlock(LocalRWLock* lockable)
        {
            lockable->StructureUnlock();
        }
    };
    typedef AutoLocker<LocalRWLock, Locking> Locker;
};


static void inline
assert_dummy()
{
    STATIC_ASSERT(sizeof(pthread_rwlock_t) >= sizeof(SharedRWLock));
    STATIC_ASSERT(sizeof(pthread_rwlock_t) >= sizeof(LocalRWLock));
}
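

// Both helper structures are overlaid directly on the caller's
// pthread_rwlock_t (the STATIC_ASSERTs above verify that they fit); the
// public functions below simply check RWLOCK_FLAG_SHARED in the lock's flags
// field to dispatch to the matching implementation.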
// #pragma mark - public lock functions


int
pthread_rwlock_init(pthread_rwlock_t* lock, const pthread_rwlockattr_t* _attr)
{
    pthread_rwlockattr* attr = _attr != NULL ? *_attr : NULL;
    bool shared = attr != NULL && (attr->flags & RWLOCK_FLAG_SHARED) != 0;

    if (shared)
        return ((SharedRWLock*)lock)->Init();
    else
        return ((LocalRWLock*)lock)->Init();
}


int
pthread_rwlock_destroy(pthread_rwlock_t* lock)
{
    if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
        return ((SharedRWLock*)lock)->Destroy();
    else
        return ((LocalRWLock*)lock)->Destroy();
}


int
pthread_rwlock_rdlock(pthread_rwlock_t* lock)
{
    if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
        return ((SharedRWLock*)lock)->ReadLock(0, B_INFINITE_TIMEOUT);
    else
        return ((LocalRWLock*)lock)->ReadLock(0, B_INFINITE_TIMEOUT);
}


int
pthread_rwlock_tryrdlock(pthread_rwlock_t* lock)
{
    status_t error;
    if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
        error = ((SharedRWLock*)lock)->ReadLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);
    else
        error = ((LocalRWLock*)lock)->ReadLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);

    return error == B_TIMED_OUT ? EBUSY : error;
}


int
pthread_rwlock_clockrdlock(pthread_rwlock_t* lock, clockid_t clock_id,
    const struct timespec *abstime)
{
    bigtime_t timeout = abstime->tv_sec * 1000000LL
        + abstime->tv_nsec / 1000LL;
    uint32 flags = 0;
    if (timeout >= 0) {
        switch (clock_id) {
            case CLOCK_REALTIME:
                flags = B_ABSOLUTE_REAL_TIME_TIMEOUT;
                break;
            case CLOCK_MONOTONIC:
                flags = B_ABSOLUTE_TIMEOUT;
                break;
            default:
                return EINVAL;
        }
    }

    status_t error;
    if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
        error = ((SharedRWLock*)lock)->ReadLock(flags, timeout);
    else
        error = ((LocalRWLock*)lock)->ReadLock(flags, timeout);

    return error == B_TIMED_OUT ? EBUSY : error;
}


int
pthread_rwlock_timedrdlock(pthread_rwlock_t* lock,
    const struct timespec *abstime)
{
    return pthread_rwlock_clockrdlock(lock, CLOCK_REALTIME, abstime);
}


int
pthread_rwlock_wrlock(pthread_rwlock_t* lock)
{
    if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
        return ((SharedRWLock*)lock)->WriteLock(0, B_INFINITE_TIMEOUT);
    else
        return ((LocalRWLock*)lock)->WriteLock(0, B_INFINITE_TIMEOUT);
}


int
pthread_rwlock_trywrlock(pthread_rwlock_t* lock)
{
    status_t error;
    if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
        error = ((SharedRWLock*)lock)->WriteLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);
    else
        error = ((LocalRWLock*)lock)->WriteLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);

    return error == B_TIMED_OUT ? EBUSY : error;
}


int
pthread_rwlock_clockwrlock(pthread_rwlock_t* lock, clockid_t clock_id,
    const struct timespec *abstime)
{
    bigtime_t timeout = abstime->tv_sec * 1000000LL
        + abstime->tv_nsec / 1000LL;
    uint32 flags = 0;
    if (timeout >= 0) {
        switch (clock_id) {
            case CLOCK_REALTIME:
                flags = B_ABSOLUTE_REAL_TIME_TIMEOUT;
                break;
            case CLOCK_MONOTONIC:
                flags = B_ABSOLUTE_TIMEOUT;
                break;
            default:
                return EINVAL;
        }
    }

    status_t error;
    if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
        error = ((SharedRWLock*)lock)->WriteLock(flags, timeout);
    else
        error = ((LocalRWLock*)lock)->WriteLock(flags, timeout);

    return error == B_TIMED_OUT ? EBUSY : error;
}


int
pthread_rwlock_timedwrlock(pthread_rwlock_t* lock,
    const struct timespec *abstime)
{
    return pthread_rwlock_clockwrlock(lock, CLOCK_REALTIME, abstime);
}


int
pthread_rwlock_unlock(pthread_rwlock_t* lock)
{
    if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
        return ((SharedRWLock*)lock)->Unlock();
    else
        return ((LocalRWLock*)lock)->Unlock();
}


// #pragma mark - public attribute functions


int
pthread_rwlockattr_init(pthread_rwlockattr_t* _attr)
{
    pthread_rwlockattr* attr = (pthread_rwlockattr*)malloc(
        sizeof(pthread_rwlockattr));
    if (attr == NULL)
        return B_NO_MEMORY;

    attr->flags = 0;
    *_attr = attr;

    return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t* _attr)
{
    pthread_rwlockattr* attr = *_attr;

    free(attr);
    return 0;
}


int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* _attr, int* shared)
{
    pthread_rwlockattr* attr = *_attr;

    *shared = (attr->flags & RWLOCK_FLAG_SHARED) != 0
        ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
    return 0;
}


int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t* _attr, int shared)
{
    pthread_rwlockattr* attr = *_attr;

    if (shared == PTHREAD_PROCESS_SHARED)
        attr->flags |= RWLOCK_FLAG_SHARED;
    else
        attr->flags &= ~RWLOCK_FLAG_SHARED;

    return 0;
}
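

// Illustrative usage of the interface implemented above (a minimal sketch of
// the standard POSIX calls):
//
//    pthread_rwlock_t lock;
//    pthread_rwlock_init(&lock, NULL);
//
//    pthread_rwlock_rdlock(&lock);    // shared (read) access
//    pthread_rwlock_unlock(&lock);
//
//    pthread_rwlock_wrlock(&lock);    // exclusive (write) access
//    pthread_rwlock_unlock(&lock);
//
//    pthread_rwlock_destroy(&lock);
//
//    // To get the process-shared (SharedRWLock) variant, e.g. for a lock
//    // placed in shared memory, request it via the attribute functions:
//    pthread_rwlockattr_t attr;
//    pthread_rwlockattr_init(&attr);
//    pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
//    pthread_rwlock_init(&lock, &attr);
//    pthread_rwlockattr_destroy(&attr);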