1 /* 2 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de. 3 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de. All rights reserved. 4 * Distributed under the terms of the MIT License. 5 * 6 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved. 7 * Distributed under the terms of the NewOS License. 8 */ 9 10 11 #include <debug.h> 12 13 #if KDEBUG 14 #define KDEBUG_STATIC static 15 static status_t _mutex_lock(struct mutex* lock, void* locker); 16 static void _mutex_unlock(struct mutex* lock); 17 #else 18 #define KDEBUG_STATIC 19 #define mutex_lock mutex_lock_inline 20 #define mutex_unlock mutex_unlock_inline 21 #define mutex_trylock mutex_trylock_inline 22 #define mutex_lock_with_timeout mutex_lock_with_timeout_inline 23 #endif 24 25 #include <lock.h> 26 27 #include <stdlib.h> 28 #include <string.h> 29 30 #include <int.h> 31 #include <kernel.h> 32 #include <listeners.h> 33 #include <scheduling_analysis.h> 34 #include <thread.h> 35 #include <util/AutoLock.h> 36 37 38 struct mutex_waiter { 39 Thread* thread; 40 mutex_waiter* next; // next in queue 41 mutex_waiter* last; // last in queue (valid for the first in queue) 42 }; 43 44 struct rw_lock_waiter { 45 Thread* thread; 46 rw_lock_waiter* next; // next in queue 47 rw_lock_waiter* last; // last in queue (valid for the first in queue) 48 bool writer; 49 }; 50 51 #define MUTEX_FLAG_RELEASED 0x2 52 53 54 int32 55 recursive_lock_get_recursion(recursive_lock *lock) 56 { 57 if (RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id()) 58 return lock->recursion; 59 60 return -1; 61 } 62 63 64 void 65 recursive_lock_init(recursive_lock *lock, const char *name) 66 { 67 recursive_lock_init_etc(lock, name, 0); 68 } 69 70 71 void 72 recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags) 73 { 74 mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags); 75 #if !KDEBUG 76 lock->holder = -1; 77 #endif 78 lock->recursion = 0; 79 } 80 81 82 void 83 recursive_lock_destroy(recursive_lock *lock) 84 { 85 if (lock == NULL) 86 return; 87 88 mutex_destroy(&lock->lock); 89 } 90 91 92 status_t 93 recursive_lock_lock(recursive_lock *lock) 94 { 95 #if KDEBUG 96 if (!gKernelStartup && !are_interrupts_enabled()) { 97 panic("recursive_lock_lock: called with interrupts disabled for lock " 98 "%p (\"%s\")\n", lock, lock->lock.name); 99 } 100 #endif 101 102 thread_id thread = thread_get_current_thread_id(); 103 104 if (thread != RECURSIVE_LOCK_HOLDER(lock)) { 105 mutex_lock(&lock->lock); 106 #if !KDEBUG 107 lock->holder = thread; 108 #endif 109 } 110 111 lock->recursion++; 112 return B_OK; 113 } 114 115 116 status_t 117 recursive_lock_trylock(recursive_lock *lock) 118 { 119 thread_id thread = thread_get_current_thread_id(); 120 121 #if KDEBUG 122 if (!gKernelStartup && !are_interrupts_enabled()) { 123 panic("recursive_lock_lock: called with interrupts disabled for lock " 124 "%p (\"%s\")\n", lock, lock->lock.name); 125 } 126 #endif 127 128 if (thread != RECURSIVE_LOCK_HOLDER(lock)) { 129 status_t status = mutex_trylock(&lock->lock); 130 if (status != B_OK) 131 return status; 132 133 #if !KDEBUG 134 lock->holder = thread; 135 #endif 136 } 137 138 lock->recursion++; 139 return B_OK; 140 } 141 142 143 void 144 recursive_lock_unlock(recursive_lock *lock) 145 { 146 if (thread_get_current_thread_id() != RECURSIVE_LOCK_HOLDER(lock)) 147 panic("recursive_lock %p unlocked by non-holder thread!\n", lock); 148 149 if (--lock->recursion == 0) { 150 #if !KDEBUG 151 lock->holder = -1; 152 #endif 153 mutex_unlock(&lock->lock); 154 } 155 } 156 157 158 status_t 159 recursive_lock_switch_lock(recursive_lock* from, recursive_lock* to) 160 { 161 #if KDEBUG 162 if (!gKernelStartup && !are_interrupts_enabled()) { 163 panic("recursive_lock_switch_lock(): called with interrupts " 164 "disabled for locks %p, %p", from, to); 165 } 166 #endif 167 168 if (--from->recursion > 0) 169 return recursive_lock_lock(to); 170 171 #if !KDEBUG 172 from->holder = -1; 173 #endif 174 175 thread_id thread = thread_get_current_thread_id(); 176 177 if (thread == RECURSIVE_LOCK_HOLDER(to)) { 178 to->recursion++; 179 mutex_unlock(&from->lock); 180 return B_OK; 181 } 182 183 status_t status = mutex_switch_lock(&from->lock, &to->lock); 184 if (status != B_OK) { 185 from->recursion++; 186 #if !KDEBUG 187 from->holder = thread; 188 #endif 189 return status; 190 } 191 192 #if !KDEBUG 193 to->holder = thread; 194 #endif 195 to->recursion++; 196 return B_OK; 197 } 198 199 200 status_t 201 recursive_lock_switch_from_mutex(mutex* from, recursive_lock* to) 202 { 203 #if KDEBUG 204 if (!gKernelStartup && !are_interrupts_enabled()) { 205 panic("recursive_lock_switch_from_mutex(): called with interrupts " 206 "disabled for locks %p, %p", from, to); 207 } 208 #endif 209 210 thread_id thread = thread_get_current_thread_id(); 211 212 if (thread == RECURSIVE_LOCK_HOLDER(to)) { 213 to->recursion++; 214 mutex_unlock(from); 215 return B_OK; 216 } 217 218 status_t status = mutex_switch_lock(from, &to->lock); 219 if (status != B_OK) 220 return status; 221 222 #if !KDEBUG 223 to->holder = thread; 224 #endif 225 to->recursion++; 226 return B_OK; 227 } 228 229 230 status_t 231 recursive_lock_switch_from_read_lock(rw_lock* from, recursive_lock* to) 232 { 233 #if KDEBUG 234 if (!gKernelStartup && !are_interrupts_enabled()) { 235 panic("recursive_lock_switch_from_read_lock(): called with interrupts " 236 "disabled for locks %p, %p", from, to); 237 } 238 #endif 239 240 thread_id thread = thread_get_current_thread_id(); 241 242 if (thread != RECURSIVE_LOCK_HOLDER(to)) { 243 status_t status = mutex_switch_from_read_lock(from, &to->lock); 244 if (status != B_OK) 245 return status; 246 247 #if !KDEBUG 248 to->holder = thread; 249 #endif 250 } else { 251 rw_lock_read_unlock(from); 252 } 253 254 to->recursion++; 255 return B_OK; 256 } 257 258 259 static int 260 dump_recursive_lock_info(int argc, char** argv) 261 { 262 if (argc < 2) { 263 print_debugger_command_usage(argv[0]); 264 return 0; 265 } 266 267 recursive_lock* lock = (recursive_lock*)parse_expression(argv[1]); 268 269 if (!IS_KERNEL_ADDRESS(lock)) { 270 kprintf("invalid address: %p\n", lock); 271 return 0; 272 } 273 274 kprintf("recrusive_lock %p:\n", lock); 275 kprintf(" mutex: %p\n", &lock->lock); 276 kprintf(" name: %s\n", lock->lock.name); 277 kprintf(" flags: 0x%x\n", lock->lock.flags); 278 #if KDEBUG 279 kprintf(" holder: %" B_PRId32 "\n", lock->lock.holder); 280 #else 281 kprintf(" holder: %" B_PRId32 "\n", lock->holder); 282 #endif 283 kprintf(" recursion: %d\n", lock->recursion); 284 285 kprintf(" waiting threads:"); 286 mutex_waiter* waiter = lock->lock.waiters; 287 while (waiter != NULL) { 288 kprintf(" %" B_PRId32, waiter->thread->id); 289 waiter = waiter->next; 290 } 291 kputs("\n"); 292 293 return 0; 294 } 295 296 297 // #pragma mark - 298 299 300 static status_t 301 rw_lock_wait(rw_lock* lock, bool writer, InterruptsSpinLocker& locker) 302 { 303 // enqueue in waiter list 304 rw_lock_waiter waiter; 305 waiter.thread = thread_get_current_thread(); 306 waiter.next = NULL; 307 waiter.writer = writer; 308 309 if (lock->waiters != NULL) 310 lock->waiters->last->next = &waiter; 311 else 312 lock->waiters = &waiter; 313 314 lock->waiters->last = &waiter; 315 316 // block 317 thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock); 318 locker.Unlock(); 319 320 status_t result = thread_block(); 321 322 locker.Lock(); 323 return result; 324 } 325 326 327 static int32 328 rw_lock_unblock(rw_lock* lock) 329 { 330 // Check whether there are any waiting threads at all and whether anyone 331 // has the write lock. 332 rw_lock_waiter* waiter = lock->waiters; 333 if (waiter == NULL || lock->holder >= 0) 334 return 0; 335 336 // writer at head of queue? 337 if (waiter->writer) { 338 if (lock->active_readers > 0 || lock->pending_readers > 0) 339 return 0; 340 341 // dequeue writer 342 lock->waiters = waiter->next; 343 if (lock->waiters != NULL) 344 lock->waiters->last = waiter->last; 345 346 lock->holder = waiter->thread->id; 347 348 // unblock thread 349 thread_unblock(waiter->thread, B_OK); 350 351 waiter->thread = NULL; 352 return RW_LOCK_WRITER_COUNT_BASE; 353 } 354 355 // wake up one or more readers 356 uint32 readerCount = 0; 357 do { 358 // dequeue reader 359 lock->waiters = waiter->next; 360 if (lock->waiters != NULL) 361 lock->waiters->last = waiter->last; 362 363 readerCount++; 364 365 // unblock thread 366 thread_unblock(waiter->thread, B_OK); 367 368 waiter->thread = NULL; 369 } while ((waiter = lock->waiters) != NULL && !waiter->writer); 370 371 if (lock->count >= RW_LOCK_WRITER_COUNT_BASE) 372 lock->active_readers += readerCount; 373 374 return readerCount; 375 } 376 377 378 void 379 rw_lock_init(rw_lock* lock, const char* name) 380 { 381 lock->name = name; 382 lock->waiters = NULL; 383 B_INITIALIZE_SPINLOCK(&lock->lock); 384 lock->holder = -1; 385 lock->count = 0; 386 lock->owner_count = 0; 387 lock->active_readers = 0; 388 lock->pending_readers = 0; 389 lock->flags = 0; 390 391 T_SCHEDULING_ANALYSIS(InitRWLock(lock, name)); 392 NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock); 393 } 394 395 396 void 397 rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags) 398 { 399 lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name; 400 lock->waiters = NULL; 401 B_INITIALIZE_SPINLOCK(&lock->lock); 402 lock->holder = -1; 403 lock->count = 0; 404 lock->owner_count = 0; 405 lock->active_readers = 0; 406 lock->pending_readers = 0; 407 lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME; 408 409 T_SCHEDULING_ANALYSIS(InitRWLock(lock, name)); 410 NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock); 411 } 412 413 414 void 415 rw_lock_destroy(rw_lock* lock) 416 { 417 char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0 418 ? (char*)lock->name : NULL; 419 420 // unblock all waiters 421 InterruptsSpinLocker locker(lock->lock); 422 423 #if KDEBUG 424 if ((atomic_get(&lock->count) != 0 || lock->waiters != NULL) 425 && thread_get_current_thread_id() != lock->holder) { 426 panic("rw_lock_destroy(): lock is in use and the caller " 427 "doesn't hold the write lock (%p)", lock); 428 429 locker.Unlock(); 430 if (rw_lock_write_lock(lock) != B_OK) 431 return; 432 locker.Lock(); 433 } 434 #endif 435 436 while (rw_lock_waiter* waiter = lock->waiters) { 437 // dequeue 438 lock->waiters = waiter->next; 439 440 // unblock thread 441 thread_unblock(waiter->thread, B_ERROR); 442 } 443 444 lock->name = NULL; 445 446 locker.Unlock(); 447 448 free(name); 449 } 450 451 452 #if KDEBUG_RW_LOCK_DEBUG 453 454 bool 455 _rw_lock_is_read_locked(rw_lock* lock) 456 { 457 if (lock->holder == thread_get_current_thread_id()) 458 return true; 459 460 Thread* thread = thread_get_current_thread(); 461 for (size_t i = 0; i < B_COUNT_OF(Thread::held_read_locks); i++) { 462 if (thread->held_read_locks[i] == lock) 463 return true; 464 } 465 return false; 466 } 467 468 469 static void 470 _rw_lock_set_read_locked(rw_lock* lock) 471 { 472 Thread* thread = thread_get_current_thread(); 473 for (size_t i = 0; i < B_COUNT_OF(Thread::held_read_locks); i++) { 474 if (thread->held_read_locks[i] != NULL) 475 continue; 476 477 thread->held_read_locks[i] = lock; 478 return; 479 } 480 481 panic("too many read locks!"); 482 } 483 484 485 static void 486 _rw_lock_unset_read_locked(rw_lock* lock) 487 { 488 Thread* thread = thread_get_current_thread(); 489 for (size_t i = 0; i < B_COUNT_OF(Thread::held_read_locks); i++) { 490 if (thread->held_read_locks[i] != lock) 491 continue; 492 493 thread->held_read_locks[i] = NULL; 494 return; 495 } 496 497 panic("_rw_lock_unset_read_locked(): lock %p not read-locked by current thread", lock); 498 } 499 500 #endif 501 502 503 status_t 504 _rw_lock_read_lock(rw_lock* lock) 505 { 506 #if KDEBUG 507 if (!gKernelStartup && !are_interrupts_enabled()) { 508 panic("_rw_lock_read_lock(): called with interrupts disabled for lock %p", 509 lock); 510 } 511 #endif 512 #if KDEBUG_RW_LOCK_DEBUG 513 int32 oldCount = atomic_add(&lock->count, 1); 514 if (oldCount < RW_LOCK_WRITER_COUNT_BASE) { 515 _rw_lock_set_read_locked(lock); 516 return B_OK; 517 } 518 #endif 519 520 InterruptsSpinLocker locker(lock->lock); 521 522 // We might be the writer ourselves. 523 if (lock->holder == thread_get_current_thread_id()) { 524 lock->owner_count++; 525 return B_OK; 526 } 527 528 // The writer that originally had the lock when we called atomic_add() might 529 // already have gone and another writer could have overtaken us. In this 530 // case the original writer set pending_readers, so we know that we don't 531 // have to wait. 532 if (lock->pending_readers > 0) { 533 lock->pending_readers--; 534 535 if (lock->count >= RW_LOCK_WRITER_COUNT_BASE) 536 lock->active_readers++; 537 538 #if KDEBUG_RW_LOCK_DEBUG 539 _rw_lock_set_read_locked(lock); 540 #endif 541 return B_OK; 542 } 543 544 ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE); 545 546 // we need to wait 547 status_t status = rw_lock_wait(lock, false, locker); 548 549 #if KDEBUG_RW_LOCK_DEBUG 550 if (status == B_OK) 551 _rw_lock_set_read_locked(lock); 552 #endif 553 554 return status; 555 } 556 557 558 status_t 559 _rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags, 560 bigtime_t timeout) 561 { 562 #if KDEBUG 563 if (!gKernelStartup && !are_interrupts_enabled()) { 564 panic("_rw_lock_read_lock_with_timeout(): called with interrupts " 565 "disabled for lock %p", lock); 566 } 567 #endif 568 #if KDEBUG_RW_LOCK_DEBUG 569 int32 oldCount = atomic_add(&lock->count, 1); 570 if (oldCount < RW_LOCK_WRITER_COUNT_BASE) { 571 ASSERT_UNLOCKED_RW_LOCK(lock); 572 _rw_lock_set_read_locked(lock); 573 return B_OK; 574 } 575 #endif 576 577 InterruptsSpinLocker locker(lock->lock); 578 579 // We might be the writer ourselves. 580 if (lock->holder == thread_get_current_thread_id()) { 581 lock->owner_count++; 582 return B_OK; 583 } 584 585 ASSERT_UNLOCKED_RW_LOCK(lock); 586 587 // The writer that originally had the lock when we called atomic_add() might 588 // already have gone and another writer could have overtaken us. In this 589 // case the original writer set pending_readers, so we know that we don't 590 // have to wait. 591 if (lock->pending_readers > 0) { 592 lock->pending_readers--; 593 594 if (lock->count >= RW_LOCK_WRITER_COUNT_BASE) 595 lock->active_readers++; 596 597 #if KDEBUG_RW_LOCK_DEBUG 598 _rw_lock_set_read_locked(lock); 599 #endif 600 return B_OK; 601 } 602 603 ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE); 604 605 // we need to wait 606 607 // enqueue in waiter list 608 rw_lock_waiter waiter; 609 waiter.thread = thread_get_current_thread(); 610 waiter.next = NULL; 611 waiter.writer = false; 612 613 if (lock->waiters != NULL) 614 lock->waiters->last->next = &waiter; 615 else 616 lock->waiters = &waiter; 617 618 lock->waiters->last = &waiter; 619 620 // block 621 thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock); 622 locker.Unlock(); 623 624 status_t error = thread_block_with_timeout(timeoutFlags, timeout); 625 if (error == B_OK || waiter.thread == NULL) { 626 // We were unblocked successfully -- potentially our unblocker overtook 627 // us after we already failed. In either case, we've got the lock, now. 628 #if KDEBUG_RW_LOCK_DEBUG 629 _rw_lock_set_read_locked(lock); 630 #endif 631 return B_OK; 632 } 633 634 locker.Lock(); 635 // We failed to get the lock -- dequeue from waiter list. 636 rw_lock_waiter* previous = NULL; 637 rw_lock_waiter* other = lock->waiters; 638 while (other != &waiter) { 639 previous = other; 640 other = other->next; 641 } 642 643 if (previous == NULL) { 644 // we are the first in line 645 lock->waiters = waiter.next; 646 if (lock->waiters != NULL) 647 lock->waiters->last = waiter.last; 648 } else { 649 // one or more other waiters are before us in the queue 650 previous->next = waiter.next; 651 if (lock->waiters->last == &waiter) 652 lock->waiters->last = previous; 653 } 654 655 // Decrement the count. ATM this is all we have to do. There's at least 656 // one writer ahead of us -- otherwise the last writer would have unblocked 657 // us (writers only manipulate the lock data with thread spinlock being 658 // held) -- so our leaving doesn't make a difference to the ones behind us 659 // in the queue. 660 atomic_add(&lock->count, -1); 661 662 return error; 663 } 664 665 666 void 667 _rw_lock_read_unlock(rw_lock* lock) 668 { 669 #if KDEBUG_RW_LOCK_DEBUG 670 int32 oldCount = atomic_add(&lock->count, -1); 671 if (oldCount < RW_LOCK_WRITER_COUNT_BASE) { 672 _rw_lock_unset_read_locked(lock); 673 return; 674 } 675 #endif 676 677 InterruptsSpinLocker locker(lock->lock); 678 679 // If we're still holding the write lock or if there are other readers, 680 // no-one can be woken up. 681 if (lock->holder == thread_get_current_thread_id()) { 682 ASSERT((lock->owner_count % RW_LOCK_WRITER_COUNT_BASE) > 0); 683 lock->owner_count--; 684 return; 685 } 686 687 #if KDEBUG_RW_LOCK_DEBUG 688 _rw_lock_unset_read_locked(lock); 689 #endif 690 691 if (--lock->active_readers > 0) 692 return; 693 694 if (lock->active_readers < 0) { 695 panic("rw_lock_read_unlock(): lock %p not read-locked", lock); 696 lock->active_readers = 0; 697 return; 698 } 699 700 rw_lock_unblock(lock); 701 } 702 703 704 status_t 705 rw_lock_write_lock(rw_lock* lock) 706 { 707 #if KDEBUG 708 if (!gKernelStartup && !are_interrupts_enabled()) { 709 panic("_rw_lock_write_lock(): called with interrupts disabled for lock %p", 710 lock); 711 } 712 #endif 713 714 InterruptsSpinLocker locker(lock->lock); 715 716 // If we're already the lock holder, we just need to increment the owner 717 // count. 718 thread_id thread = thread_get_current_thread_id(); 719 if (lock->holder == thread) { 720 lock->owner_count += RW_LOCK_WRITER_COUNT_BASE; 721 return B_OK; 722 } 723 724 ASSERT_UNLOCKED_RW_LOCK(lock); 725 726 // announce our claim 727 int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE); 728 729 if (oldCount == 0) { 730 // No-one else held a read or write lock, so it's ours now. 731 lock->holder = thread; 732 lock->owner_count = RW_LOCK_WRITER_COUNT_BASE; 733 return B_OK; 734 } 735 736 // We have to wait. If we're the first writer, note the current reader 737 // count. 738 if (oldCount < RW_LOCK_WRITER_COUNT_BASE) 739 lock->active_readers = oldCount - lock->pending_readers; 740 741 status_t status = rw_lock_wait(lock, true, locker); 742 if (status == B_OK) { 743 lock->holder = thread; 744 lock->owner_count = RW_LOCK_WRITER_COUNT_BASE; 745 } 746 747 return status; 748 } 749 750 751 void 752 _rw_lock_write_unlock(rw_lock* lock) 753 { 754 InterruptsSpinLocker locker(lock->lock); 755 756 if (thread_get_current_thread_id() != lock->holder) { 757 panic("rw_lock_write_unlock(): lock %p not write-locked by this thread", 758 lock); 759 return; 760 } 761 762 ASSERT(lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE); 763 764 lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE; 765 if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE) 766 return; 767 768 // We gave up our last write lock -- clean up and unblock waiters. 769 int32 readerCount = lock->owner_count; 770 lock->holder = -1; 771 lock->owner_count = 0; 772 773 #if KDEBUG_RW_LOCK_DEBUG 774 if (readerCount != 0) 775 _rw_lock_set_read_locked(lock); 776 #endif 777 778 int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE); 779 oldCount -= RW_LOCK_WRITER_COUNT_BASE; 780 781 if (oldCount != 0) { 782 // If writers are waiting, take over our reader count. 783 if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) { 784 lock->active_readers = readerCount; 785 rw_lock_unblock(lock); 786 } else { 787 // No waiting writer, but there are one or more readers. We will 788 // unblock all waiting readers -- that's the easy part -- and must 789 // also make sure that all readers that haven't entered the critical 790 // section yet, won't start to wait. Otherwise a writer overtaking 791 // such a reader will correctly start to wait, but the reader, 792 // seeing the writer count > 0, would also start to wait. We set 793 // pending_readers to the number of readers that are still expected 794 // to enter the critical section. 795 lock->pending_readers = oldCount - readerCount 796 - rw_lock_unblock(lock); 797 } 798 } 799 } 800 801 802 static int 803 dump_rw_lock_info(int argc, char** argv) 804 { 805 if (argc < 2) { 806 print_debugger_command_usage(argv[0]); 807 return 0; 808 } 809 810 rw_lock* lock = (rw_lock*)parse_expression(argv[1]); 811 812 if (!IS_KERNEL_ADDRESS(lock)) { 813 kprintf("invalid address: %p\n", lock); 814 return 0; 815 } 816 817 kprintf("rw lock %p:\n", lock); 818 kprintf(" name: %s\n", lock->name); 819 kprintf(" holder: %" B_PRId32 "\n", lock->holder); 820 kprintf(" count: %#" B_PRIx32 "\n", lock->count); 821 kprintf(" active readers %d\n", lock->active_readers); 822 kprintf(" pending readers %d\n", lock->pending_readers); 823 kprintf(" owner count: %#" B_PRIx32 "\n", lock->owner_count); 824 kprintf(" flags: %#" B_PRIx32 "\n", lock->flags); 825 826 kprintf(" waiting threads:"); 827 rw_lock_waiter* waiter = lock->waiters; 828 while (waiter != NULL) { 829 kprintf(" %" B_PRId32 "/%c", waiter->thread->id, waiter->writer ? 'w' : 'r'); 830 waiter = waiter->next; 831 } 832 kputs("\n"); 833 834 return 0; 835 } 836 837 838 // #pragma mark - 839 840 841 void 842 mutex_init(mutex* lock, const char *name) 843 { 844 mutex_init_etc(lock, name, 0); 845 } 846 847 848 void 849 mutex_init_etc(mutex* lock, const char *name, uint32 flags) 850 { 851 lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name; 852 lock->waiters = NULL; 853 B_INITIALIZE_SPINLOCK(&lock->lock); 854 #if KDEBUG 855 lock->holder = -1; 856 #else 857 lock->count = 0; 858 #endif 859 lock->flags = flags & MUTEX_FLAG_CLONE_NAME; 860 861 T_SCHEDULING_ANALYSIS(InitMutex(lock, name)); 862 NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock); 863 } 864 865 866 void 867 mutex_destroy(mutex* lock) 868 { 869 char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0 870 ? (char*)lock->name : NULL; 871 872 // unblock all waiters 873 InterruptsSpinLocker locker(lock->lock); 874 875 #if KDEBUG 876 if (lock->holder != -1 && thread_get_current_thread_id() != lock->holder) { 877 panic("mutex_destroy(): the lock (%p) is held by %" B_PRId32 ", not " 878 "by the caller", lock, lock->holder); 879 if (_mutex_lock(lock, &locker) != B_OK) 880 return; 881 locker.Lock(); 882 } 883 #endif 884 885 while (mutex_waiter* waiter = lock->waiters) { 886 // dequeue 887 lock->waiters = waiter->next; 888 889 // unblock thread 890 Thread* thread = waiter->thread; 891 waiter->thread = NULL; 892 thread_unblock(thread, B_ERROR); 893 } 894 895 lock->name = NULL; 896 lock->flags = 0; 897 #if KDEBUG 898 lock->holder = 0; 899 #else 900 lock->count = INT16_MIN; 901 #endif 902 903 locker.Unlock(); 904 905 free(name); 906 } 907 908 909 static inline status_t 910 mutex_lock_threads_locked(mutex* lock, InterruptsSpinLocker* locker) 911 { 912 #if KDEBUG 913 return _mutex_lock(lock, locker); 914 #else 915 if (atomic_add(&lock->count, -1) < 0) 916 return _mutex_lock(lock, locker); 917 return B_OK; 918 #endif 919 } 920 921 922 status_t 923 mutex_switch_lock(mutex* from, mutex* to) 924 { 925 #if KDEBUG 926 if (!gKernelStartup && !are_interrupts_enabled()) { 927 panic("mutex_switch_lock(): called with interrupts disabled " 928 "for locks %p, %p", from, to); 929 } 930 #endif 931 932 InterruptsSpinLocker locker(to->lock); 933 934 mutex_unlock(from); 935 936 return mutex_lock_threads_locked(to, &locker); 937 } 938 939 940 void 941 mutex_transfer_lock(mutex* lock, thread_id thread) 942 { 943 #if KDEBUG 944 if (thread_get_current_thread_id() != lock->holder) 945 panic("mutex_transfer_lock(): current thread is not the lock holder!"); 946 lock->holder = thread; 947 #endif 948 } 949 950 951 status_t 952 mutex_switch_from_read_lock(rw_lock* from, mutex* to) 953 { 954 #if KDEBUG 955 if (!gKernelStartup && !are_interrupts_enabled()) { 956 panic("mutex_switch_from_read_lock(): called with interrupts disabled " 957 "for locks %p, %p", from, to); 958 } 959 #endif 960 961 InterruptsSpinLocker locker(to->lock); 962 963 rw_lock_read_unlock(from); 964 965 return mutex_lock_threads_locked(to, &locker); 966 } 967 968 969 KDEBUG_STATIC status_t 970 _mutex_lock(mutex* lock, void* _locker) 971 { 972 #if KDEBUG 973 if (!gKernelStartup && _locker == NULL && !are_interrupts_enabled()) { 974 panic("_mutex_lock(): called with interrupts disabled for lock %p", 975 lock); 976 } 977 #endif 978 979 // lock only, if !lockLocked 980 InterruptsSpinLocker* locker 981 = reinterpret_cast<InterruptsSpinLocker*>(_locker); 982 983 InterruptsSpinLocker lockLocker; 984 if (locker == NULL) { 985 lockLocker.SetTo(lock->lock, false); 986 locker = &lockLocker; 987 } 988 989 // Might have been released after we decremented the count, but before 990 // we acquired the spinlock. 991 #if KDEBUG 992 if (lock->holder < 0) { 993 lock->holder = thread_get_current_thread_id(); 994 return B_OK; 995 } else if (lock->holder == thread_get_current_thread_id()) { 996 panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock, 997 lock->holder); 998 } else if (lock->holder == 0) { 999 panic("_mutex_lock(): using uninitialized lock %p", lock); 1000 } 1001 #else 1002 if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) { 1003 lock->flags &= ~MUTEX_FLAG_RELEASED; 1004 return B_OK; 1005 } 1006 #endif 1007 1008 // enqueue in waiter list 1009 mutex_waiter waiter; 1010 waiter.thread = thread_get_current_thread(); 1011 waiter.next = NULL; 1012 1013 if (lock->waiters != NULL) { 1014 lock->waiters->last->next = &waiter; 1015 } else 1016 lock->waiters = &waiter; 1017 1018 lock->waiters->last = &waiter; 1019 1020 // block 1021 thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock); 1022 locker->Unlock(); 1023 1024 status_t error = thread_block(); 1025 #if KDEBUG 1026 if (error == B_OK) { 1027 ASSERT(lock->holder == waiter.thread->id); 1028 } else { 1029 // This should only happen when the mutex was destroyed. 1030 ASSERT(waiter.thread == NULL); 1031 } 1032 #endif 1033 return error; 1034 } 1035 1036 1037 KDEBUG_STATIC void 1038 _mutex_unlock(mutex* lock) 1039 { 1040 InterruptsSpinLocker locker(lock->lock); 1041 1042 #if KDEBUG 1043 if (thread_get_current_thread_id() != lock->holder) { 1044 panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to " 1045 "release mutex %p (current holder %" B_PRId32 ")\n", 1046 thread_get_current_thread_id(), lock, lock->holder); 1047 return; 1048 } 1049 #endif 1050 1051 mutex_waiter* waiter = lock->waiters; 1052 if (waiter != NULL) { 1053 // dequeue the first waiter 1054 lock->waiters = waiter->next; 1055 if (lock->waiters != NULL) 1056 lock->waiters->last = waiter->last; 1057 1058 #if KDEBUG 1059 // Already set the holder to the unblocked thread. Besides that this 1060 // actually reflects the current situation, setting it to -1 would 1061 // cause a race condition, since another locker could think the lock 1062 // is not held by anyone. 1063 lock->holder = waiter->thread->id; 1064 #endif 1065 1066 // unblock thread 1067 thread_unblock(waiter->thread, B_OK); 1068 } else { 1069 // There are no waiters, so mark the lock as released. 1070 #if KDEBUG 1071 lock->holder = -1; 1072 #else 1073 lock->flags |= MUTEX_FLAG_RELEASED; 1074 #endif 1075 } 1076 } 1077 1078 1079 KDEBUG_STATIC status_t 1080 _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout) 1081 { 1082 #if KDEBUG 1083 if (!gKernelStartup && !are_interrupts_enabled()) { 1084 panic("_mutex_lock(): called with interrupts disabled for lock %p", 1085 lock); 1086 } 1087 #endif 1088 1089 InterruptsSpinLocker locker(lock->lock); 1090 1091 // Might have been released after we decremented the count, but before 1092 // we acquired the spinlock. 1093 #if KDEBUG 1094 if (lock->holder < 0) { 1095 lock->holder = thread_get_current_thread_id(); 1096 return B_OK; 1097 } else if (lock->holder == thread_get_current_thread_id()) { 1098 panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock, 1099 lock->holder); 1100 } else if (lock->holder == 0) { 1101 panic("_mutex_lock(): using uninitialized lock %p", lock); 1102 } 1103 #else 1104 if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) { 1105 lock->flags &= ~MUTEX_FLAG_RELEASED; 1106 return B_OK; 1107 } 1108 #endif 1109 1110 // enqueue in waiter list 1111 mutex_waiter waiter; 1112 waiter.thread = thread_get_current_thread(); 1113 waiter.next = NULL; 1114 1115 if (lock->waiters != NULL) { 1116 lock->waiters->last->next = &waiter; 1117 } else 1118 lock->waiters = &waiter; 1119 1120 lock->waiters->last = &waiter; 1121 1122 // block 1123 thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock); 1124 locker.Unlock(); 1125 1126 status_t error = thread_block_with_timeout(timeoutFlags, timeout); 1127 1128 if (error == B_OK) { 1129 #if KDEBUG 1130 ASSERT(lock->holder == waiter.thread->id); 1131 #endif 1132 } else { 1133 // If the lock was destroyed, our "thread" entry will be NULL. 1134 if (waiter.thread == NULL) 1135 return B_ERROR; 1136 1137 // TODO: There is still a race condition during mutex destruction, 1138 // if we resume due to a timeout before our thread is set to NULL. 1139 1140 locker.Lock(); 1141 1142 // If the timeout occurred, we must remove our waiter structure from 1143 // the queue. 1144 mutex_waiter* previousWaiter = NULL; 1145 mutex_waiter* otherWaiter = lock->waiters; 1146 while (otherWaiter != NULL && otherWaiter != &waiter) { 1147 previousWaiter = otherWaiter; 1148 otherWaiter = otherWaiter->next; 1149 } 1150 if (otherWaiter == &waiter) { 1151 // the structure is still in the list -- dequeue 1152 if (&waiter == lock->waiters) { 1153 if (waiter.next != NULL) 1154 waiter.next->last = waiter.last; 1155 lock->waiters = waiter.next; 1156 } else { 1157 if (waiter.next == NULL) 1158 lock->waiters->last = previousWaiter; 1159 previousWaiter->next = waiter.next; 1160 } 1161 1162 #if !KDEBUG 1163 // we need to fix the lock count 1164 atomic_add(&lock->count, 1); 1165 #endif 1166 } else { 1167 // the structure is not in the list -- even though the timeout 1168 // occurred, this means we own the lock now 1169 #if KDEBUG 1170 ASSERT(lock->holder == waiter.thread->id); 1171 #endif 1172 return B_OK; 1173 } 1174 } 1175 1176 return error; 1177 } 1178 1179 1180 #undef mutex_trylock 1181 status_t 1182 mutex_trylock(mutex* lock) 1183 { 1184 #if KDEBUG 1185 InterruptsSpinLocker _(lock->lock); 1186 1187 if (lock->holder < 0) { 1188 lock->holder = thread_get_current_thread_id(); 1189 return B_OK; 1190 } else if (lock->holder == 0) { 1191 panic("_mutex_trylock(): using uninitialized lock %p", lock); 1192 } 1193 return B_WOULD_BLOCK; 1194 #else 1195 return mutex_trylock_inline(lock); 1196 #endif 1197 } 1198 1199 1200 #undef mutex_lock 1201 status_t 1202 mutex_lock(mutex* lock) 1203 { 1204 #if KDEBUG 1205 return _mutex_lock(lock, NULL); 1206 #else 1207 return mutex_lock_inline(lock); 1208 #endif 1209 } 1210 1211 1212 #undef mutex_unlock 1213 void 1214 mutex_unlock(mutex* lock) 1215 { 1216 #if KDEBUG 1217 _mutex_unlock(lock); 1218 #else 1219 mutex_unlock_inline(lock); 1220 #endif 1221 } 1222 1223 1224 #undef mutex_lock_with_timeout 1225 status_t 1226 mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout) 1227 { 1228 #if KDEBUG 1229 return _mutex_lock_with_timeout(lock, timeoutFlags, timeout); 1230 #else 1231 return mutex_lock_with_timeout_inline(lock, timeoutFlags, timeout); 1232 #endif 1233 } 1234 1235 1236 static int 1237 dump_mutex_info(int argc, char** argv) 1238 { 1239 if (argc < 2) { 1240 print_debugger_command_usage(argv[0]); 1241 return 0; 1242 } 1243 1244 mutex* lock = (mutex*)parse_expression(argv[1]); 1245 1246 if (!IS_KERNEL_ADDRESS(lock)) { 1247 kprintf("invalid address: %p\n", lock); 1248 return 0; 1249 } 1250 1251 kprintf("mutex %p:\n", lock); 1252 kprintf(" name: %s\n", lock->name); 1253 kprintf(" flags: 0x%x\n", lock->flags); 1254 #if KDEBUG 1255 kprintf(" holder: %" B_PRId32 "\n", lock->holder); 1256 #else 1257 kprintf(" count: %" B_PRId32 "\n", lock->count); 1258 #endif 1259 1260 kprintf(" waiting threads:"); 1261 mutex_waiter* waiter = lock->waiters; 1262 while (waiter != NULL) { 1263 kprintf(" %" B_PRId32, waiter->thread->id); 1264 waiter = waiter->next; 1265 } 1266 kputs("\n"); 1267 1268 return 0; 1269 } 1270 1271 1272 // #pragma mark - 1273 1274 1275 void 1276 lock_debug_init() 1277 { 1278 add_debugger_command_etc("mutex", &dump_mutex_info, 1279 "Dump info about a mutex", 1280 "<mutex>\n" 1281 "Prints info about the specified mutex.\n" 1282 " <mutex> - pointer to the mutex to print the info for.\n", 0); 1283 add_debugger_command_etc("rwlock", &dump_rw_lock_info, 1284 "Dump info about an rw lock", 1285 "<lock>\n" 1286 "Prints info about the specified rw lock.\n" 1287 " <lock> - pointer to the rw lock to print the info for.\n", 0); 1288 add_debugger_command_etc("recursivelock", &dump_recursive_lock_info, 1289 "Dump info about a recursive lock", 1290 "<lock>\n" 1291 "Prints info about the specified recursive lock.\n" 1292 " <lock> - pointer to the recursive lock to print the info for.\n", 1293 0); 1294 } 1295