/*
 * Copyright 2007-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2019, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 */

#include <condition_variable.h>

#include <new>
#include <stdlib.h>
#include <string.h>

#include <debug.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <int.h>
#include <listeners.h>
#include <scheduling_analysis.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <util/atomic.h>


#define STATUS_ADDED	1
#define STATUS_WAITING	2


static const int kConditionVariableHashSize = 512;


struct ConditionVariableHashDefinition {
	typedef const void* KeyType;
	typedef ConditionVariable ValueType;

	size_t HashKey(const void* key) const
		{ return (size_t)key; }
	size_t Hash(ConditionVariable* variable) const
		{ return (size_t)variable->fObject; }
	bool Compare(const void* key, ConditionVariable* variable) const
		{ return key == variable->fObject; }
	ConditionVariable*& GetLink(ConditionVariable* variable) const
		{ return variable->fNext; }
};

typedef BOpenHashTable<ConditionVariableHashDefinition> ConditionVariableHash;
static ConditionVariableHash sConditionVariableHash;
static rw_spinlock sConditionVariableHashLock;


static int
list_condition_variables(int argc, char** argv)
{
	ConditionVariable::ListAll();
	return 0;
}


static int
dump_condition_variable(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	addr_t address = parse_expression(argv[1]);
	if (address == 0)
		return 0;

	ConditionVariable* variable = sConditionVariableHash.Lookup((void*)address);

	if (variable == NULL) {
		// It must be a direct pointer to a condition variable.
		variable = (ConditionVariable*)address;
	}

	if (variable != NULL) {
		variable->Dump();

		set_debug_variable("_cvar", (addr_t)variable);
		set_debug_variable("_object", (addr_t)variable->Object());
	} else
		kprintf("no condition variable at or with key %p\n", (void*)address);

	return 0;
}


// #pragma mark - ConditionVariableEntry


ConditionVariableEntry::ConditionVariableEntry()
	: fVariable(NULL)
{
}


ConditionVariableEntry::~ConditionVariableEntry()
{
	// We can use an "unsafe" non-atomic access of fVariable here, since we only
	// care whether it is non-NULL, not what its specific value is.
	if (fVariable != NULL)
		_RemoveFromVariable();
}


bool
ConditionVariableEntry::Add(const void* object)
{
	ASSERT(object != NULL);

	InterruptsLocker _;
	ReadSpinLocker hashLocker(sConditionVariableHashLock);

	ConditionVariable* variable = sConditionVariableHash.Lookup(object);

	if (variable == NULL) {
		fWaitStatus = B_ENTRY_NOT_FOUND;
		return false;
	}

	SpinLocker variableLocker(variable->fLock);
	hashLocker.Unlock();

	_AddToLockedVariable(variable);

	return true;
}


ConditionVariable*
ConditionVariableEntry::Variable() const
{
	return atomic_pointer_get(&fVariable);
}


inline void
ConditionVariableEntry::_AddToLockedVariable(ConditionVariable* variable)
{
	ASSERT(fVariable == NULL);

	fThread = thread_get_current_thread();
	fVariable = variable;
	fWaitStatus = STATUS_ADDED;
	fVariable->fEntries.Add(this);
	atomic_add(&fVariable->fEntriesCount, 1);
}


void
ConditionVariableEntry::_RemoveFromVariable()
{
	// This section is critical because it can race with _NotifyLocked on the
	// variable's thread, so we must not be interrupted during it.
	InterruptsLocker _;

	ConditionVariable* variable = atomic_pointer_get(&fVariable);
	if (atomic_pointer_get_and_set(&fThread, (Thread*)NULL) == NULL) {
		// If fThread was already NULL, that means the variable is already
		// in the process of clearing us out (or already has finished doing so.)
		// We thus cannot access fVariable, and must spin until it is cleared.
		int32 tries = 0;
		while (atomic_pointer_get(&fVariable) != NULL) {
			tries++;
			if ((tries % 10000) == 0)
				dprintf("variable pointer was not unset for a long time!\n");
			cpu_pause();
		}

		return;
	}

	while (true) {
		if (atomic_pointer_get(&fVariable) == NULL) {
			// The variable must have cleared us out. Acknowledge this and return.
			atomic_add(&variable->fEntriesCount, -1);
			return;
		}

		// There is of course a small race between checking the pointer and then
		// the try_acquire in which the variable might clear out our fVariable.
		// However, in the case where we were the ones to clear fThread, the
		// variable will notice that and then wait for us to acknowledge the
		// removal by decrementing fEntriesCount, as we do above; and until
		// we do that, we may validly use our cached pointer to the variable.
		if (try_acquire_spinlock(&variable->fLock))
			break;
	}

	// We now hold the variable's lock. Remove ourselves.
	if (fVariable->fEntries.Contains(this))
		fVariable->fEntries.Remove(this);

	atomic_pointer_set(&fVariable, (ConditionVariable*)NULL);
	atomic_add(&variable->fEntriesCount, -1);
	release_spinlock(&variable->fLock);
}


status_t
ConditionVariableEntry::Wait(uint32 flags, bigtime_t timeout)
{
#if KDEBUG
	if (!are_interrupts_enabled()) {
		panic("ConditionVariableEntry::Wait() called with interrupts "
			"disabled, entry: %p, variable: %p", this, fVariable);
		return B_ERROR;
	}
#endif

	ConditionVariable* variable = atomic_pointer_get(&fVariable);
	if (variable == NULL)
		return fWaitStatus;

	InterruptsLocker _;
	SpinLocker schedulerLocker(thread_get_current_thread()->scheduler_lock);

	if (fWaitStatus <= 0)
		return fWaitStatus;
	fWaitStatus = STATUS_WAITING;

	thread_prepare_to_block(thread_get_current_thread(), flags,
		THREAD_BLOCK_TYPE_CONDITION_VARIABLE, variable);

	schedulerLocker.Unlock();

	status_t error;
	if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) != 0)
		error = thread_block_with_timeout(flags, timeout);
	else
		error = thread_block();

	_RemoveFromVariable();
	return error;
}


status_t
ConditionVariableEntry::Wait(const void* object, uint32 flags,
	bigtime_t timeout)
{
	if (Add(object))
		return Wait(flags, timeout);
	return B_ENTRY_NOT_FOUND;
}


// #pragma mark - ConditionVariable


/*!	Initialization method for anonymous (unpublished) condition variables.
*/
void
ConditionVariable::Init(const void* object, const char* objectType)
{
	fObject = object;
	fObjectType = objectType;
	new(&fEntries) EntryList;
	fEntriesCount = 0;
	B_INITIALIZE_SPINLOCK(&fLock);

	T_SCHEDULING_ANALYSIS(InitConditionVariable(this, object, objectType));
	NotifyWaitObjectListeners(&WaitObjectListener::ConditionVariableInitialized,
		this);
}


void
ConditionVariable::Publish(const void* object, const char* objectType)
{
	ASSERT(object != NULL);

	Init(object, objectType);

	InterruptsWriteSpinLocker _(sConditionVariableHashLock);

	ASSERT_PRINT(sConditionVariableHash.Lookup(object) == NULL,
		"condition variable: %p\n", sConditionVariableHash.Lookup(object));

	sConditionVariableHash.InsertUnchecked(this);
}


void
ConditionVariable::Unpublish()
{
	ASSERT(fObject != NULL);

	InterruptsLocker _;
	WriteSpinLocker hashLocker(sConditionVariableHashLock);
	SpinLocker selfLocker(fLock);

#if KDEBUG
	ConditionVariable* variable = sConditionVariableHash.Lookup(fObject);
	if (variable != this) {
		panic("Condition variable %p not published, found: %p", this, variable);
		return;
	}
#endif

	sConditionVariableHash.RemoveUnchecked(this);
	fObject = NULL;
	fObjectType = NULL;

	hashLocker.Unlock();

	if (!fEntries.IsEmpty())
		_NotifyLocked(true, B_ENTRY_NOT_FOUND);
}


void
ConditionVariable::Add(ConditionVariableEntry* entry)
{
	InterruptsSpinLocker _(fLock);
	entry->_AddToLockedVariable(this);
}


status_t
ConditionVariable::Wait(uint32 flags, bigtime_t timeout)
{
	ConditionVariableEntry entry;
	Add(&entry);
	return entry.Wait(flags, timeout);
}


status_t
ConditionVariable::Wait(mutex* lock, uint32 flags, bigtime_t timeout)
{
	ConditionVariableEntry entry;
	Add(&entry);
	mutex_unlock(lock);
	status_t res = entry.Wait(flags, timeout);
	mutex_lock(lock);
	return res;
}


status_t
ConditionVariable::Wait(recursive_lock* lock, uint32 flags, bigtime_t timeout)
{
	ConditionVariableEntry entry;
	Add(&entry);
	int32 recursion = recursive_lock_get_recursion(lock);

	for (int32 i = 0; i < recursion; i++)
		recursive_lock_unlock(lock);

	status_t res = entry.Wait(flags, timeout);

	for (int32 i = 0; i < recursion; i++)
		recursive_lock_lock(lock);

	return res;
}


/*static*/ void
ConditionVariable::NotifyOne(const void* object, status_t result)
{
	_Notify(object, false, result);
}


/*static*/ void
ConditionVariable::NotifyAll(const void* object, status_t result)
{
	_Notify(object, true, result);
}


/*static*/ void
ConditionVariable::_Notify(const void* object, bool all, status_t result)
{
	InterruptsLocker ints;
	ReadSpinLocker hashLocker(sConditionVariableHashLock);
	ConditionVariable* variable = sConditionVariableHash.Lookup(object);
	if (variable == NULL)
		return;
	SpinLocker variableLocker(variable->fLock);
	hashLocker.Unlock();

	variable->_NotifyLocked(all, result);
}


void
ConditionVariable::_Notify(bool all, status_t result)
{
	InterruptsSpinLocker _(fLock);

	if (!fEntries.IsEmpty()) {
		if (result > B_OK) {
			panic("tried to notify with invalid result %" B_PRId32 "\n", result);
			result = B_ERROR;
		}

		_NotifyLocked(all, result);
	}
}


/*!	Called with interrupts disabled and the condition variable's spinlock held.
*/
void
ConditionVariable::_NotifyLocked(bool all, status_t result)
{
	// Dequeue and wake up the blocked threads.
	while (ConditionVariableEntry* entry = fEntries.RemoveHead()) {
		Thread* thread = atomic_pointer_get_and_set(&entry->fThread, (Thread*)NULL);
		if (thread == NULL) {
			// The entry must be in the process of trying to remove itself from us.
			// Clear its variable and wait for it to acknowledge this in fEntriesCount,
			// as it is the one responsible for decrementing that.
			const int32 oldCount = atomic_get(&fEntriesCount);
			atomic_pointer_set(&entry->fVariable, (ConditionVariable*)NULL);

			// As fEntriesCount is only modified while our lock is held, nothing else
			// will modify it while we are spinning, since we hold it at present.
			int32 tries = 0;
			while (atomic_get(&fEntriesCount) == oldCount) {
				tries++;
				if ((tries % 10000) == 0)
					panic("entries count was not decremented for a long time!");
				cpu_pause();
			}
		} else {
			SpinLocker schedulerLocker(thread->scheduler_lock);
			status_t lastWaitStatus = entry->fWaitStatus;
			entry->fWaitStatus = result;
			if (lastWaitStatus == STATUS_WAITING && thread->state != B_THREAD_WAITING) {
				// The thread is not in B_THREAD_WAITING state, so we must unblock it early,
				// in case it tries to re-block itself immediately after we unset fVariable.
				thread_unblock_locked(thread, result);
				lastWaitStatus = result;
			}

			// No matter what the thread is doing, we were the ones to clear its
			// fThread, so we are the ones responsible for decrementing fEntriesCount.
			// (We may not validly access the entry once we unset its fVariable.)
			atomic_pointer_set(&entry->fVariable, (ConditionVariable*)NULL);
			atomic_add(&fEntriesCount, -1);

			// If the thread was in B_THREAD_WAITING state, we unblock it after unsetting
			// fVariable, because otherwise it will wake up before thread_unblock returns
			// and spin while waiting for us to do so.
			if (lastWaitStatus == STATUS_WAITING)
				thread_unblock_locked(thread, result);
		}

		if (!all)
			break;
	}
}


/*static*/ void
ConditionVariable::ListAll()
{
	kprintf("variable      object (type)                waiting threads\n");
	kprintf("------------------------------------------------------------\n");
	ConditionVariableHash::Iterator it(&sConditionVariableHash);
	while (ConditionVariable* variable = it.Next()) {
		// count waiting threads
		int count = variable->fEntries.Count();

		kprintf("%p  %p  %-20s %15d\n", variable, variable->fObject,
			variable->fObjectType, count);
	}
}


void
ConditionVariable::Dump() const
{
	kprintf("condition variable %p\n", this);
	kprintf("  object:  %p (%s)\n", fObject, fObjectType);
	kprintf("  threads:");

	for (EntryList::ConstIterator it = fEntries.GetIterator();
			ConditionVariableEntry* entry = it.Next();) {
		kprintf(" %" B_PRId32, entry->fThread->id);
	}
	kprintf("\n");
}


// #pragma mark -


void
condition_variable_init()
{
	new(&sConditionVariableHash) ConditionVariableHash;

	status_t error = sConditionVariableHash.Init(kConditionVariableHashSize);
	if (error != B_OK) {
		panic("condition_variable_init(): Failed to init hash table: %s",
			strerror(error));
	}

	add_debugger_command_etc("cvar", &dump_condition_variable,
		"Dump condition variable info",
		"<address>\n"
		"Prints info for the specified condition variable.\n"
		"  <address>  - Address of the condition variable or the object it is\n"
		"               associated with.\n", 0);
	add_debugger_command_etc("cvars", &list_condition_variables,
		"List condition variables",
		"\n"
		"Lists all existing condition variables\n", 0);
}
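

// A minimal usage sketch of the publish/wait/notify protocol implemented above,
// kept here as a comment for reference. "my_device", its "ioDone" flag and the
// surrounding driver logic are illustrative assumptions, not part of this file.
//
//	struct my_device {
//		ConditionVariable	ioDoneCondition;
//		bool				ioDone;
//	};
//
//	void
//	my_device_init(my_device* device)
//	{
//		device->ioDone = false;
//		// Publish keyed on the object, so waiters can find the variable by it.
//		device->ioDoneCondition.Publish(device, "my_device io");
//	}
//
//	void
//	my_device_io_finished(my_device* device)
//	{
//		device->ioDone = true;
//		// Wake every waiter registered for this object with B_OK.
//		ConditionVariable::NotifyAll(device, B_OK);
//	}
//
//	status_t
//	my_device_wait_for_io(my_device* device)
//	{
//		ConditionVariableEntry entry;
//		// Add() fails if no variable is published for the object.
//		if (!entry.Add(device))
//			return B_ENTRY_NOT_FOUND;
//		// Re-check the predicate after adding the entry: any notification from
//		// here on is delivered to the entry, so no wakeup can be lost between
//		// the check and Wait(). (The entry removes itself on destruction.)
//		if (device->ioDone)
//			return B_OK;
//		return entry.Wait(B_CAN_INTERRUPT, 0);
//	}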