/*
 * Copyright 2007-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2019, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 */

#include <condition_variable.h>

#include <new>
#include <stdlib.h>
#include <string.h>

#include <debug.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <int.h>
#include <listeners.h>
#include <scheduling_analysis.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <util/atomic.h>


#define STATUS_ADDED	1
#define STATUS_WAITING	2


static const int kConditionVariableHashSize = 512;


struct ConditionVariableHashDefinition {
	typedef const void* KeyType;
	typedef ConditionVariable ValueType;

	size_t HashKey(const void* key) const
		{ return (size_t)key; }
	size_t Hash(ConditionVariable* variable) const
		{ return (size_t)variable->fObject; }
	bool Compare(const void* key, ConditionVariable* variable) const
		{ return key == variable->fObject; }
	ConditionVariable*& GetLink(ConditionVariable* variable) const
		{ return variable->fNext; }
};

typedef BOpenHashTable<ConditionVariableHashDefinition> ConditionVariableHash;
static ConditionVariableHash sConditionVariableHash;
static rw_spinlock sConditionVariableHashLock;


static int
list_condition_variables(int argc, char** argv)
{
	ConditionVariable::ListAll();
	return 0;
}


static int
dump_condition_variable(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	addr_t address = parse_expression(argv[1]);
	if (address == 0)
		return 0;

	ConditionVariable* variable = sConditionVariableHash.Lookup((void*)address);

	if (variable == NULL) {
		// It must be a direct pointer to a condition variable.
		variable = (ConditionVariable*)address;
	}

	if (variable != NULL) {
		variable->Dump();

		set_debug_variable("_cvar", (addr_t)variable);
		set_debug_variable("_object", (addr_t)variable->Object());

	} else
		kprintf("no condition variable at or with key %p\n", (void*)address);

	return 0;
}


// #pragma mark - ConditionVariableEntry


ConditionVariableEntry::ConditionVariableEntry()
	: fVariable(NULL)
{
}


ConditionVariableEntry::~ConditionVariableEntry()
{
	// We can use an "unsafe" non-atomic access of fVariable here, since we only
	// care whether it is non-NULL, not what its specific value is.
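	// If it is non-NULL, we may still be linked to a variable (or in the
	// middle of being removed by it), so detach before this entry goes away.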
	if (fVariable != NULL)
		_RemoveFromVariable();
}


bool
ConditionVariableEntry::Add(const void* object)
{
	ASSERT(object != NULL);

	InterruptsLocker _;
	ReadSpinLocker hashLocker(sConditionVariableHashLock);

	ConditionVariable* variable = sConditionVariableHash.Lookup(object);

	if (variable == NULL) {
		fWaitStatus = B_ENTRY_NOT_FOUND;
		return false;
	}

	SpinLocker variableLocker(variable->fLock);
	hashLocker.Unlock();

	_AddToLockedVariable(variable);

	return true;
}


inline void
ConditionVariableEntry::_AddToLockedVariable(ConditionVariable* variable)
{
	ASSERT(fVariable == NULL);

	fThread = thread_get_current_thread();
	fVariable = variable;
	fWaitStatus = STATUS_ADDED;
	fVariable->fEntries.Add(this);
	atomic_add(&fVariable->fEntriesCount, 1);
}


void
ConditionVariableEntry::_RemoveFromVariable()
{
	// This section is critical because it can race with _NotifyLocked on the
	// variable's thread, so we must not be interrupted during it.
	InterruptsLocker _;

	ConditionVariable* variable = atomic_pointer_get(&fVariable);
	if (atomic_pointer_get_and_set(&fThread, (Thread*)NULL) == NULL) {
		// If fThread was already NULL, that means the variable is already
		// in the process of clearing us out (or already has finished doing so.)
		// We thus cannot access fVariable, and must spin until it is cleared.
		int32 tries = 0;
		while (atomic_pointer_get(&fVariable) != NULL) {
			tries++;
			if ((tries % 100000) == 0)
				panic("variable pointer was not unset for a long time!");
		}

		return;
	}

	while (true) {
		if (atomic_pointer_get(&fVariable) == NULL) {
			// The variable must have cleared us out. Acknowledge this and return.
			atomic_add(&variable->fEntriesCount, -1);
			return;
		}

		// There is of course a small race between checking the pointer and then
		// the try_acquire in which the variable might clear out our fVariable.
		// However, in the case where we were the ones to clear fThread, the
		// variable will notice that and then wait for us to acknowledge the
		// removal by decrementing fEntriesCount, as we do above; and until
		// we do that, we may validly use our cached pointer to the variable.
		if (try_acquire_spinlock(&variable->fLock))
			break;
	}

	// We now hold the variable's lock. Remove ourselves.
	if (fVariable->fEntries.Contains(this))
		fVariable->fEntries.Remove(this);

	atomic_pointer_set(&fVariable, (ConditionVariable*)NULL);
	atomic_add(&variable->fEntriesCount, -1);
	release_spinlock(&variable->fLock);
}


status_t
ConditionVariableEntry::Wait(uint32 flags, bigtime_t timeout)
{
#if KDEBUG
	if (!are_interrupts_enabled()) {
		panic("ConditionVariableEntry::Wait() called with interrupts "
			"disabled, entry: %p, variable: %p", this, fVariable);
		return B_ERROR;
	}
#endif

	// The race in-between get_and_set and (re)set is irrelevant, because
	// if the status really is <= 0, we have already been or are about to
	// be removed from the variable, and nothing else is going to set the status.
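	// A status <= 0 here means a result has already been stored for us (by a
	// notify, an unpublish, or a failed Add), so return it without blocking.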
	status_t waitStatus = atomic_get_and_set(&fWaitStatus, STATUS_WAITING);
	if (waitStatus <= 0) {
		fWaitStatus = waitStatus;
		return waitStatus;
	}

	InterruptsLocker _;

	thread_prepare_to_block(thread_get_current_thread(), flags,
		THREAD_BLOCK_TYPE_CONDITION_VARIABLE, atomic_pointer_get(&fVariable));

	waitStatus = atomic_get(&fWaitStatus);
	if (waitStatus <= 0) {
		// We were just woken up! Unblock ourselves immediately.
		thread_unblock(thread_get_current_thread(), waitStatus);
	}

	status_t error;
	if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) != 0)
		error = thread_block_with_timeout(flags, timeout);
	else
		error = thread_block();

	_RemoveFromVariable();
	return error;
}


status_t
ConditionVariableEntry::Wait(const void* object, uint32 flags,
	bigtime_t timeout)
{
	if (Add(object))
		return Wait(flags, timeout);
	return B_ENTRY_NOT_FOUND;
}


// #pragma mark - ConditionVariable


/*!	Initialization method for anonymous (unpublished) condition variables.
*/
void
ConditionVariable::Init(const void* object, const char* objectType)
{
	fObject = object;
	fObjectType = objectType;
	new(&fEntries) EntryList;
	fEntriesCount = 0;
	B_INITIALIZE_SPINLOCK(&fLock);

	T_SCHEDULING_ANALYSIS(InitConditionVariable(this, object, objectType));
	NotifyWaitObjectListeners(&WaitObjectListener::ConditionVariableInitialized,
		this);
}


void
ConditionVariable::Publish(const void* object, const char* objectType)
{
	ASSERT(object != NULL);

	fObject = object;
	fObjectType = objectType;
	new(&fEntries) EntryList;
	fEntriesCount = 0;
	B_INITIALIZE_SPINLOCK(&fLock);

	T_SCHEDULING_ANALYSIS(InitConditionVariable(this, object, objectType));
	NotifyWaitObjectListeners(&WaitObjectListener::ConditionVariableInitialized,
		this);

	InterruptsWriteSpinLocker _(sConditionVariableHashLock);

	ASSERT_PRINT(sConditionVariableHash.Lookup(object) == NULL,
		"condition variable: %p\n", sConditionVariableHash.Lookup(object));

	sConditionVariableHash.InsertUnchecked(this);
}


void
ConditionVariable::Unpublish()
{
	ASSERT(fObject != NULL);

	InterruptsLocker _;
	WriteSpinLocker hashLocker(sConditionVariableHashLock);
	SpinLocker selfLocker(fLock);

#if KDEBUG
	ConditionVariable* variable = sConditionVariableHash.Lookup(fObject);
	if (variable != this) {
		panic("Condition variable %p not published, found: %p", this, variable);
		return;
	}
#endif

	sConditionVariableHash.RemoveUnchecked(this);
	fObject = NULL;
	fObjectType = NULL;

	hashLocker.Unlock();

	if (!fEntries.IsEmpty())
		_NotifyLocked(true, B_ENTRY_NOT_FOUND);
}


void
ConditionVariable::Add(ConditionVariableEntry* entry)
{
	InterruptsSpinLocker _(fLock);
	entry->_AddToLockedVariable(this);
}


status_t
ConditionVariable::Wait(uint32 flags, bigtime_t timeout)
{
	ConditionVariableEntry entry;
	Add(&entry);
	return entry.Wait(flags, timeout);
}


/*static*/ void
ConditionVariable::NotifyOne(const void* object, status_t result)
{
	InterruptsReadSpinLocker locker(sConditionVariableHashLock);
	ConditionVariable* variable = sConditionVariableHash.Lookup(object);
	locker.Unlock();
	if (variable == NULL)
		return;

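	// Forward to the instance method, which notifies while holding the
	// variable's own lock.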
	variable->NotifyOne(result);
}


/*static*/ void
ConditionVariable::NotifyAll(const void* object, status_t result)
{
	InterruptsReadSpinLocker locker(sConditionVariableHashLock);
	ConditionVariable* variable = sConditionVariableHash.Lookup(object);
	locker.Unlock();
	if (variable == NULL)
		return;

	variable->NotifyAll(result);
}


void
ConditionVariable::_Notify(bool all, status_t result)
{
	InterruptsSpinLocker _(fLock);

	if (!fEntries.IsEmpty()) {
		if (result > B_OK) {
			panic("tried to notify with invalid result %" B_PRId32 "\n", result);
			result = B_ERROR;
		}

		_NotifyLocked(all, result);
	}
}


/*!	Called with interrupts disabled and the condition variable's spinlock held.
*/
void
ConditionVariable::_NotifyLocked(bool all, status_t result)
{
	// Dequeue and wake up the blocked threads.
	while (ConditionVariableEntry* entry = fEntries.RemoveHead()) {
		Thread* thread = atomic_pointer_get_and_set(&entry->fThread, (Thread*)NULL);
		if (thread == NULL) {
			// The entry must be in the process of trying to remove itself
			// from us. Clear its variable and wait for it to acknowledge this
			// in fEntriesCount, as it is the one responsible for decrementing
			// that.
			const int32 oldCount = atomic_get(&fEntriesCount);
			atomic_pointer_set(&entry->fVariable, (ConditionVariable*)NULL);

			// As fEntriesCount is only modified while our lock is held,
			// nothing else will modify it while we are spinning, since we
			// hold it at present.
			int32 tries = 0;
			while (atomic_get(&fEntriesCount) == oldCount) {
				tries++;
				if ((tries % 10000) == 0)
					panic("entries count was not decremented for a long time!");
			}
		} else {
			status_t waitStatus = atomic_get_and_set(&entry->fWaitStatus, result);

			SpinLocker threadLocker(thread->scheduler_lock);
			if (waitStatus == STATUS_WAITING && thread->state != B_THREAD_WAITING) {
				// The thread is not in B_THREAD_WAITING state, so we must
				// unblock it early, in case it tries to re-block itself
				// immediately after we unset fVariable.
				thread_unblock_locked(thread, result);
				waitStatus = result;
			}

			// No matter what the thread is doing, we were the ones to clear
			// its fThread, so we are the ones responsible for decrementing
			// fEntriesCount. (We may not validly access the entry once we
			// unset its fVariable.)
			atomic_pointer_set(&entry->fVariable, (ConditionVariable*)NULL);
			atomic_add(&fEntriesCount, -1);

			// If the thread was in B_THREAD_WAITING state, we unblock it after
			// unsetting fVariable, because otherwise it will wake up before
			// thread_unblock returns and spin while waiting for us to do so.
			if (waitStatus == STATUS_WAITING)
				thread_unblock_locked(thread, result);
		}

		if (!all)
			break;
	}
}


/*static*/ void
ConditionVariable::ListAll()
{
	kprintf("  variable      object (type)                waiting threads\n");
	kprintf("------------------------------------------------------------\n");
	ConditionVariableHash::Iterator it(&sConditionVariableHash);
	while (ConditionVariable* variable = it.Next()) {
		// count waiting threads
		int count = variable->fEntries.Count();

		kprintf("%p  %p  %-20s %15d\n", variable, variable->fObject,
			variable->fObjectType, count);
	}
}


void
ConditionVariable::Dump() const
{
	kprintf("condition variable %p\n", this);
	kprintf("  object:  %p (%s)\n", fObject, fObjectType);
	kprintf("  threads:");

	for (EntryList::ConstIterator it = fEntries.GetIterator();
			ConditionVariableEntry* entry = it.Next();) {
		kprintf(" %" B_PRId32, entry->fThread->id);
	}
	kprintf("\n");
}


// #pragma mark -


void
condition_variable_init()
{
	new(&sConditionVariableHash) ConditionVariableHash;

	status_t error = sConditionVariableHash.Init(kConditionVariableHashSize);
	if (error != B_OK) {
		panic("condition_variable_init(): Failed to init hash table: %s",
			strerror(error));
	}

	add_debugger_command_etc("cvar", &dump_condition_variable,
		"Dump condition variable info",
		"<address>\n"
		"Prints info for the specified condition variable.\n"
		"  <address>  - Address of the condition variable or the object it is\n"
		"               associated with.\n", 0);
	add_debugger_command_etc("cvars", &list_condition_variables,
		"List condition variables",
		"\n"
		"Lists all existing condition variables\n", 0);
}
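

// Rough usage sketch, for illustration only (the names below are
// hypothetical; see condition_variable.h for the exact public interface).
// A subsystem publishes a condition variable on the object it guards,
// waiters wait on that object's address, and the notifier wakes them once
// the condition holds:
//
//	static int sGuardedObject;				// hypothetical guarded object
//	static ConditionVariable sCondition;
//
//	sCondition.Publish(&sGuardedObject, "guarded object");
//
//	// a waiter, here with a relative timeout
//	ConditionVariableEntry entry;
//	if (entry.Add(&sGuardedObject))
//		entry.Wait(B_RELATIVE_TIMEOUT, 1000000);
//
//	// the notifier, once the condition is fulfilled
//	ConditionVariable::NotifyAll(&sGuardedObject, B_OK);
//
//	// teardown; remaining waiters are woken with B_ENTRY_NOT_FOUND
//	sCondition.Unpublish();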