/*
 * Copyright 2007-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2019-2023, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 */

#include <condition_variable.h>

#include <new>
#include <stdlib.h>
#include <string.h>

#include <debug.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <int.h>
#include <listeners.h>
#include <scheduling_analysis.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <util/atomic.h>


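// Values stored in ConditionVariableEntry::fWaitStatus while an entry is
// attached (STATUS_ADDED) or its thread is blocked (STATUS_WAITING). Both are
// deliberately positive: a delivered wait result (B_OK or an error code) is
// always <= 0, which is what the "fWaitStatus <= 0" checks below rely on.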
#define STATUS_ADDED	1
#define STATUS_WAITING	2


static const int kConditionVariableHashSize = 512;


struct ConditionVariableHashDefinition {
	typedef const void* KeyType;
	typedef	ConditionVariable ValueType;

	size_t HashKey(const void* key) const
		{ return (size_t)key; }
	size_t Hash(ConditionVariable* variable) const
		{ return (size_t)variable->fObject; }
	bool Compare(const void* key, ConditionVariable* variable) const
		{ return key == variable->fObject; }
	ConditionVariable*& GetLink(ConditionVariable* variable) const
		{ return variable->fNext; }
};

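// Global table of all published condition variables, keyed by the object they
// were published under. Add() and the static _Notify() take the hash lock only
// for reading and then lock the individual variable; Publish() and Unpublish()
// take it for writing.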
typedef BOpenHashTable<ConditionVariableHashDefinition> ConditionVariableHash;
static ConditionVariableHash sConditionVariableHash;
static rw_spinlock sConditionVariableHashLock;


// #pragma mark - ConditionVariableEntry


ConditionVariableEntry::ConditionVariableEntry()
	: fVariable(NULL)
{
}


ConditionVariableEntry::~ConditionVariableEntry()
{
	// We can use an "unsafe" non-atomic access of fVariable here, since we only
	// care whether it is non-NULL, not what its specific value is.
	if (fVariable != NULL)
		_RemoveFromVariable();
}


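/*!	Adds this entry to the condition variable published under \a object.
	Returns \c true on success; returns \c false and sets the wait status to
	\c B_ENTRY_NOT_FOUND if no variable is published under \a object.
*/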
bool
ConditionVariableEntry::Add(const void* object)
{
	ASSERT(object != NULL);

	InterruptsLocker _;
	ReadSpinLocker hashLocker(sConditionVariableHashLock);

	ConditionVariable* variable = sConditionVariableHash.Lookup(object);

	if (variable == NULL) {
		fWaitStatus = B_ENTRY_NOT_FOUND;
		return false;
	}

	SpinLocker variableLocker(variable->fLock);
	hashLocker.Unlock();

	_AddToLockedVariable(variable);

	return true;
}


ConditionVariable*
ConditionVariableEntry::Variable() const
{
	return atomic_pointer_get(&fVariable);
}


inline void
ConditionVariableEntry::_AddToLockedVariable(ConditionVariable* variable)
{
	ASSERT(fVariable == NULL);

	fThread = thread_get_current_thread();
	fVariable = variable;
	fWaitStatus = STATUS_ADDED;
	fVariable->fEntries.Add(this);
	atomic_add(&fVariable->fEntriesCount, 1);
}


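/*!	Detaches this entry from its variable, if it still has one. This races
	with _NotifyLocked() running on another thread; the atomic swap of fThread
	decides which side owns the removal: whoever clears fThread first is also
	the one that decrements fEntriesCount, while the other side merely waits
	for the corresponding acknowledgement.
*/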
void
ConditionVariableEntry::_RemoveFromVariable()
{
	// This section is critical because it can race with _NotifyLocked on the
	// variable's thread, so we must not be interrupted during it.
	InterruptsLocker _;

	ConditionVariable* variable = atomic_pointer_get(&fVariable);
	if (atomic_pointer_get_and_set(&fThread, (Thread*)NULL) == NULL) {
		// If fThread was already NULL, that means the variable is already
		// in the process of clearing us out (or has already finished doing so).
		// We thus cannot access fVariable, and must spin until it is cleared.
		int32 tries = 0;
		while (atomic_pointer_get(&fVariable) != NULL) {
			tries++;
			if ((tries % 10000) == 0)
				dprintf("variable pointer was not unset for a long time!\n");
			cpu_pause();
		}

		return;
	}

	while (true) {
		if (atomic_pointer_get(&fVariable) == NULL) {
			// The variable must have cleared us out. Acknowledge this and return.
			atomic_add(&variable->fEntriesCount, -1);
			return;
		}

		// There is of course a small race between checking the pointer and the
		// try_acquire, in which the variable might clear out our fVariable.
		// However, in the case where we were the ones to clear fThread, the
		// variable will notice that and then wait for us to acknowledge the
		// removal by decrementing fEntriesCount, as we do above; and until
		// we do that, we may validly use our cached pointer to the variable.
		if (try_acquire_spinlock(&variable->fLock))
			break;
	}

	// We now hold the variable's lock. Remove ourselves.
	if (fVariable->fEntries.Contains(this))
		fVariable->fEntries.Remove(this);

	atomic_pointer_set(&fVariable, (ConditionVariable*)NULL);
	atomic_add(&variable->fEntriesCount, -1);
	release_spinlock(&variable->fLock);
}


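/*!	Blocks until the variable is notified, the (optional) timeout expires, or
	the wait is interrupted. A delivered notification result takes precedence
	over the block result, so callers see the status the notifier passed
	(or \c B_ENTRY_NOT_FOUND if the variable was unpublished meanwhile).
*/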
status_t
ConditionVariableEntry::Wait(uint32 flags, bigtime_t timeout)
{
#if KDEBUG
	if (!are_interrupts_enabled()) {
		panic("ConditionVariableEntry::Wait() called with interrupts "
			"disabled, entry: %p, variable: %p", this, fVariable);
		return B_ERROR;
	}
#endif

	ConditionVariable* variable = atomic_pointer_get(&fVariable);
	if (variable == NULL)
		return fWaitStatus;

	if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
		_RemoveFromVariable();

		if (fWaitStatus <= 0)
			return fWaitStatus;
		return B_WOULD_BLOCK;
	}

	InterruptsLocker _;
	SpinLocker schedulerLocker(thread_get_current_thread()->scheduler_lock);

	if (fWaitStatus <= 0)
		return fWaitStatus;
	fWaitStatus = STATUS_WAITING;

	thread_prepare_to_block(thread_get_current_thread(), flags,
		THREAD_BLOCK_TYPE_CONDITION_VARIABLE, variable);

	schedulerLocker.Unlock();

	status_t error;
	if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) != 0)
		error = thread_block_with_timeout(flags, timeout);
	else
		error = thread_block();

	_RemoveFromVariable();

	// We need to always return the actual wait status, if we received one.
	if (fWaitStatus <= 0)
		return fWaitStatus;

	return error;
}


status_t
ConditionVariableEntry::Wait(const void* object, uint32 flags,
	bigtime_t timeout)
{
	if (Add(object))
		return Wait(flags, timeout);
	return B_ENTRY_NOT_FOUND;
}


// #pragma mark - ConditionVariable


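// A minimal usage sketch (hypothetical; not part of this file): a variable is
// published under some key object, a waiter attaches an entry and blocks, and
// a notifier wakes all waiters via the static NotifyAll(). The names sFlag,
// sFlagCondition, init_flag(), wait_for_flag() and set_flag() are invented
// for illustration only.
//
//	static int32 sFlag = 0;
//	static ConditionVariable sFlagCondition;
//
//	void
//	init_flag()
//	{
//		sFlagCondition.Publish(&sFlag, "flag");
//	}
//
//	status_t
//	wait_for_flag()
//	{
//		while (atomic_get(&sFlag) == 0) {
//			ConditionVariableEntry entry;
//			if (!entry.Add(&sFlag))
//				return B_ENTRY_NOT_FOUND;
//			// Re-check after adding the entry, so a notification sent in
//			// between is not lost.
//			if (atomic_get(&sFlag) != 0)
//				break;
//			status_t status = entry.Wait(B_CAN_INTERRUPT, 0);
//			if (status != B_OK)
//				return status;
//		}
//		return B_OK;
//	}
//
//	void
//	set_flag()
//	{
//		atomic_set(&sFlag, 1);
//		ConditionVariable::NotifyAll(&sFlag, B_OK);
//	}

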
/*!	Initialization method for anonymous (unpublished) condition variables.
*/
void
ConditionVariable::Init(const void* object, const char* objectType)
{
	fObject = object;
	fObjectType = objectType;
	new(&fEntries) EntryList;
	fEntriesCount = 0;
	B_INITIALIZE_SPINLOCK(&fLock);

	T_SCHEDULING_ANALYSIS(InitConditionVariable(this, object, objectType));
	NotifyWaitObjectListeners(&WaitObjectListener::ConditionVariableInitialized,
		this);
}


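/*!	Initializes the variable and publishes it in the global hash under
	\a object, so that waiters can find it via ConditionVariableEntry::Add()
	or the static Notify* functions. No other variable may already be
	published under \a object.
*/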
void
ConditionVariable::Publish(const void* object, const char* objectType)
{
	ASSERT(object != NULL);

	Init(object, objectType);

	InterruptsWriteSpinLocker _(sConditionVariableHashLock);

	ASSERT_PRINT(sConditionVariableHash.Lookup(object) == NULL,
		"condition variable: %p\n", sConditionVariableHash.Lookup(object));

	sConditionVariableHash.InsertUnchecked(this);
}


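/*!	Removes the variable from the global hash again. Any entries still
	waiting on it are notified with \c B_ENTRY_NOT_FOUND.
*/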
void
ConditionVariable::Unpublish()
{
	ASSERT(fObject != NULL);

	InterruptsLocker _;
	WriteSpinLocker hashLocker(sConditionVariableHashLock);
	SpinLocker selfLocker(fLock);

#if KDEBUG
	ConditionVariable* variable = sConditionVariableHash.Lookup(fObject);
	if (variable != this) {
		panic("Condition variable %p not published, found: %p", this, variable);
		return;
	}
#endif

	sConditionVariableHash.RemoveUnchecked(this);
	fObject = NULL;
	fObjectType = NULL;

	hashLocker.Unlock();

	if (!fEntries.IsEmpty())
		_NotifyLocked(true, B_ENTRY_NOT_FOUND);
}


void
ConditionVariable::Add(ConditionVariableEntry* entry)
{
	InterruptsSpinLocker _(fLock);
	entry->_AddToLockedVariable(this);
}


status_t
ConditionVariable::Wait(uint32 flags, bigtime_t timeout)
{
	ConditionVariableEntry entry;
	Add(&entry);
	return entry.Wait(flags, timeout);
}


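/*!	Waits on the variable while temporarily releasing \a lock. The entry is
	added before the mutex is unlocked, so a notification sent after the
	unlock (but before the thread actually blocks) is not lost; the mutex is
	re-acquired before returning.
*/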
status_t
ConditionVariable::Wait(mutex* lock, uint32 flags, bigtime_t timeout)
{
	ConditionVariableEntry entry;
	Add(&entry);
	mutex_unlock(lock);
	status_t res = entry.Wait(flags, timeout);
	mutex_lock(lock);
	return res;
}


status_t
ConditionVariable::Wait(recursive_lock* lock, uint32 flags, bigtime_t timeout)
{
	ConditionVariableEntry entry;
	Add(&entry);
	int32 recursion = recursive_lock_get_recursion(lock);

	for (int32 i = 0; i < recursion; i++)
		recursive_lock_unlock(lock);

	status_t res = entry.Wait(flags, timeout);

	for (int32 i = 0; i < recursion; i++)
		recursive_lock_lock(lock);

	return res;
}


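/*!	Notifies one (NotifyOne()) respectively all (NotifyAll()) waiters of the
	variable published under \a object with the given \a result. Returns the
	number of entries that were notified, or 0 if no variable is published
	under \a object.
*/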
/*static*/ int32
ConditionVariable::NotifyOne(const void* object, status_t result)
{
	return _Notify(object, false, result);
}


/*static*/ int32
ConditionVariable::NotifyAll(const void* object, status_t result)
{
	return _Notify(object, true, result);
}


/*static*/ int32
ConditionVariable::_Notify(const void* object, bool all, status_t result)
{
	InterruptsLocker ints;
	ReadSpinLocker hashLocker(sConditionVariableHashLock);
	ConditionVariable* variable = sConditionVariableHash.Lookup(object);
	if (variable == NULL)
		return 0;
	SpinLocker variableLocker(variable->fLock);
	hashLocker.Unlock();

	return variable->_NotifyLocked(all, result);
}


int32
ConditionVariable::_Notify(bool all, status_t result)
{
	InterruptsSpinLocker _(fLock);
	if (!fEntries.IsEmpty()) {
		if (result > B_OK) {
			panic("tried to notify with invalid result %" B_PRId32 "\n", result);
			result = B_ERROR;
		}

		return _NotifyLocked(all, result);
	}
	return 0;
}


/*! Called with interrupts disabled and the condition variable's spinlock held.
 */
int32
ConditionVariable::_NotifyLocked(bool all, status_t result)
{
	int32 notified = 0;

	// Dequeue and wake up the blocked threads.
	while (ConditionVariableEntry* entry = fEntries.RemoveHead()) {
		Thread* thread = atomic_pointer_get_and_set(&entry->fThread, (Thread*)NULL);
		if (thread == NULL) {
			// The entry must be in the process of trying to remove itself from us.
			// Clear its variable and wait for it to acknowledge this in fEntriesCount,
			// as it is the one responsible for decrementing that.
			const int32 oldCount = atomic_get(&fEntriesCount);
			atomic_pointer_set(&entry->fVariable, (ConditionVariable*)NULL);

			// All other modifications of fEntriesCount happen with our lock held,
			// so nothing but the entry we just cleared will change it while we spin.
			int32 tries = 0;
			while (atomic_get(&fEntriesCount) == oldCount) {
				tries++;
				if ((tries % 10000) == 0)
					dprintf("entries count was not decremented for a long time!\n");
				cpu_pause();
			}
		} else {
			SpinLocker schedulerLocker(thread->scheduler_lock);
			status_t lastWaitStatus = entry->fWaitStatus;
			entry->fWaitStatus = result;
			if (lastWaitStatus == STATUS_WAITING && thread->state != B_THREAD_WAITING) {
				// The thread is not in the B_THREAD_WAITING state, so we must unblock
				// it early, in case it tries to re-block itself immediately after we
				// unset fVariable.
				thread_unblock_locked(thread, result);
				lastWaitStatus = result;
			}

			// No matter what the thread is doing, we were the ones to clear its
			// fThread, so we are the ones responsible for decrementing fEntriesCount.
			// (We may not validly access the entry once we unset its fVariable.)
			atomic_pointer_set(&entry->fVariable, (ConditionVariable*)NULL);
			atomic_add(&fEntriesCount, -1);

			// If the thread was in the B_THREAD_WAITING state, we unblock it only after
			// unsetting fVariable, because otherwise it would wake up before
			// thread_unblock returns and spin while waiting for us to unset it.
			if (lastWaitStatus == STATUS_WAITING)
				thread_unblock_locked(thread, result);

			notified++;
		}

		if (!all)
			break;
	}

	return notified;
}


// #pragma mark -


/*static*/ void
ConditionVariable::ListAll()
{
	kprintf("  variable      object (type)                waiting threads\n");
	kprintf("------------------------------------------------------------\n");
	ConditionVariableHash::Iterator it(&sConditionVariableHash);
	while (ConditionVariable* variable = it.Next()) {
		// count waiting threads
		int count = variable->fEntries.Count();

		kprintf("%p  %p  %-20s %15d\n", variable, variable->fObject,
			variable->fObjectType, count);
	}
}


void
ConditionVariable::Dump() const
{
	kprintf("condition variable %p\n", this);
	kprintf("  object:  %p (%s)\n", fObject, fObjectType);
	kprintf("  threads:");

	for (EntryList::ConstIterator it = fEntries.GetIterator();
		 ConditionVariableEntry* entry = it.Next();) {
		kprintf(" %" B_PRId32, entry->fThread->id);
	}
	kprintf("\n");
}


static int
list_condition_variables(int argc, char** argv)
{
	ConditionVariable::ListAll();
	return 0;
}


static int
dump_condition_variable(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	addr_t address = parse_expression(argv[1]);
	if (address == 0)
		return 0;

	ConditionVariable* variable = sConditionVariableHash.Lookup((void*)address);

	if (variable == NULL) {
		// It must be a direct pointer to a condition variable.
		variable = (ConditionVariable*)address;
	}

	if (variable != NULL) {
		variable->Dump();

		set_debug_variable("_cvar", (addr_t)variable);
		set_debug_variable("_object", (addr_t)variable->Object());

	} else
		kprintf("no condition variable at or with key %p\n", (void*)address);

	return 0;
}


// #pragma mark -


void
condition_variable_init()
{
	new(&sConditionVariableHash) ConditionVariableHash;

	status_t error = sConditionVariableHash.Init(kConditionVariableHashSize);
	if (error != B_OK) {
		panic("condition_variable_init(): Failed to init hash table: %s",
			strerror(error));
	}

	add_debugger_command_etc("cvar", &dump_condition_variable,
		"Dump condition variable info",
		"<address>\n"
		"Prints info for the specified condition variable.\n"
		"  <address>  - Address of the condition variable or the object it is\n"
		"               associated with.\n", 0);
	add_debugger_command_etc("cvars", &list_condition_variables,
		"List condition variables",
		"\n"
		"Lists all published condition variables\n", 0);
}


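/*!	Copies the fObjectType string of \a cvar into \a name, using the
	fault-tolerant debug_memcpy()/debug_strlcpy() helpers so that an invalid
	\a cvar pointer results in an error return rather than a crash.
*/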
ssize_t
debug_condition_variable_type_strlcpy(ConditionVariable* cvar, char* name, size_t size)
{
	const int32 typePointerOffset = offsetof(ConditionVariable, fObjectType);

	const char* pointer;
	status_t status = debug_memcpy(B_CURRENT_TEAM, &pointer,
		(int8*)cvar + typePointerOffset, sizeof(const char*));
	if (status != B_OK)
		return status;

	return debug_strlcpy(B_CURRENT_TEAM, name, pointer, size);
}
554