xref: /haiku/src/system/kernel/condition_variable.cpp (revision 445d4fd926c569e7b9ae28017da86280aaecbae2)
1 /*
2  * Copyright 2007-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2019, Haiku, Inc. All rights reserved.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 #include <condition_variable.h>
8 
9 #include <new>
10 #include <stdlib.h>
11 #include <string.h>
12 
13 #include <debug.h>
14 #include <kscheduler.h>
15 #include <ksignal.h>
16 #include <int.h>
17 #include <listeners.h>
18 #include <scheduling_analysis.h>
19 #include <thread.h>
20 #include <util/AutoLock.h>
21 #include <util/atomic.h>
22 
23 
// Internal fWaitStatus marker values. Real wait results are status_t
// values <= 0, so these markers must stay positive (see the
// "fWaitStatus <= 0" checks in ConditionVariableEntry::Wait()).
#define STATUS_ADDED	1
#define STATUS_WAITING	2


// Number of slots in the published-condition-variable hash table.
static const int kConditionVariableHashSize = 512;
29 
30 
31 struct ConditionVariableHashDefinition {
32 	typedef const void* KeyType;
33 	typedef	ConditionVariable ValueType;
34 
35 	size_t HashKey(const void* key) const
36 		{ return (size_t)key; }
37 	size_t Hash(ConditionVariable* variable) const
38 		{ return (size_t)variable->fObject; }
39 	bool Compare(const void* key, ConditionVariable* variable) const
40 		{ return key == variable->fObject; }
41 	ConditionVariable*& GetLink(ConditionVariable* variable) const
42 		{ return variable->fNext; }
43 };
44 
// Global hash of all published condition variables, keyed by the object
// they were published for. Protected by sConditionVariableHashLock
// (readers take it shared, Publish()/Unpublish() take it exclusively).
typedef BOpenHashTable<ConditionVariableHashDefinition> ConditionVariableHash;
static ConditionVariableHash sConditionVariableHash;
static rw_spinlock sConditionVariableHashLock;
48 
49 
50 // #pragma mark - ConditionVariableEntry
51 
52 
53 ConditionVariableEntry::ConditionVariableEntry()
54 	: fVariable(NULL)
55 {
56 }
57 
58 
59 ConditionVariableEntry::~ConditionVariableEntry()
60 {
61 	// We can use an "unsafe" non-atomic access of fVariable here, since we only
62 	// care whether it is non-NULL, not what its specific value is.
63 	if (fVariable != NULL)
64 		_RemoveFromVariable();
65 }
66 
67 
/*!	Looks up the condition variable published under \a object and attaches
	this entry to it.
	Returns \c true on success, or \c false (with fWaitStatus set to
	\c B_ENTRY_NOT_FOUND) if no variable is published for \a object.
*/
bool
ConditionVariableEntry::Add(const void* object)
{
	ASSERT(object != NULL);

	InterruptsLocker _;
	ReadSpinLocker hashLocker(sConditionVariableHashLock);

	ConditionVariable* variable = sConditionVariableHash.Lookup(object);

	if (variable == NULL) {
		fWaitStatus = B_ENTRY_NOT_FOUND;
		return false;
	}

	// Lock the variable before dropping the hash lock, so it cannot be
	// unpublished or notified out from under us in between.
	SpinLocker variableLocker(variable->fLock);
	hashLocker.Unlock();

	_AddToLockedVariable(variable);

	return true;
}
90 
91 
92 ConditionVariable*
93 ConditionVariableEntry::Variable() const
94 {
95 	return atomic_pointer_get(&fVariable);
96 }
97 
98 
/*!	Attaches this entry to \a variable, whose lock must be held by the
	caller. Initializes fThread and fWaitStatus for the upcoming Wait().
*/
inline void
ConditionVariableEntry::_AddToLockedVariable(ConditionVariable* variable)
{
	ASSERT(fVariable == NULL);

	fThread = thread_get_current_thread();
	fVariable = variable;
	fWaitStatus = STATUS_ADDED;
	fVariable->fEntries.Add(this);
	// fEntriesCount is also read/written outside the variable's lock
	// (see _RemoveFromVariable()), hence the atomic increment.
	atomic_add(&fVariable->fEntriesCount, 1);
}
110 
111 
/*!	Detaches this entry from its condition variable, racing safely against
	a concurrent _NotifyLocked() on another CPU. Whoever atomically clears
	fThread first "owns" the removal; the loser of the race spins until the
	winner completes the hand-off (fVariable cleared / fEntriesCount
	decremented, respectively).
*/
void
ConditionVariableEntry::_RemoveFromVariable()
{
	// This section is critical because it can race with _NotifyLocked on the
	// variable's thread, so we must not be interrupted during it.
	InterruptsLocker _;

	// Cache the variable pointer before claiming the entry: once fThread is
	// cleared below, the notifier may invalidate fVariable at any time.
	ConditionVariable* variable = atomic_pointer_get(&fVariable);
	if (atomic_pointer_get_and_set(&fThread, (Thread*)NULL) == NULL) {
		// If fThread was already NULL, that means the variable is already
		// in the process of clearing us out (or already has finished doing so.)
		// We thus cannot access fVariable, and must spin until it is cleared.
		int32 tries = 0;
		while (atomic_pointer_get(&fVariable) != NULL) {
			tries++;
			if ((tries % 10000) == 0)
				dprintf("variable pointer was not unset for a long time!\n");
			cpu_pause();
		}

		return;
	}

	while (true) {
		if (atomic_pointer_get(&fVariable) == NULL) {
			// The variable must have cleared us out. Acknowledge this and return.
			atomic_add(&variable->fEntriesCount, -1);
			return;
		}

		// There is of course a small race between checking the pointer and then
		// the try_acquire in which the variable might clear out our fVariable.
		// However, in the case where we were the ones to clear fThread, the
		// variable will notice that and then wait for us to acknowledge the
		// removal by decrementing fEntriesCount, as we do above; and until
		// we do that, we may validly use our cached pointer to the variable.
		if (try_acquire_spinlock(&variable->fLock))
			break;
	}

	// We now hold the variable's lock. Remove ourselves.
	if (fVariable->fEntries.Contains(this))
		fVariable->fEntries.Remove(this);

	atomic_pointer_set(&fVariable, (ConditionVariable*)NULL);
	atomic_add(&variable->fEntriesCount, -1);
	release_spinlock(&variable->fLock);
}
160 
161 
/*!	Blocks until the variable this entry was added to is notified, the
	timeout expires, or (depending on \a flags) the wait is interrupted.
	\a flags may contain B_RELATIVE_TIMEOUT or B_ABSOLUTE_TIMEOUT (plus the
	usual thread-blocking flags); \a timeout is only used when one of the
	timeout flags is set. Must be called with interrupts enabled.
	Returns the notification result, the block result (e.g. B_TIMED_OUT),
	or B_WOULD_BLOCK for a non-positive relative timeout.
*/
status_t
ConditionVariableEntry::Wait(uint32 flags, bigtime_t timeout)
{
#if KDEBUG
	if (!are_interrupts_enabled()) {
		panic("ConditionVariableEntry::Wait() called with interrupts "
			"disabled, entry: %p, variable: %p", this, fVariable);
		return B_ERROR;
	}
#endif

	// Already detached (e.g. notified before we got here): the final wait
	// status was stored in fWaitStatus.
	ConditionVariable* variable = atomic_pointer_get(&fVariable);
	if (variable == NULL)
		return fWaitStatus;

	if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
		// The timeout has already passed; detach without blocking.
		_RemoveFromVariable();
		return B_WOULD_BLOCK;
	}

	InterruptsLocker _;
	SpinLocker schedulerLocker(thread_get_current_thread()->scheduler_lock);

	// A notification may have arrived since the check above; values <= 0
	// are final wait results (see STATUS_* definitions).
	if (fWaitStatus <= 0)
		return fWaitStatus;
	fWaitStatus = STATUS_WAITING;

	thread_prepare_to_block(thread_get_current_thread(), flags,
		THREAD_BLOCK_TYPE_CONDITION_VARIABLE, variable);

	schedulerLocker.Unlock();

	status_t error;
	if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) != 0)
		error = thread_block_with_timeout(flags, timeout);
	else
		error = thread_block();

	// Detach from the variable (a no-op hand-shake if the notifier already
	// removed us; see _RemoveFromVariable()).
	_RemoveFromVariable();
	return error;
}
203 
204 
205 status_t
206 ConditionVariableEntry::Wait(const void* object, uint32 flags,
207 	bigtime_t timeout)
208 {
209 	if (Add(object))
210 		return Wait(flags, timeout);
211 	return B_ENTRY_NOT_FOUND;
212 }
213 
214 
215 // #pragma mark - ConditionVariable
216 
217 
/*!	Initialization method for anonymous (unpublished) condition variables.
	\a object is an arbitrary tag used for matching/debugging; \a objectType
	is a human-readable type name shown by the debugger commands.
*/
void
ConditionVariable::Init(const void* object, const char* objectType)
{
	fObject = object;
	fObjectType = objectType;
	// Placement-construct the entry list; presumably Init() may be called
	// on memory whose constructors have not run — TODO confirm with callers.
	new(&fEntries) EntryList;
	fEntriesCount = 0;
	B_INITIALIZE_SPINLOCK(&fLock);

	T_SCHEDULING_ANALYSIS(InitConditionVariable(this, object, objectType));
	NotifyWaitObjectListeners(&WaitObjectListener::ConditionVariableInitialized,
		this);
}
233 
234 
/*!	Initializes this variable and publishes it in the global hash under
	\a object, so threads can reach it via the static interface
	(ConditionVariableEntry::Wait(object, ...), NotifyOne()/NotifyAll()).
	No variable may already be published for \a object.
*/
void
ConditionVariable::Publish(const void* object, const char* objectType)
{
	ASSERT(object != NULL);

	Init(object, objectType);

	InterruptsWriteSpinLocker _(sConditionVariableHashLock);

	ASSERT_PRINT(sConditionVariableHash.Lookup(object) == NULL,
		"condition variable: %p\n", sConditionVariableHash.Lookup(object));

	sConditionVariableHash.InsertUnchecked(this);
}
249 
250 
/*!	Removes this variable from the global hash and wakes all remaining
	waiters with B_ENTRY_NOT_FOUND.
*/
void
ConditionVariable::Unpublish()
{
	ASSERT(fObject != NULL);

	// Lock order: hash lock first, then our own lock (same as Add()/_Notify()).
	InterruptsLocker _;
	WriteSpinLocker hashLocker(sConditionVariableHashLock);
	SpinLocker selfLocker(fLock);

#if KDEBUG
	ConditionVariable* variable = sConditionVariableHash.Lookup(fObject);
	if (variable != this) {
		panic("Condition variable %p not published, found: %p", this, variable);
		return;
	}
#endif

	sConditionVariableHash.RemoveUnchecked(this);
	fObject = NULL;
	fObjectType = NULL;

	// The hash lock is no longer needed; we keep holding our own lock,
	// which _NotifyLocked() requires.
	hashLocker.Unlock();

	if (!fEntries.IsEmpty())
		_NotifyLocked(true, B_ENTRY_NOT_FOUND);
}
277 
278 
279 void
280 ConditionVariable::Add(ConditionVariableEntry* entry)
281 {
282 	InterruptsSpinLocker _(fLock);
283 	entry->_AddToLockedVariable(this);
284 }
285 
286 
287 status_t
288 ConditionVariable::Wait(uint32 flags, bigtime_t timeout)
289 {
290 	ConditionVariableEntry entry;
291 	Add(&entry);
292 	return entry.Wait(flags, timeout);
293 }
294 
295 
296 status_t
297 ConditionVariable::Wait(mutex* lock, uint32 flags, bigtime_t timeout)
298 {
299 	ConditionVariableEntry entry;
300 	Add(&entry);
301 	mutex_unlock(lock);
302 	status_t res = entry.Wait(flags, timeout);
303 	mutex_lock(lock);
304 	return res;
305 }
306 
307 
308 status_t
309 ConditionVariable::Wait(recursive_lock* lock, uint32 flags, bigtime_t timeout)
310 {
311 	ConditionVariableEntry entry;
312 	Add(&entry);
313 	int32 recursion = recursive_lock_get_recursion(lock);
314 
315 	for (int32 i = 0; i < recursion; i++)
316 		recursive_lock_unlock(lock);
317 
318 	status_t res = entry.Wait(flags, timeout);
319 
320 	for (int32 i = 0; i < recursion; i++)
321 		recursive_lock_lock(lock);
322 
323 	return res;
324 }
325 
326 
/*!	Wakes at most one thread waiting on the variable published for
	\a object, handing it \a result as its wait status. A no-op if no
	variable is published for \a object.
*/
/*static*/ void
ConditionVariable::NotifyOne(const void* object, status_t result)
{
	_Notify(object, false, result);
}
332 
333 
/*!	Wakes all threads waiting on the variable published for \a object,
	handing them \a result as their wait status. A no-op if no variable is
	published for \a object.
*/
/*static*/ void
ConditionVariable::NotifyAll(const void* object, status_t result)
{
	_Notify(object, true, result);
}
339 
340 
/*!	Looks up the variable published under \a object and notifies one or
	all of its waiters with \a result. Silently does nothing if no variable
	is published for \a object.
*/
/*static*/ void
ConditionVariable::_Notify(const void* object, bool all, status_t result)
{
	InterruptsLocker ints;
	ReadSpinLocker hashLocker(sConditionVariableHashLock);
	ConditionVariable* variable = sConditionVariableHash.Lookup(object);
	if (variable == NULL)
		return;
	// Lock the variable before releasing the hash lock, so it cannot be
	// unpublished in between.
	SpinLocker variableLocker(variable->fLock);
	hashLocker.Unlock();

	variable->_NotifyLocked(all, result);
}
354 
355 
356 void
357 ConditionVariable::_Notify(bool all, status_t result)
358 {
359 	InterruptsSpinLocker _(fLock);
360 
361 	if (!fEntries.IsEmpty()) {
362 		if (result > B_OK) {
363 			panic("tried to notify with invalid result %" B_PRId32 "\n", result);
364 			result = B_ERROR;
365 		}
366 
367 		_NotifyLocked(all, result);
368 	}
369 }
370 
371 
/*!	Dequeues and wakes one (\a all == false) or all waiting entries,
	handing them \a result as their wait status. Whoever atomically clears
	an entry's fThread — we or the waiter's _RemoveFromVariable() — "owns"
	that entry's removal; the fEntriesCount decrement acknowledges it.
	Called with interrupts disabled and the condition variable's spinlock held.
 */
void
ConditionVariable::_NotifyLocked(bool all, status_t result)
{
	// Dequeue and wake up the blocked threads.
	while (ConditionVariableEntry* entry = fEntries.RemoveHead()) {
		Thread* thread = atomic_pointer_get_and_set(&entry->fThread, (Thread*)NULL);
		if (thread == NULL) {
			// The entry must be in the process of trying to remove itself from us.
			// Clear its variable and wait for it to acknowledge this in fEntriesCount,
			// as it is the one responsible for decrementing that.
			const int32 oldCount = atomic_get(&fEntriesCount);
			atomic_pointer_set(&entry->fVariable, (ConditionVariable*)NULL);

			// As fEntriesCount is only modified while our lock is held, nothing else
			// will modify it while we are spinning, since we hold it at present.
			int32 tries = 0;
			while (atomic_get(&fEntriesCount) == oldCount) {
				tries++;
				if ((tries % 10000) == 0)
					dprintf("entries count was not decremented for a long time!\n");
				cpu_pause();
			}
		} else {
			SpinLocker schedulerLocker(thread->scheduler_lock);
			status_t lastWaitStatus = entry->fWaitStatus;
			entry->fWaitStatus = result;
			if (lastWaitStatus == STATUS_WAITING && thread->state != B_THREAD_WAITING) {
				// The thread is not in B_THREAD_WAITING state, so we must unblock it early,
				// in case it tries to re-block itself immediately after we unset fVariable.
				thread_unblock_locked(thread, result);
				lastWaitStatus = result;
			}

			// No matter what the thread is doing, as we were the ones to clear its
			// fThread, so we are the ones responsible for decrementing fEntriesCount.
			// (We may not validly access the entry once we unset its fVariable.)
			atomic_pointer_set(&entry->fVariable, (ConditionVariable*)NULL);
			atomic_add(&fEntriesCount, -1);

			// If the thread was in B_THREAD_WAITING state, we unblock it after unsetting
			// fVariable, because otherwise it will wake up before thread_unblock returns
			// and spin while waiting for us to do so.
			if (lastWaitStatus == STATUS_WAITING)
				thread_unblock_locked(thread, result);
		}

		if (!all)
			break;
	}
}
424 
425 
426 // #pragma mark -
427 
428 
429 /*static*/ void
430 ConditionVariable::ListAll()
431 {
432 	kprintf("  variable      object (type)                waiting threads\n");
433 	kprintf("------------------------------------------------------------\n");
434 	ConditionVariableHash::Iterator it(&sConditionVariableHash);
435 	while (ConditionVariable* variable = it.Next()) {
436 		// count waiting threads
437 		int count = variable->fEntries.Count();
438 
439 		kprintf("%p  %p  %-20s %15d\n", variable, variable->fObject,
440 			variable->fObjectType, count);
441 	}
442 }
443 
444 
445 void
446 ConditionVariable::Dump() const
447 {
448 	kprintf("condition variable %p\n", this);
449 	kprintf("  object:  %p (%s)\n", fObject, fObjectType);
450 	kprintf("  threads:");
451 
452 	for (EntryList::ConstIterator it = fEntries.GetIterator();
453 		 ConditionVariableEntry* entry = it.Next();) {
454 		kprintf(" %" B_PRId32, entry->fThread->id);
455 	}
456 	kprintf("\n");
457 }
458 
459 
// Debugger command "cvars": lists all published condition variables.
// Takes no arguments; argc/argv are unused.
static int
list_condition_variables(int argc, char** argv)
{
	ConditionVariable::ListAll();
	return 0;
}
466 
467 
468 static int
469 dump_condition_variable(int argc, char** argv)
470 {
471 	if (argc != 2) {
472 		print_debugger_command_usage(argv[0]);
473 		return 0;
474 	}
475 
476 	addr_t address = parse_expression(argv[1]);
477 	if (address == 0)
478 		return 0;
479 
480 	ConditionVariable* variable = sConditionVariableHash.Lookup((void*)address);
481 
482 	if (variable == NULL) {
483 		// It must be a direct pointer to a condition variable.
484 		variable = (ConditionVariable*)address;
485 	}
486 
487 	if (variable != NULL) {
488 		variable->Dump();
489 
490 		set_debug_variable("_cvar", (addr_t)variable);
491 		set_debug_variable("_object", (addr_t)variable->Object());
492 
493 	} else
494 		kprintf("no condition variable at or with key %p\n", (void*)address);
495 
496 	return 0;
497 }
498 
499 
500 // #pragma mark -
501 
502 
/*!	Called during kernel initialization: sets up the global condition
	variable hash and registers the "cvar"/"cvars" debugger commands.
*/
void
condition_variable_init()
{
	// Placement-construct the global hash — presumably because global
	// constructors have not yet run at this point of boot; confirm against
	// the kernel init sequence.
	new(&sConditionVariableHash) ConditionVariableHash;

	status_t error = sConditionVariableHash.Init(kConditionVariableHashSize);
	if (error != B_OK) {
		panic("condition_variable_init(): Failed to init hash table: %s",
			strerror(error));
	}

	add_debugger_command_etc("cvar", &dump_condition_variable,
		"Dump condition variable info",
		"<address>\n"
		"Prints info for the specified condition variable.\n"
		"  <address>  - Address of the condition variable or the object it is\n"
		"               associated with.\n", 0);
	add_debugger_command_etc("cvars", &list_condition_variables,
		"List condition variables",
		"\n"
		"Lists all published condition variables\n", 0);
}
525 
526 
527 ssize_t
528 debug_condition_variable_type_strlcpy(ConditionVariable* cvar, char* name, size_t size)
529 {
530 	const int32 typePointerOffset = offsetof(ConditionVariable, fObjectType);
531 
532 	const char* pointer;
533 	status_t status = debug_memcpy(B_CURRENT_TEAM, &pointer,
534 		(int8*)cvar + typePointerOffset, sizeof(const char*));
535 	if (status != B_OK)
536 		return status;
537 
538 	return debug_strlcpy(B_CURRENT_TEAM, name, pointer, size);
539 }
540