xref: /haiku/src/system/kernel/events/event_queue.cpp (revision caed67a8cba83913b9c21ac2b06ebc6bd1cb3111)
1 /*
2  * Copyright 2015, Hamish Morrison, hamishm53@gmail.com.
3  * Copyright 2023, Haiku, Inc. All rights reserved.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 #include <event_queue.h>
8 
9 #include <OS.h>
10 
11 #include <AutoDeleter.h>
12 
13 #include <fs/fd.h>
14 #include <port.h>
15 #include <sem.h>
16 #include <syscalls.h>
17 #include <syscall_restart.h>
18 #include <thread.h>
19 #include <util/AutoLock.h>
20 #include <util/AVLTree.h>
21 #include <util/DoublyLinkedList.h>
22 #include <AutoDeleterDrivers.h>
23 #include <StackOrHeapArray.h>
24 #include <wait_for_objects.h>
25 
26 #include "select_ops.h"
27 #include "select_sync.h"
28 
29 
30 enum {
31 	B_EVENT_QUEUED			= (1 << 28),
32 	B_EVENT_SELECTING		= (1 << 29),
33 	B_EVENT_DELETING		= (1 << 30),
34 	/* (signed) */
35 	B_EVENT_PRIVATE_MASK	= (0xf0000000)
36 };
37 
38 
// Extract only the behavior flags (how the event is handled by the queue).
#define EVENT_BEHAVIOR(events) ((events) & (B_EVENT_LEVEL_TRIGGERED | B_EVENT_ONE_SHOT))
// Strip the kernel-private flag bits, leaving only user-visible events.
#define USER_EVENTS(events) ((events) & ~B_EVENT_PRIVATE_MASK)

// Events that are always selected, regardless of what the caller asked for.
#define B_EVENT_NON_MASKABLE (B_EVENT_INVALID | B_EVENT_ERROR | B_EVENT_DISCONNECTED)
43 
44 
45 
// One registered selection on an event queue. It is simultaneously a
// select_info for the generic select framework, a node in the queue's
// (object, type) lookup tree, and a link in the queue's triggered-event list.
struct select_event : select_info, AVLTreeNode,
		DoublyLinkedListLinkImpl<select_event> {
	int32				object;		// ID of the monitored object
	uint16				type;		// object type (distinguishes ID namespaces)
	uint32				behavior;	// B_EVENT_LEVEL_TRIGGERED / B_EVENT_ONE_SHOT
	void*				user_data;	// opaque pointer reported with the event
};
53 
54 
55 struct EventQueueTreeDefinition {
56 	typedef struct {
57 		int32 object;
58 		uint16 type;
59 	} 						Key;
60 	typedef select_event	Value;
61 
62 	AVLTreeNode* GetAVLTreeNode(Value* value) const
63 	{
64 		return value;
65 	}
66 
67 	Value* GetValue(AVLTreeNode* node) const
68 	{
69 		return static_cast<Value*>(node);
70 	}
71 
72 	int Compare(Key a, const Value* b) const
73 	{
74 		if (a.object != b->object)
75 			return a.object - b->object;
76 		else
77 			return a.type - b->type;
78 	}
79 
80 	int Compare(const Value* a, const Value* b) const
81 	{
82 		if (a->object != b->object)
83 			return a->object - b->object;
84 		else
85 			return a->type - b->type;
86 	}
87 };
88 
89 
90 //	#pragma mark -- EventQueue implementation
91 
92 
/*
 * An event queue: a select_sync that monitors a set of (object, type) pairs
 * and collects their notifications for delivery to waiting threads.
 */
class EventQueue : public select_sync {
public:
						EventQueue(bool kernel);
						~EventQueue();

	// Called when the queue's file descriptor is closed; wakes all waiters.
	void				Closed();

	// Begin, inspect, or end monitoring of a single (object, type) pair.
	status_t			Select(int32 object, uint16 type, uint32 events, void* userData);
	status_t			Query(int32 object, uint16 type, uint32* selectedEvents, void** userData);
	status_t			Deselect(int32 object, uint16 type);

	// select_sync interface: invoked by the select framework on notification.
	status_t			Notify(select_info* info, uint16 events);

	// Wait for and dequeue up to numInfos pending events.
	ssize_t				Wait(event_wait_info* infos, int numInfos,
							int32 flags, bigtime_t timeout);

private:
	void				_Notify(select_event* event, uint16 events);
	status_t			_DeselectEvent(select_event* event);

	ssize_t				_DequeueEvents(event_wait_info* infos, int numInfos);

	select_event*		_GetEvent(int32 object, uint16 type);

private:
	typedef AVLTree<EventQueueTreeDefinition> EventTree;
	typedef DoublyLinkedList<select_event> EventList;

	bool				fKernel;	// select in the kernel's I/O context
	bool				fClosing;	// set by Closed(); waiters are turned away

	/*
	 * This flag is set in _DequeueEvents when we have to drop the lock to
	 * deselect an object to prevent another _DequeueEvents call concurrently
	 * modifying the list.
	 */
	bool				fDequeueing;

	EventList			fEventList;	// events with pending notifications
	EventTree			fEventTree;	// all events, keyed by (object, type)

	/*
	 * Protects the queue. We cannot call select or deselect while holding
	 * this, because it will invert the locking order with EventQueue::Notify.
	 */
	mutex				fQueueLock;

	/*
	 * Notified when events are available on the queue.
	 */
	ConditionVariable	fQueueCondition;

	/*
	 * Used to wait on a changing select_event while the queue lock is dropped
	 * during a call to select/deselect.
	 */
	ConditionVariable	fEventCondition;
};
151 
152 
/*
 * Create an empty event queue. "kernel" records whether select/deselect
 * operations run against the kernel's I/O context rather than the caller's.
 */
EventQueue::EventQueue(bool kernel)
	:
	fKernel(kernel),
	fClosing(false),
	fDequeueing(false)
{
	mutex_init(&fQueueLock, "event_queue lock");
	fQueueCondition.Init(this, "evtq wait");
	fEventCondition.Init(this, "event_queue event change wait");
}
163 
164 
/*
 * Tear down the queue: deselect and destroy every remaining event. Must only
 * run after Closed() and with no thread inside _DequeueEvents() (asserted).
 */
EventQueue::~EventQueue()
{
	mutex_lock(&fQueueLock);
	ASSERT(fClosing && !fDequeueing);

	EventTree::Iterator iter = fEventTree.GetIterator();
	while (iter.HasNext()) {
		select_event* event = iter.Next();
		// Mark the event so notifications arriving meanwhile are ignored.
		event->events |= B_EVENT_DELETING;

		// Drop the lock around deselection to preserve the locking order
		// with Notify() (which acquires fQueueLock itself).
		mutex_unlock(&fQueueLock);
		_DeselectEvent(event);
		mutex_lock(&fQueueLock);

		iter.Remove();
		if ((event->events & B_EVENT_QUEUED) != 0)
			fEventList.Remove(event);
		delete event;
	}

	EventList::Iterator listIter = fEventList.GetIterator();
	while (listIter.HasNext()) {
		select_event* event = listIter.Next();

		// We already removed all events in the tree from this list.
		// The only remaining events will be INVALID ones already deselected.
		delete event;
	}

	mutex_destroy(&fQueueLock);
}
196 
197 
198 void
199 EventQueue::Closed()
200 {
201 	MutexLocker locker(&fQueueLock);
202 
203 	fClosing = true;
204 	locker.Unlock();
205 
206 	// Wake up all waiters
207 	fQueueCondition.NotifyAll(B_FILE_ERROR);
208 }
209 
210 
/*
 * Begin monitoring (object, type) for "events"; userData is reported with
 * each dequeued event. If the pair is already monitored with a different
 * mask or behavior, the old selection is replaced by deselect + reselect.
 */
status_t
EventQueue::Select(int32 object, uint16 type, uint32 events, void* userData)
{
	MutexLocker locker(&fQueueLock);

	select_event* event = _GetEvent(object, type);
	if (event != NULL) {
		// Nothing to do if the existing selection matches exactly.
		if ((event->selected_events | event->behavior)
				== (USER_EVENTS(events) | B_EVENT_NON_MASKABLE))
			return B_OK;

		// Rather than try to reuse the event object, which would be complicated
		// and error-prone, perform a full de-selection and then re-selection.
		locker.Unlock();
		status_t status = Deselect(object, type);
		if (status != B_OK)
			return status;
		locker.Lock();

		// Make sure nothing else re-selected before we reacquired the lock.
		event = _GetEvent(object, type);
		if (event != NULL)
			return EEXIST;
	}

	event = new(std::nothrow) select_event;
	if (event == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<select_event> eventDeleter(event);

	event->sync = this;
	event->object = object;
	event->type = type;
	event->behavior = EVENT_BEHAVIOR(events);
	event->user_data = userData;
	event->events = 0;

	status_t result = fEventTree.Insert(event);
	if (result != B_OK)
		return result;

	// We drop the lock before calling select() to avoid inverting the
	// locking order with Notify(). Setting the B_EVENT_SELECTING flag prevents
	// the event from being used or even deleted before it is ready.
	event->events |= B_EVENT_SELECTING;
	event->selected_events = USER_EVENTS(events) | B_EVENT_NON_MASKABLE;

	locker.Unlock();

	status_t status = select_object(event->type, event->object, event, fKernel);
	if (status < 0) {
		// Selection failed: undo the insert and wake threads in _GetEvent()
		// that were waiting on B_EVENT_SELECTING. eventDeleter frees the event.
		locker.Lock();
		fEventTree.Remove(event);
		fEventCondition.NotifyAll();
		return status;
	}

	eventDeleter.Detach();

	// Selection complete: clear the flag and wake any waiting _GetEvent().
	atomic_and(&event->events, ~B_EVENT_SELECTING);
	fEventCondition.NotifyAll();

	return B_OK;
}
275 
276 
277 status_t
278 EventQueue::Query(int32 object, uint16 type, uint32* selectedEvents, void** userData)
279 {
280 	MutexLocker locker(&fQueueLock);
281 
282 	select_event* event = _GetEvent(object, type);
283 	if (event == NULL)
284 		return B_ENTRY_NOT_FOUND;
285 
286 	*selectedEvents = event->selected_events | event->behavior;
287 	*userData = event->user_data;
288 
289 	return B_OK;
290 }
291 
292 
/*
 * Stop monitoring (object, type) and destroy its select_event. The first
 * caller to set B_EVENT_DELETING wins the right to delete; any later caller
 * returns B_OK immediately and lets the winner finish.
 */
status_t
EventQueue::Deselect(int32 object, uint16 type)
{
	MutexLocker locker(&fQueueLock);

	select_event* event = _GetEvent(object, type);
	if (event == NULL)
		return B_ENTRY_NOT_FOUND;

	// Claim the deletion; if the flag was already set, someone else has it.
	if ((atomic_or(&event->events, B_EVENT_DELETING) & B_EVENT_DELETING) != 0)
		return B_OK;

	// Drop the lock around deselection to preserve the locking order with
	// Notify(); B_EVENT_DELETING keeps everyone else away meanwhile.
	locker.Unlock();
	_DeselectEvent(event);
	locker.Lock();

	// An INVALID event has already been removed from the tree by _Notify().
	if ((event->events & B_EVENT_INVALID) == 0)
		fEventTree.Remove(event);
	if ((event->events & B_EVENT_QUEUED) != 0)
		fEventList.Remove(event);

	delete event;

	locker.Unlock();
	// Wake threads in _GetEvent() waiting on this event's state to settle.
	fEventCondition.NotifyAll();
	return B_OK;
}
320 
321 
/*
 * Unregister the event from the monitored object via the generic select
 * framework. Must be called without fQueueLock held (locking order).
 */
status_t
EventQueue::_DeselectEvent(select_event* event)
{
	return deselect_object(event->type, event->object, event, fKernel);
}
327 
328 
329 status_t
330 EventQueue::Notify(select_info* info, uint16 events)
331 {
332 	select_event* event = static_cast<select_event*>(info);
333 	_Notify(event, events);
334 	return B_OK;
335 }
336 
337 
/*
 * Handle a notification for "event": record the newly signalled events and,
 * if the event is not yet queued (or just became invalid), queue it and wake
 * waiters. Called by the select framework, possibly without fQueueLock held.
 */
void
EventQueue::_Notify(select_event* event, uint16 events)
{
	// Ignore events that were not selected (the non-maskable ones are always
	// part of selected_events).
	if ((events & event->selected_events) == 0)
		return;

	// Record everything except B_EVENT_INVALID, which is only set below,
	// under the lock, together with the tree removal.
	const int32 previousEvents = atomic_or(&event->events, (events & ~B_EVENT_INVALID));

	// If the event is already being deleted, we should ignore this notification.
	if ((previousEvents & B_EVENT_DELETING) != 0)
		return;

	// If the event is already queued, and it is not becoming invalid,
	// we don't need to do anything more.
	if ((previousEvents & B_EVENT_QUEUED) != 0 && (events & B_EVENT_INVALID) == 0)
		return;

	{
		MutexLocker _(&fQueueLock);

		// We need to recheck B_EVENT_DELETING now we have the lock.
		if ((event->events & B_EVENT_DELETING) != 0)
			return;

		// If we get B_EVENT_INVALID it means the object we were monitoring was
		// deleted. The object's ID may now be reused, so we must remove it
		// from the event tree.
		if ((events & B_EVENT_INVALID) != 0) {
			atomic_or(&event->events, B_EVENT_INVALID);
			fEventTree.Remove(event);
		}

		// If it's not already queued, it's our responsibility to queue it.
		if ((atomic_or(&event->events, B_EVENT_QUEUED) & B_EVENT_QUEUED) == 0) {
			fEventList.Add(event);
			fQueueCondition.NotifyAll();
		}
	}
}
377 
378 
/*
 * Wait for pending events and dequeue up to numInfos of them into "infos".
 * Returns the number of events dequeued, B_OK for a numInfos == 0 probe once
 * events are pending, B_FILE_ERROR after the queue is closed, or the wait
 * error. The timeout is absolute (asserted below); 0 means no deadline.
 */
ssize_t
EventQueue::Wait(event_wait_info* infos, int numInfos,
	int32 flags, bigtime_t timeout)
{
	// Callers must have converted any relative timeout to an absolute one.
	ASSERT((flags & B_ABSOLUTE_TIMEOUT) != 0
		|| (timeout == B_INFINITE_TIMEOUT || timeout == 0));

	MutexLocker queueLocker(&fQueueLock);

	ssize_t count = 0;
	while (timeout == 0 || (system_time() < timeout)) {
		// Sleep until events are pending and no other thread is dequeueing.
		while ((fDequeueing || fEventList.IsEmpty()) && !fClosing) {
			status_t status = fQueueCondition.Wait(queueLocker.Get(),
				flags | B_CAN_INTERRUPT, timeout);
			if (status != B_OK)
				return status;
		}

		if (fClosing)
			return B_FILE_ERROR;

		// A zero-sized wait only reports that events are pending.
		if (numInfos == 0)
			return B_OK;

		// Exclude concurrent dequeuers while _DequeueEvents() may drop the lock.
		fDequeueing = true;
		count = _DequeueEvents(infos, numInfos);
		fDequeueing = false;

		if (count != 0)
			break;

		// Due to level-triggered events, it is possible for the event list to have
		// been not empty and _DequeueEvents() still returns nothing. Hence, we loop.
	}

	return count;
}
416 
417 
/*
 * Pull pending events off the list and fill "infos" with up to numInfos of
 * them; returns the count. Called with fQueueLock held and fDequeueing set;
 * the lock is dropped temporarily to re-arm level-triggered events and to
 * deselect one-shot ones.
 */
ssize_t
EventQueue::_DequeueEvents(event_wait_info* infos, int numInfos)
{
	ssize_t count = 0;

	// One-shot events are collected here and deselected in a single batch
	// after the loop, so the lock only needs to be dropped once for them.
	const int32 kMaxToDeselect = 8;
	select_event* deselect[kMaxToDeselect];
	int32 deselectCount = 0;

	// Add a marker element, so we don't loop forever after unlocking the list.
	// (There is only one invocation of _DequeueEvents() at a time.)
	select_event marker = {};
	fEventList.Add(&marker);

	for (select_event* event = NULL; count < numInfos; ) {
		if (fEventList.Head() == NULL || fEventList.Head() == &marker)
			break;

		// Dequeue the event and atomically consume the events we will report.
		event = fEventList.RemoveHead();
		int32 events = atomic_and(&event->events,
			~(event->selected_events | B_EVENT_QUEUED));

		// Skip events another thread is concurrently deleting.
		if ((events & B_EVENT_DELETING) != 0)
			continue;

		if ((events & B_EVENT_INVALID) == 0
				&& (event->behavior & B_EVENT_LEVEL_TRIGGERED) != 0) {
			// This event is level-triggered. We need to deselect and reselect it,
			// as its state may have changed since we were notified.
			const select_event tmp = *event;

			// Deselect()/Select() take fQueueLock themselves; release it first.
			mutex_unlock(&fQueueLock);
			status_t status = Deselect(tmp.object, tmp.type);
			if (status == B_OK) {
				// The old event object is destroyed; do not touch it below.
				event = NULL;
				status = Select(tmp.object, tmp.type,
					tmp.selected_events | tmp.behavior, tmp.user_data);
			}
			mutex_lock(&fQueueLock);

			if (status == B_OK) {
				// Is the event still queued?
				event = _GetEvent(tmp.object, tmp.type);
				if (event == NULL)
					continue;
				events = atomic_get(&event->events);
				if ((events & B_EVENT_QUEUED) == 0)
					continue;
			} else if (event == NULL) {
				continue;
			}
		}

		infos[count].object = event->object;
		infos[count].type = event->type;
		infos[count].user_data = event->user_data;
		infos[count].events = USER_EVENTS(events);
		count++;

		// All logic past this point has to do with deleting events.
		if ((events & B_EVENT_INVALID) == 0 && (event->behavior & B_EVENT_ONE_SHOT) == 0)
			continue;

		// Check if the event was requeued.
		if ((atomic_and(&event->events, ~B_EVENT_QUEUED) & B_EVENT_QUEUED) != 0)
			fEventList.Remove(event);

		if ((events & B_EVENT_INVALID) != 0) {
			// The event will already have been removed from the tree.
			delete event;
		} else if ((event->behavior & B_EVENT_ONE_SHOT) != 0) {
			// We already checked B_EVENT_INVALID above, so we don't need to again.
			fEventTree.Remove(event);
			event->events = B_EVENT_DELETING;

			deselect[deselectCount++] = event;
			if (deselectCount == kMaxToDeselect)
				break;
		}
	}

	fEventList.Remove(&marker);

	if (deselectCount != 0) {
		// Batch-deselect the one-shot events; they are no longer reachable
		// from the tree or list, so dropping the lock here is safe.
		mutex_unlock(&fQueueLock);
		for (int32 i = 0; i < deselectCount; i++) {
			select_event* event = deselect[i];

			_DeselectEvent(event);
			delete event;
		}
		mutex_lock(&fQueueLock);

		// We don't need to notify waiters, as we removed the events
		// from anywhere they could be found before dropping the lock.
	}

	return count;
}
517 
518 
519 /*
520  * Get the select_event for the given object and type. Must be called with the
521  * queue lock held. This method will sleep if the event is undergoing selection
522  * or deletion.
523  */
524 select_event*
525 EventQueue::_GetEvent(int32 object, uint16 type)
526 {
527 	EventQueueTreeDefinition::Key key = { object, type };
528 
529 	while (true) {
530 		select_event* event = fEventTree.Find(key);
531 		if (event == NULL)
532 			return NULL;
533 
534 		if ((event->events & (B_EVENT_SELECTING | B_EVENT_DELETING)) == 0)
535 			return event;
536 
537 		fEventCondition.Wait(&fQueueLock);
538 
539 		// At this point the select_event might have been deleted, so we
540 		// need to refetch it.
541 	}
542 }
543 
544 
545 //	#pragma mark -- File descriptor ops
546 
547 
548 
549 static status_t
550 event_queue_close(file_descriptor* descriptor)
551 {
552 	EventQueue* queue = (EventQueue*)descriptor->cookie;
553 	queue->Closed();
554 	return B_OK;
555 }
556 
557 
558 static void
559 event_queue_free(file_descriptor* descriptor)
560 {
561 	EventQueue* queue = (EventQueue*)descriptor->cookie;
562 	put_select_sync(queue);
563 }
564 
565 
// Fetch and validate the file_descriptor for "fd" into "descriptor",
// returning get_queue_descriptor()'s error from the enclosing function
// if it is not an event queue descriptor.
#define GET_QUEUE_FD_OR_RETURN(fd, kernel, descriptor)	\
	do {												\
		status_t getError = get_queue_descriptor(fd, kernel, descriptor); \
		if (getError != B_OK)							\
			return getError;							\
	} while (false)
572 
573 
// File descriptor operations for event queue FDs. Only close and free are
// provided; the remaining fd_ops members are zero-initialized by the
// aggregate initialization (presumably treated as unsupported — the ops
// table is also how get_queue_descriptor() recognizes our descriptors).
static struct fd_ops sEventQueueFDOps = {
	&event_queue_close,
	&event_queue_free
};
578 
579 
580 static status_t
581 get_queue_descriptor(int fd, bool kernel, file_descriptor*& descriptor)
582 {
583 	if (fd < 0)
584 		return B_FILE_ERROR;
585 
586 	descriptor = get_fd(get_current_io_context(kernel), fd);
587 	if (descriptor == NULL)
588 		return B_FILE_ERROR;
589 
590 	if (descriptor->ops != &sEventQueueFDOps) {
591 		put_fd(descriptor);
592 		return B_BAD_VALUE;
593 	}
594 
595 	return B_OK;
596 }
597 
598 
599 //	#pragma mark - User syscalls
600 
601 
/*
 * Syscall: create a new event queue and return a file descriptor for it.
 * openFlags is merged into the descriptor's open mode (O_CLOEXEC is also
 * honored). Returns the new fd or a negative error code.
 */
int
_user_event_queue_create(int openFlags)
{
	EventQueue* queue = new(std::nothrow) EventQueue(false);
	if (queue == NULL)
		return B_NO_MEMORY;

	// Frees the queue on any early-return error path below.
	ObjectDeleter<EventQueue> deleter(queue);

	file_descriptor* descriptor = alloc_fd();
	if (descriptor == NULL)
		return B_NO_MEMORY;

	descriptor->ops = &sEventQueueFDOps;
	descriptor->cookie = (struct event_queue*)queue;
	descriptor->open_mode = O_RDWR | openFlags;

	io_context* context = get_current_io_context(false);
	int fd = new_fd(context, descriptor);
	if (fd < 0) {
		// new_fd() did not take ownership; the deleter releases the queue.
		free(descriptor);
		return fd;
	}

	mutex_lock(&context->io_mutex);
	fd_set_close_on_exec(context, fd, (openFlags & O_CLOEXEC) != 0);
	mutex_unlock(&context->io_mutex);

	// The descriptor now owns the queue reference.
	deleter.Detach();
	return fd;
}
633 
634 
635 status_t
636 _user_event_queue_select(int queue, event_wait_info* userInfos, int numInfos)
637 {
638 	if (numInfos <= 0)
639 		return B_BAD_VALUE;
640 	if (userInfos == NULL || !IS_USER_ADDRESS(userInfos))
641 		return B_BAD_ADDRESS;
642 
643 	BStackOrHeapArray<event_wait_info, 16> infos(numInfos);
644 	if (!infos.IsValid())
645 		return B_NO_MEMORY;
646 
647 	file_descriptor* descriptor;
648 	GET_QUEUE_FD_OR_RETURN(queue, false, descriptor);
649 	FileDescriptorPutter _(descriptor);
650 
651 	EventQueue* eventQueue = (EventQueue*)descriptor->cookie;
652 
653 	if (user_memcpy(infos, userInfos, sizeof(event_wait_info) * numInfos) != B_OK)
654 		return B_BAD_ADDRESS;
655 
656 	status_t result = B_OK;
657 
658 	for (int i = 0; i < numInfos; i++) {
659 		status_t error;
660 		if (infos[i].events > 0) {
661 			error = eventQueue->Select(infos[i].object, infos[i].type,
662 				infos[i].events, infos[i].user_data);
663 		} else if (infos[i].events < 0) {
664 			uint32 selectedEvents = 0;
665 			error = eventQueue->Query(infos[i].object, infos[i].type,
666 				&selectedEvents, &infos[i].user_data);
667 			if (error == B_OK) {
668 				infos[i].events = selectedEvents;
669 				error = user_memcpy(&userInfos[i], &infos[i], sizeof(event_wait_info));
670 			}
671 		} else /* == 0 */ {
672 			error = eventQueue->Deselect(infos[i].object, infos[i].type);
673 		}
674 
675 		if (error != B_OK) {
676 			user_memcpy(&userInfos[i].events, &error, sizeof(&userInfos[i].events));
677 			result = B_ERROR;
678 		}
679 	}
680 
681 	return result;
682 }
683 
684 
/*
 * Syscall: wait on an event queue and copy up to numInfos dequeued events
 * back to userland. Returns the number of events, or an error / restart
 * status on timeout or interruption.
 */
ssize_t
_user_event_queue_wait(int queue, event_wait_info* userInfos, int numInfos,
	uint32 flags, bigtime_t timeout)
{
	// NOTE(review): presumably normalizes flags/timeout for syscall restart
	// (see syscall_restart.h) — confirm it updates them for Wait() below.
	syscall_restart_handle_timeout_pre(flags, timeout);

	if (numInfos < 0)
		return B_BAD_VALUE;
	if (numInfos > 0 && (userInfos == NULL || !IS_USER_ADDRESS(userInfos)))
		return B_BAD_ADDRESS;

	BStackOrHeapArray<event_wait_info, 16> infos(numInfos);
	if (!infos.IsValid())
		return B_NO_MEMORY;

	file_descriptor* descriptor;
	GET_QUEUE_FD_OR_RETURN(queue, false, descriptor);
	FileDescriptorPutter _(descriptor);

	EventQueue* eventQueue = (EventQueue*)descriptor->cookie;

	ssize_t result = eventQueue->Wait(infos, numInfos, flags, timeout);
	if (result < 0)
		return syscall_restart_handle_timeout_post(result, timeout);

	// Copy the dequeued events back out (numInfos == 0 was just a probe).
	status_t status = B_OK;
	if (numInfos != 0)
		status = user_memcpy(userInfos, infos, sizeof(event_wait_info) * numInfos);

	return status == B_OK ? result : status;
}
716