xref: /haiku/src/add-ons/kernel/generic/dpc/dpc.c (revision 13581b3d2a71545960b98fefebc5225b5bf29072)
/*
 * Copyright 2007-2010, Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Philippe Houdoin, philippe.houdoin@free.fr
 */


//!	Deferred Procedure Call support kernel module

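/*
 * A rough usage sketch for driver writers (not part of this module's code;
 * error checking omitted). The hook my_dpc_hook, the cookie and the queue
 * name are made up for illustration, and the member names are assumed to
 * match the dpc_module_info hooks exported at the bottom of this file:
 *
 *	static dpc_module_info *gDPC;
 *	static void *gDPCQueue;
 *
 *	// driver init: get the module and create a private DPC queue
 *	get_module(B_DPC_MODULE_NAME, (module_info **)&gDPC);
 *	gDPC->new_dpc_queue(&gDPCQueue, "mydriver dpc", B_NORMAL_PRIORITY);
 *
 *	// later, typically from an interrupt handler
 *	gDPC->queue_dpc(gDPCQueue, my_dpc_hook, cookie);
 *
 *	// driver teardown
 *	gDPC->delete_dpc_queue(gDPCQueue);
 *	put_module(B_DPC_MODULE_NAME);
 */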

#include <KernelExport.h>

#include <stdio.h>
#include <stdlib.h>

#include <dpc.h>


// Private DPC queue structures
typedef struct {
	dpc_func	function;
	void		*arg;
} dpc_slot;


typedef struct {
	thread_id	thread;		// worker thread draining the queue
	sem_id		wakeup_sem;	// released once per queued call
	spinlock	lock;		// protects count, head and tail
	int			size;		// ring buffer capacity
	int			count;		// slots currently queued
	int			head;		// next slot to run
	int			tail;		// next free slot
	dpc_slot	slots[0];
	// 'size' slots follow the header above
} dpc_queue;

#define DPC_QUEUE_SIZE 64

static int32
dpc_thread(void *arg)
{
	dpc_queue *queue = arg;
	dpc_slot dpc;

	// Wait forever (or until semaphore death) for a new DPC slot to show up
	while (acquire_sem(queue->wakeup_sem) == B_OK) {
		cpu_status former;

		// grab the next dpc slot
		former = disable_interrupts();
		acquire_spinlock(&queue->lock);

		dpc = queue->slots[queue->head];
		queue->head = (queue->head + 1) % queue->size;
		queue->count--;

		release_spinlock(&queue->lock);
		restore_interrupts(former);

		dpc.function(dpc.arg);
	}

	// The semaphore is gone: run the pending DPCs, if any.
	// Otherwise, resources could leak...
	while (queue->count--) {
		dpc = queue->slots[queue->head];
		queue->head = (queue->head + 1) % queue->size;
		dpc.function(dpc.arg);
	}

	// Now, let's die quietly, ignored by all... sigh.
	return 0;
}


// #pragma mark - public API


static status_t
new_dpc_queue(void **handle, const char *name, int32 priority)
{
	char str[64];
	dpc_queue *queue;

	if (!handle)
		return B_BAD_VALUE;

	queue = malloc(sizeof(dpc_queue) + DPC_QUEUE_SIZE * sizeof(dpc_slot));
	if (!queue)
		return B_NO_MEMORY;

	queue->head = queue->tail = 0;
	queue->size = DPC_QUEUE_SIZE;
	queue->count = 0;
	B_INITIALIZE_SPINLOCK(&queue->lock);

	// Leave room for the "_wakeup_sem" suffix and the terminating NUL
	snprintf(str, sizeof(str), "%.*s_wakeup_sem",
		(int) sizeof(str) - 12, name);

	queue->wakeup_sem = create_sem(0, str);
	if (queue->wakeup_sem < B_OK) {
		status_t status = queue->wakeup_sem;
		free(queue);
		return status;
	}

	// Spawn a kernel thread to actually handle (i.e. call!)
	// the queued/deferred procedure calls
	queue->thread = spawn_kernel_thread(dpc_thread, name, priority, queue);
	if (queue->thread < 0) {
		status_t status = queue->thread;
		delete_sem(queue->wakeup_sem);
		free(queue);
		return status;
	}
	resume_thread(queue->thread);

	*handle = queue;

	return B_OK;
}


static status_t
delete_dpc_queue(void *handle)
{
	dpc_queue *queue = handle;
	thread_id thread;
	status_t exit_value;
	cpu_status former;

	if (!queue)
		return B_BAD_VALUE;

	// Close the queue: mark it so queue_dpc() knows we're going away
	former = disable_interrupts();
	acquire_spinlock(&queue->lock);

	thread = queue->thread;
	queue->thread = -1;

	release_spinlock(&queue->lock);
	restore_interrupts(former);

	// Wake up the thread by murdering its favorite semaphore
	delete_sem(queue->wakeup_sem);
	wait_for_thread(thread, &exit_value);

	free(queue);

	return B_OK;
}


static status_t
queue_dpc(void *handle, dpc_func function, void *arg)
{
	dpc_queue *queue = handle;
	cpu_status former;
	status_t status = B_OK;

	if (!queue || !function)
		return B_BAD_VALUE;

	// Disable interrupts so it's safe to call this from interrupt handlers:
	former = disable_interrupts();
	acquire_spinlock(&queue->lock);

	if (queue->thread < 0) {
		// Queue thread is dying...
		status = B_CANCELED;
	} else if (queue->count == queue->size) {
		// This DPC queue is full, sorry
		status = B_NO_MEMORY;
	} else {
		queue->slots[queue->tail].function = function;
		queue->slots[queue->tail].arg = arg;
		queue->tail = (queue->tail + 1) % queue->size;
		queue->count++;
	}

	release_spinlock(&queue->lock);
	restore_interrupts(former);

	if (status == B_OK) {
		// Wake up the corresponding dpc thread.
		// Notice that interrupt handlers should return B_INVOKE_SCHEDULER to
		// shorten DPC latency as much as possible...
		status = release_sem_etc(queue->wakeup_sem, 1, B_DO_NOT_RESCHEDULE);
	}

	return status;
}
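/*
 * Hypothetical caller sketch (my_device, device_interrupt_pending(),
 * device_acknowledge_interrupt(), my_dpc_hook and the gDPC/gDPCQueue
 * globals are illustrative only): an interrupt handler queues the heavy
 * work here and returns B_INVOKE_SCHEDULER so the DPC thread runs with
 * as little latency as possible.
 *
 *	static int32
 *	my_interrupt_handler(void *data)
 *	{
 *		my_device *device = data;
 *
 *		if (!device_interrupt_pending(device))
 *			return B_UNHANDLED_INTERRUPT;
 *
 *		device_acknowledge_interrupt(device);
 *		gDPC->queue_dpc(gDPCQueue, my_dpc_hook, device);
 *
 *		return B_INVOKE_SCHEDULER;
 *	}
 */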


static status_t
std_ops(int32 op, ...)
{
	switch (op) {
		case B_MODULE_INIT:
			return B_OK;
		case B_MODULE_UNINIT:
			return B_OK;

		default:
			return B_ERROR;
	}
}


static dpc_module_info sDPCModule = {
	{
		B_DPC_MODULE_NAME,
		0,
		std_ops
	},

	new_dpc_queue,
	delete_dpc_queue,
	queue_dpc
};


module_info *modules[] = {
	(module_info *) &sDPCModule,
	NULL
};