/* Deferred Procedure Call support kernel module
 *
 * Copyright 2007, Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *	Philippe Houdoin, philippe.houdoin@free.fr
 */

#include <KernelExport.h>
#include <stdlib.h>
#include <string.h>	// For strncpy(), strncat(), strlen()

#ifndef COMPILE_FOR_R5
#include <stdio.h>	// For snprintf()
#endif

#include <dpc.h>

// Private DPC queue structures
typedef struct {
	dpc_func function;
	void *arg;
} dpc_slot;


typedef struct {
	thread_id thread;
	sem_id wakeup_sem;
	spinlock lock;
	int size;
	int count;
	int head;
	int tail;
	dpc_slot slots[0];
	// size * slots follow
} dpc_queue;


static int32
dpc_thread(void *arg)
{
	dpc_queue *queue = arg;
	dpc_slot dpc;

	// Let's wait forever, or until semaphore death, for a new DPC slot to show up
	while (acquire_sem(queue->wakeup_sem) == B_OK) {
		cpu_status former;

		// Grab the next DPC slot
		former = disable_interrupts();
		acquire_spinlock(&queue->lock);

		dpc = queue->slots[queue->head];
		queue->head = (queue->head + 1) % queue->size;
		queue->count--;

		release_spinlock(&queue->lock);
		restore_interrupts(former);

		dpc.function(dpc.arg);
	}

	// Let's finish the pending DPCs, if any.
	// Otherwise, resources could leak...
	while (queue->count--) {
		dpc = queue->slots[queue->head];
		queue->head = (queue->head + 1) % queue->size;
		dpc.function(dpc.arg);
	}

	// Now, let's die quietly, ignored by all... sigh.
	return 0;
}


// ---- Public API

static void *
new_dpc_queue(const char *name, long priority, int queue_size)
{
	char str[64];
	dpc_queue *queue;

	queue = malloc(sizeof(dpc_queue) + queue_size * sizeof(dpc_slot));
	if (!queue)
		return NULL;

	queue->head = queue->tail = 0;
	queue->size = queue_size;
	queue->count = 0;
	queue->lock = 0;	// Init the spinlock

#ifdef COMPILE_FOR_R5
	strncpy(str, name, sizeof(str) - 1);
	str[sizeof(str) - 1] = '\0';
	strncat(str, "_wakeup_sem", sizeof(str) - strlen(str) - 1);
#else
	snprintf(str, sizeof(str), "%.*s_wakeup_sem", (int) sizeof(str) - 12, name);
#endif

	queue->wakeup_sem = create_sem(0, str);
	if (queue->wakeup_sem < B_OK) {
		free(queue);
		return NULL;
	}
	set_sem_owner(queue->wakeup_sem, B_SYSTEM_TEAM);

	// Spawn a kernel thread to actually handle (i.e. call)
	// the queued/deferred procedure calls
	queue->thread = spawn_kernel_thread(dpc_thread, name, priority, queue);
	if (queue->thread < 0) {
		delete_sem(queue->wakeup_sem);
		free(queue);
		return NULL;
	}
	resume_thread(queue->thread);

	return queue;
}


static status_t
delete_dpc_queue(void *handle)
{
	dpc_queue *queue = handle;
	thread_id thread;
	status_t exit_value;
	cpu_status former;

	if (!queue)
		return B_BAD_VALUE;

	// Close the queue: queue_dpc() should know we're closing:
	former = disable_interrupts();
	acquire_spinlock(&queue->lock);

	thread = queue->thread;
	queue->thread = -1;

	release_spinlock(&queue->lock);
	restore_interrupts(former);

	// Wake up the thread by murdering its favorite semaphore
	delete_sem(queue->wakeup_sem);
	wait_for_thread(thread, &exit_value);

	free(queue);

	return B_OK;
}


static status_t
queue_dpc(void *handle, dpc_func function, void *arg)
{
	dpc_queue *queue = handle;
	cpu_status former;
	status_t status = B_OK;

	if (!queue || !function)
		return B_BAD_VALUE;

	// Try to be safe when called from interrupt handlers:
	former = disable_interrupts();
	acquire_spinlock(&queue->lock);

	if (queue->thread < 0) {
		// Queue thread is dying...
		status = B_CANCELED;
	} else if (queue->count == queue->size) {
		// This DPC queue is full, sorry
		status = B_NO_MEMORY;
	} else {
		queue->slots[queue->tail].function = function;
		queue->slots[queue->tail].arg = arg;
		queue->tail = (queue->tail + 1) % queue->size;
		queue->count++;
	}

	release_spinlock(&queue->lock);
	restore_interrupts(former);

	if (status == B_OK) {
		// Wake up the corresponding DPC thread.
		// Notice that interrupt handlers should return B_INVOKE_SCHEDULER to
		// shorten DPC latency as much as possible...
		status = release_sem_etc(queue->wakeup_sem, 1, B_DO_NOT_RESCHEDULE);
	}

	return status;
}


static status_t
std_ops(int32 op, ...)
{
	switch (op) {
		case B_MODULE_INIT:
			return B_OK;
		case B_MODULE_UNINIT:
			return B_OK;

		default:
			return B_ERROR;
	}
}


static dpc_module_info sDPCModule = {
	{
		B_DPC_MODULE_NAME,
		0,
		std_ops
	},

	new_dpc_queue,
	delete_dpc_queue,
	queue_dpc
};


module_info *modules[] = {
	(module_info *) &sDPCModule,
	NULL
};
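
/*
 * Usage sketch (kept out of the build with #if 0): how a client driver might
 * consume this module through the kernel module interface. The queue name,
 * priority, queue depth, and the example_* identifiers below are illustrative
 * assumptions, and the hook names are assumed to match the dpc_module_info
 * fields declared in <dpc.h>.
 */
#if 0
static void
example_dpc_handler(void *arg)
{
	// Deferred work runs here, in the DPC queue's kernel thread context
	dprintf("dpc example: handling %p\n", arg);
}


static status_t
example_dpc_client(void)
{
	dpc_module_info *dpc;
	void *queue;
	status_t status;

	status = get_module(B_DPC_MODULE_NAME, (module_info **) &dpc);
	if (status != B_OK)
		return status;

	queue = dpc->new_dpc_queue("example dpc", B_NORMAL_PRIORITY, 16);
	if (queue == NULL) {
		put_module(B_DPC_MODULE_NAME);
		return B_NO_MEMORY;
	}

	// Typically done from an interrupt handler, which would then return
	// B_INVOKE_SCHEDULER to shorten the DPC latency
	dpc->queue_dpc(queue, example_dpc_handler, NULL);

	// On teardown, pending DPCs are still run before the queue thread exits
	dpc->delete_dpc_queue(queue);
	put_module(B_DPC_MODULE_NAME);

	return B_OK;
}
#endif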