/*
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <int.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <arch/debug_console.h>
#include <arch/int.h>
#include <boot/kernel_args.h>
#include <elf.h>
#include <util/kqueue.h>
#include <smp.h>

#include "kernel_debug_config.h"


//#define TRACE_INT
#ifdef TRACE_INT
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


struct io_handler {
	struct io_handler *next;
	interrupt_handler func;
	void *data;
	bool use_enable_counter;
	bool no_handled_info;
#if DEBUG_INTERRUPTS
	int64 handled_count;
#endif
};

struct io_vector {
	struct io_handler *handler_list;
	spinlock vector_lock;
	int32 enable_count;
	bool no_lock_vector;
#if DEBUG_INTERRUPTS
	int64 handled_count;
	int64 unhandled_count;
	int trigger_count;
	int ignored_count;
#endif
};

static struct io_vector sVectors[NUM_IO_VECTORS];


#if DEBUG_INTERRUPTS
static int
dump_int_statistics(int argc, char **argv)
{
	int i;
	for (i = 0; i < NUM_IO_VECTORS; i++) {
		struct io_handler *io;

		if (!B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock)
			&& sVectors[i].enable_count == 0
			&& sVectors[i].handled_count == 0
			&& sVectors[i].unhandled_count == 0
			&& sVectors[i].handler_list == NULL)
			continue;

		kprintf("int %3d, enabled %ld, handled %8lld, unhandled %8lld%s%s\n",
			i, sVectors[i].enable_count, sVectors[i].handled_count,
			sVectors[i].unhandled_count,
			B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock) ? ", ACTIVE" : "",
			sVectors[i].handler_list == NULL ? ", no handler" : "");
		for (io = sVectors[i].handler_list; io != NULL; io = io->next) {
			const char *symbol, *imageName;
			bool exactMatch;

			status_t error = elf_debug_lookup_symbol_address((addr_t)io->func,
				NULL, &symbol, &imageName, &exactMatch);
			if (error == B_OK && exactMatch) {
				if (strchr(imageName, '/') != NULL)
					imageName = strrchr(imageName, '/') + 1;

				int length = 4 + strlen(imageName);
				kprintf(" %s:%-*s (%p)", imageName, 45 - length, symbol,
					io->func);
			} else
				kprintf("\t\t\t\t\t func %p", io->func);

			kprintf(", data %p, handled ", io->data);
			if (io->no_handled_info)
				kprintf("<unknown>\n");
			else
				kprintf("%8lld\n", io->handled_count);
		}

		kprintf("\n");
	}
	return 0;
}
#endif


// #pragma mark - private kernel API


bool
interrupts_enabled(void)
{
	return arch_int_are_interrupts_enabled();
}


status_t
int_init(kernel_args *args)
{
	TRACE(("init_int_handlers: entry\n"));

	return arch_int_init(args);
}


status_t
int_init_post_vm(kernel_args *args)
{
	int i;

	/* initialize the vector list */
	for (i = 0; i < NUM_IO_VECTORS; i++) {
		B_INITIALIZE_SPINLOCK(&sVectors[i].vector_lock);
		sVectors[i].enable_count = 0;
		sVectors[i].no_lock_vector = false;
#if DEBUG_INTERRUPTS
		sVectors[i].handled_count = 0;
		sVectors[i].unhandled_count = 0;
		sVectors[i].trigger_count = 0;
		sVectors[i].ignored_count = 0;
#endif
		sVectors[i].handler_list = NULL;
	}

#if DEBUG_INTERRUPTS
	add_debugger_command("ints", &dump_int_statistics,
		"list interrupt statistics");
#endif

	return arch_int_init_post_vm(args);
}


status_t
int_init_post_device_manager(kernel_args *args)
{
	arch_debug_install_interrupt_handlers();

	return arch_int_init_post_device_manager(args);
}


/*!	Actually process an interrupt via the handlers registered for that
	vector (IRQ).
*/
int
int_io_interrupt_handler(int vector, bool levelTriggered)
{
	int status = B_UNHANDLED_INTERRUPT;
	struct io_handler *io;
	bool handled = false;

	if (!sVectors[vector].no_lock_vector)
		acquire_spinlock(&sVectors[vector].vector_lock);

#if !DEBUG_INTERRUPTS
	// The handler list can legitimately be empty at this point
	if (sVectors[vector].handler_list == NULL) {
		dprintf("unhandled io interrupt %d\n", vector);
		if (!sVectors[vector].no_lock_vector)
			release_spinlock(&sVectors[vector].vector_lock);
		return B_UNHANDLED_INTERRUPT;
	}
#endif

	// For level-triggered interrupts, we actually use the return
	// value (i.e. B_HANDLED_INTERRUPT) to decide whether or not we
	// want to call another interrupt handler.
	// For edge-triggered interrupts, however, we always need to call
	// all handlers, as multiple interrupts cannot be identified. We
	// still make sure the return code of this function reflects
	// whatever the drivers reported as useful.

	for (io = sVectors[vector].handler_list; io != NULL; io = io->next) {
		status = io->func(io->data);

#if DEBUG_INTERRUPTS
		if (status != B_UNHANDLED_INTERRUPT)
			io->handled_count++;
#endif
		if (levelTriggered && status != B_UNHANDLED_INTERRUPT)
			break;

		if (status == B_HANDLED_INTERRUPT || status == B_INVOKE_SCHEDULER)
			handled = true;
	}

#if DEBUG_INTERRUPTS
	sVectors[vector].trigger_count++;
	if (status != B_UNHANDLED_INTERRUPT || handled) {
		sVectors[vector].handled_count++;
	} else {
		sVectors[vector].unhandled_count++;
		sVectors[vector].ignored_count++;
	}

	if (sVectors[vector].trigger_count > 10000) {
		if (sVectors[vector].ignored_count > 9900) {
			struct io_handler *last = sVectors[vector].handler_list;
			while (last && last->next)
				last = last->next;

			if (last != NULL && last->no_handled_info) {
				// We have an interrupt handler installed that does not
				// know whether or not it has actually handled the interrupt,
				// so the unhandled count is inaccurate and we can't just
				// disable the vector.
			} else {
				if (sVectors[vector].handler_list == NULL
					|| sVectors[vector].handler_list->next == NULL) {
					// this interrupt vector is not shared, disable it
					sVectors[vector].enable_count = -100;
					arch_int_disable_io_interrupt(vector);
					dprintf("Disabling unhandled io interrupt %d\n", vector);
				} else {
					// this is a shared interrupt vector, we cannot just disable it
					dprintf("More than 99%% interrupts of vector %d are unhandled\n",
						vector);
				}
			}
		}

		sVectors[vector].trigger_count = 0;
		sVectors[vector].ignored_count = 0;
	}
#endif

	if (!sVectors[vector].no_lock_vector)
		release_spinlock(&sVectors[vector].vector_lock);

	if (levelTriggered)
		return status;

	// edge triggered return value

	if (handled)
		return B_HANDLED_INTERRUPT;

	return B_UNHANDLED_INTERRUPT;
}


// #pragma mark - public API


#undef disable_interrupts
#undef restore_interrupts


cpu_status
disable_interrupts(void)
{
	return arch_int_disable_interrupts();
}


void
restore_interrupts(cpu_status status)
{
	arch_int_restore_interrupts(status);
}


/*!	Install a handler to be called when an interrupt is triggered
	for the given interrupt number with \a data as the argument.
*/
status_t
install_io_interrupt_handler(long vector, interrupt_handler handler, void *data,
	ulong flags)
{
	struct io_handler *io = NULL;
	cpu_status state;

	if (vector < 0 || vector >= NUM_IO_VECTORS)
		return B_BAD_VALUE;

	io = (struct io_handler *)malloc(sizeof(struct io_handler));
	if (io == NULL)
		return B_NO_MEMORY;

	arch_debug_remove_interrupt_handler(vector);
		// There might be a temporary debug interrupt installed on this
		// vector that should be removed now.

	io->func = handler;
	io->data = data;
	io->use_enable_counter = (flags & B_NO_ENABLE_COUNTER) == 0;
	io->no_handled_info = (flags & B_NO_HANDLED_INFO) != 0;
#if DEBUG_INTERRUPTS
	io->handled_count = 0LL;
#endif

	// Disable the interrupts, get the spinlock for this irq only
	// and then insert the handler
	state = disable_interrupts();
	acquire_spinlock(&sVectors[vector].vector_lock);

	if ((flags & B_NO_HANDLED_INFO) != 0
		&& sVectors[vector].handler_list != NULL) {
		// The driver registering this interrupt handler doesn't know
		// whether or not it actually handled the interrupt after the
		// handler returns. This is incompatible with shared interrupts,
		// as we'd potentially steal interrupts from other handlers,
		// resulting in interrupt storms. Therefore we enqueue this
		// handler as the very last one, meaning all other handlers get
		// a chance to handle the interrupt first.
		struct io_handler *last = sVectors[vector].handler_list;
		while (last->next)
			last = last->next;

		io->next = NULL;
		last->next = io;
	} else {
		// A normal interrupt handler, just add it to the head of the list.
		io->next = sVectors[vector].handler_list;
		sVectors[vector].handler_list = io;
	}

	// If B_NO_ENABLE_COUNTER is set, we're being asked not to alter
	// whether the interrupt should be enabled or not.
	if (io->use_enable_counter) {
		if (sVectors[vector].enable_count++ == 0)
			arch_int_enable_io_interrupt(vector);
	}

	// If B_NO_LOCK_VECTOR is specified, this is a vector that is not supposed
	// to have multiple handlers and does not require locking of the vector
	// when entering the handler. For example, this is used by internally
	// registered interrupt handlers such as those handling local APIC
	// interrupts that may run concurrently on multiple CPUs. Locking with a
	// spinlock would in that case defeat the purpose, as it would serialize
	// the handlers instead of letting them run in parallel on different CPUs.
	if (flags & B_NO_LOCK_VECTOR)
		sVectors[vector].no_lock_vector = true;

	release_spinlock(&sVectors[vector].vector_lock);
	restore_interrupts(state);

	return B_OK;
}


/*!	Remove a previously installed interrupt handler */
status_t
remove_io_interrupt_handler(long vector, interrupt_handler handler, void *data)
{
	status_t status = B_BAD_VALUE;
	struct io_handler *io = NULL;
	struct io_handler *last = NULL;
	cpu_status state;

	if (vector < 0 || vector >= NUM_IO_VECTORS)
		return B_BAD_VALUE;

	/* lock the structure down so it is not modified while we search */
	state = disable_interrupts();
	acquire_spinlock(&sVectors[vector].vector_lock);

	/* loop through the installed handlers and try to find a match.
	 * We go forward through the list, which means we start with the
	 * most recently added handlers.
	 */
	for (io = sVectors[vector].handler_list; io != NULL; io = io->next) {
		/* we have to match both function and data */
		if (io->func == handler && io->data == data) {
			if (last != NULL)
				last->next = io->next;
			else
				sVectors[vector].handler_list = io->next;

			// Check if we need to disable the interrupt
			if (io->use_enable_counter && --sVectors[vector].enable_count == 0)
				arch_int_disable_io_interrupt(vector);

			status = B_OK;
			break;
		}

		last = io;
	}

	release_spinlock(&sVectors[vector].vector_lock);
	restore_interrupts(state);

	// if the handler could be found and removed, we still have to free it
	if (status == B_OK)
		free(io);

	return status;
}
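

/*!	Usage sketch (not part of the original sources): a minimal example of how
	a driver might register and later remove a handler through the public API
	above. The struct my_device_info type, its irq field and the my_device_*
	helpers are hypothetical placeholders, and the int32 return type of
	interrupt_handler is assumed from its use in this file, which is why the
	block is disabled with #if 0.
*/
#if 0
static int32
my_device_interrupt_handler(void *data)
{
	struct my_device_info *info = (struct my_device_info *)data;

	// A real driver would check its device's interrupt status register and
	// report B_UNHANDLED_INTERRUPT if the device did not raise this IRQ, so
	// that sharing the vector with other handlers keeps working.
	if (!my_device_raised_interrupt(info))
		return B_UNHANDLED_INTERRUPT;

	my_device_acknowledge_interrupt(info);
	return B_HANDLED_INTERRUPT;
}


static status_t
my_device_attach(struct my_device_info *info)
{
	// flags == 0: the vector may be shared and the enable counter is used.
	return install_io_interrupt_handler(info->irq,
		&my_device_interrupt_handler, info, 0);
}


static void
my_device_detach(struct my_device_info *info)
{
	// Removal has to use the same handler/data pair that was installed.
	remove_io_interrupt_handler(info->irq, &my_device_interrupt_handler,
		info);
}
#endif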