xref: /haiku/src/system/kernel/int.cpp (revision 52f7c9389475e19fc21487b38064b4390eeb6fea)
1 /*
2  * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
3  * Distributed under the terms of the MIT License.
4 
5  * Copyright 2011, Michael Lotz, mmlr@mlotz.ch.
6  * Distributed under the terms of the MIT License.
7  *
8  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
9  * Distributed under the terms of the MIT License.
10  *
11  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
12  * Distributed under the terms of the NewOS License.
13  */
14 
15 
16 #include <int.h>
17 
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 
22 #include <arch/debug_console.h>
23 #include <arch/int.h>
24 #include <boot/kernel_args.h>
25 #include <elf.h>
26 #include <load_tracking.h>
27 #include <util/AutoLock.h>
28 #include <util/kqueue.h>
29 #include <smp.h>
30 
31 #include "kernel_debug_config.h"
32 
33 
34 //#define TRACE_INT
35 #ifdef TRACE_INT
36 #	define TRACE(x) dprintf x
37 #else
38 #	define TRACE(x) ;
39 #endif
40 
41 
// A single installed interrupt handler; io_vector keeps these in a
// singly linked list per vector.
struct io_handler {
	struct io_handler	*next;		// next handler on the same vector
	interrupt_handler	func;		// driver supplied callback
	void				*data;		// cookie passed to func
	bool				use_enable_counter;	// cleared by B_NO_ENABLE_COUNTER
	bool				no_handled_info;	// set by B_NO_HANDLED_INFO: func's
											// return value is not meaningful
#if DEBUG_INTERRUPTS
	int64				handled_count;	// how often func reported "handled"
#endif
};
52 
// One entry per interrupt vector: the chain of installed handlers plus
// locking, load tracking and CPU assignment state for that vector.
struct io_vector {
	struct io_handler	*handler_list;	// singly linked handler chain
	spinlock			vector_lock;	// guards handler_list/enable_count
	int32				enable_count;	// > 0 while the vector is enabled
	bool				no_lock_vector;	// set via B_NO_LOCK_VECTOR: handlers
										// run without taking vector_lock
	interrupt_type		type;			// irq/exception/syscall/ici/unknown

	spinlock			load_lock;		// guards the load fields below
	bigtime_t			last_measure_time;		// last load computation time
	bigtime_t			last_measure_active;	// active time since then
	int32				load;			// measured load of this vector

	irq_assignment*		assigned_cpu;	// CPU this IRQ is routed to

#if DEBUG_INTERRUPTS
	int64				handled_count;
	int64				unhandled_count;
	int					trigger_count;
	int					ignored_count;
#endif
};
74 
// Round-robin counter used by assign_cpu() to spread IRQs over CPUs.
static int32 sLastCPU;

// Per-vector state, allocation bitmap and CPU assignment records.
static io_vector sVectors[NUM_IO_VECTORS];
static bool sAllocatedIOInterruptVectors[NUM_IO_VECTORS];
static irq_assignment sVectorCPUAssignments[NUM_IO_VECTORS];
// Serializes reserve/allocate/free of interrupt vectors.
static mutex sIOInterruptVectorAllocationLock
	= MUTEX_INITIALIZER("io_interrupt_vector_allocation");
82 
83 
#if DEBUG_INTERRUPTS
/*!	KDL command "ints": prints per-vector and per-handler statistics for
	every vector that shows any activity (enabled, handled/unhandled counts,
	installed handlers).
*/
static int
dump_int_statistics(int argc, char **argv)
{
	for (int i = 0; i < NUM_IO_VECTORS; i++) {
		io_vector& vector = sVectors[i];

		// Skip vectors that never saw any activity at all.
		if (!B_SPINLOCK_IS_LOCKED(&vector.vector_lock)
			&& vector.enable_count == 0
			&& vector.handled_count == 0
			&& vector.unhandled_count == 0
			&& vector.handler_list == NULL)
			continue;

		kprintf("int %3d, enabled %" B_PRId32 ", handled %8" B_PRId64 ", "
			"unhandled %8" B_PRId64 "%s%s\n", i, vector.enable_count,
			vector.handled_count, vector.unhandled_count,
			B_SPINLOCK_IS_LOCKED(&vector.vector_lock) ? ", ACTIVE" : "",
			vector.handler_list == NULL ? ", no handler" : "");

		for (struct io_handler* handler = vector.handler_list; handler != NULL;
				handler = handler->next) {
			const char* symbol;
			const char* imageName;
			bool exactMatch;

			status_t error = elf_debug_lookup_symbol_address(
				(addr_t)handler->func, NULL, &symbol, &imageName, &exactMatch);
			if (error == B_OK && exactMatch) {
				// Print only the file name portion of the image path.
				if (strchr(imageName, '/') != NULL)
					imageName = strrchr(imageName, '/') + 1;

				int length = 4 + strlen(imageName);
				kprintf("   %s:%-*s (%p)", imageName, 45 - length, symbol,
					handler->func);
			} else
				kprintf("\t\t\t\t\t   func %p", handler->func);

			kprintf(", data %p, handled ", handler->data);
			if (handler->no_handled_info)
				kprintf("<unknown>\n");
			else
				kprintf("%8" B_PRId64 "\n", handler->handled_count);
		}

		kprintf("\n");
	}
	return 0;
}
#endif
133 
134 
135 static int
136 dump_int_load(int argc, char** argv)
137 {
138 	static const char* typeNames[]
139 		= { "exception", "irq", "local irq", "syscall", "ici", "unknown" };
140 
141 	for (int i = 0; i < NUM_IO_VECTORS; i++) {
142 		if (!B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock)
143 			&& sVectors[i].handler_list == NULL
144 			&& sVectors[i].enable_count == 0)
145 			continue;
146 
147 		kprintf("int %3d, type %s, enabled %" B_PRId32 ", load %" B_PRId32
148 			"%%", i, typeNames[min_c(sVectors[i].type,
149 					INTERRUPT_TYPE_UNKNOWN)],
150 			sVectors[i].enable_count,
151 			sVectors[i].assigned_cpu != NULL
152 				? sVectors[i].assigned_cpu->load / 10 : 0);
153 
154 		if (sVectors[i].type == INTERRUPT_TYPE_IRQ) {
155 			ASSERT(sVectors[i].assigned_cpu != NULL);
156 
157 			if (sVectors[i].assigned_cpu->cpu != -1)
158 				kprintf(", cpu %" B_PRId32, sVectors[i].assigned_cpu->cpu);
159 			else
160 				kprintf(", cpu -");
161 		}
162 
163 		if (B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock))
164 			kprintf(", ACTIVE");
165 		kprintf("\n");
166 	}
167 
168 	return 0;
169 }
170 
171 
172 //	#pragma mark - private kernel API
173 
174 
175 bool
176 interrupts_enabled(void)
177 {
178 	return arch_int_are_interrupts_enabled();
179 }
180 
181 
182 status_t
183 int_init(kernel_args* args)
184 {
185 	TRACE(("init_int_handlers: entry\n"));
186 
187 	return arch_int_init(args);
188 }
189 
190 
191 status_t
192 int_init_post_vm(kernel_args* args)
193 {
194 	int i;
195 
196 	/* initialize the vector list */
197 	for (i = 0; i < NUM_IO_VECTORS; i++) {
198 		B_INITIALIZE_SPINLOCK(&sVectors[i].vector_lock);
199 		sVectors[i].enable_count = 0;
200 		sVectors[i].no_lock_vector = false;
201 		sVectors[i].type = INTERRUPT_TYPE_UNKNOWN;
202 
203 		B_INITIALIZE_SPINLOCK(&sVectors[i].load_lock);
204 		sVectors[i].last_measure_time = 0;
205 		sVectors[i].last_measure_active = 0;
206 		sVectors[i].load = 0;
207 
208 #if DEBUG_INTERRUPTS
209 		sVectors[i].handled_count = 0;
210 		sVectors[i].unhandled_count = 0;
211 		sVectors[i].trigger_count = 0;
212 		sVectors[i].ignored_count = 0;
213 #endif
214 		sVectors[i].handler_list = NULL;
215 
216 		sVectorCPUAssignments[i].irq = i;
217 		sVectorCPUAssignments[i].count = 1;
218 		sVectorCPUAssignments[i].handlers_count = 0;
219 		sVectorCPUAssignments[i].load = 0;
220 		sVectorCPUAssignments[i].cpu = -1;
221 	}
222 
223 #if DEBUG_INTERRUPTS
224 	add_debugger_command("ints", &dump_int_statistics,
225 		"list interrupt statistics");
226 #endif
227 
228 	add_debugger_command("int_load", &dump_int_load,
229 		"list interrupt usage statistics");
230 
231 	return arch_int_init_post_vm(args);
232 }
233 
234 
235 status_t
236 int_init_io(kernel_args* args)
237 {
238 	return arch_int_init_io(args);
239 }
240 
241 
242 status_t
243 int_init_post_device_manager(kernel_args* args)
244 {
245 	arch_debug_install_interrupt_handlers();
246 
247 	return arch_int_init_post_device_manager(args);
248 }
249 
250 
/*!	Refreshes the load estimate of vector \a i and propagates any change to
	the CPU the vector is assigned to. Uses try_acquire_spinlock() so a
	concurrent update simply skips the refresh instead of spinning (this is
	called from interrupt context).
*/
static void
update_int_load(int i)
{
	if (!try_acquire_spinlock(&sVectors[i].load_lock))
		return;

	int32 oldLoad = sVectors[i].load;
	// NOTE(review): compute_load() appears to update last_measure_time,
	// last_measure_active and load in place (by reference) — confirm against
	// <load_tracking.h>.
	compute_load(sVectors[i].last_measure_time, sVectors[i].last_measure_active,
		sVectors[i].load, system_time());

	// Keep the assigned CPU's aggregate load in sync with this vector's.
	if (oldLoad != sVectors[i].load)
		atomic_add(&sVectors[i].assigned_cpu->load, sVectors[i].load - oldLoad);

	release_spinlock(&sVectors[i].load_lock);
}
266 
267 
/*!	Actually process an interrupt via the handlers registered for that
	vector (IRQ).
	\a levelTriggered selects how handler return codes are interpreted: for
	level-triggered interrupts the chain stops at the first handler that
	claims the interrupt and its code is returned; for edge-triggered
	interrupts all handlers always run and the combined result is returned.
	Also accounts the time spent in the handlers for load tracking.
*/
int
int_io_interrupt_handler(int vector, bool levelTriggered)
{
	int status = B_UNHANDLED_INTERRUPT;
	struct io_handler* io;
	bool handled = false;

	// Entry timestamp, used for the load accounting at the end.
	bigtime_t start = system_time();

	// exceptions and syscalls have their own handlers
	ASSERT(sVectors[vector].type != INTERRUPT_TYPE_EXCEPTION
		&& sVectors[vector].type != INTERRUPT_TYPE_SYSCALL);

	if (!sVectors[vector].no_lock_vector)
		acquire_spinlock(&sVectors[vector].vector_lock);

#if !DEBUG_INTERRUPTS
	// The list can be empty at this place
	if (sVectors[vector].handler_list == NULL) {
		dprintf("unhandled io interrupt %d\n", vector);
		if (!sVectors[vector].no_lock_vector)
			release_spinlock(&sVectors[vector].vector_lock);
		return B_UNHANDLED_INTERRUPT;
	}
#endif

	// For level-triggered interrupts, we actually handle the return
	// value (ie. B_HANDLED_INTERRUPT) to decide whether or not we
	// want to call another interrupt handler.
	// For edge-triggered interrupts, however, we always need to call
	// all handlers, as multiple interrupts cannot be identified. We
	// still make sure the return code of this function will issue
	// whatever the driver thought would be useful.

	for (io = sVectors[vector].handler_list; io != NULL; io = io->next) {
		status = io->func(io->data);

#if DEBUG_INTERRUPTS
		if (status != B_UNHANDLED_INTERRUPT)
			io->handled_count++;
#endif
		if (levelTriggered && status != B_UNHANDLED_INTERRUPT)
			break;

		if (status == B_HANDLED_INTERRUPT || status == B_INVOKE_SCHEDULER)
			handled = true;
	}

#if DEBUG_INTERRUPTS
	// Track per-vector statistics, and disable a non-shared vector whose
	// interrupts are almost exclusively unhandled (likely an interrupt
	// storm). Counters are evaluated and reset every 10000 triggers.
	sVectors[vector].trigger_count++;
	if (status != B_UNHANDLED_INTERRUPT || handled) {
		sVectors[vector].handled_count++;
	} else {
		sVectors[vector].unhandled_count++;
		sVectors[vector].ignored_count++;
	}

	if (sVectors[vector].trigger_count > 10000) {
		if (sVectors[vector].ignored_count > 9900) {
			struct io_handler *last = sVectors[vector].handler_list;
			while (last && last->next)
				last = last->next;

			if (last != NULL && last->no_handled_info) {
				// we have an interrupt handler installed that does not
				// know whether or not it has actually handled the interrupt,
				// so this unhandled count is inaccurate and we can't just
				// disable
			} else {
				if (sVectors[vector].handler_list == NULL
					|| sVectors[vector].handler_list->next == NULL) {
					// this interrupt vector is not shared, disable it
					sVectors[vector].enable_count = -100;
					arch_int_disable_io_interrupt(vector);
					dprintf("Disabling unhandled io interrupt %d\n", vector);
				} else {
					// this is a shared interrupt vector, we cannot just disable it
					dprintf("More than 99%% interrupts of vector %d are unhandled\n",
						vector);
				}
			}
		}

		sVectors[vector].trigger_count = 0;
		sVectors[vector].ignored_count = 0;
	}
#endif

	if (!sVectors[vector].no_lock_vector)
		release_spinlock(&sVectors[vector].vector_lock);

	// Account the time spent in the handlers for this vector's load...
	SpinLocker vectorLocker(sVectors[vector].load_lock);
	bigtime_t deltaTime = system_time() - start;
	sVectors[vector].last_measure_active += deltaTime;
	vectorLocker.Unlock();

	// ...and for the current CPU's interrupt/IRQ time.
	cpu_ent* cpu = get_cpu_struct();
	if (sVectors[vector].type == INTERRUPT_TYPE_IRQ
		|| sVectors[vector].type == INTERRUPT_TYPE_ICI
		|| sVectors[vector].type == INTERRUPT_TYPE_LOCAL_IRQ) {
		cpu->interrupt_time += deltaTime;
		if (sVectors[vector].type == INTERRUPT_TYPE_IRQ)
			cpu->irq_time += deltaTime;
	}

	update_int_load(vector);

	if (levelTriggered)
		return status;

	// edge triggered return value

	if (handled)
		return B_HANDLED_INTERRUPT;

	return B_UNHANDLED_INTERRUPT;
}
388 
389 
390 //	#pragma mark - public API
391 
392 
393 #undef disable_interrupts
394 #undef restore_interrupts
395 
396 
397 cpu_status
398 disable_interrupts(void)
399 {
400 	return arch_int_disable_interrupts();
401 }
402 
403 
404 void
405 restore_interrupts(cpu_status status)
406 {
407 	arch_int_restore_interrupts(status);
408 }
409 
410 
411 static
412 uint32 assign_cpu(void)
413 {
414 	const cpu_topology_node* node;
415 	do {
416 		int32 nextID = atomic_add(&sLastCPU, 1);
417 		node = get_cpu_topology();
418 
419 		while (node->level != CPU_TOPOLOGY_SMT) {
420 			int levelSize = node->children_count;
421 			node = node->children[nextID % levelSize];
422 			nextID /= levelSize;
423 		}
424 	} while (gCPU[node->id].disabled);
425 
426 	return node->id;
427 }
428 
429 
/*!	Install a handler to be called when an interrupt is triggered
	for the given interrupt number with \a data as the argument.
	\a flags may contain B_NO_ENABLE_COUNTER, B_NO_HANDLED_INFO and
	B_NO_LOCK_VECTOR; see the comments below for their effects.
	Returns B_BAD_VALUE for an out-of-range \a vector, B_NO_MEMORY if the
	handler record could not be allocated, B_OK otherwise.
*/
status_t
install_io_interrupt_handler(long vector, interrupt_handler handler, void *data,
	ulong flags)
{
	struct io_handler *io = NULL;
	cpu_status state;

	if (vector < 0 || vector >= NUM_IO_VECTORS)
		return B_BAD_VALUE;

	io = (struct io_handler *)malloc(sizeof(struct io_handler));
	if (io == NULL)
		return B_NO_MEMORY;

	arch_debug_remove_interrupt_handler(vector);
		// There might be a temporary debug interrupt installed on this
		// vector that should be removed now.

	io->func = handler;
	io->data = data;
	io->use_enable_counter = (flags & B_NO_ENABLE_COUNTER) == 0;
	io->no_handled_info = (flags & B_NO_HANDLED_INFO) != 0;
#if DEBUG_INTERRUPTS
	io->handled_count = 0LL;
#endif

	// Disable the interrupts, get the spinlock for this irq only
	// and then insert the handler
	state = disable_interrupts();
	acquire_spinlock(&sVectors[vector].vector_lock);

	// Initial attempt to balance IRQs, the scheduler will correct this
	// if some cores end up being overloaded.
	if (sVectors[vector].type == INTERRUPT_TYPE_IRQ
		&& sVectors[vector].handler_list == NULL
		&& sVectors[vector].assigned_cpu->cpu == -1) {

		// First handler for this IRQ: pick a CPU and register the vector's
		// irq_assignment with that CPU's IRQ list.
		int32 cpuID = assign_cpu();
		cpuID = arch_int_assign_to_cpu(vector, cpuID);
		sVectors[vector].assigned_cpu->cpu = cpuID;

		cpu_ent* cpu = &gCPU[cpuID];
		SpinLocker _(cpu->irqs_lock);
		atomic_add(&sVectors[vector].assigned_cpu->handlers_count, 1);
		list_add_item(&cpu->irqs, sVectors[vector].assigned_cpu);
	}

	if ((flags & B_NO_HANDLED_INFO) != 0
		&& sVectors[vector].handler_list != NULL) {
		// The driver registering this interrupt handler doesn't know
		// whether or not it actually handled the interrupt after the
		// handler returns. This is incompatible with shared interrupts
		// as we'd potentially steal interrupts from other handlers
		// resulting in interrupt storms. Therefore we enqueue this interrupt
		// handler as the very last one, meaning all other handlers will
		// get their go at any interrupt first.
		struct io_handler *last = sVectors[vector].handler_list;
		while (last->next)
			last = last->next;

		io->next = NULL;
		last->next = io;
	} else {
		// A normal interrupt handler, just add it to the head of the list.
		io->next = sVectors[vector].handler_list;
		sVectors[vector].handler_list = io;
	}

	// If B_NO_ENABLE_COUNTER is set, we're being asked to not alter
	// whether the interrupt should be enabled or not
	if (io->use_enable_counter) {
		if (sVectors[vector].enable_count++ == 0)
			arch_int_enable_io_interrupt(vector);
	}

	// If B_NO_LOCK_VECTOR is specified this is a vector that is not supposed
	// to have multiple handlers and does not require locking of the vector
	// when entering the handler. For example this is used by internally
	// registered interrupt handlers like for handling local APIC interrupts
	// that may run concurrently on multiple CPUs. Locking with a spinlock
	// would in that case defeat the purpose as it would serialize calling the
	// handlers in parallel on different CPUs.
	if (flags & B_NO_LOCK_VECTOR)
		sVectors[vector].no_lock_vector = true;

	release_spinlock(&sVectors[vector].vector_lock);

	restore_interrupts(state);

	return B_OK;
}
524 
525 
/*!	Remove a previously installed interrupt handler.
	Both \a handler and \a data must match the values passed to
	install_io_interrupt_handler(). Returns B_BAD_VALUE if no matching
	handler is installed on \a vector, B_OK otherwise. If this was the last
	handler of an IRQ vector, the vector is also detached from the CPU it
	was assigned to.
*/
status_t
remove_io_interrupt_handler(long vector, interrupt_handler handler, void *data)
{
	status_t status = B_BAD_VALUE;
	struct io_handler *io = NULL;
	struct io_handler *last = NULL;
	cpu_status state;

	if (vector < 0 || vector >= NUM_IO_VECTORS)
		return B_BAD_VALUE;

	/* lock the structures down so it is not modified while we search */
	state = disable_interrupts();
	acquire_spinlock(&sVectors[vector].vector_lock);

	/* loop through the available handlers and try to find a match.
	 * We go forward through the list but this means we start with the
	 * most recently added handlers.
	 */
	for (io = sVectors[vector].handler_list; io != NULL; io = io->next) {
		/* we have to match both function and data */
		if (io->func == handler && io->data == data) {
			// Unlink io from the list (last is its predecessor, if any).
			if (last != NULL)
				last->next = io->next;
			else
				sVectors[vector].handler_list = io->next;

			// Check if we need to disable the interrupt
			if (io->use_enable_counter && --sVectors[vector].enable_count == 0)
				arch_int_disable_io_interrupt(vector);

			status = B_OK;
			break;
		}

		last = io;
	}

	// If the last handler of an IRQ vector was removed, drop the vector's
	// CPU assignment.
	if (sVectors[vector].handler_list == NULL
		&& sVectors[vector].type == INTERRUPT_TYPE_IRQ
		&& sVectors[vector].assigned_cpu != NULL
		&& sVectors[vector].assigned_cpu->handlers_count > 0) {

		int32 oldHandlersCount
			= atomic_add(&sVectors[vector].assigned_cpu->handlers_count, -1);

		if (oldHandlersCount == 1) {
			int32 oldCPU;
			SpinLocker locker;
			cpu_ent* cpu;

			// The assigned CPU may change concurrently: retry until the CPU
			// we read is still the assigned one while we hold its irqs_lock.
			do {
				locker.Unlock();

				oldCPU = sVectors[vector].assigned_cpu->cpu;

				ASSERT(oldCPU != -1);
				cpu = &gCPU[oldCPU];

				locker.SetTo(cpu->irqs_lock, false);
			} while (sVectors[vector].assigned_cpu->cpu != oldCPU);

			sVectors[vector].assigned_cpu->cpu = -1;
			list_remove_item(&cpu->irqs, sVectors[vector].assigned_cpu);
		}
	}

	release_spinlock(&sVectors[vector].vector_lock);
	restore_interrupts(state);

	// if the handler could be found and removed, we still have to free it
	if (status == B_OK)
		free(io);

	return status;
}
603 
604 
605 /*	Mark \a count contigous interrupts starting at \a startVector as in use.
606 	This will prevent them from being allocated by others. Only use this when
607 	the reserved range is hardwired to the given vector, otherwise allocate
608 	vectors using allocate_io_interrupt_vectors() instead.
609 */
610 status_t
611 reserve_io_interrupt_vectors(long count, long startVector, interrupt_type type)
612 {
613 	MutexLocker locker(&sIOInterruptVectorAllocationLock);
614 
615 	for (long i = 0; i < count; i++) {
616 		if (sAllocatedIOInterruptVectors[startVector + i]) {
617 			panic("reserved interrupt vector range %ld-%ld overlaps already "
618 				"allocated vector %ld", startVector, startVector + count - 1,
619 				startVector + i);
620 			free_io_interrupt_vectors(i, startVector);
621 			return B_BUSY;
622 		}
623 
624 		sVectors[startVector + i].type = type;
625 		sVectors[startVector + i].assigned_cpu
626 			= &sVectorCPUAssignments[startVector + i];
627 		sVectorCPUAssignments[startVector + i].count = 1;
628 		sAllocatedIOInterruptVectors[startVector + i] = true;
629 	}
630 
631 	dprintf("reserve_io_interrupt_vectors: reserved %ld vectors starting "
632 		"from %ld\n", count, startVector);
633 	return B_OK;
634 }
635 
636 
637 /*!	Allocate \a count contiguous interrupt vectors. The vectors are allocated
638 	as available so that they do not overlap with any other reserved vector.
639 	The first vector to be used is returned in \a startVector on success.
640 */
641 status_t
642 allocate_io_interrupt_vectors(long count, long *startVector,
643 	interrupt_type type)
644 {
645 	MutexLocker locker(&sIOInterruptVectorAllocationLock);
646 
647 	long vector = 0;
648 	bool runFound = true;
649 	for (long i = 0; i < NUM_IO_VECTORS - (count - 1); i++) {
650 		if (sAllocatedIOInterruptVectors[i])
651 			continue;
652 
653 		vector = i;
654 		runFound = true;
655 		for (uint16 j = 1; j < count; j++) {
656 			if (sAllocatedIOInterruptVectors[i + j]) {
657 				runFound = false;
658 				i += j;
659 				break;
660 			}
661 		}
662 
663 		if (runFound)
664 			break;
665 	}
666 
667 	if (!runFound) {
668 		dprintf("found no free vectors to allocate %ld io interrupts\n", count);
669 		return B_NO_MEMORY;
670 	}
671 
672 	for (long i = 0; i < count; i++) {
673 		sVectors[vector + i].type = type;
674 		sVectors[vector + i].assigned_cpu = &sVectorCPUAssignments[vector];
675 		sAllocatedIOInterruptVectors[vector + i] = true;
676 	}
677 
678 	sVectorCPUAssignments[vector].irq = vector;
679 	sVectorCPUAssignments[vector].count = count;
680 
681 	*startVector = vector;
682 	dprintf("allocate_io_interrupt_vectors: allocated %ld vectors starting "
683 		"from %ld\n", count, vector);
684 	return B_OK;
685 }
686 
687 
688 /*!	Free/unreserve interrupt vectors previously allocated with the
689 	{reserve|allocate}_io_interrupt_vectors() functions. The \a count and
690 	\a startVector can be adjusted from the allocation calls to partially free
691 	a vector range.
692 */
693 void
694 free_io_interrupt_vectors(long count, long startVector)
695 {
696 	if (startVector + count > NUM_IO_VECTORS) {
697 		panic("invalid start vector %ld or count %ld supplied to "
698 			"free_io_interrupt_vectors\n", startVector, count);
699 		return;
700 	}
701 
702 	dprintf("free_io_interrupt_vectors: freeing %ld vectors starting "
703 		"from %ld\n", count, startVector);
704 
705 	MutexLocker locker(sIOInterruptVectorAllocationLock);
706 	for (long i = 0; i < count; i++) {
707 		if (!sAllocatedIOInterruptVectors[startVector + i]) {
708 			panic("io interrupt vector %ld was not allocated\n",
709 				startVector + i);
710 		}
711 
712 		io_vector& vector = sVectors[startVector + i];
713 		InterruptsSpinLocker vectorLocker(vector.vector_lock);
714 		if (vector.assigned_cpu != NULL && vector.assigned_cpu->cpu != -1) {
715 			panic("freeing io interrupt vector %ld that is still asigned to a "
716 				"cpu", startVector + i);
717 			continue;
718 		}
719 
720 		vector.assigned_cpu = NULL;
721 		sAllocatedIOInterruptVectors[startVector + i] = false;
722 	}
723 }
724 
725 
726 void assign_io_interrupt_to_cpu(long vector, int32 newCPU)
727 {
728 	ASSERT(sVectors[vector].type == INTERRUPT_TYPE_IRQ);
729 
730 	int32 oldCPU = sVectors[vector].assigned_cpu->cpu;
731 
732 	if (newCPU == -1)
733 		newCPU = assign_cpu();
734 
735 	if (newCPU == oldCPU)
736 		return;
737 
738 	ASSERT(oldCPU != -1);
739 	cpu_ent* cpu = &gCPU[oldCPU];
740 
741 	SpinLocker locker(cpu->irqs_lock);
742 	sVectors[vector].assigned_cpu->cpu = -1;
743 	list_remove_item(&cpu->irqs, sVectors[vector].assigned_cpu);
744 	locker.Unlock();
745 
746 	newCPU = arch_int_assign_to_cpu(vector, newCPU);
747 	sVectors[vector].assigned_cpu->cpu = newCPU;
748 	cpu = &gCPU[newCPU];
749 	locker.SetTo(cpu->irqs_lock, false);
750 	list_add_item(&cpu->irqs, sVectors[vector].assigned_cpu);
751 }
752