xref: /haiku/src/system/kernel/int.cpp (revision 4c8e85b316c35a9161f5a1c50ad70bc91c83a76f)
/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2011, Michael Lotz, mmlr@mlotz.ch.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <int.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <arch/debug_console.h>
#include <arch/int.h>
#include <boot/kernel_args.h>
#include <elf.h>
#include <load_tracking.h>
#include <util/AutoLock.h>
#include <util/kqueue.h>
#include <smp.h>

#include "kernel_debug_config.h"


//#define TRACE_INT
#ifdef TRACE_INT
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


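// One entry in a vector's singly linked chain of interrupt handlers.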
struct io_handler {
	struct io_handler	*next;
	interrupt_handler	func;
	void				*data;
	bool				use_enable_counter;
	bool				no_handled_info;
#if DEBUG_INTERRUPTS
	int64				handled_count;
#endif
};

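// Per-vector state. vector_lock serializes the handler list and
// enable_count; load_lock guards the load measurement fields below it.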
struct io_vector {
	struct io_handler	*handler_list;
	spinlock			vector_lock;
	int32				enable_count;
	bool				no_lock_vector;
	interrupt_type		type;

	spinlock			load_lock;
	bigtime_t			last_measure_time;
	bigtime_t			last_measure_active;
	int32				load;

	irq_assignment*		assigned_cpu;

#if DEBUG_INTERRUPTS
	int64				handled_count;
	int64				unhandled_count;
	int					trigger_count;
	int					ignored_count;
#endif
};

static int32 sLastCPU;

static io_vector sVectors[NUM_IO_VECTORS];
static bool sAllocatedIOInterruptVectors[NUM_IO_VECTORS];
static irq_assignment sVectorCPUAssignments[NUM_IO_VECTORS];
static mutex sIOInterruptVectorAllocationLock
	= MUTEX_INITIALIZER("io_interrupt_vector_allocation");


#if DEBUG_INTERRUPTS
static int
dump_int_statistics(int argc, char **argv)
{
	int i;
	for (i = 0; i < NUM_IO_VECTORS; i++) {
		struct io_handler *io;

		if (!B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock)
			&& sVectors[i].enable_count == 0
			&& sVectors[i].handled_count == 0
			&& sVectors[i].unhandled_count == 0
			&& sVectors[i].handler_list == NULL)
			continue;

		kprintf("int %3d, enabled %" B_PRId32 ", handled %8" B_PRId64 ", "
			"unhandled %8" B_PRId64 "%s%s\n", i, sVectors[i].enable_count,
			sVectors[i].handled_count, sVectors[i].unhandled_count,
			B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock) ? ", ACTIVE" : "",
			sVectors[i].handler_list == NULL ? ", no handler" : "");

		for (io = sVectors[i].handler_list; io != NULL; io = io->next) {
			const char *symbol, *imageName;
			bool exactMatch;

			status_t error = elf_debug_lookup_symbol_address((addr_t)io->func,
				NULL, &symbol, &imageName, &exactMatch);
			if (error == B_OK && exactMatch) {
				if (strchr(imageName, '/') != NULL)
					imageName = strrchr(imageName, '/') + 1;

				int length = 4 + strlen(imageName);
				kprintf("   %s:%-*s (%p)", imageName, 45 - length, symbol,
					io->func);
			} else
				kprintf("\t\t\t\t\t   func %p", io->func);

			kprintf(", data %p, handled ", io->data);
			if (io->no_handled_info)
				kprintf("<unknown>\n");
			else
				kprintf("%8" B_PRId64 "\n", io->handled_count);
		}

		kprintf("\n");
	}
	return 0;
}
#endif


static int
dump_int_load(int argc, char** argv)
{
	static const char* typeNames[]
		= { "exception", "irq", "local irq", "syscall", "ici", "unknown" };

	for (int i = 0; i < NUM_IO_VECTORS; i++) {
		if (!B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock)
			&& sVectors[i].handler_list == NULL
			&& sVectors[i].enable_count == 0)
			continue;

		kprintf("int %3d, type %s, enabled %" B_PRId32 ", load %" B_PRId32
			"%%", i, typeNames[min_c(sVectors[i].type,
					INTERRUPT_TYPE_UNKNOWN)],
			sVectors[i].enable_count,
			sVectors[i].assigned_cpu != NULL
				? sVectors[i].assigned_cpu->load / 10 : 0);

		if (sVectors[i].type == INTERRUPT_TYPE_IRQ) {
			ASSERT(sVectors[i].assigned_cpu != NULL);

			if (sVectors[i].assigned_cpu->cpu != -1)
				kprintf(", cpu %" B_PRId32, sVectors[i].assigned_cpu->cpu);
			else
				kprintf(", cpu -");
		}

		if (B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock))
			kprintf(", ACTIVE");
		kprintf("\n");
	}

	return 0;
}


//	#pragma mark - private kernel API


bool
interrupts_enabled(void)
{
	return arch_int_are_interrupts_enabled();
}


status_t
int_init(kernel_args* args)
{
	TRACE(("int_init: entry\n"));

	return arch_int_init(args);
}


status_t
int_init_post_vm(kernel_args* args)
{
	int i;

	/* initialize the vector list */
	for (i = 0; i < NUM_IO_VECTORS; i++) {
		B_INITIALIZE_SPINLOCK(&sVectors[i].vector_lock);
		sVectors[i].enable_count = 0;
		sVectors[i].no_lock_vector = false;
		sVectors[i].type = INTERRUPT_TYPE_UNKNOWN;

		B_INITIALIZE_SPINLOCK(&sVectors[i].load_lock);
		sVectors[i].last_measure_time = 0;
		sVectors[i].last_measure_active = 0;
		sVectors[i].load = 0;

#if DEBUG_INTERRUPTS
		sVectors[i].handled_count = 0;
		sVectors[i].unhandled_count = 0;
		sVectors[i].trigger_count = 0;
		sVectors[i].ignored_count = 0;
#endif
		sVectors[i].handler_list = NULL;

		sVectorCPUAssignments[i].irq = i;
		sVectorCPUAssignments[i].count = 1;
		sVectorCPUAssignments[i].handlers_count = 0;
		sVectorCPUAssignments[i].load = 0;
		sVectorCPUAssignments[i].cpu = -1;
	}

#if DEBUG_INTERRUPTS
	add_debugger_command("ints", &dump_int_statistics,
		"list interrupt statistics");
#endif

	add_debugger_command("int_load", &dump_int_load,
		"list interrupt usage statistics");

	return arch_int_init_post_vm(args);
}


status_t
int_init_io(kernel_args* args)
{
	return arch_int_init_io(args);
}


status_t
int_init_post_device_manager(kernel_args* args)
{
	arch_debug_install_interrupt_handlers();

	return arch_int_init_post_device_manager(args);
}


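/*!	Updates the measured load of the given interrupt vector and propagates
	any change to the CPU the vector is assigned to. Uses
	try_acquire_spinlock() so that concurrent updates are simply skipped
	rather than waited for.
*/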
static void
update_int_load(int i)
{
	if (!try_acquire_spinlock(&sVectors[i].load_lock))
		return;

	int32 oldLoad = sVectors[i].load;
	compute_load(sVectors[i].last_measure_time, sVectors[i].last_measure_active,
		sVectors[i].load, system_time());

	if (oldLoad != sVectors[i].load)
		atomic_add(&sVectors[i].assigned_cpu->load, sVectors[i].load - oldLoad);

	release_spinlock(&sVectors[i].load_lock);
}


/*!	Actually process an interrupt via the handlers registered for that
	vector (IRQ).
*/
int
int_io_interrupt_handler(int vector, bool levelTriggered)
{
	int status = B_UNHANDLED_INTERRUPT;
	struct io_handler* io;
	bool handled = false;

	bigtime_t start = system_time();

	// exceptions and syscalls have their own handlers
	ASSERT(sVectors[vector].type != INTERRUPT_TYPE_EXCEPTION
		&& sVectors[vector].type != INTERRUPT_TYPE_SYSCALL);

	if (!sVectors[vector].no_lock_vector)
		acquire_spinlock(&sVectors[vector].vector_lock);

#if !DEBUG_INTERRUPTS
	// The list can be empty at this point
	if (sVectors[vector].handler_list == NULL) {
		dprintf("unhandled io interrupt %d\n", vector);
		if (!sVectors[vector].no_lock_vector)
			release_spinlock(&sVectors[vector].vector_lock);
		return B_UNHANDLED_INTERRUPT;
	}
#endif

	// For level-triggered interrupts, we actually handle the return
	// value (i.e. B_HANDLED_INTERRUPT) to decide whether or not we
	// want to call another interrupt handler.
	// For edge-triggered interrupts, however, we always need to call
	// all handlers, as multiple interrupts cannot be identified. We
	// still make sure the return code of this function reflects
	// whatever the drivers reported as useful.

	for (io = sVectors[vector].handler_list; io != NULL; io = io->next) {
		status = io->func(io->data);

#if DEBUG_INTERRUPTS
		if (status != B_UNHANDLED_INTERRUPT)
			io->handled_count++;
#endif
		if (levelTriggered && status != B_UNHANDLED_INTERRUPT)
			break;

		if (status == B_HANDLED_INTERRUPT || status == B_INVOKE_SCHEDULER)
			handled = true;
	}

#if DEBUG_INTERRUPTS
	sVectors[vector].trigger_count++;
	if (status != B_UNHANDLED_INTERRUPT || handled) {
		sVectors[vector].handled_count++;
	} else {
		sVectors[vector].unhandled_count++;
		sVectors[vector].ignored_count++;
	}

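	// Every 10000 triggers, check whether at least 99% of the interrupts went
	// unhandled. If so, and unless a handler cannot report whether it handled
	// its interrupts, treat this as an interrupt storm and disable the vector
	// outright when it is not shared.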
	if (sVectors[vector].trigger_count > 10000) {
		if (sVectors[vector].ignored_count > 9900) {
			struct io_handler *last = sVectors[vector].handler_list;
			while (last && last->next)
				last = last->next;

			if (last != NULL && last->no_handled_info) {
				// We have an interrupt handler installed that does not know
				// whether or not it has actually handled the interrupt, so
				// the unhandled count is inaccurate and we cannot simply
				// disable the vector.
			} else {
				if (sVectors[vector].handler_list == NULL
					|| sVectors[vector].handler_list->next == NULL) {
					// this interrupt vector is not shared, disable it
					sVectors[vector].enable_count = -100;
					arch_int_disable_io_interrupt(vector);
					dprintf("Disabling unhandled io interrupt %d\n", vector);
				} else {
					// this is a shared interrupt vector, we cannot just disable it
					dprintf("More than 99%% of interrupts on vector %d are "
						"unhandled\n", vector);
				}
			}
		}

		sVectors[vector].trigger_count = 0;
		sVectors[vector].ignored_count = 0;
	}
#endif

	if (!sVectors[vector].no_lock_vector)
		release_spinlock(&sVectors[vector].vector_lock);

	SpinLocker vectorLocker(sVectors[vector].load_lock);
	bigtime_t deltaTime = system_time() - start;
	sVectors[vector].last_measure_active += deltaTime;
	vectorLocker.Unlock();

	cpu_ent* cpu = get_cpu_struct();
	if (sVectors[vector].type == INTERRUPT_TYPE_IRQ
		|| sVectors[vector].type == INTERRUPT_TYPE_ICI
		|| sVectors[vector].type == INTERRUPT_TYPE_LOCAL_IRQ) {
		cpu->interrupt_time += deltaTime;
		if (sVectors[vector].type == INTERRUPT_TYPE_IRQ)
			cpu->irq_time += deltaTime;
	}

	update_int_load(vector);

	if (levelTriggered)
		return status;

	// edge triggered return value

	if (handled)
		return B_HANDLED_INTERRUPT;

	return B_UNHANDLED_INTERRUPT;
}


//	#pragma mark - public API


#undef disable_interrupts
#undef restore_interrupts


cpu_status
disable_interrupts(void)
{
	return arch_int_disable_interrupts();
}


void
restore_interrupts(cpu_status status)
{
	arch_int_restore_interrupts(status);
}


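/*!	Picks a CPU for a newly assigned IRQ by walking the CPU topology tree
	round-robin down to the SMT level, skipping CPUs that are disabled.
*/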
static uint32
assign_cpu(void)
{
// arch_int_assign_to_cpu is not yet implemented for riscv
#ifdef __riscv
	return 0;
#endif

	const cpu_topology_node* node;
	do {
		int32 nextID = atomic_add(&sLastCPU, 1);
		node = get_cpu_topology();

		while (node->level != CPU_TOPOLOGY_SMT) {
			int levelSize = node->children_count;
			node = node->children[nextID % levelSize];
			nextID /= levelSize;
		}
	} while (gCPU[node->id].disabled);

	return node->id;
}


/*!	Install a handler to be called when an interrupt is triggered
	for the given interrupt number with \a data as the argument.
*/
status_t
install_io_interrupt_handler(long vector, interrupt_handler handler, void *data,
	ulong flags)
{
	struct io_handler *io = NULL;
	cpu_status state;

	if (vector < 0 || vector >= NUM_IO_VECTORS)
		return B_BAD_VALUE;

	io = (struct io_handler *)malloc(sizeof(struct io_handler));
	if (io == NULL)
		return B_NO_MEMORY;

	arch_debug_remove_interrupt_handler(vector);
		// There might be a temporary debug interrupt installed on this
		// vector that should be removed now.

	io->func = handler;
	io->data = data;
	io->use_enable_counter = (flags & B_NO_ENABLE_COUNTER) == 0;
	io->no_handled_info = (flags & B_NO_HANDLED_INFO) != 0;
#if DEBUG_INTERRUPTS
	io->handled_count = 0LL;
#endif

	// Disable the interrupts, get the spinlock for this irq only
	// and then insert the handler
	state = disable_interrupts();
	acquire_spinlock(&sVectors[vector].vector_lock);

	// Initial attempt to balance IRQs, the scheduler will correct this
	// if some cores end up being overloaded.
	if (sVectors[vector].type == INTERRUPT_TYPE_IRQ
		&& sVectors[vector].handler_list == NULL
		&& sVectors[vector].assigned_cpu->cpu == -1) {

		int32 cpuID = assign_cpu();
		cpuID = arch_int_assign_to_cpu(vector, cpuID);
		sVectors[vector].assigned_cpu->cpu = cpuID;

		cpu_ent* cpu = &gCPU[cpuID];
		SpinLocker _(cpu->irqs_lock);
		atomic_add(&sVectors[vector].assigned_cpu->handlers_count, 1);
		list_add_item(&cpu->irqs, sVectors[vector].assigned_cpu);
	}

	if ((flags & B_NO_HANDLED_INFO) != 0
		&& sVectors[vector].handler_list != NULL) {
		// The driver registering this interrupt handler doesn't know
		// whether or not it actually handled the interrupt after the
		// handler returns. This is incompatible with shared interrupts
		// as we'd potentially steal interrupts from other handlers
		// resulting in interrupt storms. Therefore we enqueue this interrupt
		// handler as the very last one, meaning all other handlers will
		// get their go at any interrupt first.
		struct io_handler *last = sVectors[vector].handler_list;
		while (last->next)
			last = last->next;

		io->next = NULL;
		last->next = io;
	} else {
		// A normal interrupt handler, just add it to the head of the list.
		io->next = sVectors[vector].handler_list;
		sVectors[vector].handler_list = io;
	}

	// If B_NO_ENABLE_COUNTER is set, we're being asked to not alter
	// whether the interrupt should be enabled or not
	if (io->use_enable_counter) {
		if (sVectors[vector].enable_count++ == 0)
			arch_int_enable_io_interrupt(vector);
	}

	// If B_NO_LOCK_VECTOR is specified this is a vector that is not supposed
	// to have multiple handlers and does not require locking of the vector
	// when entering the handler. For example this is used by internally
	// registered interrupt handlers like for handling local APIC interrupts
	// that may run concurrently on multiple CPUs. Locking with a spinlock
	// would in that case defeat the purpose as it would serialize calling the
	// handlers in parallel on different CPUs.
	if (flags & B_NO_LOCK_VECTOR)
		sVectors[vector].no_lock_vector = true;

	release_spinlock(&sVectors[vector].vector_lock);

	restore_interrupts(state);

	return B_OK;
}
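/* Example usage from a driver's point of view (a sketch; the device, its IRQ
   number, and the helper functions are hypothetical):

	static int32
	my_device_interrupt(void* data)
	{
		my_device* device = (my_device*)data;
		if (!my_device_caused_interrupt(device))
			return B_UNHANDLED_INTERRUPT;

		my_device_acknowledge_interrupt(device);
		return B_HANDLED_INTERRUPT;
	}

	install_io_interrupt_handler(device->irq, &my_device_interrupt, device, 0);
*/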


/*!	Remove a previously installed interrupt handler */
status_t
remove_io_interrupt_handler(long vector, interrupt_handler handler, void *data)
{
	status_t status = B_BAD_VALUE;
	struct io_handler *io = NULL;
	struct io_handler *last = NULL;
	cpu_status state;

	if (vector < 0 || vector >= NUM_IO_VECTORS)
		return B_BAD_VALUE;

	/* lock the structures down so they are not modified while we search */
	state = disable_interrupts();
	acquire_spinlock(&sVectors[vector].vector_lock);

	/* loop through the available handlers and try to find a match.
	 * We go forward through the list but this means we start with the
	 * most recently added handlers.
	 */
	for (io = sVectors[vector].handler_list; io != NULL; io = io->next) {
		/* we have to match both function and data */
		if (io->func == handler && io->data == data) {
			if (last != NULL)
				last->next = io->next;
			else
				sVectors[vector].handler_list = io->next;

			// Check if we need to disable the interrupt
			if (io->use_enable_counter && --sVectors[vector].enable_count == 0)
				arch_int_disable_io_interrupt(vector);

			status = B_OK;
			break;
		}

		last = io;
	}

	if (sVectors[vector].handler_list == NULL
		&& sVectors[vector].type == INTERRUPT_TYPE_IRQ
		&& sVectors[vector].assigned_cpu != NULL
		&& sVectors[vector].assigned_cpu->handlers_count > 0) {

		int32 oldHandlersCount
			= atomic_add(&sVectors[vector].assigned_cpu->handlers_count, -1);

		if (oldHandlersCount == 1) {
			int32 oldCPU;
			SpinLocker locker;
			cpu_ent* cpu;

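			// The vector may be moved to another CPU while we wait for its
			// irqs_lock, so retry until the CPU we locked is still the one
			// the vector is assigned to.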
			do {
				locker.Unlock();

				oldCPU = sVectors[vector].assigned_cpu->cpu;

				ASSERT(oldCPU != -1);
				cpu = &gCPU[oldCPU];

				locker.SetTo(cpu->irqs_lock, false);
			} while (sVectors[vector].assigned_cpu->cpu != oldCPU);

			sVectors[vector].assigned_cpu->cpu = -1;
			list_remove_item(&cpu->irqs, sVectors[vector].assigned_cpu);
		}
	}

	release_spinlock(&sVectors[vector].vector_lock);
	restore_interrupts(state);

	// if the handler could be found and removed, we still have to free it
	if (status == B_OK)
		free(io);

	return status;
}


/*!	Mark \a count contiguous interrupts starting at \a startVector as in use.
	This will prevent them from being allocated by others. Only use this when
	the reserved range is hardwired to the given vector, otherwise allocate
	vectors using allocate_io_interrupt_vectors() instead.
*/
status_t
reserve_io_interrupt_vectors(long count, long startVector, interrupt_type type)
{
	MutexLocker locker(&sIOInterruptVectorAllocationLock);

	for (long i = 0; i < count; i++) {
		if (sAllocatedIOInterruptVectors[startVector + i]) {
			panic("reserved interrupt vector range %ld-%ld overlaps already "
				"allocated vector %ld", startVector, startVector + count - 1,
				startVector + i);
			free_io_interrupt_vectors(i, startVector);
			return B_BUSY;
		}

		sVectors[startVector + i].type = type;
		sVectors[startVector + i].assigned_cpu
			= &sVectorCPUAssignments[startVector + i];
		sVectorCPUAssignments[startVector + i].count = 1;
		sAllocatedIOInterruptVectors[startVector + i] = true;
	}

	dprintf("reserve_io_interrupt_vectors: reserved %ld vectors starting "
		"from %ld\n", count, startVector);
	return B_OK;
}


/*!	Allocate \a count contiguous interrupt vectors. The vectors are allocated
	from the available range so that they do not overlap with any reserved
	vectors. The first vector to be used is returned in \a startVector on
	success.
*/
status_t
allocate_io_interrupt_vectors(long count, long *startVector,
	interrupt_type type)
{
	MutexLocker locker(&sIOInterruptVectorAllocationLock);

	long vector = 0;
	bool runFound = false;
	for (long i = 0; i < NUM_IO_VECTORS - (count - 1); i++) {
		if (sAllocatedIOInterruptVectors[i])
			continue;

		vector = i;
		runFound = true;
		for (uint16 j = 1; j < count; j++) {
			if (sAllocatedIOInterruptVectors[i + j]) {
				runFound = false;
				i += j;
				break;
			}
		}

		if (runFound)
			break;
	}

	if (!runFound) {
		dprintf("found no free vectors to allocate %ld io interrupts\n", count);
		return B_NO_MEMORY;
	}

	for (long i = 0; i < count; i++) {
		sVectors[vector + i].type = type;
		sVectors[vector + i].assigned_cpu = &sVectorCPUAssignments[vector];
		sAllocatedIOInterruptVectors[vector + i] = true;
	}

	sVectorCPUAssignments[vector].irq = vector;
	sVectorCPUAssignments[vector].count = count;

	*startVector = vector;
	dprintf("allocate_io_interrupt_vectors: allocated %ld vectors starting "
		"from %ld\n", count, vector);
	return B_OK;
}
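/* Example (a sketch; the MSI-style device and the vector count of 2 are
   hypothetical):

	long startVector;
	if (allocate_io_interrupt_vectors(2, &startVector,
			INTERRUPT_TYPE_IRQ) == B_OK) {
		// program the device to raise startVector and startVector + 1,
		// install handlers, and eventually release the range again:
		free_io_interrupt_vectors(2, startVector);
	}
*/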


/*!	Free/unreserve interrupt vectors previously allocated with the
	{reserve|allocate}_io_interrupt_vectors() functions. The \a count and
	\a startVector can be adjusted from the allocation calls to partially free
	a vector range.
*/
void
free_io_interrupt_vectors(long count, long startVector)
{
	if (startVector + count > NUM_IO_VECTORS) {
		panic("invalid start vector %ld or count %ld supplied to "
			"free_io_interrupt_vectors\n", startVector, count);
		return;
	}

	dprintf("free_io_interrupt_vectors: freeing %ld vectors starting "
		"from %ld\n", count, startVector);

	MutexLocker locker(sIOInterruptVectorAllocationLock);
	for (long i = 0; i < count; i++) {
		if (!sAllocatedIOInterruptVectors[startVector + i]) {
			panic("io interrupt vector %ld was not allocated\n",
				startVector + i);
		}

		io_vector& vector = sVectors[startVector + i];
		InterruptsSpinLocker vectorLocker(vector.vector_lock);
		if (vector.assigned_cpu != NULL && vector.assigned_cpu->cpu != -1) {
			panic("freeing io interrupt vector %ld that is still assigned to a "
				"cpu", startVector + i);
			continue;
		}

		vector.assigned_cpu = NULL;
		sAllocatedIOInterruptVectors[startVector + i] = false;
	}
}


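/*!	Reassigns the given IRQ vector to \a newCPU, or to an automatically
	chosen CPU when \a newCPU is -1, and moves the vector between the
	per-CPU IRQ lists accordingly.
*/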
void
assign_io_interrupt_to_cpu(long vector, int32 newCPU)
{
	ASSERT(sVectors[vector].type == INTERRUPT_TYPE_IRQ);

	int32 oldCPU = sVectors[vector].assigned_cpu->cpu;

	if (newCPU == -1)
		newCPU = assign_cpu();

	if (newCPU == oldCPU)
		return;

	ASSERT(oldCPU != -1);
	cpu_ent* cpu = &gCPU[oldCPU];

	SpinLocker locker(cpu->irqs_lock);
	sVectors[vector].assigned_cpu->cpu = -1;
	list_remove_item(&cpu->irqs, sVectors[vector].assigned_cpu);
	locker.Unlock();

	newCPU = arch_int_assign_to_cpu(vector, newCPU);
	sVectors[vector].assigned_cpu->cpu = newCPU;
	cpu = &gCPU[newCPU];
	locker.SetTo(cpu->irqs_lock, false);
	list_add_item(&cpu->irqs, sVectors[vector].assigned_cpu);
}
757