xref: /haiku/src/system/kernel/int.cpp (revision dd2a1e350b303b855a50fd64e6cb55618be1ae6a)
/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2011, Michael Lotz, mmlr@mlotz.ch.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <int.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <arch/debug_console.h>
#include <arch/int.h>
#include <boot/kernel_args.h>
#include <elf.h>
#include <load_tracking.h>
#include <util/AutoLock.h>
#include <smp.h>

#include "kernel_debug_config.h"


//#define TRACE_INT
#ifdef TRACE_INT
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


struct io_handler {
	struct io_handler	*next;
	interrupt_handler	func;
	void				*data;
	bool				use_enable_counter;
	bool				no_handled_info;
#if DEBUG_INTERRUPTS
	int64				handled_count;
#endif
};

struct io_vector {
	struct io_handler	*handler_list;
	spinlock			vector_lock;
	int32				enable_count;
	bool				no_lock_vector;
	interrupt_type		type;

	spinlock			load_lock;
	bigtime_t			last_measure_time;
	bigtime_t			last_measure_active;
	int32				load;

	irq_assignment*		assigned_cpu;

#if DEBUG_INTERRUPTS
	int64				handled_count;
	int64				unhandled_count;
	int					trigger_count;
	int					ignored_count;
#endif
};
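
// Locking: vector_lock protects handler_list, enable_count and
// no_lock_vector; load_lock protects last_measure_time, last_measure_active
// and load. Handlers installed with B_NO_HANDLED_INFO are kept at the tail
// of handler_list, so handlers that can report whether they handled an
// interrupt always get to run first (see install_io_interrupt_handler()).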

static int32 sLastCPU;

static io_vector sVectors[NUM_IO_VECTORS];
static bool sAllocatedIOInterruptVectors[NUM_IO_VECTORS];
static irq_assignment sVectorCPUAssignments[NUM_IO_VECTORS];
static mutex sIOInterruptVectorAllocationLock
	= MUTEX_INITIALIZER("io_interrupt_vector_allocation");


#if DEBUG_INTERRUPTS
static int
dump_int_statistics(int argc, char **argv)
{
	int i;
	for (i = 0; i < NUM_IO_VECTORS; i++) {
		struct io_handler *io;

		if (!B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock)
			&& sVectors[i].enable_count == 0
			&& sVectors[i].handled_count == 0
			&& sVectors[i].unhandled_count == 0
			&& sVectors[i].handler_list == NULL)
			continue;

		kprintf("int %3d, enabled %" B_PRId32 ", handled %8" B_PRId64 ", "
			"unhandled %8" B_PRId64 "%s%s\n", i, sVectors[i].enable_count,
			sVectors[i].handled_count, sVectors[i].unhandled_count,
			B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock) ? ", ACTIVE" : "",
			sVectors[i].handler_list == NULL ? ", no handler" : "");

		for (io = sVectors[i].handler_list; io != NULL; io = io->next) {
			const char *symbol, *imageName;
			bool exactMatch;

			status_t error = elf_debug_lookup_symbol_address((addr_t)io->func,
				NULL, &symbol, &imageName, &exactMatch);
			if (error == B_OK && exactMatch) {
				if (strchr(imageName, '/') != NULL)
					imageName = strrchr(imageName, '/') + 1;

				int length = 4 + strlen(imageName);
				kprintf("   %s:%-*s (%p)", imageName, 45 - length, symbol,
					io->func);
			} else
				kprintf("\t\t\t\t\t   func %p", io->func);

			kprintf(", data %p, handled ", io->data);
			if (io->no_handled_info)
				kprintf("<unknown>\n");
			else
				kprintf("%8" B_PRId64 "\n", io->handled_count);
		}

		kprintf("\n");
	}
	return 0;
}
#endif


static int
dump_int_load(int argc, char** argv)
{
	static const char* typeNames[]
		= { "exception", "irq", "local irq", "syscall", "ici", "unknown" };

	for (int i = 0; i < NUM_IO_VECTORS; i++) {
		if (!B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock)
			&& sVectors[i].handler_list == NULL
			&& sVectors[i].enable_count == 0)
			continue;

		kprintf("int %3d, type %s, enabled %" B_PRId32 ", load %" B_PRId32
			"%%", i, typeNames[min_c(sVectors[i].type,
					INTERRUPT_TYPE_UNKNOWN)],
			sVectors[i].enable_count,
			sVectors[i].assigned_cpu != NULL
				? sVectors[i].assigned_cpu->load / 10 : 0);

		if (sVectors[i].type == INTERRUPT_TYPE_IRQ) {
			ASSERT(sVectors[i].assigned_cpu != NULL);

			if (sVectors[i].assigned_cpu->cpu != -1)
				kprintf(", cpu %" B_PRId32, sVectors[i].assigned_cpu->cpu);
			else
				kprintf(", cpu -");
		}

		if (B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock))
			kprintf(", ACTIVE");
		kprintf("\n");
	}

	return 0;
}


//	#pragma mark - private kernel API


bool
interrupts_enabled(void)
{
	return arch_int_are_interrupts_enabled();
}


status_t
int_init(kernel_args* args)
{
	TRACE(("int_init: entry\n"));

	return arch_int_init(args);
}


status_t
int_init_post_vm(kernel_args* args)
{
	int i;

	/* initialize the vector list */
	for (i = 0; i < NUM_IO_VECTORS; i++) {
		B_INITIALIZE_SPINLOCK(&sVectors[i].vector_lock);
		sVectors[i].enable_count = 0;
		sVectors[i].no_lock_vector = false;
		sVectors[i].type = INTERRUPT_TYPE_UNKNOWN;

		B_INITIALIZE_SPINLOCK(&sVectors[i].load_lock);
		sVectors[i].last_measure_time = 0;
		sVectors[i].last_measure_active = 0;
		sVectors[i].load = 0;

#if DEBUG_INTERRUPTS
		sVectors[i].handled_count = 0;
		sVectors[i].unhandled_count = 0;
		sVectors[i].trigger_count = 0;
		sVectors[i].ignored_count = 0;
#endif
		sVectors[i].handler_list = NULL;

		sVectorCPUAssignments[i].irq = i;
		sVectorCPUAssignments[i].count = 1;
		sVectorCPUAssignments[i].handlers_count = 0;
		sVectorCPUAssignments[i].load = 0;
		sVectorCPUAssignments[i].cpu = -1;
	}

#if DEBUG_INTERRUPTS
	add_debugger_command("ints", &dump_int_statistics,
		"list interrupt statistics");
#endif

	add_debugger_command("int_load", &dump_int_load,
		"list interrupt usage statistics");

	return arch_int_init_post_vm(args);
}


status_t
int_init_io(kernel_args* args)
{
	return arch_int_init_io(args);
}


status_t
int_init_post_device_manager(kernel_args* args)
{
	arch_debug_install_interrupt_handlers();

	return arch_int_init_post_device_manager(args);
}


static void
update_int_load(int i)
{
	if (!try_acquire_spinlock(&sVectors[i].load_lock))
		return;

	int32 oldLoad = sVectors[i].load;
	compute_load(sVectors[i].last_measure_time, sVectors[i].last_measure_active,
		sVectors[i].load, system_time());

	if (oldLoad != sVectors[i].load)
		atomic_add(&sVectors[i].assigned_cpu->load, sVectors[i].load - oldLoad);

	release_spinlock(&sVectors[i].load_lock);
}
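
// Note: compute_load() from load_tracking.h keeps load values on a
// 0...kMaxLoad (1000) scale, which is why dump_int_load() divides by 10
// when printing a percentage.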


/*!	Actually process an interrupt via the handlers registered for that
	vector (IRQ).
*/
int
int_io_interrupt_handler(int vector, bool levelTriggered)
{
	int status = B_UNHANDLED_INTERRUPT;
	struct io_handler* io;
	bool handled = false;

	bigtime_t start = system_time();

	// exceptions and syscalls have their own handlers
	ASSERT(sVectors[vector].type != INTERRUPT_TYPE_EXCEPTION
		&& sVectors[vector].type != INTERRUPT_TYPE_SYSCALL);

	if (!sVectors[vector].no_lock_vector)
		acquire_spinlock(&sVectors[vector].vector_lock);

#if !DEBUG_INTERRUPTS
	// The handler list can be empty at this point
	if (sVectors[vector].handler_list == NULL) {
		dprintf("unhandled io interrupt %d\n", vector);
		if (!sVectors[vector].no_lock_vector)
			release_spinlock(&sVectors[vector].vector_lock);
		return B_UNHANDLED_INTERRUPT;
	}
#endif

	// For level-triggered interrupts, we use the handler's return value
	// (i.e. B_HANDLED_INTERRUPT) to decide whether or not to call the
	// next handler in the chain.
	// For edge-triggered interrupts, however, we always need to call
	// all handlers, as multiple interrupts cannot be distinguished. We
	// still make sure the return code of this function reflects what
	// the drivers reported.

	for (io = sVectors[vector].handler_list; io != NULL; io = io->next) {
		status = io->func(io->data);

#if DEBUG_INTERRUPTS
		if (status != B_UNHANDLED_INTERRUPT)
			io->handled_count++;
#endif
		if (levelTriggered && status != B_UNHANDLED_INTERRUPT)
			break;

		if (status == B_HANDLED_INTERRUPT || status == B_INVOKE_SCHEDULER)
			handled = true;
	}

#if DEBUG_INTERRUPTS
	sVectors[vector].trigger_count++;
	if (status != B_UNHANDLED_INTERRUPT || handled) {
		sVectors[vector].handled_count++;
	} else {
		sVectors[vector].unhandled_count++;
		sVectors[vector].ignored_count++;
	}

	if (sVectors[vector].trigger_count > 10000) {
		if (sVectors[vector].ignored_count > 9900) {
			struct io_handler *last = sVectors[vector].handler_list;
			while (last && last->next)
				last = last->next;

			if (last != NULL && last->no_handled_info) {
				// We have an interrupt handler installed that does not
				// know whether or not it has actually handled the interrupt,
				// so the unhandled count is inaccurate and we can't just
				// disable the vector.
			} else {
				if (sVectors[vector].handler_list == NULL
					|| sVectors[vector].handler_list->next == NULL) {
					// this interrupt vector is not shared, disable it
					sVectors[vector].enable_count = -100;
					arch_int_disable_io_interrupt(vector);
					dprintf("Disabling unhandled io interrupt %d\n", vector);
				} else {
					// this is a shared interrupt vector, we cannot just disable it
					dprintf("More than 99%% interrupts of vector %d are unhandled\n",
						vector);
				}
			}
		}

		sVectors[vector].trigger_count = 0;
		sVectors[vector].ignored_count = 0;
	}
#endif

	if (!sVectors[vector].no_lock_vector)
		release_spinlock(&sVectors[vector].vector_lock);

	SpinLocker vectorLocker(sVectors[vector].load_lock);
	bigtime_t deltaTime = system_time() - start;
	sVectors[vector].last_measure_active += deltaTime;
	vectorLocker.Unlock();

	cpu_ent* cpu = get_cpu_struct();
	if (sVectors[vector].type == INTERRUPT_TYPE_IRQ
		|| sVectors[vector].type == INTERRUPT_TYPE_ICI
		|| sVectors[vector].type == INTERRUPT_TYPE_LOCAL_IRQ) {
		cpu->interrupt_time += deltaTime;
		if (sVectors[vector].type == INTERRUPT_TYPE_IRQ)
			cpu->irq_time += deltaTime;
	}

	update_int_load(vector);

	if (levelTriggered)
		return status;

	// edge-triggered return value

	if (handled)
		return B_HANDLED_INTERRUPT;

	return B_UNHANDLED_INTERRUPT;
}
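
/*	Illustration only (not part of the kernel): a minimal sketch of a
	driver-side handler and how its return value feeds the logic above.
	All my_device_* names are hypothetical placeholders.

	static int32
	my_device_interrupt(void* data)
	{
		my_device* device = (my_device*)data;

		// On a shared vector, first check whether this device actually
		// raised the interrupt.
		if (!my_device_interrupt_pending(device))
			return B_UNHANDLED_INTERRUPT;

		my_device_acknowledge_interrupt(device);

		// Wake the service thread; B_INVOKE_SCHEDULER counts as "handled"
		// above and additionally requests a reschedule on return.
		release_sem_etc(device->semaphore, 1, B_DO_NOT_RESCHEDULE);
		return B_INVOKE_SCHEDULER;
	}
*/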


//	#pragma mark - public API


#undef disable_interrupts
#undef restore_interrupts


cpu_status
disable_interrupts(void)
{
	return arch_int_disable_interrupts();
}


void
restore_interrupts(cpu_status status)
{
	arch_int_restore_interrupts(status);
}


static uint32
assign_cpu(void)
{
	const cpu_topology_node* node;
	do {
		int32 nextID = atomic_add(&sLastCPU, 1);
		node = get_cpu_topology();

		while (node->level != CPU_TOPOLOGY_SMT) {
			int levelSize = node->children_count;
			node = node->children[nextID % levelSize];
			nextID /= levelSize;
		}
	} while (gCPU[node->id].disabled);

	return node->id;
}
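
// Example of the topology walk above: with 2 packages of 2 cores with
// 2 SMT threads each, the root node has 2 children (the packages). For
// nextID == 5 the walk picks package 5 % 2 == 1 (nextID becomes 2), then
// core 2 % 2 == 0 (nextID becomes 1), then SMT thread 1 % 2 == 1.
// Consecutive IDs therefore alternate between packages first, spreading
// IRQs across the most distant topology levels before loading siblings.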


/*!	Install a handler to be called when an interrupt is triggered
	for the given interrupt number with \a data as the argument.
*/
status_t
install_io_interrupt_handler(int32 vector, interrupt_handler handler, void *data,
	uint32 flags)
{
	struct io_handler *io = NULL;
	cpu_status state;

	if (vector < 0 || vector >= NUM_IO_VECTORS)
		return B_BAD_VALUE;

	io = (struct io_handler *)malloc(sizeof(struct io_handler));
	if (io == NULL)
		return B_NO_MEMORY;

	arch_debug_remove_interrupt_handler(vector);
		// There might be a temporary debug interrupt installed on this
		// vector that should be removed now.

	io->func = handler;
	io->data = data;
	io->use_enable_counter = (flags & B_NO_ENABLE_COUNTER) == 0;
	io->no_handled_info = (flags & B_NO_HANDLED_INFO) != 0;
#if DEBUG_INTERRUPTS
	io->handled_count = 0LL;
#endif

	// Disable the interrupts, get the spinlock for this irq only
	// and then insert the handler
	state = disable_interrupts();
	acquire_spinlock(&sVectors[vector].vector_lock);

	// Initial attempt to balance IRQs, the scheduler will correct this
	// if some cores end up being overloaded.
	if (sVectors[vector].type == INTERRUPT_TYPE_IRQ
		&& sVectors[vector].handler_list == NULL
		&& sVectors[vector].assigned_cpu->cpu == -1) {

		int32 cpuID = assign_cpu();
		cpuID = arch_int_assign_to_cpu(vector, cpuID);
		sVectors[vector].assigned_cpu->cpu = cpuID;

		cpu_ent* cpu = &gCPU[cpuID];
		SpinLocker _(cpu->irqs_lock);
		atomic_add(&sVectors[vector].assigned_cpu->handlers_count, 1);
		list_add_item(&cpu->irqs, sVectors[vector].assigned_cpu);
	}

	if ((flags & B_NO_HANDLED_INFO) != 0
		&& sVectors[vector].handler_list != NULL) {
		// The driver registering this interrupt handler doesn't know
		// whether or not it actually handled the interrupt after the
		// handler returns. This is incompatible with shared interrupts
		// as we'd potentially steal interrupts from other handlers
		// resulting in interrupt storms. Therefore we enqueue this interrupt
		// handler as the very last one, meaning all other handlers will
		// get their go at any interrupt first.
		struct io_handler *last = sVectors[vector].handler_list;
		while (last->next)
			last = last->next;

		io->next = NULL;
		last->next = io;
	} else {
		// A normal interrupt handler, just add it to the head of the list.
		io->next = sVectors[vector].handler_list;
		sVectors[vector].handler_list = io;
	}

	// If B_NO_ENABLE_COUNTER is set, we're being asked to not alter
	// whether the interrupt should be enabled or not
	if (io->use_enable_counter) {
		if (sVectors[vector].enable_count++ == 0)
			arch_int_enable_io_interrupt(vector);
	}

	// If B_NO_LOCK_VECTOR is specified this is a vector that is not supposed
	// to have multiple handlers and does not require locking of the vector
	// when entering the handler. For example this is used by internally
	// registered interrupt handlers like for handling local APIC interrupts
	// that may run concurrently on multiple CPUs. Locking with a spinlock
	// would in that case defeat the purpose as it would serialize calling the
	// handlers in parallel on different CPUs.
	if (flags & B_NO_LOCK_VECTOR)
		sVectors[vector].no_lock_vector = true;

	release_spinlock(&sVectors[vector].vector_lock);

	restore_interrupts(state);

	return B_OK;
}
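
/*	Usage sketch (hypothetical driver code; device and my_device_interrupt
	are placeholders, see the illustration after int_io_interrupt_handler()):

	if (install_io_interrupt_handler(device->irq, &my_device_interrupt,
			device, 0) != B_OK)
		return B_ERROR;

	// ... and on teardown, matching the same (vector, handler, data) triple:
	remove_io_interrupt_handler(device->irq, &my_device_interrupt, device);
*/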


/*!	Remove a previously installed interrupt handler */
status_t
remove_io_interrupt_handler(int32 vector, interrupt_handler handler, void *data)
{
	status_t status = B_BAD_VALUE;
	struct io_handler *io = NULL;
	struct io_handler *last = NULL;
	cpu_status state;

	if (vector < 0 || vector >= NUM_IO_VECTORS)
		return B_BAD_VALUE;

	/* lock the structures down so they are not modified while we search */
	state = disable_interrupts();
	acquire_spinlock(&sVectors[vector].vector_lock);

	/* loop through the available handlers and try to find a match.
	 * We go forward through the list, which means we start with the
	 * most recently added handlers.
	 */
	for (io = sVectors[vector].handler_list; io != NULL; io = io->next) {
		/* we have to match both function and data */
		if (io->func == handler && io->data == data) {
			if (last != NULL)
				last->next = io->next;
			else
				sVectors[vector].handler_list = io->next;

			// Check if we need to disable the interrupt
			if (io->use_enable_counter && --sVectors[vector].enable_count == 0)
				arch_int_disable_io_interrupt(vector);

			status = B_OK;
			break;
		}

		last = io;
	}

	if (sVectors[vector].handler_list == NULL
		&& sVectors[vector].type == INTERRUPT_TYPE_IRQ
		&& sVectors[vector].assigned_cpu != NULL
		&& sVectors[vector].assigned_cpu->handlers_count > 0) {

		int32 oldHandlersCount
			= atomic_add(&sVectors[vector].assigned_cpu->handlers_count, -1);

		if (oldHandlersCount == 1) {
			int32 oldCPU;
			SpinLocker locker;
			cpu_ent* cpu;
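
			// The assigned CPU can change concurrently (see
			// assign_io_interrupt_to_cpu()), so retry until we hold the
			// irqs_lock of the CPU this vector is actually assigned to.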
			do {
				locker.Unlock();

				oldCPU = sVectors[vector].assigned_cpu->cpu;

				ASSERT(oldCPU != -1);
				cpu = &gCPU[oldCPU];

				locker.SetTo(cpu->irqs_lock, false);
			} while (sVectors[vector].assigned_cpu->cpu != oldCPU);

			sVectors[vector].assigned_cpu->cpu = -1;
			list_remove_item(&cpu->irqs, sVectors[vector].assigned_cpu);
		}
	}

	release_spinlock(&sVectors[vector].vector_lock);
	restore_interrupts(state);

	// if the handler could be found and removed, we still have to free it
	if (status == B_OK)
		free(io);

	return status;
}


/*!	Mark \a count contiguous interrupts starting at \a startVector as in use.
	This will prevent them from being allocated by others. Only use this when
	the reserved range is hardwired to the given vector, otherwise allocate
	vectors using allocate_io_interrupt_vectors() instead.
*/
status_t
reserve_io_interrupt_vectors(int32 count, int32 startVector, interrupt_type type)
{
	MutexLocker locker(&sIOInterruptVectorAllocationLock);

	for (int32 i = 0; i < count; i++) {
		if (sAllocatedIOInterruptVectors[startVector + i]) {
			panic("reserved interrupt vector range %" B_PRId32 "-%" B_PRId32 " overlaps already "
				"allocated vector %" B_PRId32, startVector, startVector + count - 1,
				startVector + i);
			free_io_interrupt_vectors(i, startVector);
			return B_BUSY;
		}

		sVectors[startVector + i].type = type;
		sVectors[startVector + i].assigned_cpu
			= &sVectorCPUAssignments[startVector + i];
		sVectorCPUAssignments[startVector + i].count = 1;
		sAllocatedIOInterruptVectors[startVector + i] = true;
	}

	dprintf("reserve_io_interrupt_vectors: reserved %" B_PRId32 " vectors starting "
		"from %" B_PRId32 "\n", count, startVector);
	return B_OK;
}
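
/*	Illustrative only: an architecture backend with the 16 legacy ISA IRQs
	hardwired to vectors 0-15 could mark them as in use at boot:

	reserve_io_interrupt_vectors(16, 0, INTERRUPT_TYPE_IRQ);
*/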


/*!	Allocate \a count contiguous interrupt vectors. The vectors are allocated
	as available so that they do not overlap with any other reserved vector.
	The first vector to be used is returned in \a startVector on success.
*/
status_t
allocate_io_interrupt_vectors(int32 count, int32 *startVector,
	interrupt_type type)
{
	MutexLocker locker(&sIOInterruptVectorAllocationLock);

	int32 vector = 0;
	bool runFound = false;
		// must start out false: if every vector is already allocated the
		// loop below never sets it, and we must not "find" vector 0
	for (int32 i = 0; i < NUM_IO_VECTORS - (count - 1); i++) {
		if (sAllocatedIOInterruptVectors[i])
			continue;

		vector = i;
		runFound = true;
		for (uint16 j = 1; j < count; j++) {
			if (sAllocatedIOInterruptVectors[i + j]) {
				runFound = false;
				i += j;
				break;
			}
		}

		if (runFound)
			break;
	}

	if (!runFound) {
		dprintf("found no free vectors to allocate %" B_PRId32 " io interrupts\n", count);
		return B_NO_MEMORY;
	}

	for (int32 i = 0; i < count; i++) {
		sVectors[vector + i].type = type;
		sVectors[vector + i].assigned_cpu = &sVectorCPUAssignments[vector];
		sAllocatedIOInterruptVectors[vector + i] = true;
	}

	sVectorCPUAssignments[vector].irq = vector;
	sVectorCPUAssignments[vector].count = count;

	*startVector = vector;
	dprintf("allocate_io_interrupt_vectors: allocated %" B_PRId32 " vectors starting "
		"from %" B_PRId32 "\n", count, vector);
	return B_OK;
}
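
/*	Usage sketch (hypothetical MSI-style caller; numVectors is a placeholder):

	int32 vector;
	if (allocate_io_interrupt_vectors(numVectors, &vector,
			INTERRUPT_TYPE_IRQ) == B_OK) {
		// the device can now be programmed to signal
		// vector ... vector + numVectors - 1
	}
*/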


/*!	Free/unreserve interrupt vectors previously allocated with the
	{reserve|allocate}_io_interrupt_vectors() functions. The \a count and
	\a startVector can be adjusted from the allocation calls to partially free
	a vector range.
*/
void
free_io_interrupt_vectors(int32 count, int32 startVector)
{
	if (startVector + count > NUM_IO_VECTORS) {
		panic("invalid start vector %" B_PRId32 " or count %" B_PRId32 " supplied to "
			"free_io_interrupt_vectors\n", startVector, count);
		return;
	}

	dprintf("free_io_interrupt_vectors: freeing %" B_PRId32 " vectors starting "
		"from %" B_PRId32 "\n", count, startVector);

	MutexLocker locker(sIOInterruptVectorAllocationLock);
	for (int32 i = 0; i < count; i++) {
		if (!sAllocatedIOInterruptVectors[startVector + i]) {
			panic("io interrupt vector %" B_PRId32 " was not allocated\n",
				startVector + i);
		}

		io_vector& vector = sVectors[startVector + i];
		InterruptsSpinLocker vectorLocker(vector.vector_lock);
		if (vector.assigned_cpu != NULL && vector.assigned_cpu->cpu != -1) {
			panic("freeing io interrupt vector %" B_PRId32 " that is still assigned to a "
				"cpu", startVector + i);
			continue;
		}

		vector.assigned_cpu = NULL;
		sAllocatedIOInterruptVectors[startVector + i] = false;
	}
}


void
assign_io_interrupt_to_cpu(int32 vector, int32 newCPU)
{
	ASSERT(sVectors[vector].type == INTERRUPT_TYPE_IRQ);

	int32 oldCPU = sVectors[vector].assigned_cpu->cpu;

	if (newCPU == -1)
		newCPU = assign_cpu();

	if (newCPU == oldCPU)
		return;

	ASSERT(oldCPU != -1);
	cpu_ent* cpu = &gCPU[oldCPU];

	SpinLocker locker(cpu->irqs_lock);
	sVectors[vector].assigned_cpu->cpu = -1;
	list_remove_item(&cpu->irqs, sVectors[vector].assigned_cpu);
	locker.Unlock();

	newCPU = arch_int_assign_to_cpu(vector, newCPU);
	sVectors[vector].assigned_cpu->cpu = newCPU;
	cpu = &gCPU[newCPU];
	locker.SetTo(cpu->irqs_lock, false);
	list_add_item(&cpu->irqs, sVectors[vector].assigned_cpu);
}
751