/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2011, Michael Lotz, mmlr@mlotz.ch.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <int.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <arch/debug_console.h>
#include <arch/int.h>
#include <boot/kernel_args.h>
#include <elf.h>
#include <load_tracking.h>
#include <util/AutoLock.h>
#include <smp.h>

#include "kernel_debug_config.h"


//#define TRACE_INT
#ifdef TRACE_INT
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


struct io_handler {
	struct io_handler	*next;
	interrupt_handler	func;
	void				*data;
	bool				use_enable_counter;
	bool				no_handled_info;
#if DEBUG_INTERRUPTS
	int64				handled_count;
#endif
};

struct io_vector {
	struct io_handler	*handler_list;
	spinlock			vector_lock;
	int32				enable_count;
	bool				no_lock_vector;
	interrupt_type		type;

	spinlock			load_lock;
	bigtime_t			last_measure_time;
	bigtime_t			last_measure_active;
	int32				load;

	irq_assignment*		assigned_cpu;

#if DEBUG_INTERRUPTS
	int64				handled_count;
	int64				unhandled_count;
	int					trigger_count;
	int					ignored_count;
#endif
};

static int32 sLastCPU;

static io_vector sVectors[NUM_IO_VECTORS];
static bool sAllocatedIOInterruptVectors[NUM_IO_VECTORS];
static irq_assignment sVectorCPUAssignments[NUM_IO_VECTORS];
static mutex sIOInterruptVectorAllocationLock
	= MUTEX_INITIALIZER("io_interrupt_vector_allocation");


#if DEBUG_INTERRUPTS
static int
dump_int_statistics(int argc, char **argv)
{
	int i;
	for (i = 0; i < NUM_IO_VECTORS; i++) {
		struct io_handler *io;

		if (!B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock)
			&& sVectors[i].enable_count == 0
			&& sVectors[i].handled_count == 0
			&& sVectors[i].unhandled_count == 0
			&& sVectors[i].handler_list == NULL)
			continue;

		kprintf("int %3d, enabled %" B_PRId32 ", handled %8" B_PRId64 ", "
			"unhandled %8" B_PRId64 "%s%s\n", i, sVectors[i].enable_count,
			sVectors[i].handled_count, sVectors[i].unhandled_count,
			B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock) ? ", ACTIVE" : "",
			sVectors[i].handler_list == NULL ? ", no handler" : "");

		for (io = sVectors[i].handler_list; io != NULL; io = io->next) {
			const char *symbol, *imageName;
			bool exactMatch;

			status_t error = elf_debug_lookup_symbol_address((addr_t)io->func,
				NULL, &symbol, &imageName, &exactMatch);
			if (error == B_OK && exactMatch) {
				if (strchr(imageName, '/') != NULL)
					imageName = strrchr(imageName, '/') + 1;

				int length = 4 + strlen(imageName);
				kprintf("   %s:%-*s (%p)", imageName, 45 - length, symbol,
					io->func);
			} else
				kprintf("\t\t\t\t\t   func %p", io->func);

			kprintf(", data %p, handled ", io->data);
			if (io->no_handled_info)
				kprintf("<unknown>\n");
			else
				kprintf("%8" B_PRId64 "\n", io->handled_count);
		}

		kprintf("\n");
	}
	return 0;
}
#endif


static int
dump_int_load(int argc, char** argv)
{
	static const char* typeNames[]
		= { "exception", "irq", "local irq", "syscall", "ici", "unknown" };

	for (int i = 0; i < NUM_IO_VECTORS; i++) {
		if (!B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock)
			&& sVectors[i].handler_list == NULL
			&& sVectors[i].enable_count == 0)
			continue;

		kprintf("int %3d, type %s, enabled %" B_PRId32 ", load %" B_PRId32
			"%%", i, typeNames[min_c(sVectors[i].type,
					INTERRUPT_TYPE_UNKNOWN)],
			sVectors[i].enable_count,
			sVectors[i].assigned_cpu != NULL
				? sVectors[i].assigned_cpu->load / 10 : 0);

		if (sVectors[i].type == INTERRUPT_TYPE_IRQ) {
			ASSERT(sVectors[i].assigned_cpu != NULL);

			if (sVectors[i].assigned_cpu->cpu != -1)
				kprintf(", cpu %" B_PRId32, sVectors[i].assigned_cpu->cpu);
			else
				kprintf(", cpu -");
		}

		if (B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock))
			kprintf(", ACTIVE");
		kprintf("\n");
	}

	return 0;
}


//	#pragma mark - private kernel API


bool
interrupts_enabled(void)
{
	return arch_int_are_interrupts_enabled();
}


status_t
int_init(kernel_args* args)
{
	TRACE(("init_int_handlers: entry\n"));

	return arch_int_init(args);
}


status_t
int_init_post_vm(kernel_args* args)
{
	int i;

	/* initialize the vector list */
	for (i = 0; i < NUM_IO_VECTORS; i++) {
		B_INITIALIZE_SPINLOCK(&sVectors[i].vector_lock);
		sVectors[i].enable_count = 0;
		sVectors[i].no_lock_vector = false;
		sVectors[i].type = INTERRUPT_TYPE_UNKNOWN;

		B_INITIALIZE_SPINLOCK(&sVectors[i].load_lock);
		sVectors[i].last_measure_time = 0;
		sVectors[i].last_measure_active = 0;
		sVectors[i].load = 0;

#if DEBUG_INTERRUPTS
		sVectors[i].handled_count = 0;
		sVectors[i].unhandled_count = 0;
		sVectors[i].trigger_count = 0;
		sVectors[i].ignored_count = 0;
#endif
		sVectors[i].handler_list = NULL;

		sVectorCPUAssignments[i].irq = i;
		sVectorCPUAssignments[i].count = 1;
		sVectorCPUAssignments[i].handlers_count = 0;
		sVectorCPUAssignments[i].load = 0;
		sVectorCPUAssignments[i].cpu = -1;
	}

#if DEBUG_INTERRUPTS
	add_debugger_command("ints", &dump_int_statistics,
		"list interrupt statistics");
#endif

	add_debugger_command("int_load", &dump_int_load,
		"list interrupt usage statistics");

	return arch_int_init_post_vm(args);
}


status_t
int_init_io(kernel_args* args)
{
	return arch_int_init_io(args);
}


status_t
int_init_post_device_manager(kernel_args* args)
{
	arch_debug_install_interrupt_handlers();

	return arch_int_init_post_device_manager(args);
}


static void
update_int_load(int i)
{
	if (!try_acquire_spinlock(&sVectors[i].load_lock))
		return;

	int32 oldLoad = sVectors[i].load;
	compute_load(sVectors[i].last_measure_time, sVectors[i].last_measure_active,
		sVectors[i].load, system_time());

	if (oldLoad != sVectors[i].load)
		atomic_add(&sVectors[i].assigned_cpu->load, sVectors[i].load - oldLoad);

	release_spinlock(&sVectors[i].load_lock);
}


/*!	Actually process an interrupt via the handlers registered for that
	vector (IRQ).
*/
int
int_io_interrupt_handler(int vector, bool levelTriggered)
{
	int status = B_UNHANDLED_INTERRUPT;
	struct io_handler* io;
	bool handled = false;

	bigtime_t start = system_time();

	// exceptions and syscalls have their own handlers
	ASSERT(sVectors[vector].type != INTERRUPT_TYPE_EXCEPTION
		&& sVectors[vector].type != INTERRUPT_TYPE_SYSCALL);

	if (!sVectors[vector].no_lock_vector)
		acquire_spinlock(&sVectors[vector].vector_lock);

#if !DEBUG_INTERRUPTS
	// the handler list may still be empty at this point
	if (sVectors[vector].handler_list == NULL) {
		dprintf("unhandled io interrupt %d\n", vector);
		if (!sVectors[vector].no_lock_vector)
			release_spinlock(&sVectors[vector].vector_lock);
		return B_UNHANDLED_INTERRUPT;
	}
#endif

	// For level-triggered interrupts, we actually handle the return
	// value (i.e. B_HANDLED_INTERRUPT) to decide whether or not we
	// want to call another interrupt handler.
	// For edge-triggered interrupts, however, we always need to call
	// all handlers, as multiple interrupts cannot be identified. We
	// still make sure the return code of this function will issue
	// whatever the driver thought would be useful.

	for (io = sVectors[vector].handler_list; io != NULL; io = io->next) {
		status = io->func(io->data);

#if DEBUG_INTERRUPTS
		if (status != B_UNHANDLED_INTERRUPT)
			io->handled_count++;
#endif
		if (levelTriggered && status != B_UNHANDLED_INTERRUPT)
			break;

		if (status == B_HANDLED_INTERRUPT || status == B_INVOKE_SCHEDULER)
			handled = true;
	}

	ASSERT_PRINT(!are_interrupts_enabled(),
		"interrupts enabled after calling handlers for vector %d", vector);

#if DEBUG_INTERRUPTS
	sVectors[vector].trigger_count++;
	if (status != B_UNHANDLED_INTERRUPT || handled) {
		sVectors[vector].handled_count++;
	} else {
		sVectors[vector].unhandled_count++;
		sVectors[vector].ignored_count++;
	}

	if (sVectors[vector].trigger_count > 10000) {
		if (sVectors[vector].ignored_count > 9900) {
			struct io_handler *last = sVectors[vector].handler_list;
			while (last && last->next)
				last = last->next;

			if (last != NULL && last->no_handled_info) {
				// We have an interrupt handler installed that does not
				// know whether or not it has actually handled the interrupt,
				// so this unhandled count is inaccurate and we can't just
				// disable the vector.
			} else {
				if (sVectors[vector].handler_list == NULL
					|| sVectors[vector].handler_list->next == NULL) {
					// this interrupt vector is not shared, disable it
					sVectors[vector].enable_count = -100;
					arch_int_disable_io_interrupt(vector);
					dprintf("Disabling unhandled io interrupt %d\n", vector);
				} else {
					// this is a shared interrupt vector, we cannot just
					// disable it
					dprintf("More than 99%% interrupts of vector %d are "
						"unhandled\n", vector);
				}
			}
		}

		sVectors[vector].trigger_count = 0;
		sVectors[vector].ignored_count = 0;
	}
#endif

	if (!sVectors[vector].no_lock_vector)
		release_spinlock(&sVectors[vector].vector_lock);

	SpinLocker vectorLocker(sVectors[vector].load_lock);
	bigtime_t deltaTime = system_time() - start;
	sVectors[vector].last_measure_active += deltaTime;
	vectorLocker.Unlock();

	cpu_ent* cpu = get_cpu_struct();
	if (sVectors[vector].type == INTERRUPT_TYPE_IRQ
		|| sVectors[vector].type == INTERRUPT_TYPE_ICI
		|| sVectors[vector].type == INTERRUPT_TYPE_LOCAL_IRQ) {
		cpu->interrupt_time += deltaTime;
		if (sVectors[vector].type == INTERRUPT_TYPE_IRQ)
			cpu->irq_time += deltaTime;
	}

	update_int_load(vector);

	if (levelTriggered)
		return status;

	// edge triggered return value

	if (handled)
		return B_HANDLED_INTERRUPT;

	return B_UNHANDLED_INTERRUPT;
}
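

/*	Illustrative sketch (not part of the kernel): how a driver-side handler
	cooperates with the dispatch loop above. The device cookie and helper
	names are hypothetical; only the interrupt_handler signature and the
	B_*_INTERRUPT return codes come from the API used in this file.

	static int32
	my_device_interrupt(void* data)
	{
		my_device_cookie* cookie = (my_device_cookie*)data;

		// Check a (hypothetical) status register to see whether our device
		// raised this interrupt; on shared, level-triggered vectors this
		// is what lets the loop above stop at the right handler.
		if ((my_device_read_status(cookie) & MY_DEVICE_INTERRUPT_PENDING) == 0)
			return B_UNHANDLED_INTERRUPT;

		my_device_acknowledge_interrupt(cookie);

		// If handling unblocked a waiting thread, B_INVOKE_SCHEDULER asks
		// the kernel to reschedule once interrupt processing is done.
		if (my_device_unblocked_waiter(cookie))
			return B_INVOKE_SCHEDULER;

		return B_HANDLED_INTERRUPT;
	}
*/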


//	#pragma mark - public API


#undef disable_interrupts
#undef restore_interrupts


cpu_status
disable_interrupts(void)
{
	return arch_int_disable_interrupts();
}


void
restore_interrupts(cpu_status status)
{
	arch_int_restore_interrupts(status);
}


static uint32
assign_cpu(void)
{
	const cpu_topology_node* node;
	do {
		int32 nextID = atomic_add(&sLastCPU, 1);
		node = get_cpu_topology();

		// Interpret the round-robin counter as mixed-radix digits, one per
		// topology level: consecutive assignments spread across packages
		// and cores before doubling up on SMT siblings.
		while (node->level != CPU_TOPOLOGY_SMT) {
			int levelSize = node->children_count;
			node = node->children[nextID % levelSize];
			nextID /= levelSize;
		}
	} while (gCPU[node->id].disabled);

	return node->id;
}


/*!	Install a handler to be called when an interrupt is triggered
	for the given interrupt number with \a data as the argument.
*/
status_t
install_io_interrupt_handler(int32 vector, interrupt_handler handler,
	void *data, uint32 flags)
{
	struct io_handler *io = NULL;
	cpu_status state;

	if (vector < 0 || vector >= NUM_IO_VECTORS)
		return B_BAD_VALUE;

	io = (struct io_handler *)malloc(sizeof(struct io_handler));
	if (io == NULL)
		return B_NO_MEMORY;

	arch_debug_remove_interrupt_handler(vector);
		// There might be a temporary debug interrupt installed on this
		// vector that should be removed now.

	io->func = handler;
	io->data = data;
	io->use_enable_counter = (flags & B_NO_ENABLE_COUNTER) == 0;
	io->no_handled_info = (flags & B_NO_HANDLED_INFO) != 0;
#if DEBUG_INTERRUPTS
	io->handled_count = 0LL;
#endif

	// Disable the interrupts, get the spinlock for this irq only
	// and then insert the handler
	state = disable_interrupts();
	acquire_spinlock(&sVectors[vector].vector_lock);

	// Initial attempt to balance IRQs, the scheduler will correct this
	// if some cores end up being overloaded.
	if (sVectors[vector].type == INTERRUPT_TYPE_IRQ
		&& sVectors[vector].handler_list == NULL
		&& sVectors[vector].assigned_cpu->cpu == -1) {

		int32 cpuID = assign_cpu();
		cpuID = arch_int_assign_to_cpu(vector, cpuID);
		sVectors[vector].assigned_cpu->cpu = cpuID;

		cpu_ent* cpu = &gCPU[cpuID];
		SpinLocker _(cpu->irqs_lock);
		atomic_add(&sVectors[vector].assigned_cpu->handlers_count, 1);
		list_add_item(&cpu->irqs, sVectors[vector].assigned_cpu);
	}

	if ((flags & B_NO_HANDLED_INFO) != 0
		&& sVectors[vector].handler_list != NULL) {
		// The driver registering this interrupt handler doesn't know
		// whether or not it actually handled the interrupt after the
		// handler returns. This is incompatible with shared interrupts,
		// as we'd potentially steal interrupts from other handlers,
		// resulting in interrupt storms. Therefore we enqueue this
		// interrupt handler as the very last one, meaning all other
		// handlers will get their go at any interrupt first.
		struct io_handler *last = sVectors[vector].handler_list;
		while (last->next)
			last = last->next;

		io->next = NULL;
		last->next = io;
	} else {
		// A normal interrupt handler, just add it to the head of the list.
		io->next = sVectors[vector].handler_list;
		sVectors[vector].handler_list = io;
	}

	// If B_NO_ENABLE_COUNTER is set, we're being asked to not alter
	// whether the interrupt should be enabled or not
	if (io->use_enable_counter) {
		if (sVectors[vector].enable_count++ == 0)
			arch_int_enable_io_interrupt(vector);
	}

	// If B_NO_LOCK_VECTOR is specified this is a vector that is not supposed
	// to have multiple handlers and does not require locking of the vector
	// when entering the handler. For example this is used by internally
	// registered interrupt handlers like for handling local APIC interrupts
	// that may run concurrently on multiple CPUs. Locking with a spinlock
	// would in that case defeat the purpose as it would serialize calling the
	// handlers in parallel on different CPUs.
	if (flags & B_NO_LOCK_VECTOR)
		sVectors[vector].no_lock_vector = true;

	release_spinlock(&sVectors[vector].vector_lock);

	restore_interrupts(state);

	return B_OK;
}
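

/*	Illustrative sketch (not part of the kernel): the usual install/remove
	pairing as seen from a driver. The vector, handler and cookie names are
	hypothetical; the calls and flags match the definitions in this file.

	status_t
	my_device_init_interrupts(my_device_cookie* cookie)
	{
		// Head-of-list registration; pass B_NO_HANDLED_INFO instead of 0
		// if the handler cannot tell whether its device raised the
		// interrupt (the handler is then queued last, see above).
		return install_io_interrupt_handler(cookie->vector,
			&my_device_interrupt, cookie, 0);
	}

	void
	my_device_uninit_interrupts(my_device_cookie* cookie)
	{
		// Removal must match both the handler function and the data pointer.
		remove_io_interrupt_handler(cookie->vector, &my_device_interrupt,
			cookie);
	}
*/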


/*!	Remove a previously installed interrupt handler */
status_t
remove_io_interrupt_handler(int32 vector, interrupt_handler handler,
	void *data)
{
	status_t status = B_BAD_VALUE;
	struct io_handler *io = NULL;
	struct io_handler *last = NULL;
	cpu_status state;

	if (vector < 0 || vector >= NUM_IO_VECTORS)
		return B_BAD_VALUE;

	/* lock the structures down so they are not modified while we search */
	state = disable_interrupts();
	acquire_spinlock(&sVectors[vector].vector_lock);

	/* loop through the available handlers and try to find a match.
	 * We go forward through the list, which means we start with the
	 * most recently added handlers.
	 */
	for (io = sVectors[vector].handler_list; io != NULL; io = io->next) {
		/* we have to match both function and data */
		if (io->func == handler && io->data == data) {
			if (last != NULL)
				last->next = io->next;
			else
				sVectors[vector].handler_list = io->next;

			// Check if we need to disable the interrupt
			if (io->use_enable_counter && --sVectors[vector].enable_count == 0)
				arch_int_disable_io_interrupt(vector);

			status = B_OK;
			break;
		}

		last = io;
	}

	if (sVectors[vector].handler_list == NULL
		&& sVectors[vector].type == INTERRUPT_TYPE_IRQ
		&& sVectors[vector].assigned_cpu != NULL
		&& sVectors[vector].assigned_cpu->handlers_count > 0) {

		int32 oldHandlersCount
			= atomic_add(&sVectors[vector].assigned_cpu->handlers_count, -1);

		if (oldHandlersCount == 1) {
			int32 oldCPU;
			SpinLocker locker;
			cpu_ent* cpu;

			// The assignment may be moved to another CPU concurrently, so
			// re-read it until it is stable while we hold that CPU's
			// irqs_lock.
			do {
				locker.Unlock();

				oldCPU = sVectors[vector].assigned_cpu->cpu;

				ASSERT(oldCPU != -1);
				cpu = &gCPU[oldCPU];

				locker.SetTo(cpu->irqs_lock, false);
			} while (sVectors[vector].assigned_cpu->cpu != oldCPU);

			sVectors[vector].assigned_cpu->cpu = -1;
			list_remove_item(&cpu->irqs, sVectors[vector].assigned_cpu);
		}
	}

	release_spinlock(&sVectors[vector].vector_lock);
	restore_interrupts(state);

	// if the handler could be found and removed, we still have to free it
	if (status == B_OK)
		free(io);

	return status;
}


/*!	Mark \a count contiguous interrupts starting at \a startVector as in use.
	This will prevent them from being allocated by others. Only use this when
	the reserved range is hardwired to the given vector, otherwise allocate
	vectors using allocate_io_interrupt_vectors() instead.
*/
status_t
reserve_io_interrupt_vectors(int32 count, int32 startVector,
	interrupt_type type)
{
	MutexLocker locker(&sIOInterruptVectorAllocationLock);

	for (int32 i = 0; i < count; i++) {
		if (sAllocatedIOInterruptVectors[startVector + i]) {
			panic("reserved interrupt vector range %" B_PRId32 "-%" B_PRId32
				" overlaps already allocated vector %" B_PRId32, startVector,
				startVector + count - 1, startVector + i);
			free_io_interrupt_vectors(i, startVector);
			return B_BUSY;
		}

		sVectors[startVector + i].type = type;
		sVectors[startVector + i].assigned_cpu
			= &sVectorCPUAssignments[startVector + i];
		sVectorCPUAssignments[startVector + i].count = 1;
		sAllocatedIOInterruptVectors[startVector + i] = true;
	}

	dprintf("reserve_io_interrupt_vectors: reserved %" B_PRId32 " vectors "
		"starting from %" B_PRId32 "\n", count, startVector);
	return B_OK;
}


/*!	Allocate \a count contiguous interrupt vectors. The vectors are allocated
	from the available range so that they do not overlap with any other
	reserved or allocated vectors. The first vector to be used is returned
	in \a startVector on success.
*/
status_t
allocate_io_interrupt_vectors(int32 count, int32 *startVector,
	interrupt_type type)
{
	MutexLocker locker(&sIOInterruptVectorAllocationLock);

	int32 vector = 0;
	bool runFound = false;
		// must start out false, or an entirely allocated table would
		// otherwise be reported as a successful allocation of vector 0
	for (int32 i = 0; i < NUM_IO_VECTORS - (count - 1); i++) {
		if (sAllocatedIOInterruptVectors[i])
			continue;

		vector = i;
		runFound = true;
		for (uint16 j = 1; j < count; j++) {
			if (sAllocatedIOInterruptVectors[i + j]) {
				runFound = false;
				i += j;
				break;
			}
		}

		if (runFound)
			break;
	}

	if (!runFound) {
		dprintf("found no free vectors to allocate %" B_PRId32
			" io interrupts\n", count);
		return B_NO_MEMORY;
	}

	for (int32 i = 0; i < count; i++) {
		sVectors[vector + i].type = type;
		sVectors[vector + i].assigned_cpu = &sVectorCPUAssignments[vector];
		sAllocatedIOInterruptVectors[vector + i] = true;
	}

	sVectorCPUAssignments[vector].irq = vector;
	sVectorCPUAssignments[vector].count = count;

	*startVector = vector;
	dprintf("allocate_io_interrupt_vectors: allocated %" B_PRId32 " vectors "
		"starting from %" B_PRId32 "\n", count, vector);
	return B_OK;
}
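

/*	Illustrative sketch (not part of the kernel): how a bus manager might
	obtain a block of vectors, e.g. for message-signaled interrupts, and
	release them again. The msi_* names are hypothetical; the allocate/free
	calls are the ones defined in this file.

	status_t
	msi_allocate_vectors(int32 count, int32* startVector)
	{
		// Finds a free contiguous run and tags it as INTERRUPT_TYPE_IRQ;
		// on success *startVector holds the first vector of the run.
		return allocate_io_interrupt_vectors(count, startVector,
			INTERRUPT_TYPE_IRQ);
	}

	void
	msi_free_vectors(int32 count, int32 startVector)
	{
		// The range may also be a subrange of an earlier allocation.
		free_io_interrupt_vectors(count, startVector);
	}
*/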


/*!	Free/unreserve interrupt vectors previously allocated with the
	{reserve|allocate}_io_interrupt_vectors() functions. The \a count and
	\a startVector can be adjusted from the allocation calls to partially free
	a vector range.
*/
void
free_io_interrupt_vectors(int32 count, int32 startVector)
{
	if (startVector + count > NUM_IO_VECTORS) {
		panic("invalid start vector %" B_PRId32 " or count %" B_PRId32
			" supplied to free_io_interrupt_vectors\n", startVector, count);
		return;
	}

	dprintf("free_io_interrupt_vectors: freeing %" B_PRId32 " vectors "
		"starting from %" B_PRId32 "\n", count, startVector);

	MutexLocker locker(sIOInterruptVectorAllocationLock);
	for (int32 i = 0; i < count; i++) {
		if (!sAllocatedIOInterruptVectors[startVector + i]) {
			panic("io interrupt vector %" B_PRId32 " was not allocated\n",
				startVector + i);
		}

		io_vector& vector = sVectors[startVector + i];
		InterruptsSpinLocker vectorLocker(vector.vector_lock);
		if (vector.assigned_cpu != NULL && vector.assigned_cpu->cpu != -1) {
			panic("freeing io interrupt vector %" B_PRId32 " that is still "
				"assigned to a cpu", startVector + i);
			continue;
		}

		vector.assigned_cpu = NULL;
		sAllocatedIOInterruptVectors[startVector + i] = false;
	}
}


void
assign_io_interrupt_to_cpu(int32 vector, int32 newCPU)
{
	ASSERT(sVectors[vector].type == INTERRUPT_TYPE_IRQ);

	int32 oldCPU = sVectors[vector].assigned_cpu->cpu;

	if (newCPU == -1)
		newCPU = assign_cpu();

	if (newCPU == oldCPU)
		return;

	ASSERT(oldCPU != -1);
	cpu_ent* cpu = &gCPU[oldCPU];

	SpinLocker locker(cpu->irqs_lock);
	sVectors[vector].assigned_cpu->cpu = -1;
	list_remove_item(&cpu->irqs, sVectors[vector].assigned_cpu);
	locker.Unlock();

	newCPU = arch_int_assign_to_cpu(vector, newCPU);
	sVectors[vector].assigned_cpu->cpu = newCPU;
	cpu = &gCPU[newCPU];
	locker.SetTo(cpu->irqs_lock, false);
	list_add_item(&cpu->irqs, sVectors[vector].assigned_cpu);
}
754