xref: /haiku/src/system/kernel/arch/x86/arch_smp.cpp (revision e1c4049fed1047bdb957b0529e1921e97ef94770)
1 /*
2  * Copyright 2023, Puck Meerburg, puck@puckipedia.com.
3  * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
4  * Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 #include <boot/kernel_args.h>
13 #include <vm/vm.h>
14 #include <cpu.h>
15 #include <int.h>
16 #include <smp.h>
17 #include <smp_priv.h>
18 
19 #include <arch/atomic.h>
20 #include <arch/cpu.h>
21 #include <arch/vm.h>
22 #include <arch/smp.h>
23 
24 #include <arch/x86/apic.h>
25 #include <arch/x86/arch_smp.h>
26 #include <arch/x86/smp_priv.h>
27 #include <arch/x86/timer.h>
28 
29 #include <string.h>
30 #include <stdio.h>
31 
32 #include <algorithm>
33 
34 
35 //#define TRACE_ARCH_SMP
36 #ifdef TRACE_ARCH_SMP
37 #	define TRACE(x) dprintf x
38 #else
39 #	define TRACE(x) ;
40 #endif
41 
42 
43 #define	ICI_VECTOR		0xfd
44 
45 
46 static uint32 sCPUAPICIds[SMP_MAX_CPUS];
47 static uint32 sAPICVersions[SMP_MAX_CPUS];
48 
49 
50 static int32
51 x86_ici_interrupt(void *data)
52 {
53 	// genuine inter-cpu interrupt
54 	int cpu = smp_get_current_cpu();
55 	TRACE(("inter-cpu interrupt on cpu %d\n", cpu));
56 	return smp_intercpu_int_handler(cpu);
57 }
58 
59 
static int32
x86_spurious_interrupt(void *data)
{
	// Handler for the local APIC's spurious-interrupt vector (installed on
	// 0xff in arch_smp_init()).
	TRACE(("spurious interrupt on cpu %" B_PRId32 "\n", smp_get_current_cpu()));

	// Spurious interrupts must not be acknowledged: the local APIC does not
	// expect an end-of-interrupt for them. Sending an EOI anyway would
	// acknowledge — and thereby lose — the next pending interrupt instead.
	return B_HANDLED_INTERRUPT;
}
71 
72 
static int32
x86_smp_error_interrupt(void *data)
{
	// Handler for the local APIC error vector (installed on 0xfe in
	// arch_smp_init()). The error is only traced, not otherwise handled.
	TRACE(("smp error interrupt on cpu %" B_PRId32 "\n", smp_get_current_cpu()));
	return B_HANDLED_INTERRUPT;
}
80 
81 
/*!	Returns the local APIC ID of the given CPU, as recorded from the
	kernel_args in arch_smp_init(). \a cpu must be a valid CPU index;
	this is only ASSERT-checked (i.e. enforced in debug builds).
*/
uint32
x86_get_cpu_apic_id(int32 cpu)
{
	ASSERT(cpu >= 0 && cpu < SMP_MAX_CPUS);
	return sCPUAPICIds[cpu];
}
88 
89 
90 status_t
91 arch_smp_init(kernel_args *args)
92 {
93 	TRACE(("%s: entry\n", __func__));
94 
95 	if (!apic_available()) {
96 		// if we don't have an apic we can't do smp
97 		TRACE(("%s: apic not available for smp\n", __func__));
98 		return B_OK;
99 	}
100 
101 	// setup some globals
102 	memcpy(sCPUAPICIds, args->arch_args.cpu_apic_id, sizeof(args->arch_args.cpu_apic_id));
103 	memcpy(sAPICVersions, args->arch_args.cpu_apic_version, sizeof(args->arch_args.cpu_apic_version));
104 
105 	// set up the local apic on the boot cpu
106 	arch_smp_per_cpu_init(args, 0);
107 
108 	if (args->num_cpus > 1) {
109 		// I/O interrupts start at ARCH_INTERRUPT_BASE, so all interrupts are shifted
110 		reserve_io_interrupt_vectors(3, 0xfd - ARCH_INTERRUPT_BASE,
111 			INTERRUPT_TYPE_ICI);
112 		install_io_interrupt_handler(0xfd - ARCH_INTERRUPT_BASE, &x86_ici_interrupt, NULL, B_NO_LOCK_VECTOR);
113 		install_io_interrupt_handler(0xfe - ARCH_INTERRUPT_BASE, &x86_smp_error_interrupt, NULL, B_NO_LOCK_VECTOR);
114 		install_io_interrupt_handler(0xff - ARCH_INTERRUPT_BASE, &x86_spurious_interrupt, NULL, B_NO_LOCK_VECTOR);
115 	}
116 
117 	return B_OK;
118 }
119 
120 
/*!	Per-CPU SMP initialization, run on each CPU (including the boot CPU,
	which is initialized from arch_smp_init()): brings up the local APIC
	for \a cpu and initializes the FPU/SSE state.
	Always returns B_OK.
*/
status_t
arch_smp_per_cpu_init(kernel_args *args, int32 cpu)
{
	// set up the local apic on the current cpu
	TRACE(("arch_smp_init_percpu: setting up the apic on cpu %" B_PRId32 "\n",
		cpu));
	apic_per_cpu_init(args, cpu);

	// setup FPU and SSE if supported
	x86_init_fpu();

	return B_OK;
}
134 
135 
136 static void
137 send_multicast_ici_physical(CPUSet& cpuSet)
138 {
139 	int32 cpuCount = smp_get_num_cpus();
140 	int32 currentCpu = smp_get_current_cpu();
141 
142 	for (int32 i = 0; i < cpuCount; i++) {
143 		if (cpuSet.GetBit(i) && i != currentCpu) {
144 			uint32 destination = sCPUAPICIds[i];
145 			uint32 mode = ICI_VECTOR | APIC_DELIVERY_MODE_FIXED
146 					| APIC_INTR_COMMAND_1_ASSERT
147 					| APIC_INTR_COMMAND_1_DEST_MODE_PHYSICAL
148 					| APIC_INTR_COMMAND_1_DEST_FIELD;
149 
150 			while (!apic_interrupt_delivered())
151 				cpu_pause();
152 			apic_set_interrupt_command(destination, mode);
153 		}
154 	}
155 }
156 
157 
/*!	Sends an inter-CPU interrupt to every CPU set in \a cpuSet except the
	calling one. Must be called with interrupts disabled (KDEBUG-enforced).
	Uses logical-destination x2APIC IPIs when available, otherwise falls
	back to one physical IPI per target CPU.
*/
void
arch_smp_send_multicast_ici(CPUSet& cpuSet)
{
#if KDEBUG
	if (are_interrupts_enabled())
		panic("arch_smp_send_multicast_ici: called with interrupts enabled");
#endif

	// Make all prior stores visible before the IPI can be observed by the
	// target CPUs.
	memory_write_barrier();

	if (!x2apic_available()) {
		send_multicast_ici_physical(cpuSet);
		return;
	}

	// WRMSR on the x2APIC MSRs is neither serializing, nor a load-store
	// operation, requiring both memory serialization *and* a load fence, which is
	// the only way to ensure the MSR doesn't get executed before the write
	// barrier.
	memory_read_barrier();

	int32 cpuCount = smp_get_num_cpus();
	int32 currentCpu = smp_get_current_cpu();

	uint32 mode = ICI_VECTOR | APIC_DELIVERY_MODE_FIXED
			| APIC_INTR_COMMAND_1_ASSERT
			| APIC_INTR_COMMAND_1_DEST_MODE_LOGICAL
			| APIC_INTR_COMMAND_1_DEST_FIELD;

	for (int32 i = 0; i < cpuCount; i++) {
		if (!cpuSet.GetBit(i) || i == currentCpu)
			continue;

		// x2APIC logical addressing: target by per-CPU logical APIC ID.
		// No delivery-pending polling is needed here; unlike the xAPIC
		// path, no wait loop precedes the command write.
		apic_set_interrupt_command(gCPU[i].arch.logical_apic_id, mode);
	}
}
194 
195 
196 void
197 arch_smp_send_broadcast_ici(void)
198 {
199 #if KDEBUG
200 	if (are_interrupts_enabled())
201 		panic("arch_smp_send_broadcast_ici: called with interrupts enabled");
202 #endif
203 
204 	memory_write_barrier();
205 
206 	uint32 mode = ICI_VECTOR | APIC_DELIVERY_MODE_FIXED
207 			| APIC_INTR_COMMAND_1_ASSERT
208 			| APIC_INTR_COMMAND_1_DEST_MODE_PHYSICAL
209 			| APIC_INTR_COMMAND_1_DEST_ALL_BUT_SELF;
210 
211 	while (!apic_interrupt_delivered())
212 		cpu_pause();
213 	apic_set_interrupt_command(0, mode);
214 }
215 
216 
217 void
218 arch_smp_send_ici(int32 target_cpu)
219 {
220 #if KDEBUG
221 	if (are_interrupts_enabled())
222 		panic("arch_smp_send_ici: called with interrupts enabled");
223 #endif
224 
225 	memory_write_barrier();
226 
227 	uint32 destination = sCPUAPICIds[target_cpu];
228 	uint32 mode = ICI_VECTOR | APIC_DELIVERY_MODE_FIXED
229 			| APIC_INTR_COMMAND_1_ASSERT
230 			| APIC_INTR_COMMAND_1_DEST_MODE_PHYSICAL
231 			| APIC_INTR_COMMAND_1_DEST_FIELD;
232 
233 	while (!apic_interrupt_delivered())
234 		cpu_pause();
235 	apic_set_interrupt_command(destination, mode);
236 }
237 
238