/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <boot/kernel_args.h>
#include <vm/vm.h>
#include <cpu.h>
#include <int.h>
#include <smp.h>
#include <smp_priv.h>

#include <arch/atomic.h>
#include <arch/cpu.h>
#include <arch/vm.h>
#include <arch/smp.h>

#include <arch/x86/apic.h>
#include <arch/x86/arch_smp.h>
#include <arch/x86/smp_priv.h>
#include <arch/x86/timer.h>

#include <string.h>
#include <stdio.h>

#include <algorithm>


//#define TRACE_ARCH_SMP
#ifdef TRACE_ARCH_SMP
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#define ICI_VECTOR 0xfd


static uint32 sCPUAPICIds[SMP_MAX_CPUS];
static uint32 sAPICVersions[SMP_MAX_CPUS];


static int32
x86_ici_interrupt(void *data)
{
	// genuine inter-cpu interrupt
	int cpu = smp_get_current_cpu();
	TRACE(("inter-cpu interrupt on cpu %d\n", cpu));
	return smp_intercpu_int_handler(cpu);
}


static int32
x86_spurious_interrupt(void *data)
{
	// spurious interrupt
	TRACE(("spurious interrupt on cpu %" B_PRId32 "\n", smp_get_current_cpu()));

	// spurious interrupts must not be acknowledged, as the APIC does not
	// expect an end of interrupt for them - if we sent one anyway, we would
	// lose the next pending interrupt
	return B_HANDLED_INTERRUPT;
}


static int32
x86_smp_error_interrupt(void *data)
{
	// smp error interrupt
	TRACE(("smp error interrupt on cpu %" B_PRId32 "\n", smp_get_current_cpu()));
	return B_HANDLED_INTERRUPT;
}


uint32
x86_get_cpu_apic_id(int32 cpu)
{
	ASSERT(cpu >= 0 && cpu < SMP_MAX_CPUS);
	return sCPUAPICIds[cpu];
}


status_t
arch_smp_init(kernel_args *args)
{
	TRACE(("%s: entry\n", __func__));

	if (!apic_available()) {
		// if we don't have an APIC, we can't do SMP
		TRACE(("%s: apic not available for smp\n", __func__));
		return B_OK;
	}

	// set up some globals
	memcpy(sCPUAPICIds, args->arch_args.cpu_apic_id,
		sizeof(args->arch_args.cpu_apic_id));
	memcpy(sAPICVersions, args->arch_args.cpu_apic_version,
		sizeof(args->arch_args.cpu_apic_version));

	// set up the local apic on the boot cpu
	arch_smp_per_cpu_init(args, 0);

	if (args->num_cpus > 1) {
		// I/O interrupts start at ARCH_INTERRUPT_BASE, so all interrupts are
		// shifted; vector 0xfd is the inter-cpu interrupt, 0xfe the SMP error
		// interrupt, and 0xff the spurious interrupt
		reserve_io_interrupt_vectors(3, 0xfd - ARCH_INTERRUPT_BASE,
			INTERRUPT_TYPE_ICI);
		install_io_interrupt_handler(0xfd - ARCH_INTERRUPT_BASE,
			&x86_ici_interrupt, NULL, B_NO_LOCK_VECTOR);
		install_io_interrupt_handler(0xfe - ARCH_INTERRUPT_BASE,
			&x86_smp_error_interrupt, NULL, B_NO_LOCK_VECTOR);
		install_io_interrupt_handler(0xff - ARCH_INTERRUPT_BASE,
			&x86_spurious_interrupt, NULL, B_NO_LOCK_VECTOR);
	}

	return B_OK;
}


status_t
arch_smp_per_cpu_init(kernel_args *args, int32 cpu)
{
	// set up the local apic on the current cpu
	TRACE(("arch_smp_per_cpu_init: setting up the apic on cpu %" B_PRId32 "\n",
		cpu));
	apic_per_cpu_init(args, cpu);

	// set up FPU and SSE if supported
	x86_init_fpu();

	return B_OK;
}


void
arch_smp_send_multicast_ici(CPUSet& cpuSet)
{
#if KDEBUG
	if (are_interrupts_enabled())
		panic("arch_smp_send_multicast_ici: called with interrupts enabled");
#endif

	memory_write_barrier();

	int32 i = 0;
	int32 cpuCount = smp_get_num_cpus();

	// CPUs with a logical APIC ID can be addressed together with a single ICI
	// in logical destination mode; without x2APIC only the first 8 CPUs have
	// one
	int32 logicalModeCPUs;
	if (x2apic_available())
		logicalModeCPUs = cpuCount;
	else
		logicalModeCPUs = std::min(cpuCount, int32(8));

	uint32 destination = 0;
	for (; i < logicalModeCPUs; i++) {
		if (cpuSet.GetBit(i) && i != smp_get_current_cpu())
			destination |= gCPU[i].arch.logical_apic_id;
	}

	uint32 mode = ICI_VECTOR | APIC_DELIVERY_MODE_FIXED
		| APIC_INTR_COMMAND_1_ASSERT
		| APIC_INTR_COMMAND_1_DEST_MODE_LOGICAL
		| APIC_INTR_COMMAND_1_DEST_FIELD;

	while (!apic_interrupt_delivered())
		cpu_pause();
	apic_set_interrupt_command(destination, mode);

	// the remaining CPUs are targeted one by one in physical destination mode
	for (; i < cpuCount; i++) {
		if (cpuSet.GetBit(i)) {
			uint32 destination = sCPUAPICIds[i];
			uint32 mode = ICI_VECTOR | APIC_DELIVERY_MODE_FIXED
				| APIC_INTR_COMMAND_1_ASSERT
				| APIC_INTR_COMMAND_1_DEST_MODE_PHYSICAL
				| APIC_INTR_COMMAND_1_DEST_FIELD;

			while (!apic_interrupt_delivered())
				cpu_pause();
			apic_set_interrupt_command(destination, mode);
		}
	}
}


void
arch_smp_send_broadcast_ici(void)
{
#if KDEBUG
	if (are_interrupts_enabled())
		panic("arch_smp_send_broadcast_ici: called with interrupts enabled");
#endif

	memory_write_barrier();

	uint32 mode = ICI_VECTOR | APIC_DELIVERY_MODE_FIXED
		| APIC_INTR_COMMAND_1_ASSERT
		| APIC_INTR_COMMAND_1_DEST_MODE_PHYSICAL
		| APIC_INTR_COMMAND_1_DEST_ALL_BUT_SELF;

	while (!apic_interrupt_delivered())
		cpu_pause();
	apic_set_interrupt_command(0, mode);
}


void
arch_smp_send_ici(int32 target_cpu)
{
#if KDEBUG
	if (are_interrupts_enabled())
		panic("arch_smp_send_ici: called with interrupts enabled");
#endif

	memory_write_barrier();

	uint32 destination = sCPUAPICIds[target_cpu];
	uint32 mode = ICI_VECTOR | APIC_DELIVERY_MODE_FIXED
		| APIC_INTR_COMMAND_1_ASSERT
		| APIC_INTR_COMMAND_1_DEST_MODE_PHYSICAL
		| APIC_INTR_COMMAND_1_DEST_FIELD;

	while (!apic_interrupt_delivered())
		cpu_pause();
	apic_set_interrupt_command(destination, mode);
}
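

// Illustrative sketch (not part of the original file, compiled out via
// "#if 0"): how a caller might drive the ICI primitives above. It assumes
// the CPUSet API from <smp.h> (ClearAll()/SetBit(), as already used via
// GetBit() above) and the kernel's disable_interrupts()/restore_interrupts();
// the function itself is hypothetical and only shows the calling convention -
// interrupts must be disabled before sending an ICI.
#if 0
static void
example_notify_all_other_cpus()
{
	cpu_status state = disable_interrupts();

	// build the set of target CPUs, excluding the current one
	CPUSet targets;
	targets.ClearAll();
	for (int32 i = 0; i < smp_get_num_cpus(); i++) {
		if (i != smp_get_current_cpu())
			targets.SetBit(i);
	}

	// one ICI for the logical-mode group, plus one per remaining CPU,
	// as implemented in arch_smp_send_multicast_ici() above
	arch_smp_send_multicast_ici(targets);

	restore_interrupts(state);
}
#endif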