/*
 * Copyright 2019-2022 Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */
#include <int.h>

#include <arch/smp.h>
#include <boot/kernel_args.h>
#include <device_manager.h>
#include <kscheduler.h>
#include <ksyscalls.h>
#include <interrupt_controller.h>
#include <smp.h>
#include <thread.h>
#include <timer.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <util/kernel_cpp.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include "syscall_numbers.h"
#include "VMSAv8TranslationMap.h"
#include <string.h>

#include "soc.h"
#include "arch_int_gicv2.h"

#define TRACE_ARCH_INT
#ifdef TRACE_ARCH_INT
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

//#define TRACE_ARCH_INT_IFRAMES

// An iframe stack used in the early boot process when we don't have
// threads yet.
struct iframe_stack gBootFrameStack;


void
arch_int_enable_io_interrupt(int irq)
{
	InterruptController *ic = InterruptController::Get();
	if (ic != NULL)
		ic->EnableInterrupt(irq);
}


void
arch_int_disable_io_interrupt(int irq)
{
	InterruptController *ic = InterruptController::Get();
	if (ic != NULL)
		ic->DisableInterrupt(irq);
}


int32
arch_int_assign_to_cpu(int32 irq, int32 cpu)
{
	// Not yet supported.
	return 0;
}


static void
print_iframe(const char *event, struct iframe *frame)
{
	if (event)
		dprintf("Exception: %s\n", event);

	dprintf("ELR=%016lx SPSR=%016lx\n",
		frame->elr, frame->spsr);
	dprintf("LR=%016lx SP =%016lx\n",
		frame->lr, frame->sp);
}


status_t
arch_int_init(kernel_args *args)
{
	return B_OK;
}


status_t
arch_int_init_post_vm(kernel_args *args)
{
	InterruptController *ic = NULL;
	if (strcmp(args->arch_args.interrupt_controller.kind, INTC_KIND_GICV2) == 0) {
		ic = new(std::nothrow) GICv2InterruptController(
			args->arch_args.interrupt_controller.regs1.start,
			args->arch_args.interrupt_controller.regs2.start);
	}

	if (ic == NULL)
		return B_ERROR;

	return B_OK;
}


status_t
arch_int_init_io(kernel_args* args)
{
	return B_OK;
}


status_t
arch_int_init_post_device_manager(struct kernel_args *args)
{
	return B_ENTRY_NOT_FOUND;
}


// TODO: reuse things from VMSAv8TranslationMap


static int page_bits = 12;

// VMSAv8 PTE fields used below for access-flag and dirty-state fixups.
static constexpr uint64_t kPteAddrMask = (((1UL << 36) - 1) << 12);
static constexpr uint64_t kPteAttrMask = ~(kPteAddrMask | 0x3);
static constexpr uint64_t kAttrSWDBM = (1UL << 55);
static constexpr uint64_t kAttrAF = (1UL << 10);
static constexpr uint64_t kAttrAP2 = (1UL << 7);


static uint64_t*
TableFromPa(phys_addr_t pa)
{
	return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
}


// Walk the translation table rooted at ptPa and try to resolve the fault at
// va in software: set the Access Flag on a read/execute access, or clear
// AP[2] (making the page writable) on a write when the entry carries the
// software DBM bit. Returns true if the fault was fixed up.
static bool
fixup_entry(phys_addr_t ptPa, int level, addr_t va, bool wr)
{
	int tableBits = page_bits - 3;
	uint64_t tableMask = (1UL << tableBits) - 1;

	int shift = tableBits * (3 - level) + page_bits;
	uint64_t entrySize = 1UL << shift;
	uint64_t entryMask = entrySize - 1;

	int index = (va >> shift) & tableMask;

	uint64_t *pte = &TableFromPa(ptPa)[index];

	int type = *pte & 0x3;
	uint64_t addr = *pte & kPteAddrMask;

	if ((level == 3 && type == 0x3) || (level < 3 && type == 0x1)) {
		if (!wr && (*pte & kAttrAF) == 0) {
			atomic_or64((int64*)pte, kAttrAF);
			return true;
		}
		if (wr && (*pte & kAttrSWDBM) != 0 && (*pte & kAttrAP2) != 0) {
			atomic_and64((int64*)pte, ~kAttrAP2);
			asm("tlbi vaae1is, %0 \n dsb ish"::"r"(va >> page_bits));
			return true;
		}
	} else if (level < 3 && type == 0x3) {
		return fixup_entry(addr, level + 1, va, wr);
	}

	return false;
}


void
after_exception()
{
	Thread* thread = thread_get_current_thread();
	if (thread->cpu->invoke_scheduler) {
		disable_interrupts();
		SpinLocker schedulerLocker(thread->scheduler_lock);
		scheduler_reschedule(B_THREAD_READY);
	}
}


// Little helper class for handling the
// iframe stack as used by KDL.
class IFrameScope {
public:
	IFrameScope(struct iframe *iframe) {
		fThread = thread_get_current_thread();
		if (fThread)
			arm64_push_iframe(&fThread->arch_info.iframes, iframe);
		else
			arm64_push_iframe(&gBootFrameStack, iframe);
	}

	virtual ~IFrameScope() {
		// pop iframe
		if (fThread)
			arm64_pop_iframe(&fThread->arch_info.iframes);
		else
			arm64_pop_iframe(&gBootFrameStack);
	}
private:
	Thread* fThread;
};


extern "C" void
do_sync_handler(iframe * frame)
{
#ifdef TRACE_ARCH_INT_IFRAMES
	print_iframe("Sync abort", frame);
#endif

	IFrameScope scope(frame);

	bool isExec = false;
	switch (ESR_ELx_EXCEPTION(frame->esr)) {
		case EXCP_INSN_ABORT_L:
		case EXCP_INSN_ABORT:
			isExec = true;
			// fall through
		case EXCP_DATA_ABORT_L:
		case EXCP_DATA_ABORT:
		{
			bool write = (frame->esr & ISS_DATA_WnR) != 0;
			bool known = false;

			int initialLevel = VMSAv8TranslationMap::CalcStartLevel(48, 12);
			phys_addr_t ptPa;
			bool addrType = (frame->far & (1UL << 63)) != 0;
			if (addrType)
				ptPa = READ_SPECIALREG(TTBR1_EL1);
			else
				ptPa = READ_SPECIALREG(TTBR0_EL1);

			switch (frame->esr & ISS_DATA_DFSC_MASK) {
				case ISS_DATA_DFSC_TF_L0:
				case ISS_DATA_DFSC_TF_L1:
				case ISS_DATA_DFSC_TF_L2:
				case ISS_DATA_DFSC_TF_L3:
					known = true;
					break;

				case ISS_DATA_DFSC_AFF_L1:
				case ISS_DATA_DFSC_AFF_L2:
				case ISS_DATA_DFSC_AFF_L3:
					known = true;
					if (fixup_entry(ptPa, initialLevel, frame->far, false))
						return;
					break;

				case ISS_DATA_DFSC_PF_L1:
				case ISS_DATA_DFSC_PF_L2:
				case ISS_DATA_DFSC_PF_L3:
					known = true;
					if (write && fixup_entry(ptPa, initialLevel, frame->far, true))
						return;
					break;
			}

			if (!known)
				break;

			if (debug_debugger_running()) {
				Thread* thread = thread_get_current_thread();
				if (thread != NULL) {
					cpu_ent* cpu = &gCPU[smp_get_current_cpu()];
					if (cpu->fault_handler != 0) {
						debug_set_page_fault_info(frame->far, frame->elr,
							write ? DEBUG_PAGE_FAULT_WRITE : 0);
						frame->elr = cpu->fault_handler;
						frame->sp = cpu->fault_handler_stack_pointer;
						return;
					}
				}
			}

			Thread *thread = thread_get_current_thread();
			ASSERT(thread);

			bool isUser = (frame->spsr & PSR_M_MASK) == PSR_M_EL0t;

			if ((frame->spsr & PSR_I) != 0) {
				// interrupts disabled
				uintptr_t handler = reinterpret_cast<uintptr_t>(thread->fault_handler);
				if (thread->fault_handler != 0) {
					frame->elr = handler;
					return;
				}
			} else if (thread->page_faults_allowed != 0) {
				dprintf("PF: %lx\n", frame->far);
				enable_interrupts();
				addr_t ret = 0;
				vm_page_fault(frame->far, frame->elr, write, isExec, isUser, &ret);
				if (ret != 0)
					frame->elr = ret;
				return;
			}

			panic("unhandled pagefault! FAR=%lx ELR=%lx ESR=%lx",
				frame->far, frame->elr, frame->esr);
			break;
		}

		case EXCP_SVC64:
		{
			// The SVC immediate encodes the argument count in its low 5 bits
			// and the syscall number in the remaining bits.
			uint32 imm = (frame->esr & 0xffff);

			uint32 count = imm & 0x1f;
			uint32 syscall = imm >> 5;

			uint64_t args[20];
			if (count > 20) {
				frame->x[0] = B_ERROR;
				return;
			}

			memset(args, 0, sizeof(args));
			memcpy(args, frame->x, (count < 8 ? count : 8) * 8);

			// Arguments beyond the first eight are passed on the user stack.
			if (count > 8) {
				if (!IS_USER_ADDRESS(frame->sp)
					|| user_memcpy(&args[8], (void*)frame->sp, (count - 8) * 8) != B_OK) {
					frame->x[0] = B_BAD_ADDRESS;
					return;
				}
			}

			thread_at_kernel_entry(system_time());

			enable_interrupts();
			syscall_dispatcher(syscall, (void*)args, &frame->x[0]);

			{
				disable_interrupts();
				atomic_and(&thread_get_current_thread()->flags, ~THREAD_FLAGS_SYSCALL_RESTARTED);
				if ((thread_get_current_thread()->flags
						& (THREAD_FLAGS_SIGNALS_PENDING
							| THREAD_FLAGS_DEBUG_THREAD
							| THREAD_FLAGS_TRAP_FOR_CORE_DUMP)) != 0) {
					enable_interrupts();
					thread_at_kernel_exit();
				} else {
					thread_at_kernel_exit_no_signals();
				}
				if ((THREAD_FLAGS_RESTART_SYSCALL & thread_get_current_thread()->flags) != 0) {
					panic("syscall restart");
				}
			}

			return;
		}
	}

	panic("unhandled exception! FAR=%lx ELR=%lx ESR=%lx (EC=%lx)",
		frame->far, frame->elr, frame->esr, (frame->esr >> 26) & 0x3f);
}


extern "C" void
do_error_handler(iframe * frame)
{
#ifdef TRACE_ARCH_INT_IFRAMES
	print_iframe("Error", frame);
#endif

	IFrameScope scope(frame);

	panic("unhandled error! FAR=%lx ELR=%lx ESR=%lx", frame->far, frame->elr, frame->esr);
}


extern "C" void
do_irq_handler(iframe * frame)
{
#ifdef TRACE_ARCH_INT_IFRAMES
	print_iframe("IRQ", frame);
#endif

	IFrameScope scope(frame);

	InterruptController *ic = InterruptController::Get();
	if (ic != NULL)
		ic->HandleInterrupt();

	after_exception();
}


extern "C" void
do_fiq_handler(iframe * frame)
{
#ifdef TRACE_ARCH_INT_IFRAMES
	print_iframe("FIQ", frame);
#endif

	IFrameScope scope(frame);

	panic("do_fiq_handler");
}