/*
 * Copyright 2019-2022 Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */
#include <int.h>

#include <arch/smp.h>
#include <boot/kernel_args.h>
#include <device_manager.h>
#include <kscheduler.h>
#include <ksyscalls.h>
#include <interrupt_controller.h>
#include <smp.h>
#include <thread.h>
#include <timer.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <util/kernel_cpp.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include "syscall_numbers.h"
#include "VMSAv8TranslationMap.h"
#include <string.h>

#include "soc.h"
#include "arch_int_gicv2.h"

#define TRACE_ARCH_INT
#ifdef TRACE_ARCH_INT
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

//#define TRACE_ARCH_INT_IFRAMES

// An iframe stack used in the early boot process when we don't have
// threads yet.
struct iframe_stack gBootFrameStack;


// Unmask the given interrupt line at the interrupt controller, if one
// has been registered yet (early in boot there may be none).
void
arch_int_enable_io_interrupt(int irq)
{
	InterruptController *ic = InterruptController::Get();
	if (ic != NULL)
		ic->EnableInterrupt(irq);
}


// Mask the given interrupt line at the interrupt controller, if one
// has been registered yet.
void
arch_int_disable_io_interrupt(int irq)
{
	InterruptController *ic = InterruptController::Get();
	if (ic != NULL)
		ic->DisableInterrupt(irq);
}


// Route an interrupt to a specific CPU. Not implemented on arm64;
// always reports success without doing anything.
int32
arch_int_assign_to_cpu(int32 irq, int32 cpu)
{
	// Not yet supported.
	return 0;
}


// Dump the most interesting registers of an exception frame, optionally
// prefixed with an event description. Used by the iframe tracing above.
static void
print_iframe(const char *event, struct iframe *frame)
{
	if (event)
		dprintf("Exception: %s\n", event);

	dprintf("ELR=%016lx SPSR=%016lx\n",
		frame->elr, frame->spsr);
	dprintf("LR=%016lx SP =%016lx\n",
		frame->lr, frame->sp);
}


// First-stage interrupt init; nothing to do before the VM is up.
status_t
arch_int_init(kernel_args *args)
{
	return B_OK;
}


// Second-stage init, after the VM exists: instantiate the interrupt
// controller driver described by the boot loader.
// NOTE(review): the new controller is only held in a local pointer here;
// presumably its constructor registers it as the singleton returned by
// InterruptController::Get() (otherwise this would leak) — confirm in
// interrupt_controller.h.
status_t
arch_int_init_post_vm(kernel_args *args)
{
	InterruptController *ic = NULL;
	if (strcmp(args->arch_args.interrupt_controller.kind, INTC_KIND_GICV2) == 0) {
		// GICv2 needs its distributor and CPU-interface register windows.
		ic = new(std::nothrow) GICv2InterruptController(
			args->arch_args.interrupt_controller.regs1.start,
			args->arch_args.interrupt_controller.regs2.start);
	}

	if (ic == NULL)
		return B_ERROR;

	return B_OK;
}


// Init stage run once I/O is possible; nothing to do on arm64.
status_t
arch_int_init_io(kernel_args* args)
{
	return B_OK;
}


// Init stage run after the device manager is up; no arch interrupt
// devices are published this way on arm64.
status_t
arch_int_init_post_device_manager(struct kernel_args *args)
{
	return B_ENTRY_NOT_FOUND;
}


// TODO: reuse things from VMSAv8TranslationMap


// Stage-1 translation uses 4 KiB granule pages.
static int page_bits = 12;

// Output-address field of a descriptor: bits [47:12].
static constexpr uint64_t kPteAddrMask = (((1UL << 36) - 1) << 12);
// Everything that is neither the address nor the 2-bit type. (Unused here.)
static constexpr uint64_t kPteAttrMask = ~(kPteAddrMask | 0x3);
// Bit 55 is software-defined; used as the software dirty-bit-modifier
// flag marking pages whose AP[2] may be cleared on a write fault.
static constexpr uint64_t kAttrSWDBM = (1UL << 55);
// AF: hardware Access Flag (bit 10).
static constexpr uint64_t kAttrAF = (1UL << 10);
// AP[2]: read-only bit (bit 7); set means no write permission.
static constexpr uint64_t kAttrAP2 = (1UL << 7);


// Map a page-table physical address into the kernel's physical-map
// window so its entries can be read and written directly.
static uint64_t*
TableFromPa(phys_addr_t pa)
{
	return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
}


// Walk the page table rooted at ptPa down to the descriptor covering
// va, and try to resolve an access-flag fault (wr == false: set AF) or
// a permission fault on a software-DBM page (wr == true: clear AP[2]
// to make the page writable, recording it as dirty).
// Returns true if the fault was fixed up and the faulting instruction
// can simply be retried, false if this is a real fault for the VM.
static bool
fixup_entry(phys_addr_t ptPa, int level, addr_t va, bool wr)
{
	// With a 4 KiB granule each table resolves 9 bits of VA.
	int tableBits = page_bits - 3;
	uint64_t tableMask = (1UL << tableBits) - 1;

	int shift = tableBits * (3 - level) + page_bits;
	uint64_t entrySize = 1UL << shift;	// (unused)
	uint64_t entryMask = entrySize - 1;	// (unused)

	int index = (va >> shift) & tableMask;

	uint64_t *pte = &TableFromPa(ptPa)[index];

	// Descriptor type in bits [1:0]: 0x3 = table (levels < 3) or page
	// (level 3), 0x1 = block mapping at levels < 3.
	int type = *pte & 0x3;
	uint64_t addr = *pte & kPteAddrMask;

	if ((level == 3 && type == 0x3) || (level < 3 && type == 0x1)) {
		// Leaf entry (page or block).
		if (!wr && (*pte & kAttrAF) == 0) {
			// Access-flag fault: mark the entry accessed. No TLB
			// invalidation here — NOTE(review): this relies on faulting
			// translations not being cached in the TLB; confirm against
			// the architecture requirements.
			atomic_or64((int64*)pte, kAttrAF);
			return true;
		}
		if (wr && (*pte & kAttrSWDBM) != 0 && (*pte & kAttrAP2) != 0) {
			// Write to a clean, software-dirty-trackable page: drop the
			// read-only bit, then invalidate the stale read-only TLB
			// entry by VA (all ASIDs, inner shareable) and synchronize.
			atomic_and64((int64*)pte, ~kAttrAP2);
			asm("tlbi vaae1is, %0 \n dsb ish"::"r"(va >> page_bits));
			return true;
		}
	} else if (level < 3 && type == 0x3) {
		// Table descriptor: recurse into the next level.
		return fixup_entry(addr, level + 1, va, wr);
	}

	return false;
}


// Common epilogue run before returning from an exception: honor a
// pending reschedule request or a post-interrupt callback set on the
// current thread.
// NOTE(review): if neither condition holds, the interrupts disabled by
// disable_interrupts() are never restored here — presumably fine because
// the exception return restores the interrupt state from SPSR; confirm.
void
after_exception()
{
	Thread* thread = thread_get_current_thread();
	cpu_status state = disable_interrupts();
	if (thread->cpu->invoke_scheduler) {
		SpinLocker schedulerLocker(thread->scheduler_lock);
		scheduler_reschedule(B_THREAD_READY);
		schedulerLocker.Unlock();
		restore_interrupts(state);
	} else if (thread->post_interrupt_callback != NULL) {
		// Take the callback before clearing it so a nested interrupt
		// cannot run it twice; invoke it with interrupts enabled.
		void (*callback)(void*) = thread->post_interrupt_callback;
		void* data = thread->post_interrupt_data;

		thread->post_interrupt_callback = NULL;
		thread->post_interrupt_data = NULL;

		restore_interrupts(state);

		callback(data);
	}
}


// Little helper class for handling the
// iframe stack as used by KDL.
class IFrameScope {
public:
	// Push the frame onto the current thread's iframe stack (or the
	// boot stack if threading is not up yet) for the scope's lifetime.
	IFrameScope(struct iframe *iframe) {
		fThread = thread_get_current_thread();
		if (fThread)
			arm64_push_iframe(&fThread->arch_info.iframes, iframe);
		else
			arm64_push_iframe(&gBootFrameStack, iframe);
	}

	virtual ~IFrameScope() {
		// pop iframe
		if (fThread)
			arm64_pop_iframe(&fThread->arch_info.iframes);
		else
			arm64_pop_iframe(&gBootFrameStack);
	}
private:
	Thread* fThread;
};


// Synchronous exception entry point (called from the vector table):
// decodes ESR_EL1 and handles page faults (including access-flag and
// dirty-bit fixups) and SVC syscalls. Anything unrecognized panics.
extern "C" void
do_sync_handler(iframe * frame)
{
#ifdef TRACE_ARCH_INT_IFRAMES
	print_iframe("Sync abort", frame);
#endif

	IFrameScope scope(frame);

	bool isExec = false;
	switch (ESR_ELx_EXCEPTION(frame->esr)) {
		case EXCP_INSN_ABORT_L:
		case EXCP_INSN_ABORT:
			isExec = true;
			// fall through: instruction aborts share the data-abort path
		case EXCP_DATA_ABORT_L:
		case EXCP_DATA_ABORT:
		{
			bool write = (frame->esr & ISS_DATA_WnR) != 0;
			bool known = false;

			int initialLevel = VMSAv8TranslationMap::CalcStartLevel(48, 12);
			phys_addr_t ptPa;
			// Bit 63 of the faulting VA selects the address space:
			// high half (kernel, TTBR1) vs low half (user, TTBR0).
			bool addrType = (frame->far & (1UL << 63)) != 0;
			if (addrType)
				ptPa = READ_SPECIALREG(TTBR1_EL1);
			else
				ptPa = READ_SPECIALREG(TTBR0_EL1);

			switch (frame->esr & ISS_DATA_DFSC_MASK) {
				case ISS_DATA_DFSC_TF_L0:
				case ISS_DATA_DFSC_TF_L1:
				case ISS_DATA_DFSC_TF_L2:
				case ISS_DATA_DFSC_TF_L3:
					// Translation fault: a genuine page fault for the VM.
					known = true;
					break;

				case ISS_DATA_DFSC_AFF_L1:
				case ISS_DATA_DFSC_AFF_L2:
				case ISS_DATA_DFSC_AFF_L3:
					// Access-flag fault: try to just set AF and retry.
					known = true;
					if (fixup_entry(ptPa, initialLevel, frame->far, false))
						return;
					break;

				case ISS_DATA_DFSC_PF_L1:
				case ISS_DATA_DFSC_PF_L2:
				case ISS_DATA_DFSC_PF_L3:
					// Permission fault: on write it may be a clean
					// software-DBM page — clear AP[2] and retry.
					known = true;
					if (write && fixup_entry(ptPa, initialLevel, frame->far, true))
						return;
					break;
			}

			if (!known)
				break;

			// In KDL, redirect the fault to the per-CPU debugger fault
			// handler instead of the normal VM path.
			if (debug_debugger_running()) {
				Thread* thread = thread_get_current_thread();
				if (thread != NULL) {
					cpu_ent* cpu = &gCPU[smp_get_current_cpu()];
					if (cpu->fault_handler != 0) {
						debug_set_page_fault_info(frame->far, frame->elr,
							write ? DEBUG_PAGE_FAULT_WRITE : 0);
						frame->elr = cpu->fault_handler;
						frame->sp = cpu->fault_handler_stack_pointer;
						return;
					}
				}
			}

			Thread *thread = thread_get_current_thread();
			ASSERT(thread);

			bool isUser = (frame->spsr & PSR_M_MASK) == PSR_M_EL0t;

			if ((frame->spsr & PSR_I) != 0) {
				// interrupts disabled: only the thread's registered
				// fault handler (e.g. user_memcpy) may recover here.
				uintptr_t handler = reinterpret_cast<uintptr_t>(thread->fault_handler);
				if (thread->fault_handler != 0) {
					frame->elr = handler;
					return;
				}
			} else if (thread->page_faults_allowed != 0) {
				dprintf("PF: %lx\n", frame->far);
				enable_interrupts();
				addr_t ret = 0;
				vm_page_fault(frame->far, frame->elr, write, isExec, isUser, &ret);
				if (ret != 0)
					frame->elr = ret;	// resume at the handler's address
				return;
			}

			panic("unhandled pagefault! FAR=%lx ELR=%lx ESR=%lx",
				frame->far, frame->elr, frame->esr);
			break;
		}

		case EXCP_SVC64:
		{
			// The SVC immediate encodes the syscall: low 5 bits are the
			// argument count, the rest the syscall number.
			uint32 imm = (frame->esr & 0xffff);

			uint32 count = imm & 0x1f;
			uint32 syscall = imm >> 5;

			uint64_t args[20];
			if (count > 20) {
				frame->x[0] = B_ERROR;
				return;
			}

			// First 8 arguments arrive in x0-x7; any further ones are
			// spilled on the user stack by the syscall stub.
			memset(args, 0, sizeof(args));
			memcpy(args, frame->x, (count < 8 ? count : 8) * 8);

			if (count > 8) {
				if (!IS_USER_ADDRESS(frame->sp)
					|| user_memcpy(&args[8], (void*)frame->sp, (count - 8) * 8) != B_OK) {
					frame->x[0] = B_BAD_ADDRESS;
					return;
				}
			}

			thread_at_kernel_entry(system_time());

			enable_interrupts();
			// Result is written into x0 of the saved frame.
			syscall_dispatcher(syscall, (void*)args, &frame->x[0]);

			{
				// Kernel-exit bookkeeping: with interrupts disabled,
				// decide whether signals/debug work must run first.
				disable_interrupts();
				atomic_and(&thread_get_current_thread()->flags, ~THREAD_FLAGS_SYSCALL_RESTARTED);
				if ((thread_get_current_thread()->flags
					& (THREAD_FLAGS_SIGNALS_PENDING
					| THREAD_FLAGS_DEBUG_THREAD
					| THREAD_FLAGS_TRAP_FOR_CORE_DUMP)) != 0) {
					enable_interrupts();
					thread_at_kernel_exit();
				} else {
					thread_at_kernel_exit_no_signals();
				}
				// Syscall restart is not implemented yet on arm64.
				if ((THREAD_FLAGS_RESTART_SYSCALL & thread_get_current_thread()->flags) != 0) {
					panic("syscall restart");
				}
			}

			return;
		}
	}

	// EC is bits [31:26] of ESR: the exception class.
	panic("unhandled exception! FAR=%lx ELR=%lx ESR=%lx (EC=%lx)",
		frame->far, frame->elr, frame->esr, (frame->esr >> 26) & 0x3f);
}


// SError entry point: always fatal.
extern "C" void
do_error_handler(iframe * frame)
{
#ifdef TRACE_ARCH_INT_IFRAMES
	print_iframe("Error", frame);
#endif

	IFrameScope scope(frame);

	panic("unhandled error! FAR=%lx ELR=%lx ESR=%lx", frame->far, frame->elr, frame->esr);
}


// IRQ entry point: hand off to the interrupt controller driver, then
// run the common reschedule/callback epilogue.
extern "C" void
do_irq_handler(iframe * frame)
{
#ifdef TRACE_ARCH_INT_IFRAMES
	print_iframe("IRQ", frame);
#endif

	IFrameScope scope(frame);

	InterruptController *ic = InterruptController::Get();
	if (ic != NULL)
		ic->HandleInterrupt();

	after_exception();
}


// FIQ entry point: FIQs are not used by the kernel, so this is fatal.
extern "C" void
do_fiq_handler(iframe * frame)
{
#ifdef TRACE_ARCH_INT_IFRAMES
	print_iframe("FIQ", frame);
#endif

	IFrameScope scope(frame);

	panic("do_fiq_handler");
}