/*
 * Copyright 2019-2022 Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */
#include <int.h>

#include <arch/smp.h>
#include <boot/kernel_args.h>
#include <device_manager.h>
#include <kscheduler.h>
#include <ksyscalls.h>
#include <interrupt_controller.h>
#include <smp.h>
#include <thread.h>
#include <timer.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <util/kernel_cpp.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include "syscall_numbers.h"
#include "VMSAv8TranslationMap.h"
#include <string.h>

#include "soc.h"
#include "arch_int_gicv2.h"

#define TRACE_ARCH_INT
#ifdef TRACE_ARCH_INT
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

//#define TRACE_ARCH_INT_IFRAMES

// An iframe stack used in the early boot process when we don't have
// threads yet.
struct iframe_stack gBootFrameStack;

// To avoid saving and restoring the large FPU state on every exception, it is
// assumed that this code and the page fault handling do not use the FPU.
// Instead, the FPU state is saved and restored explicitly when handling an IRQ
// or a syscall.
extern "C" void _fp_save(aarch64_fpu_state *fpu);
extern "C" void _fp_restore(aarch64_fpu_state *fpu);

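// Unmask or mask the given interrupt line at the interrupt controller, if one
// has been installed.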
void
arch_int_enable_io_interrupt(int32 irq)
{
	InterruptController *ic = InterruptController::Get();
	if (ic != NULL)
		ic->EnableInterrupt(irq);
}


void
arch_int_disable_io_interrupt(int32 irq)
{
	InterruptController *ic = InterruptController::Get();
	if (ic != NULL)
		ic->DisableInterrupt(irq);
}


int32
arch_int_assign_to_cpu(int32 irq, int32 cpu)
{
	// Not yet supported.
	return 0;
}


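// Dump the most interesting registers of an iframe, for tracing purposes.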
static void
print_iframe(const char *event, struct iframe *frame)
{
	if (event)
		dprintf("Exception: %s\n", event);

	dprintf("ELR=%016lx SPSR=%016lx\n",
		frame->elr, frame->spsr);
	dprintf("LR=%016lx SP =%016lx\n",
		frame->lr, frame->sp);
}


status_t
arch_int_init(kernel_args *args)
{
	return B_OK;
}


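// Create the interrupt controller described in the kernel args; only a GICv2
// is recognized here.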
status_t
arch_int_init_post_vm(kernel_args *args)
{
	InterruptController *ic = NULL;
	if (strcmp(args->arch_args.interrupt_controller.kind, INTC_KIND_GICV2) == 0) {
		ic = new(std::nothrow) GICv2InterruptController(
			args->arch_args.interrupt_controller.regs1.start,
			args->arch_args.interrupt_controller.regs2.start);
	}

	if (ic == NULL)
		return B_ERROR;

	return B_OK;
}


status_t
arch_int_init_io(kernel_args* args)
{
	return B_OK;
}


status_t
arch_int_init_post_device_manager(struct kernel_args *args)
{
	return B_ENTRY_NOT_FOUND;
}


// TODO: reuse things from VMSAv8TranslationMap


static int page_bits = 12;

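// Map a page table's physical address to its kernel virtual address through
// the physical map area.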
static uint64_t*
TableFromPa(phys_addr_t pa)
{
	return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
}


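// Try to resolve an access flag or permission fault directly in the page
// tables: walk the table hierarchy starting at ptPa and, for the entry mapping
// va, either set the access flag (on a first access) or clear the read-only
// bit (on a write to an entry marked writable via kAttrSWDBM). Returns true if
// the entry was updated, or changed concurrently, so that the faulting access
// can simply be retried; false if the fault has to be handled by the generic
// VM code.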
static bool
fixup_entry(phys_addr_t ptPa, int level, addr_t va, bool wr)
{
	int tableBits = page_bits - 3;
	uint64_t tableMask = (1UL << tableBits) - 1;

	int shift = tableBits * (3 - level) + page_bits;
	uint64_t entrySize = 1UL << shift;
	uint64_t entryMask = entrySize - 1;

	int index = (va >> shift) & tableMask;

	uint64_t *pte = &TableFromPa(ptPa)[index];
	uint64_t oldPte = atomic_get64((int64*)pte);

	int type = oldPte & kPteTypeMask;
	uint64_t addr = oldPte & kPteAddrMask;

	if ((level == 3 && type == kPteTypeL3Page) || (level < 3 && type == kPteTypeL12Block)) {
		if (!wr && (oldPte & kAttrAF) == 0) {
			uint64_t newPte = oldPte | kAttrAF;
			if ((uint64_t)atomic_test_and_set64((int64*)pte, newPte, oldPte) != oldPte)
				return true; // If something changed, handle it by taking another fault
			asm("dsb ishst");
			asm("isb");
			return true;
		}
		if (wr && (oldPte & kAttrSWDBM) != 0 && (oldPte & kAttrAPReadOnly) != 0) {
			uint64_t newPte = oldPte & ~kAttrAPReadOnly;
			if ((uint64_t)atomic_test_and_set64((int64*)pte, newPte, oldPte) != oldPte)
				return true;

			uint64_t asid = READ_SPECIALREG(TTBR0_EL1) >> 48;
			flush_va_if_accessed(oldPte, va, asid);

			return true;
		}
	} else if (level < 3 && type == kPteTypeL012Table) {
		return fixup_entry(addr, level + 1, va, wr);
	}

	return false;
}


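// Run deferred work after an exception has been handled: invoke a pending
// post-interrupt callback if one was installed on the thread, or reschedule if
// the scheduler was asked to be invoked.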
void
after_exception()
{
	Thread* thread = thread_get_current_thread();
	cpu_status state = disable_interrupts();
	if (thread->post_interrupt_callback != NULL) {
		void (*callback)(void*) = thread->post_interrupt_callback;
		void* data = thread->post_interrupt_data;

		thread->post_interrupt_callback = NULL;
		thread->post_interrupt_data = NULL;

		restore_interrupts(state);

		callback(data);
	} else if (thread->cpu->invoke_scheduler) {
		SpinLocker schedulerLocker(thread->scheduler_lock);
		scheduler_reschedule(B_THREAD_READY);
		schedulerLocker.Unlock();
		restore_interrupts(state);
	}
}


// Little helper class for handling the
// iframe stack as used by KDL.
class IFrameScope {
public:
	IFrameScope(struct iframe *iframe) {
		fThread = thread_get_current_thread();
		if (fThread)
			arm64_push_iframe(&fThread->arch_info.iframes, iframe);
		else
			arm64_push_iframe(&gBootFrameStack, iframe);
	}

	virtual ~IFrameScope() {
		// pop iframe
		if (fThread)
			arm64_pop_iframe(&fThread->arch_info.iframes);
		else
			arm64_pop_iframe(&gBootFrameStack);
	}
private:
	Thread* fThread;
};


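// Entry point for synchronous exceptions: instruction and data aborts
// (page faults) as well as SVC-based syscalls.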
extern "C" void
do_sync_handler(iframe * frame)
{
#ifdef TRACE_ARCH_INT_IFRAMES
	print_iframe("Sync abort", frame);
#endif

	IFrameScope scope(frame);

	bool isExec = false;
	switch (ESR_ELx_EXCEPTION(frame->esr)) {
		case EXCP_INSN_ABORT_L:
		case EXCP_INSN_ABORT:
			isExec = true;
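			// fall through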
		case EXCP_DATA_ABORT_L:
		case EXCP_DATA_ABORT:
		{
			bool write = (frame->esr & ISS_DATA_WnR) != 0;
			bool known = false;

			int initialLevel = VMSAv8TranslationMap::CalcStartLevel(48, 12);
			phys_addr_t ptPa;
			bool addrType = (frame->far & (1UL << 63)) != 0;
			if (addrType)
				ptPa = READ_SPECIALREG(TTBR1_EL1);
			else
				ptPa = READ_SPECIALREG(TTBR0_EL1);
			ptPa &= kTtbrBasePhysAddrMask;

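			// Access flag and permission faults may be resolved by fixing up
			// the page table entry in place; translation faults and anything
			// the fixup could not handle go through the generic fault path
			// below.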
			switch (frame->esr & ISS_DATA_DFSC_MASK) {
				case ISS_DATA_DFSC_TF_L0:
				case ISS_DATA_DFSC_TF_L1:
				case ISS_DATA_DFSC_TF_L2:
				case ISS_DATA_DFSC_TF_L3:
					known = true;
					break;

				case ISS_DATA_DFSC_AFF_L1:
				case ISS_DATA_DFSC_AFF_L2:
				case ISS_DATA_DFSC_AFF_L3:
					known = true;
					if (fixup_entry(ptPa, initialLevel, frame->far, false))
						return;
					break;

				case ISS_DATA_DFSC_PF_L1:
				case ISS_DATA_DFSC_PF_L2:
				case ISS_DATA_DFSC_PF_L3:
					known = true;
					if (write && fixup_entry(ptPa, initialLevel, frame->far, true))
						return;
					break;
			}

			if (!known)
				break;

			if (debug_debugger_running()) {
				Thread* thread = thread_get_current_thread();
				if (thread != NULL) {
					cpu_ent* cpu = &gCPU[smp_get_current_cpu()];
					if (cpu->fault_handler != 0) {
						debug_set_page_fault_info(frame->far, frame->elr,
							write ? DEBUG_PAGE_FAULT_WRITE : 0);
						frame->elr = cpu->fault_handler;
						frame->sp = cpu->fault_handler_stack_pointer;
						return;
					}
				}
			}

			Thread *thread = thread_get_current_thread();
			ASSERT(thread);

			bool isUser = (frame->spsr & PSR_M_MASK) == PSR_M_EL0t;

			if ((frame->spsr & PSR_I) != 0) {
				// interrupts disabled
				uintptr_t handler = reinterpret_cast<uintptr_t>(thread->fault_handler);
				if (thread->fault_handler != 0) {
					frame->elr = handler;
					return;
				}
			} else if (thread->page_faults_allowed != 0) {
				dprintf("PF: %lx\n", frame->far);
				enable_interrupts();
				addr_t ret = 0;
				vm_page_fault(frame->far, frame->elr, write, isExec, isUser, &ret);
				if (ret != 0)
					frame->elr = ret;
				return;
			}

			panic("unhandled pagefault! FAR=%lx ELR=%lx ESR=%lx",
				frame->far, frame->elr, frame->esr);
			break;
		}

		case EXCP_SVC64:
		{
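			// The SVC immediate encodes the syscall: the low 5 bits hold the
			// number of 64-bit arguments, the remaining bits the syscall
			// number. The first eight arguments are passed in x0-x7, any
			// further ones on the userland stack.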
			uint32 imm = (frame->esr & 0xffff);

			uint32 count = imm & 0x1f;
			uint32 syscall = imm >> 5;

			uint64_t args[20];
			if (count > 20) {
				frame->x[0] = B_ERROR;
				return;
			}

			memset(args, 0, sizeof(args));
			memcpy(args, frame->x, (count < 8 ? count : 8) * 8);

			if (count > 8) {
				if (!IS_USER_ADDRESS(frame->sp)
					|| user_memcpy(&args[8], (void*)frame->sp, (count - 8) * 8) != B_OK) {
					frame->x[0] = B_BAD_ADDRESS;
					return;
				}
			}

			_fp_save(&frame->fpu);

			thread_at_kernel_entry(system_time());

			enable_interrupts();
			syscall_dispatcher(syscall, (void*)args, &frame->x[0]);

			{
				disable_interrupts();
				atomic_and(&thread_get_current_thread()->flags, ~THREAD_FLAGS_SYSCALL_RESTARTED);
				if ((thread_get_current_thread()->flags
						& (THREAD_FLAGS_SIGNALS_PENDING
						| THREAD_FLAGS_DEBUG_THREAD
						| THREAD_FLAGS_TRAP_FOR_CORE_DUMP)) != 0) {
					enable_interrupts();
					thread_at_kernel_exit();
				} else {
					thread_at_kernel_exit_no_signals();
				}
				if ((THREAD_FLAGS_RESTART_SYSCALL & thread_get_current_thread()->flags) != 0) {
					panic("syscall restart");
				}
			}

			_fp_restore(&frame->fpu);

			return;
		}
	}

	panic("unhandled exception! FAR=%lx ELR=%lx ESR=%lx (EC=%lx)",
		frame->far, frame->elr, frame->esr, (frame->esr >> 26) & 0x3f);
}


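// Entry point for SError exceptions; nothing can be done about these here, so
// just panic.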
extern "C" void
do_error_handler(iframe * frame)
{
#ifdef TRACE_ARCH_INT_IFRAMES
	print_iframe("Error", frame);
#endif

	IFrameScope scope(frame);

	panic("unhandled error! FAR=%lx ELR=%lx ESR=%lx", frame->far, frame->elr, frame->esr);
}


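// Entry point for IRQs: save the FPU state, let the interrupt controller
// dispatch the interrupt, and run any deferred work before returning.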
extern "C" void
do_irq_handler(iframe * frame)
{
#ifdef TRACE_ARCH_INT_IFRAMES
	print_iframe("IRQ", frame);
#endif

	IFrameScope scope(frame);

	_fp_save(&frame->fpu);

	InterruptController *ic = InterruptController::Get();
	if (ic != NULL)
		ic->HandleInterrupt();

	after_exception();

	_fp_restore(&frame->fpu);
}


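// Entry point for FIQs; these are not used, so treat them as fatal.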
extern "C" void
do_fiq_handler(iframe * frame)
{
#ifdef TRACE_ARCH_INT_IFRAMES
	print_iframe("FIQ", frame);
#endif

	IFrameScope scope(frame);

	panic("do_fiq_handler");
}