/*
 * Copyright 2019-2022 Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */
#include <int.h>

#include <arch/smp.h>
#include <boot/kernel_args.h>
#include <device_manager.h>
#include <kscheduler.h>
#include <ksyscalls.h>
#include <interrupt_controller.h>
#include <smp.h>
#include <thread.h>
#include <timer.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <util/kernel_cpp.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include "syscall_numbers.h"
#include "VMSAv8TranslationMap.h"
#include <string.h>

#include "soc.h"

#define TRACE_ARCH_INT
#ifdef TRACE_ARCH_INT
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

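// Per-IRQ enable/disable is presumably left to the interrupt controller
// driver; these hooks are still no-ops on arm64.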
void
arch_int_enable_io_interrupt(int irq)
{
}


void
arch_int_disable_io_interrupt(int irq)
{
}


int32
arch_int_assign_to_cpu(int32 irq, int32 cpu)
{
	// Not yet supported.
	return 0;
}


status_t
arch_int_init(kernel_args *args)
{
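	// Reserve 128 I/O interrupt vectors starting at vector 32; the first 32
	// vectors are presumably kept free for internal uses such as IPIs.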
	reserve_io_interrupt_vectors(128, 32, INTERRUPT_TYPE_IRQ);
	return B_OK;
}


status_t
arch_int_init_post_vm(kernel_args *args)
{
	return B_OK;
}


status_t
arch_int_init_io(kernel_args* args)
{
	return B_OK;
}


status_t
arch_int_init_post_device_manager(struct kernel_args *args)
{
	return B_ENTRY_NOT_FOUND;
}


// TODO: reuse things from VMSAv8TranslationMap


static int page_bits = 12;

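// VMSAv8-64 descriptor bits used below: bits [47:12] hold the output address
// (4 KiB granule), bit 10 is the Access Flag (AF), bit 7 is AP[2] (set means
// read-only), and bit 55 is one of the software-defined bits, apparently used
// here as a software DBM (dirty bit modifier) flag.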
static constexpr uint64_t kPteAddrMask = (((1UL << 36) - 1) << 12);
static constexpr uint64_t kPteAttrMask = ~(kPteAddrMask | 0x3);
static constexpr uint64_t kAttrSWDBM = (1UL << 55);
static constexpr uint64_t kAttrAF = (1UL << 10);
static constexpr uint64_t kAttrAP2 = (1UL << 7);

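// Page tables are reachable through the physical map area at KERNEL_PMAP_BASE,
// so a table's physical address becomes a usable pointer by a simple offset.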
static uint64_t*
TableFromPa(phys_addr_t pa)
{
	return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
}

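// Walk the translation table rooted at ptPa and try to resolve an access flag
// or permission fault in place: set the AF bit on first access, or clear AP[2]
// (making the page writable) when the software DBM bit permits it. Returns
// true if the entry was fixed and the faulting access can simply be retried.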
static bool
fixup_entry(phys_addr_t ptPa, int level, addr_t va, bool wr)
{
	int tableBits = page_bits - 3;
	uint64_t tableMask = (1UL << tableBits) - 1;

	int shift = tableBits * (3 - level) + page_bits;
	uint64_t entrySize = 1UL << shift;
	uint64_t entryMask = entrySize - 1;

	int index = (va >> shift) & tableMask;

	uint64_t *pte = &TableFromPa(ptPa)[index];

	int type = *pte & 0x3;
	uint64_t addr = *pte & kPteAddrMask;

	if ((level == 3 && type == 0x3) || (level < 3 && type == 0x1)) {
		if (!wr && (*pte & kAttrAF) == 0) {
			atomic_or64((int64*)pte, kAttrAF);
			return true;
		}
		if (wr && (*pte & kAttrSWDBM) != 0 && (*pte & kAttrAP2) != 0) {
			atomic_and64((int64*)pte, ~kAttrAP2);
			asm("tlbi vaae1is, %0 \n dsb ish"::"r"(va >> page_bits));
			return true;
		}
	} else if (level < 3 && type == 0x3) {
		return fixup_entry(addr, level + 1, va, wr);
	}

	return false;
}

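// Give the scheduler a chance to run before returning from the exception, if
// an interrupt handler asked for a reschedule.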
void
after_exception()
{
	Thread* thread = thread_get_current_thread();
	if (thread->cpu->invoke_scheduler) {
		disable_interrupts();
		SpinLocker schedulerLocker(thread->scheduler_lock);
		scheduler_reschedule(B_THREAD_READY);
	}
}

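// Synchronous exception handler: deals with instruction/data aborts (page
// faults) and SVC (system calls); anything else ends in a panic.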
extern "C" void
do_sync_handler(iframe * frame)
{
	bool isExec = false;
	switch (ESR_ELx_EXCEPTION(frame->esr)) {
		case EXCP_INSN_ABORT_L:
		case EXCP_INSN_ABORT:
			isExec = true;
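			// fall through: instruction aborts take the same path as data
			// aborts below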
		case EXCP_DATA_ABORT_L:
		case EXCP_DATA_ABORT:
		{
			bool write = (frame->esr & ISS_DATA_WnR) != 0;
			bool known = false;

			int initialLevel = VMSAv8TranslationMap::CalcStartLevel(48, 12);
			phys_addr_t ptPa;
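			// Bit 63 of the faulting address selects the translation table:
			// kernel (high) addresses are translated via TTBR1_EL1, user (low)
			// addresses via TTBR0_EL1.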
			bool addrType = (frame->far & (1UL << 63)) != 0;
			if (addrType)
				ptPa = READ_SPECIALREG(TTBR1_EL1);
			else
				ptPa = READ_SPECIALREG(TTBR0_EL1);

			switch (frame->esr & ISS_DATA_DFSC_MASK) {
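				// Translation faults: there is no valid mapping yet, so fall
				// back to the generic VM fault handling below.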
				case ISS_DATA_DFSC_TF_L0:
				case ISS_DATA_DFSC_TF_L1:
				case ISS_DATA_DFSC_TF_L2:
				case ISS_DATA_DFSC_TF_L3:
					known = true;
				break;

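				// Access flag faults: the mapping is valid but not yet marked
				// as accessed; set AF and retry the access.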
				case ISS_DATA_DFSC_AFF_L1:
				case ISS_DATA_DFSC_AFF_L2:
				case ISS_DATA_DFSC_AFF_L3:
					known = true;
					if (fixup_entry(ptPa, initialLevel, frame->far, false))
						return;
				break;

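				// Permission faults on writes: emulate a hardware dirty bit by
				// clearing AP[2] where the software DBM bit allows it.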
				case ISS_DATA_DFSC_PF_L1:
				case ISS_DATA_DFSC_PF_L2:
				case ISS_DATA_DFSC_PF_L3:
					known = true;
					if (write && fixup_entry(ptPa, initialLevel, frame->far, true))
						return;
				break;
			}

			if (!known)
				break;

			if (debug_debugger_running()) {
				Thread* thread = thread_get_current_thread();
				if (thread != NULL) {
					cpu_ent* cpu = &gCPU[smp_get_current_cpu()];
					if (cpu->fault_handler != 0) {
						debug_set_page_fault_info(frame->far, frame->elr,
							write ? DEBUG_PAGE_FAULT_WRITE : 0);
						frame->elr = cpu->fault_handler;
						frame->sp = cpu->fault_handler_stack_pointer;
						return;
					}
				}
			}

			Thread *thread = thread_get_current_thread();
			ASSERT(thread);

			bool isUser = (frame->spsr & PSR_M_MASK) == PSR_M_EL0t;

			if ((frame->spsr & PSR_I) != 0) {
				// interrupts disabled
				uintptr_t handler = reinterpret_cast<uintptr_t>(thread->fault_handler);
				if (thread->fault_handler != 0) {
					frame->elr = handler;
					return;
				}
			} else if (thread->page_faults_allowed != 0) {
				dprintf("PF: %lx\n", frame->far);
				enable_interrupts();
				addr_t ret = 0;
				vm_page_fault(frame->far, frame->elr, write, isExec, isUser, &ret);
				if (ret != 0)
					frame->elr = ret;
				return;
			}

			panic("unhandled pagefault! FAR=%lx ELR=%lx ESR=%lx",
				frame->far, frame->elr, frame->esr);
			break;
		}

		case EXCP_SVC64:
		{
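			// The SVC immediate (presumably emitted by the userland syscall
			// stubs) packs the argument count into its low 5 bits and the
			// syscall number into the remaining bits. The first eight
			// arguments are taken from x0-x7, any further ones from the
			// user stack.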
			uint32 imm = (frame->esr & 0xffff);

			uint32 count = imm & 0x1f;
			uint32 syscall = imm >> 5;

			uint64_t args[20];
			if (count > 20) {
				frame->x[0] = B_ERROR;
				return;
			}

			memset(args, 0, sizeof(args));
			memcpy(args, frame->x, (count < 8 ? count : 8) * 8);

			if (count > 8) {
				if (!IS_USER_ADDRESS(frame->sp)
					|| user_memcpy(&args[8], (void*)frame->sp, (count - 8) * 8) != B_OK) {
					frame->x[0] = B_BAD_ADDRESS;
					return;
				}
			}

			thread_at_kernel_entry(system_time());

			enable_interrupts();
			syscall_dispatcher(syscall, (void*)args, &frame->x[0]);

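			// Kernel-exit bookkeeping: when signals, a debugger notification
			// or a core-dump trap are pending, take the full exit path with
			// interrupts enabled; otherwise use the cheaper variant. Syscall
			// restarting is not implemented yet, hence the panic below.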
			{
				disable_interrupts();
				atomic_and(&thread_get_current_thread()->flags, ~THREAD_FLAGS_SYSCALL_RESTARTED);
				if ((thread_get_current_thread()->flags
					& (THREAD_FLAGS_SIGNALS_PENDING
					| THREAD_FLAGS_DEBUG_THREAD
					| THREAD_FLAGS_TRAP_FOR_CORE_DUMP)) != 0) {
					enable_interrupts();
					thread_at_kernel_exit();
				} else {
					thread_at_kernel_exit_no_signals();
				}
				if ((THREAD_FLAGS_RESTART_SYSCALL & thread_get_current_thread()->flags) != 0) {
					panic("syscall restart");
				}
			}

			return;
		}
	}

	panic("unhandled exception! FAR=%lx ELR=%lx ESR=%lx (EC=%lx)",
		frame->far, frame->elr, frame->esr, (frame->esr >> 26) & 0x3f);
}

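// SErrors (asynchronous external aborts) land here; nothing is recovered yet,
// so just panic with the fault registers.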
extern "C" void
do_error_handler(iframe * frame)
{
	panic("unhandled error! FAR=%lx ELR=%lx ESR=%lx", frame->far, frame->elr, frame->esr);
}

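// Hardware interrupt: dispatch to the registered interrupt controller driver
// (if any), then let the scheduler run if a handler requested it.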
extern "C" void
do_irq_handler(iframe * frame)
{
	InterruptController *ic = InterruptController::Get();
	if (ic != NULL)
		ic->HandleInterrupt();

	after_exception();
}

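// FIQs are currently unused, so any FIQ is treated as fatal.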
extern "C" void
do_fiq_handler(iframe * frame)
{
	panic("do_fiq_handler");
}