xref: /haiku/src/system/kernel/arch/riscv64/arch_int.cpp (revision caed67a8cba83913b9c21ac2b06ebc6bd1cb3111)
1 /*
2  * Copyright 2003-2011, Haiku, Inc. All rights reserved.
3  * Distributed under the terms of the MIT License.
4  *
5  * Authors:
6  *      Adrien Destugues, pulkomandy@pulkomandy.tk
7  */
8 
9 
10 #include <int.h>
11 #include <cpu.h>
12 #include <thread.h>
13 #include <vm/vm_priv.h>
14 #include <ksyscalls.h>
15 #include <syscall_numbers.h>
16 #include <arch_cpu_defs.h>
17 #include <arch_thread_types.h>
18 #include <arch/debug.h>
19 #include <util/AutoLock.h>
20 #include <Htif.h>
21 #include <Plic.h>
22 #include <Clint.h>
23 #include <AutoDeleterDrivers.h>
24 #include <ScopeExit.h>
25 #include "RISCV64VMTranslationMap.h"
26 
27 #include <algorithm>
28 
29 
// PLIC context ID for each CPU, indexed by CPU index. Filled from
// kernel_args in arch_int_init(); used to claim/complete and route
// external interrupts per CPU.
static uint32 sPlicContexts[SMP_MAX_CPUS];
31 
32 
33 //#pragma mark -
34 
35 static void
36 SendSignal(debug_exception_type type, uint32 signalNumber, int32 signalCode,
37 	addr_t signalAddress = 0, int32 signalError = B_ERROR)
38 {
39 	if (SstatusReg{.val = Sstatus()}.spp == modeU) {
40 		struct sigaction action;
41 		Thread* thread = thread_get_current_thread();
42 
43 		enable_interrupts();
44 
45 		// If the thread has a signal handler for the signal, we simply send it
46 		// the signal. Otherwise we notify the user debugger first.
47 		if ((sigaction(signalNumber, NULL, &action) == 0
48 				&& action.sa_handler != SIG_DFL
49 				&& action.sa_handler != SIG_IGN)
50 			|| user_debug_exception_occurred(type, signalNumber)) {
51 			Signal signal(signalNumber, signalCode, signalError,
52 				thread->team->id);
53 			signal.SetAddress((void*)signalAddress);
54 			send_signal_to_thread(thread, signal, 0);
55 		}
56 	} else {
57 		panic("Unexpected exception occurred in kernel mode!");
58 	}
59 }
60 
61 
62 static void
63 AfterInterrupt()
64 {
65 	if (debug_debugger_running())
66 		return;
67 
68 	Thread* thread = thread_get_current_thread();
69 	cpu_status state = disable_interrupts();
70 	if (thread->post_interrupt_callback != NULL) {
71 		void (*callback)(void*) = thread->post_interrupt_callback;
72 		void* data = thread->post_interrupt_data;
73 
74 		thread->post_interrupt_callback = NULL;
75 		thread->post_interrupt_data = NULL;
76 
77 		restore_interrupts(state);
78 
79 		callback(data);
80 	} else if (thread->cpu->invoke_scheduler) {
81 		SpinLocker schedulerLocker(thread->scheduler_lock);
82 		scheduler_reschedule(B_THREAD_READY);
83 		schedulerLocker.Unlock();
84 		restore_interrupts(state);
85 	}
86 }
87 
88 
89 static bool
90 SetAccessedFlags(addr_t addr, bool isWrite)
91 {
92 	VMAddressSpacePutter addressSpace;
93 	if (IS_KERNEL_ADDRESS(addr))
94 		addressSpace.SetTo(VMAddressSpace::GetKernel());
95 	else if (IS_USER_ADDRESS(addr))
96 		addressSpace.SetTo(VMAddressSpace::GetCurrent());
97 
98 	if(!addressSpace.IsSet())
99 		return false;
100 
101 	RISCV64VMTranslationMap* map
102 		= (RISCV64VMTranslationMap*)addressSpace->TranslationMap();
103 
104 	phys_addr_t physAdr;
105 	uint32 pageFlags;
106 	map->QueryInterrupt(addr, &physAdr, &pageFlags);
107 
108 	if ((PAGE_PRESENT & pageFlags) == 0)
109 		return false;
110 
111 	if (isWrite) {
112 		if (
113 			((B_WRITE_AREA | B_KERNEL_WRITE_AREA) & pageFlags) != 0
114 			&& ((PAGE_ACCESSED | PAGE_MODIFIED) & pageFlags)
115 				!= (PAGE_ACCESSED | PAGE_MODIFIED)
116 		) {
117 			map->SetFlags(addr, PAGE_ACCESSED | PAGE_MODIFIED);
118 			return true;
119 		}
120 	} else {
121 		if (
122 			((B_READ_AREA | B_KERNEL_READ_AREA) & pageFlags) != 0
123 			&& (PAGE_ACCESSED & pageFlags) == 0
124 		) {
125 			map->SetFlags(addr, PAGE_ACCESSED);
126 			return true;
127 		}
128 	}
129 	return false;
130 }
131 
132 
133 extern "C" void
134 STrap(iframe* frame)
135 {
136 	switch (frame->cause) {
137 		case causeExecPageFault:
138 		case causeLoadPageFault:
139 		case causeStorePageFault: {
140 			if (SetAccessedFlags(Stval(), frame->cause == causeStorePageFault))
141 				return;
142 		}
143 	}
144 
145 	if (SstatusReg{.val = frame->status}.spp == modeU) {
146 		thread_get_current_thread()->arch_info.userFrame = frame;
147 		thread_get_current_thread()->arch_info.oldA0 = frame->a0;
148 		thread_at_kernel_entry(system_time());
149 	}
150 	const auto& kernelExit = ScopeExit([&]() {
151 		if (SstatusReg{.val = frame->status}.spp == modeU) {
152 			disable_interrupts();
153 			atomic_and(&thread_get_current_thread()->flags, ~THREAD_FLAGS_SYSCALL_RESTARTED);
154 			if ((thread_get_current_thread()->flags
155 				& (THREAD_FLAGS_SIGNALS_PENDING
156 				| THREAD_FLAGS_DEBUG_THREAD
157 				| THREAD_FLAGS_TRAP_FOR_CORE_DUMP)) != 0) {
158 				enable_interrupts();
159 				thread_at_kernel_exit();
160 			} else {
161 				thread_at_kernel_exit_no_signals();
162 			}
163 			if ((THREAD_FLAGS_RESTART_SYSCALL & thread_get_current_thread()->flags) != 0) {
164 				atomic_and(&thread_get_current_thread()->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
165 				atomic_or(&thread_get_current_thread()->flags, THREAD_FLAGS_SYSCALL_RESTARTED);
166 
167 				frame->a0 = thread_get_current_thread()->arch_info.oldA0;
168 				frame->epc -= 4;
169 			}
170 			thread_get_current_thread()->arch_info.userFrame = NULL;
171 		}
172 	});
173 
174 	switch (frame->cause) {
175 		case causeIllegalInst: {
176 			return SendSignal(B_INVALID_OPCODE_EXCEPTION, SIGILL, ILL_ILLOPC,
177 				frame->epc);
178 		}
179 		case causeExecMisalign:
180 		case causeLoadMisalign:
181 		case causeStoreMisalign: {
182 			return SendSignal(B_ALIGNMENT_EXCEPTION, SIGBUS, BUS_ADRALN,
183 				Stval());
184 		}
185 		case causeBreakpoint: {
186 			if (SstatusReg{.val = frame->status}.spp == modeU) {
187 				user_debug_breakpoint_hit(false);
188 			} else {
189 				panic("hit kernel breakpoint");
190 			}
191 			return;
192 		}
193 		case causeExecAccessFault:
194 		case causeLoadAccessFault:
195 		case causeStoreAccessFault: {
196 			return SendSignal(B_SEGMENT_VIOLATION, SIGBUS, BUS_ADRERR,
197 				Stval());
198 		}
199 		case causeExecPageFault:
200 		case causeLoadPageFault:
201 		case causeStorePageFault: {
202 			uint64 stval = Stval();
203 
204 			if (debug_debugger_running()) {
205 				Thread* thread = thread_get_current_thread();
206 				if (thread != NULL) {
207 					cpu_ent* cpu = &gCPU[smp_get_current_cpu()];
208 					if (cpu->fault_handler != 0) {
209 						debug_set_page_fault_info(stval, frame->epc,
210 							(frame->cause == causeStorePageFault)
211 								? DEBUG_PAGE_FAULT_WRITE : 0);
212 						frame->epc = cpu->fault_handler;
213 						frame->sp = cpu->fault_handler_stack_pointer;
214 						return;
215 					}
216 
217 					if (thread->fault_handler != 0) {
218 						kprintf("ERROR: thread::fault_handler used in kernel "
219 							"debugger!\n");
220 						debug_set_page_fault_info(stval, frame->epc,
221 							frame->cause == causeStorePageFault
222 								? DEBUG_PAGE_FAULT_WRITE : 0);
223 						frame->epc = (addr_t)thread->fault_handler;
224 						return;
225 					}
226 				}
227 
228 				panic("page fault in debugger without fault handler! Touching "
229 					"address %p from ip %p\n", (void*)stval, (void*)frame->epc);
230 				return;
231 			}
232 
233 			if (SstatusReg{.val = frame->status}.pie == 0) {
234 				// user_memcpy() failure
235 				Thread* thread = thread_get_current_thread();
236 				if (thread != NULL && thread->fault_handler != 0) {
237 					addr_t handler = (addr_t)(thread->fault_handler);
238 					if (frame->epc != handler) {
239 						frame->epc = handler;
240 						return;
241 					}
242 				}
243 				panic("page fault with interrupts disabled@!dump_virt_page %#" B_PRIx64, stval);
244 			}
245 
246 			addr_t newIP = 0;
247 			enable_interrupts();
248 
249 			vm_page_fault(stval, frame->epc, frame->cause == causeStorePageFault,
250 				frame->cause == causeExecPageFault,
251 				SstatusReg{.val = frame->status}.spp == modeU, &newIP);
252 
253 			if (newIP != 0)
254 				frame->epc = newIP;
255 
256 			return;
257 		}
258 		case causeInterrupt + sSoftInt: {
259 			ClearBitsSip(1 << sSoftInt);
260 			// dprintf("sSoftInt(%" B_PRId32 ")\n", smp_get_current_cpu());
261 			smp_intercpu_int_handler(smp_get_current_cpu());
262 			AfterInterrupt();
263 			return;
264 		}
265 		case causeInterrupt + sTimerInt: {
266 			ClearBitsSie(1 << sTimerInt);
267 			// dprintf("sTimerInt(%" B_PRId32 ")\n", smp_get_current_cpu());
268 			timer_interrupt();
269 			AfterInterrupt();
270 			return;
271 		}
272 		case causeInterrupt + sExternInt: {
273 			uint64 irq = gPlicRegs->contexts[sPlicContexts[smp_get_current_cpu()]].claimAndComplete;
274 			int_io_interrupt_handler(irq, true);
275 			gPlicRegs->contexts[sPlicContexts[smp_get_current_cpu()]].claimAndComplete = irq;
276 			AfterInterrupt();
277 			return;
278 		}
279 		case causeUEcall: {
280 			frame->epc += 4; // skip ecall
281 			uint64 syscall = frame->t0;
282 			uint64 args[20];
283 			if (syscall < (uint64)kSyscallCount) {
284 				uint32 argCnt = kExtendedSyscallInfos[syscall].parameter_count;
285 				memcpy(&args[0], &frame->a0,
286 					sizeof(uint64)*std::min<uint32>(argCnt, 8));
287 				if (argCnt > 8) {
288 					if (status_t res = user_memcpy(&args[8], (void*)frame->sp,
289 						sizeof(uint64)*(argCnt - 8)) < B_OK) {
290 						dprintf("can't read syscall arguments on user "
291 							"stack\n");
292 						frame->a0 = res;
293 						return;
294 					}
295 				}
296 			}
297 
298 			enable_interrupts();
299 			uint64 returnValue = 0;
300 			syscall_dispatcher(syscall, (void*)args, &returnValue);
301 			frame->a0 = returnValue;
302 			return;
303 		}
304 	}
305 	panic("unhandled STrap");
306 }
307 
308 
309 //#pragma mark -
310 
311 status_t
312 arch_int_init(kernel_args* args)
313 {
314 	dprintf("arch_int_init()\n");
315 
316 	for (uint32 i = 0; i < args->num_cpus; i++) {
317 		dprintf("  CPU %" B_PRIu32 ":\n", i);
318 		dprintf("    hartId: %" B_PRIu32 "\n", args->arch_args.hartIds[i]);
319 		dprintf("    plicContext: %" B_PRIu32 "\n", args->arch_args.plicContexts[i]);
320 	}
321 
322 	for (uint32 i = 0; i < args->num_cpus; i++)
323 		sPlicContexts[i] = args->arch_args.plicContexts[i];
324 
325 	// TODO: read from FDT
326 	reserve_io_interrupt_vectors(128, 0, INTERRUPT_TYPE_IRQ);
327 
328 	for (uint32 i = 0; i < args->num_cpus; i++)
329 		gPlicRegs->contexts[sPlicContexts[i]].priorityThreshold = 0;
330 
331 	return B_OK;
332 }
333 
334 
// Nothing to do after the VM comes up on this architecture.
status_t
arch_int_init_post_vm(kernel_args* args)
{
	return B_OK;
}
340 
341 
// Nothing to do after the device manager comes up on this architecture.
status_t
arch_int_init_post_device_manager(struct kernel_args* args)
{
	return B_OK;
}
347 
348 
// No additional I/O interrupt setup needed; the PLIC was configured in
// arch_int_init().
status_t
arch_int_init_io(kernel_args* args)
{
	return B_OK;
}
354 
355 
356 void
357 arch_int_enable_io_interrupt(int32 irq)
358 {
359 	dprintf("arch_int_enable_io_interrupt(%" B_PRId32 ")\n", irq);
360 	gPlicRegs->priority[irq] = 1;
361 	gPlicRegs->enable[sPlicContexts[0]][irq / 32] |= 1 << (irq % 32);
362 }
363 
364 
365 void
366 arch_int_disable_io_interrupt(int32 irq)
367 {
368 	dprintf("arch_int_disable_io_interrupt(%" B_PRId32 ")\n", irq);
369 	gPlicRegs->priority[irq] = 0;
370 	gPlicRegs->enable[sPlicContexts[0]][irq / 32] &= ~(1 << (irq % 32));
371 }
372 
373 
// Per-CPU interrupt routing is not implemented; all IRQs stay on the boot
// CPU's PLIC context (see arch_int_enable_io_interrupt()).
int32
arch_int_assign_to_cpu(int32 irq, int32 cpu)
{
	// Not yet supported.
	return 0;
}
380 
381 
382 #undef arch_int_enable_interrupts
383 #undef arch_int_disable_interrupts
384 #undef arch_int_restore_interrupts
385 #undef arch_int_are_interrupts_enabled
386 
387 
// Out-of-line variant for callers that can't use the inline/macro form
// (the macro is #undef'ed above).
extern "C" void
arch_int_enable_interrupts()
{
	arch_int_enable_interrupts_inline();
}
393 
394 
// Out-of-line variant; returns the previous interrupt state for a later
// arch_int_restore_interrupts().
extern "C" int
arch_int_disable_interrupts()
{
	return arch_int_disable_interrupts_inline();
}
400 
401 
// Out-of-line variant; restores the state previously returned by
// arch_int_disable_interrupts().
extern "C" void
arch_int_restore_interrupts(int oldState)
{
	arch_int_restore_interrupts_inline(oldState);
}
407 
408 
// Out-of-line variant reporting whether interrupts are currently enabled.
extern "C" bool
arch_int_are_interrupts_enabled()
{
	return arch_int_are_interrupts_enabled_inline();
}
414