xref: /haiku/src/system/boot/platform/efi/arch/riscv64/arch_smp.cpp (revision 3d4afef9cba2f328e238089d4609d00d4b1524f3)
/*
 * Copyright 2019-2020, Haiku, Inc. All rights reserved.
 * Released under the terms of the MIT License.
 */


#include "arch_smp.h"

#include <string.h>

#include <KernelExport.h>

#include <kernel.h>
#include <safemode.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <boot/menu.h>
#include <platform/sbi/sbi_syscalls.h>

#include "mmu.h"


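// Define TRACE_SMP to get verbose tracing output from this file.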
//#define TRACE_SMP
#ifdef TRACE_SMP
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


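// Defined elsewhere in the loader: installs the kernel page table (satp),
// switches to the given kernel stack and jumps to the kernel entry point,
// handing over the kernel args.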
extern "C" void arch_enter_kernel(uint64 satp, struct kernel_args *kernelArgs,
	addr_t kernelEntry, addr_t kernelStackTop);


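// Parameters handed to a secondary hart when it is started: the kernel page
// table (satp) and the kernel entry point it should jump to.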
struct CpuEntryInfo {
	uint64 satp;
	uint64 kernelEntry;
};


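// CPUs registered through arch_smp_register_cpu(), presumably while scanning
// the device tree; sCpuCount tracks how many slots are in use.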
static platform_cpu_info sCpus[SMP_MAX_CPUS];
uint32 sCpuCount = 0;


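// Entry point for a secondary hart started via SBI: per the HSM calling
// convention the hart ID arrives in a0 and the opaque pointer (our
// CpuEntryInfo) in a1, matching this signature. Note: this indexes
// gKernelArgs.cpu_kstack[] by hart ID, which assumes hart IDs line up with
// the CPU indices used when the stacks were allocated.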
static void
CpuEntry(int hartId, CpuEntryInfo* info)
{
	arch_enter_kernel(info->satp, &gKernelArgs, info->kernelEntry,
		gKernelArgs.cpu_kstack[hartId].start
		+ gKernelArgs.cpu_kstack[hartId].size);
}


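// Hands out the next free slot in sCpus; once SMP_MAX_CPUS is reached the
// caller gets NULL and the CPU is ignored.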
void
arch_smp_register_cpu(platform_cpu_info** cpu)
{
	dprintf("arch_smp_register_cpu()\n");
	uint32 newCount = sCpuCount + 1;
	if (newCount > SMP_MAX_CPUS) {
		*cpu = NULL;
		return;
	}
	*cpu = &sCpus[sCpuCount];
	sCpuCount = newCount;
}


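// In the boot loader the current CPU is identified by its hart ID.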
int
arch_smp_get_current_cpu(void)
{
	return Mhartid();
}


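// Allocates a kernel stack (including guard pages) for every CPU except the
// boot CPU. Currently short-circuited: only the boot CPU is reported to the
// kernel.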
void
arch_smp_init_other_cpus(void)
{
	// TODO: SMP code disabled for now
	gKernelArgs.num_cpus = 1;
	return;

	gKernelArgs.num_cpus = sCpuCount;

	if (get_safemode_boolean(B_SAFEMODE_DISABLE_SMP, false)) {
		// SMP has been disabled!
		TRACE(("smp disabled per safemode setting\n"));
		gKernelArgs.num_cpus = 1;
	}

	if (gKernelArgs.num_cpus < 2)
		return;

	for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) {
		// create a final stack the trampoline code will put the AP processor on
		void* stack = NULL;
		const size_t size = KERNEL_STACK_SIZE
			+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
		if (platform_allocate_region(&stack, size, 0, false) != B_OK) {
			panic("Unable to allocate AP stack");
		}
		memset(stack, 0, size);
		gKernelArgs.cpu_kstack[i].start = fix_address((uint64_t)stack);
		gKernelArgs.cpu_kstack[i].size = size;
	}
}


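// Starts every secondary hart through the SBI HSM extension (sbi_hart_start)
// and busy-waits until it reports SBI_HART_STATE_STARTED. Currently disabled
// by the early return below.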
void
arch_smp_boot_other_cpus(uint64 satp, uint64 kernel_entry)
{
	// TODO: SMP code disabled for now
	return;

	dprintf("arch_smp_boot_other_cpus()\n");
	for (uint32 i = 0; i < sCpuCount; i++) {
		// TODO: mhartid 0 may not exist, or it may not be a core
		// you're interested in (FU540/FU740 hart 0 is mgmt core.)
		if (0 != sCpus[i].id) {
			sbiret res;
			dprintf("starting CPU %" B_PRIu32 "\n", sCpus[i].id);

			res = sbi_hart_get_status(sCpus[i].id);
			dprintf("[PRE] sbi_hart_get_status() -> (%ld, %ld)\n",
				res.error, res.value);

			CpuEntryInfo info = {.satp = satp, .kernelEntry = kernel_entry};
			res = sbi_hart_start(sCpus[i].id, (addr_t)&CpuEntry, (addr_t)&info);
			dprintf("sbi_hart_start() -> (%ld, %ld)\n", res.error, res.value);

			for (;;) {
				res = sbi_hart_get_status(sCpus[i].id);
				if (res.error < 0 || res.value == SBI_HART_STATE_STARTED)
					break;
			}

			dprintf("[POST] sbi_hart_get_status() -> (%ld, %ld)\n",
				res.error, res.value);
		}
	}
}


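// Adds the "Disable SMP" entry to the safemode menu when more than one CPU
// was detected.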
void
arch_smp_add_safemode_menus(Menu *menu)
{
	MenuItem *item;

	if (gKernelArgs.num_cpus < 2)
		return;

	item = new(nothrow) MenuItem("Disable SMP");
	menu->AddItem(item);
	item->SetData(B_SAFEMODE_DISABLE_SMP);
	item->SetType(MENU_ITEM_MARKABLE);
	item->SetHelpText("Disables all but one CPU core.");
}


void
arch_smp_init(void)
{
}